diff --git a/.bumpversion.cfg b/.bumpversion.cfg index ac3a003ed93..fe31cacfb1a 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.8.6-beta.1 +current_version = 0.9.6-beta.6 tag = False tag_name = {new_version} commit = True @@ -38,20 +38,12 @@ first_value = 1 [bumpversion:file:packages/grid/VERSION] -[bumpversion:file:packages/grid/backend/worker_cpu.dockerfile] +[bumpversion:file:packages/grid/backend/grid/images/worker_cpu.dockerfile] [bumpversion:file:packages/grid/frontend/package.json] [bumpversion:file:packages/grid/helm/syft/Chart.yaml] -[bumpversion:file:packages/grid/podman/podman-kube/podman-syft-kube.yaml] - -[bumpversion:file:packages/grid/podman/podman-kube/podman-syft-kube-config.yaml] - [bumpversion:file:packages/grid/helm/syft/values.yaml] -[bumpversion:file:packages/hagrid/hagrid/manifest_template.yml] - -[bumpversion:file:packages/hagrid/hagrid/deps.py] - [bumpversion:file:packages/syftcli/manifest.yml] diff --git a/.bumpversion_stable.cfg b/.bumpversion_stable.cfg index fd7e8aa4551..3c8380eb3b7 100644 --- a/.bumpversion_stable.cfg +++ b/.bumpversion_stable.cfg @@ -1,19 +1,15 @@ [bumpversion] -current_version = 0.8.5 +current_version = 0.9.5 tag = False tag_name = {new_version} commit = True -parse = +parse = (?P<major>\d+) \. (?P<minor>\d+) \. (?P<patch>\d+) -serialize = +serialize = {major}.{minor}.{patch} [bumpversion:file:packages/syft/src/syft/stable_version.py] - -[bumpversion:file:packages/hagrid/hagrid/stable_version.py] - -[bumpversion:file:packages/hagrid/hagrid/cache.py] diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 36bccce0507..00000000000 --- a/.dockerignore +++ /dev/null @@ -1,11 +0,0 @@ -.tox -.git -.vscode -scripts -.mypy_cache -.benchmarks -docker -packages/syft/src/target -packages/grid/apps/domain/src/nodedatabase.db -packages/grid/apps/network/src/nodedatabase.db -packages/grid/apps/worker/src/nodedatabase.db diff --git a/.github/file-filters.yml b/.github/file-filters.yml index 1d0a44134cc..0d677dcb4e5 100644 --- a/.github/file-filters.yml +++ b/.github/file-filters.yml @@ -27,17 +27,6 @@ backend: - packages/grid/backend/**/*.sh - packages/grid/backend/**/*.mako -hagrid: - - .github/workflows/pr-tests-hagrid.yml - - packages/hagrid/**/*.py - - packages/hagrid/**/*.cfg - - packages/hagrid/**/*.yml - - packages/hagrid/**/*.dockerfile - - packages/hagrid/**/*.toml - - packages/hagrid/**/*.txt - - packages/hagrid/**/*.ini - - packages/hagrid/**/*.sh - syft: - .github/workflows/pr-tests-syft.yml - packages/syft/**/*.py @@ -87,3 +76,17 @@ notebooks: - packages/syft/**/*.ini - packages/syft/**/*.sh - packages/syft/**/*.mako + +notebooks_scenario: + - .github/workflows/pr-tests-syft.yml + - notebooks/scenarios/**/*.ipynb + - packages/syft/**/*.py + - packages/syft/**/*.capnp + - packages/syft/**/*.yml + - packages/syft/**/*.cfg + - packages/syft/**/*.dockerfile + - packages/syft/**/*.toml + - packages/syft/**/*.txt + - packages/syft/**/*.ini + - packages/syft/**/*.sh + - packages/syft/**/*.mako diff --git a/.github/workflows/cd-docs.yml b/.github/workflows/cd-docs.yml index 7d0e32913f1..3931d2bdc16 100644 --- a/.github/workflows/cd-docs.yml +++ b/.github/workflows/cd-docs.yml @@ -5,12 +5,12 @@ on: none: description: "Deploy Syft Documentation" required: false - pull_request: - branches: [dev] - paths: [docs/] - push: - branches: [dev] - paths: [docs/] + # pull_request: + # branches: [dev] + # paths: [docs/] + # push: + # branches: [dev] + # paths: [docs/] jobs: cd-docs: @@ -25,9 +25,10 @@ jobs:
with: python-version: ${{ matrix.python-version }} - - name: Install tox + - name: Install pip dependencies run: | - pip install --upgrade pip uv==0.1.18 tox tox-uv==1.5.1 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - name: Build the docs diff --git a/.github/workflows/cd-feature-branch.yml b/.github/workflows/cd-feature-branch.yml new file mode 100644 index 00000000000..b617e14cc40 --- /dev/null +++ b/.github/workflows/cd-feature-branch.yml @@ -0,0 +1,360 @@ +#TODO: Due to lack of time, this could not be de-duplicated +# from cd-syft.yml, which have a similar structure +name: CD - Feature Branch + +on: + workflow_dispatch: + inputs: + release_version: + description: "Syft Version to Release" + required: true + type: string + + release_branch: + description: "Branch to Release from" + required: true + type: string + + release_platform: + description: "Release Platform" + required: true + default: "TEST_PYPI" + type: choice + options: + - TEST_PYPI + # - REAL_PYPI + # - REAL_AND_TEST_PYPI + +# Prevents concurrent runs of the same workflow +# while the previous run is still in progress +concurrency: + group: "CD - Feature Branch" + cancel-in-progress: false + +jobs: + build-and-push-docker-images: + strategy: + matrix: + runner: [sh-arc-linux-x64, sh-arc-linux-arm64] + runs-on: ${{ matrix.runner }} + + outputs: + server_version: ${{ steps.release_metadata.outputs.server_version }} + + steps: + - name: Setup Python on ${{ matrix.runner }} + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install Git + run: | + sudo apt-get update + sudo apt-get install git -y + + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.release_branch }} + + - name: Check python version + run: | + python --version + python3 --version + which python + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 bump2version==1.0.1 + uv --version + + - name: Generate Release Metadata + id: release_metadata + run: | + if [[ ${{matrix.runner}} == *"x64"* ]]; then + echo "release_platform=linux/amd64" >> $GITHUB_OUTPUT + echo "short_release_platform=amd64" >> $GITHUB_OUTPUT + else + echo "release_platform=linux/arm64" >> $GITHUB_OUTPUT + echo "short_release_platform=arm64" >> $GITHUB_OUTPUT + fi + echo "server_version=${{ github.event.inputs.release_version }}" >> $GITHUB_OUTPUT + + - name: Bump to Final Release version + run: | + python scripts/bump_version.py --bump-to-stable ${{ steps.release_metadata.outputs.server_version}} + + - name: Update Commit Hash in Syft + run: | + python packages/syft/src/syft/util/update_commit.py packages/syft/src/syft/util/commit.py + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_LOGIN }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Build and push `syft-backend` image to DockerHub + id: syft-backend-build + uses: docker/build-push-action@v6 + with: + context: ./packages + file: ./packages/grid/backend/backend.dockerfile + platforms: ${{ steps.release_metadata.outputs.release_platform }} + target: backend + outputs: type=image,name=openmined/syft-backend,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-backend:cache-${{ 
steps.release_metadata.outputs.short_release_platform }},mode=max + + - name: Export digest for syft-backend + run: | + mkdir -p /tmp/digests/syft-backend + digest="${{ steps.syft-backend-build.outputs.digest }}" + touch "/tmp/digests/syft-backend/${digest#sha256:}" + + - name: Build and push `syft-frontend` image to DockerHub + id: syft-frontend-build + uses: docker/build-push-action@v6 + with: + context: ./packages/grid/frontend + file: ./packages/grid/frontend/frontend.dockerfile + platforms: ${{ steps.release_metadata.outputs.release_platform }} + outputs: type=image,name=openmined/syft-frontend,push-by-digest=true,name-canonical=true,push=true + target: syft-ui-development + cache-from: type=registry,ref=openmined/syft-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + + - name: Export digest for syft-frontend + run: | + mkdir -p /tmp/digests/syft-frontend + digest="${{ steps.syft-frontend-build.outputs.digest }}" + touch "/tmp/digests/syft-frontend/${digest#sha256:}" + + - name: Build and push `syft-seaweedfs` image to DockerHub + id: syft-seaweedfs-build + uses: docker/build-push-action@v6 + with: + context: ./packages/grid/seaweedfs + file: ./packages/grid/seaweedfs/seaweedfs.dockerfile + platforms: ${{ steps.release_metadata.outputs.release_platform }} + outputs: type=image,name=openmined/syft-seaweedfs,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + + - name: Export digest for syft-seaweedfs + run: | + mkdir -p /tmp/digests/syft-seaweedfs + digest="${{ steps.syft-seaweedfs-build.outputs.digest }}" + touch "/tmp/digests/syft-seaweedfs/${digest#sha256:}" + + # Some of the dependencies of syft-enclave-attestation are not available for arm64 + # Hence, we are building syft-enclave-attestation only for x64 (see the `if` conditional) + - name: Build and push `syft-enclave-attestation` image to DockerHub + if: ${{ endsWith(matrix.runner, '-x64') }} + id: syft-enclave-attestation-build + uses: docker/build-push-action@v6 + with: + context: ./packages/grid/enclave/attestation + file: ./packages/grid/enclave/attestation/attestation.dockerfile + platforms: ${{ steps.release_metadata.outputs.release_platform }} + outputs: type=image,name=openmined/syft-enclave-attestation,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-enclave-attestation:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-enclave-attestation:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + + - name: Export digest for syft-enclave-attestation + if: ${{ endsWith(matrix.runner, '-x64') }} + run: | + mkdir -p /tmp/digests/syft-enclave-attestation + digest="${{ steps.syft-enclave-attestation-build.outputs.digest }}" + touch "/tmp/digests/syft-enclave-attestation/${digest#sha256:}" + + - name: Build and push `syft` image to registry + id: syft-build + uses: docker/build-push-action@v6 + with: + context: ./packages/ + file: ./packages/grid/syft-client/syft.Dockerfile + outputs: type=image,name=openmined/syft-client,push-by-digest=true,name-canonical=true,push=true + platforms: ${{ 
steps.release_metadata.outputs.release_platform }} + cache-from: type=registry,ref=openmined/syft-client:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-client:cache-${{ steps.release_metadata.outputs.short_release_platform }},mode=max + + - name: Export digest for `syft` image + run: | + mkdir -p /tmp/digests/syft + digest="${{ steps.syft-build.outputs.digest }}" + touch "/tmp/digests/syft/${digest#sha256:}" + + - name: Upload digests + uses: actions/upload-artifact@v4 + with: + name: digests-${{ steps.release_metadata.outputs.server_version }}-${{ steps.release_metadata.outputs.short_release_platform }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + #Used to merge x64 and arm64 into one docker image + merge-docker-images: + needs: [build-and-push-docker-images] + if: always() && (needs.build-and-push-docker-images.result == 'success') + + runs-on: sh-arc-linux-x64 + + outputs: + server_version: ${{ needs.build-and-push-docker-images.outputs.server_version }} + + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-${{ needs.build-and-push-docker-images.outputs.server_version }}-* + merge-multiple: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_LOGIN }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Create manifest list and push for syft-backend + working-directory: /tmp/digests/syft-backend + run: | + docker buildx imagetools create \ + -t openmined/syft-backend:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + $(printf 'openmined/syft-backend@sha256:%s ' *) + + - name: Create manifest list and push for syft-frontend + working-directory: /tmp/digests/syft-frontend + run: | + docker buildx imagetools create \ + -t openmined/syft-frontend:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + $(printf 'openmined/syft-frontend@sha256:%s ' *) + + - name: Create manifest list and push for syft-seaweedfs + working-directory: /tmp/digests/syft-seaweedfs + run: | + docker buildx imagetools create \ + -t openmined/syft-seaweedfs:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + $(printf 'openmined/syft-seaweedfs@sha256:%s ' *) + + - name: Create manifest list and push for syft-enclave-attestation + working-directory: /tmp/digests/syft-enclave-attestation + run: | + docker buildx imagetools create \ + -t openmined/syft-enclave-attestation:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + $(printf 'openmined/syft-enclave-attestation@sha256:%s ' *) + + - name: Create manifest list and push for syft client + working-directory: /tmp/digests/syft + run: | + docker buildx imagetools create \ + -t openmined/syft-client:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + $(printf 'openmined/syft-client@sha256:%s ' *) + + deploy-syft: + needs: [merge-docker-images] + if: always() && needs.merge-docker-images.result == 'success' + + runs-on: ubuntu-latest + + steps: + - name: Permission to home directory + run: | + sudo chown -R $USER:$USER $HOME + + - uses: actions/checkout@v4 + with: + token: ${{ secrets.SYFT_BOT_COMMIT_TOKEN }} + ref: ${{ github.event.inputs.release_branch }} + + # free 10GB of space + - name: Remove unnecessary files + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + docker image 
prune --all --force + docker builder prune --all --force + docker system prune --all --force + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 setuptools wheel twine bump2version PyYAML + uv --version + + - name: Bump to Final Release version + run: | + python scripts/bump_version.py --bump-to-stable ${{ needs.merge-docker-images.outputs.server_version }} + + - name: Update Commit Hash in Syft + run: | + python packages/syft/src/syft/util/update_commit.py packages/syft/src/syft/util/commit.py + + - name: Build Helm Chart + shell: bash + run: | + # install k3d + K3D_VERSION=v5.6.3 + wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 + mv k3d-linux-amd64 k3d + chmod +x k3d + export PATH=`pwd`:$PATH + k3d version + + #Install Devspace + DEVSPACE_VERSION=v6.3.12 + curl -sSL https://github.com/loft-sh/devspace/releases/download/${DEVSPACE_VERSION}/devspace-linux-amd64 -o ./devspace + chmod +x devspace + devspace version + + # Install helm + curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + helm version + + tox -e syft.build.helm + tox -e syft.package.helm + + - name: Linting + run: | + tox -e lint || true + + - name: Manual Build and Publish + run: | + tox -e syft.publish + if [[ "${{ github.event.inputs.release_platform }}" == "TEST_PYPI" ]]; then + twine upload -r testpypi -u __token__ -p ${{ secrets.OM_SYFT_TEST_PYPI_TOKEN }} packages/syft/dist/* + fi + + # Checkout to gh-pages and update helm repo + - name: Checkout to gh-pages + uses: actions/checkout@v4 + with: + ref: gh-pages + token: ${{ secrets.SYFT_BOT_COMMIT_TOKEN }} + path: ghpages + + - name: Copy helm repo files from Syft Repo + run: | + cp packages/grid/helm/repo/syft-${{ needs.merge-docker-images.outputs.server_version }}.tgz ghpages/helm/ + cd ghpages/helm && helm repo index . 
--url https://openmined.github.io/PySyft/helm + + - name: Commit changes to gh-pages + uses: EndBug/add-and-commit@v9 + with: + author_name: ${{ secrets.OM_BOT_NAME }} + author_email: ${{ secrets.OM_BOT_EMAIL }} + message: "Add Helm package for Syft Version: ${{ needs.merge-docker-images.outputs.server_version }}" + add: "helm/" + push: "origin gh-pages" + cwd: "./ghpages/" diff --git a/.github/workflows/cd-hagrid.yml b/.github/workflows/cd-hagrid.yml deleted file mode 100644 index a17f61ec519..00000000000 --- a/.github/workflows/cd-hagrid.yml +++ /dev/null @@ -1,108 +0,0 @@ -name: CD - HAGrid - -on: - schedule: - - cron: "00 10 * * */3" # At 10:00 UTC on every three days - - workflow_dispatch: - inputs: - skip_tests: - description: "If true, skip tests" - required: false - default: "false" - -# Prevents concurrent runs of the same workflow -# while the previous run is still in progress -concurrency: - group: "CD - Hagrid" - cancel-in-progress: false - -jobs: - call-pr-tests-linting: - if: github.repository == 'OpenMined/PySyft' && (github.event.inputs.skip_tests == 'false' || github.event_name == 'schedule') # don't run on forks - uses: OpenMined/PySyft/.github/workflows/pr-tests-linting.yml@dev - - call-pr-tests-syft: - if: github.repository == 'OpenMined/PySyft' && (github.event.inputs.skip_tests == 'false' || github.event_name == 'schedule') # don't run on forks - uses: OpenMined/PySyft/.github/workflows/pr-tests-syft.yml@dev - - call-pr-tests-stack: - if: github.repository == 'OpenMined/PySyft' && (github.event.inputs.skip_tests == 'false' || github.event_name == 'schedule') # don't run on forks - uses: OpenMined/PySyft/.github/workflows/pr-tests-stack.yml@dev - secrets: inherit - - call-hagrid-tests: - if: github.repository == 'OpenMined/PySyft' && (github.event.inputs.skip_tests == 'false' || github.event_name == 'schedule') # don't run on forks - uses: OpenMined/PySyft/.github/workflows/pr-tests-hagrid.yml@dev - - deploy-hagrid: - needs: - [ - call-pr-tests-linting, - call-pr-tests-syft, - call-pr-tests-stack, - call-hagrid-tests, - ] - if: always() && (needs.call-pr-tests-linting.result == 'success' && needs.call-pr-tests-syft.result == 'success' && needs.call-pr-tests-stack.result == 'success' && needs.call-hagrid-tests.result == 'success' || github.event.inputs.skip_tests == 'true') - runs-on: ubuntu-latest - - outputs: - current_hash: ${{ steps.get_hash.outputs.current_hash }} - previous_hash: ${{ steps.get_hash.outputs.previous_hash }} - steps: - - uses: actions/checkout@v4 - with: - token: ${{ secrets.SYFT_BOT_COMMIT_TOKEN }} - - name: Install checksumdir - run: | - pip install --upgrade checksumdir - - name: Get the hashes - id: get-hashes - shell: bash - run: | - current_hash=$(checksumdir ./packages/hagrid) - echo "current_hash=$current_hash" >> $GITHUB_OUTPUT - previous_hash=$(cat ./scripts/hagrid_hash) - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install dependencies - if: ${{needs.hagrid-deploy.outputs.current_hash}} != ${{needs.hagrid-deploy.outputs.previous_hash}} - run: | - python -m pip install --upgrade pip - pip install --upgrade tox setuptools wheel twine bump2version PyYAML - - - name: Bump the Version - if: ${{needs.hagrid-deploy.outputs.current_hash}} != ${{needs.hagrid-deploy.outputs.previous_hash}} - run: | - python3 hagrid/version.py - python3 scripts/update_manifest.py - bump2version patch --allow-dirty --no-commit - tox -e lint || true - python3 hagrid/version.py - working-directory: 
./packages/hagrid - - - name: Write the new hash - if: ${{needs.hagrid-deploy.outputs.current_hash}} != ${{needs.hagrid-deploy.outputs.previous_hash}} - run: echo $(checksumdir packages/hagrid) > ./scripts/hagrid_hash - - - name: Commit changes - if: ${{needs.hagrid-deploy.outputs.current_hash}} != ${{needs.hagrid-deploy.outputs.previous_hash}} - uses: EndBug/add-and-commit@v9 - with: - author_name: ${{ secrets.OM_BOT_NAME }} - author_email: ${{ secrets.OM_BOT_EMAIL }} - message: "[hagrid] bump version" - add: "['./packages/hagrid/.bumpversion.cfg','./packages/hagrid/setup.py','./packages/hagrid/hagrid/version.py', './scripts/hagrid_hash', './packages/hagrid/hagrid/manifest_template.yml']" - - - name: Build and publish - if: ${{needs.hagrid-deploy.outputs.current_hash}} != ${{needs.hagrid-deploy.outputs.previous_hash}} - env: - TWINE_USERNAME: __token__ - TWINE_PASSWORD: ${{ secrets.HAGRID_BUMP_TOKEN }} - run: | - tox -e hagrid.publish - twine upload packages/hagrid/dist/* diff --git a/.github/workflows/cd-post-release-tests.yml b/.github/workflows/cd-post-release-tests.yml index 7f19b5c397d..88d0be46bb6 100644 --- a/.github/workflows/cd-post-release-tests.yml +++ b/.github/workflows/cd-post-release-tests.yml @@ -8,6 +8,14 @@ on: required: true type: string + release_platform: + description: "Release Platform" + required: true + type: choice + options: + - "REAL_PYPI" + - "TEST_PYPI" + workflow_call: inputs: syft_version: @@ -15,27 +23,27 @@ on: required: true type: string + release_platform: + description: "Release Platform" + required: true + type: string + default: "REAL_PYPI" + jobs: - notebook-test-hagrid: + syft-install-check: strategy: max-parallel: 99 matrix: - os: [ubuntu-latest] - python-version: ["3.12"] - + os: [ubuntu-latest, macos-latest, windows-latest, macos-14] + python-version: ["3.12", "3.11", "3.10"] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v4 - - # free 10GB of space - - name: Remove unnecessary files - if: matrix.os == 'ubuntu-latest' + - name: System Architecture run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - docker image prune --all --force - docker builder prune --all --force - docker system prune --all --force + echo "System Architecture: $(uname -m)" + echo "System Version: $(uname -a)" + + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 @@ -45,7 +53,7 @@ jobs: - name: Upgrade pip run: | - python -m pip install --upgrade --user pip + python -m pip install --upgrade pip - name: Get pip cache dir id: pip-cache @@ -53,7 +61,7 @@ jobs: run: | echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 with: path: ${{ steps.pip-cache.outputs.dir }} @@ -61,126 +69,94 @@ jobs: restore-keys: | ${{ runner.os }}-pip-py${{ matrix.python-version }}- - - name: Install Hagrid, tox and uv - run: | - pip install -U hagrid - pip install --upgrade pip uv==0.1.18 tox tox-uv==1.5.1 - - - name: Hagrid Version - run: | - hagrid version - - - name: Remove existing containers - continue-on-error: true - shell: bash - run: | - docker rm $(docker ps -aq) --force || true - docker volume prune -f || true - docker buildx use default || true - - - name: Launch Domain - run: | - hagrid launch test-domain-1 to docker:8081 --tag=${{ inputs.syft_version }} --low-side - - - name: Run tests - env: - NODE_PORT: "8081" - SYFT_VERSION: ${{ inputs.syft_version }} - EXCLUDE_NOTEBOOKS: "not 11-container-images-k8s.ipynb" - run: | - tox -e 
e2e.test.notebook - - #Run log collector python script - - name: Run log collector - timeout-minutes: 5 - if: failure() + - name: Install Syft shell: bash run: | - python ./scripts/container_log_collector.py + if [[ ${{inputs.release_platform}} == "REAL_PYPI" ]]; then + pip install syft==${{inputs.syft_version}} + fi + if [[ ${{ inputs.release_platform }} == "TEST_PYPI" ]]; then + pip install --extra-index-url https://test.pypi.org/simple/ syft==${{inputs.syft_version}} + fi - # Get Job name and url - - name: Get job name and url - id: job_name - if: failure() - shell: bash + - name: Check Syft version run: | - echo "job_name=$(echo ${{ github.job }})" >> $GITHUB_OUTPUT - echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - - - name: Upload logs to GitHub - uses: actions/upload-artifact@master - if: failure() - with: - name: ${{ matrix.os }}-${{ steps.job_name.outputs.job_name }}-logs-${{ steps.job_name.outputs.date }} - path: ./logs/${{ steps.job_name.outputs.job_name}}/ + python -c "import syft; print(syft.__version__)" - syft-install-check: + notebook-test-k8s-k3d: + if: github.event.inputs.release_platform == 'REAL_PYPI' strategy: max-parallel: 99 matrix: - os: [ubuntu-latest, macos-latest, windows-latest, macos-14] - python-version: ["3.12", "3.11", "3.10"] + os: [ubuntu-latest] + python-version: ["3.12"] + runs-on: ${{ matrix.os }} steps: - - name: System Architecture - run: | - echo "System Architecture: $(uname -m)" - echo "System Version: $(uname -a)" - - uses: actions/checkout@v4 + # free 10GB of space + - name: Remove unnecessary files + if: matrix.os == 'ubuntu-latest' + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + docker image prune --all --force + docker builder prune --all --force + docker system prune --all --force + - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Upgrade pip + - name: Install K3d run: | - python -m pip install --upgrade --user pip + K3D_VERSION=v5.6.3 + wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=$K3D_VERSION bash - - name: Get pip cache dir + - name: Install pip dependencies + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version + + - name: Get uv cache dir id: pip-cache shell: bash run: | - echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 with: path: ${{ steps.pip-cache.outputs.dir }} key: ${{ runner.os }}-pip-py${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }} restore-keys: | - ${{ runner.os }}-pip-py${{ matrix.python-version }}- + ${{ runner.os }}-uv-py${{ matrix.python-version }}- - - name: Install Syft - run: | - pip install syft==${{ inputs.syft_version }} - - - name: Check Syft version + - name: Run K8s tests + env: + SYFT_VERSION: ${{ inputs.syft_version }} run: | - python -c "import syft; print(syft.__version__)" + tox -e syft.test.helm - notebook-test-k8s-k3d: + # This job is used to test the syft unit tests on Test PyPi + syft-unit-tests: strategy: max-parallel: 99 matrix: - os: [ubuntu-latest] - python-version: ["3.12"] - + os: [ubuntu-latest, macos-latest] + python-version: ["3.12", "3.11", "3.10"] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v4 - - # free 10GB of space - - name: Remove unnecessary files - if: matrix.os == 'ubuntu-latest' + - name: System Architecture run: | - sudo rm -rf 
/usr/share/dotnet - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - docker image prune --all --force - docker builder prune --all --force - docker system prune --all --force + echo "System Architecture: $(uname -m)" + echo "System Version: $(uname -a)" + + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 @@ -188,10 +164,9 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Install K3d + - name: Upgrade pip run: | - K3D_VERSION=v5.6.0 - wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=$K3D_VERSION bash + python -m pip install --upgrade pip - name: Get pip cache dir id: pip-cache @@ -199,7 +174,7 @@ jobs: run: | echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 with: path: ${{ steps.pip-cache.outputs.dir }} @@ -207,12 +182,21 @@ jobs: restore-keys: | ${{ runner.os }}-pip-py${{ matrix.python-version }}- - - name: Install tox - run: | - pip install --upgrade pip uv==0.1.18 tox tox-uv==1.5.1 - - - name: Run K8s tests + - name: Install Syft env: SYFT_VERSION: ${{ inputs.syft_version }} run: | - tox -e syft.test.helm + if [[ ${{ inputs.release_platform }} == "TEST_PYPI" ]]; then + pip install --extra-index-url https://test.pypi.org/simple/ syft[data_science,dev]==${{ inputs.syft_version }} + fi + if [[ ${{ inputs.release_platform }} == "REAL_PYPI" ]]; then + pip install syft[data_science,dev]==${{ inputs.syft_version }} + fi + + - name: Install tox and uv + run: | + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 tox-current-env + + - name: Run unit tests + run: | + tox -e syft.test.unit --current-env diff --git a/.github/workflows/cd-syft-dev.yml b/.github/workflows/cd-syft-dev.yml index d8941739d69..04b03817b35 100644 --- a/.github/workflows/cd-syft-dev.yml +++ b/.github/workflows/cd-syft-dev.yml @@ -77,67 +77,75 @@ jobs: username: ${{ secrets.ACR_USERNAME }} password: ${{ secrets.ACR_PASSWORD }} - - name: Set Grid package version - id: grid + - name: Set Server package version + id: server shell: bash run: | - echo "GRID_VERSION=$(python packages/grid/VERSION)" >> $GITHUB_OUTPUT - echo "SEAWEEDFS_VERSION=$(grep 'SEAWEEDFS_VERSION' packages/grid/default.env | cut -d '=' -f2)" >> $GITHUB_OUTPUT + echo "SERVER_VERSION=$(python packages/grid/VERSION)" >> $GITHUB_OUTPUT - - name: Build and push `grid-backend` image to registry - uses: docker/build-push-action@v5 + - name: Build and push `syft` image to registry + uses: docker/build-push-action@v6 + with: + context: ./packages + file: ./packages/grid/syft-client/syft.Dockerfile + push: true + tags: | + ${{ secrets.ACR_SERVER }}/openmined/syft-client:dev + ${{ secrets.ACR_SERVER }}/openmined/syft-client:dev-${{ github.sha }} + ${{ secrets.ACR_SERVER }}/openmined/syft-client:${{ steps.server.outputs.SERVER_VERSION }} + + - name: Build and push `syft-backend` image to registry + uses: docker/build-push-action@v6 with: context: ./packages file: ./packages/grid/backend/backend.dockerfile push: true target: backend tags: | - ${{ secrets.ACR_SERVER }}/openmined/grid-backend:dev - ${{ secrets.ACR_SERVER }}/openmined/grid-backend:dev-${{ github.sha }} - ${{ secrets.ACR_SERVER }}/openmined/grid-backend:${{ steps.grid.outputs.GRID_VERSION }} + ${{ secrets.ACR_SERVER }}/openmined/syft-backend:dev + ${{ secrets.ACR_SERVER }}/openmined/syft-backend:dev-${{ github.sha }} + ${{ secrets.ACR_SERVER }}/openmined/syft-backend:${{ steps.server.outputs.SERVER_VERSION }} - - name: Build and push 
`grid-frontend` image to registry - uses: docker/build-push-action@v5 + - name: Build and push `syft-frontend` image to registry + uses: docker/build-push-action@v6 with: context: ./packages/grid/frontend file: ./packages/grid/frontend/frontend.dockerfile push: true tags: | - ${{ secrets.ACR_SERVER }}/openmined/grid-frontend:dev - ${{ secrets.ACR_SERVER }}/openmined/grid-frontend:dev-${{ github.sha }} - ${{ secrets.ACR_SERVER }}/openmined/grid-frontend:${{ steps.grid.outputs.GRID_VERSION }} - target: grid-ui-development + ${{ secrets.ACR_SERVER }}/openmined/syft-frontend:dev + ${{ secrets.ACR_SERVER }}/openmined/syft-frontend:dev-${{ github.sha }} + ${{ secrets.ACR_SERVER }}/openmined/syft-frontend:${{ steps.server.outputs.SERVER_VERSION }} + target: syft-ui-development - - name: Build and push `grid-seaweedfs` image to registry - uses: docker/build-push-action@v5 + - name: Build and push `syft-seaweedfs` image to registry + uses: docker/build-push-action@v6 with: context: ./packages/grid/seaweedfs file: ./packages/grid/seaweedfs/seaweedfs.dockerfile - build-args: | - SEAWEEDFS_VERSION=${{ steps.grid.outputs.SEAWEEDFS_VERSION }} push: true tags: | - ${{ secrets.ACR_SERVER }}/openmined/grid-seaweedfs:dev - ${{ secrets.ACR_SERVER }}/openmined/grid-seaweedfs:dev-${{ github.sha }} - ${{ secrets.ACR_SERVER }}/openmined/grid-seaweedfs:${{ steps.grid.outputs.GRID_VERSION }} + ${{ secrets.ACR_SERVER }}/openmined/syft-seaweedfs:dev + ${{ secrets.ACR_SERVER }}/openmined/syft-seaweedfs:dev-${{ github.sha }} + ${{ secrets.ACR_SERVER }}/openmined/syft-seaweedfs:${{ steps.server.outputs.SERVER_VERSION }} - - name: Build and push `grid-veilid` image to registry - uses: docker/build-push-action@v5 + - name: Build and push `syft-enclave-attestation` image to registry + uses: docker/build-push-action@v6 with: - context: ./packages/grid/veilid - file: ./packages/grid/veilid/veilid.dockerfile + context: ./packages/grid/enclave/attestation + file: ./packages/grid/enclave/attestation/attestation.dockerfile push: true tags: | - ${{ secrets.ACR_SERVER }}/openmined/grid-veilid:dev - ${{ secrets.ACR_SERVER }}/openmined/grid-veilid:dev-${{ github.sha }} - ${{ secrets.ACR_SERVER }}/openmined/grid-veilid:${{ steps.grid.outputs.GRID_VERSION }} + ${{ secrets.ACR_SERVER }}/openmined/syft-enclave-attestation:dev + ${{ secrets.ACR_SERVER }}/openmined/syft-enclave-attestation:dev-${{ github.sha }} + ${{ secrets.ACR_SERVER }}/openmined/syft-enclave-attestation:${{ steps.server.outputs.SERVER_VERSION }} - name: Build Helm Chart & Copy to infra if: github.ref == 'refs/heads/dev' || github.event.inputs.deploy-helm == 'true' shell: bash run: | - K3D_VERSION=v5.6.0 - DEVSPACE_VERSION=v6.3.4 + K3D_VERSION=v5.6.3 + DEVSPACE_VERSION=v6.3.12 # install k3d wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 diff --git a/.github/workflows/cd-syft.yml b/.github/workflows/cd-syft.yml index a6b42dcf0ea..c97bcfa1897 100644 --- a/.github/workflows/cd-syft.yml +++ b/.github/workflows/cd-syft.yml @@ -12,8 +12,8 @@ on: default: "false" type: choice options: - - false - - true + - "false" + - "true" release_platform: description: "Release Platform" @@ -90,36 +90,16 @@ jobs: outputs: release_tag: ${{ steps.get_release_tag.outputs.release_tag }} - grid_version: ${{ steps.release_metadata.outputs.grid_version }} + server_version: ${{ steps.release_metadata.outputs.server_version }} steps: - uses: actions/checkout@v4 - # actions/setup-python doesn't yet support ARM - - name: Setup Python on x64 - if: ${{ 
!endsWith(matrix.runner, '-arm64') }} + - name: Setup Python on ${{ matrix.runner }} uses: actions/setup-python@v5 with: python-version: "3.12" - # Currently psutil package requires gcc to be installed on arm - # for building psutil from source - # as linux/aarch64 wheels not avaialble for psutil - # We could remove once we have aarch64 wheels - # https://github.com/giampaolo/psutil/issues/1972 - - name: Install Metadata packages for arm64 - if: ${{ endsWith(matrix.runner, '-arm64') }} - run: | - sudo apt update -y - sudo apt install software-properties-common -y - sudo apt install gcc -y - - - name: Setup Python on arm64 - if: ${{ endsWith(matrix.runner, '-arm64') }} - uses: deadsnakes/action@v3.1.0 - with: - python-version: "3.12" - - name: Install Git run: | sudo apt-get update @@ -133,7 +113,8 @@ jobs: - name: Install dependencies run: | - pip install --upgrade pip uv==0.1.18 bump2version tox tox-uv==1.5.1 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 bump2version==1.0.1 uv --version - name: Get Release tag @@ -164,8 +145,7 @@ jobs: echo "release_platform=linux/arm64" >> $GITHUB_OUTPUT echo "short_release_platform=arm64" >> $GITHUB_OUTPUT fi - echo "grid_version=$(python packages/grid/VERSION)" >> $GITHUB_OUTPUT - echo "seaweedfs_version=$(grep 'SEAWEEDFS_VERSION' packages/grid/default.env | cut -d '=' -f2)" >> $GITHUB_OUTPUT + echo "server_version=$(python packages/grid/VERSION)" >> $GITHUB_OUTPUT # TODO: Optimize redundant bump protocol version checks - name: Check and Bump Protocol Version @@ -184,94 +164,118 @@ jobs: username: ${{ secrets.DOCKER_LOGIN }} password: ${{ secrets.DOCKER_PASSWORD }} - - name: Build and push `grid-backend` image to DockerHub - id: grid-backend-build - uses: docker/build-push-action@v5 + - name: Build and push `syft-backend` image to DockerHub + id: syft-backend-build + uses: docker/build-push-action@v6 with: context: ./packages file: ./packages/grid/backend/backend.dockerfile platforms: ${{ steps.release_metadata.outputs.release_platform }} target: backend - outputs: type=image,name=openmined/grid-backend,push-by-digest=true,name-canonical=true,push=true - cache-from: type=registry,ref=openmined/grid-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }} - cache-to: type=registry,ref=openmined/grid-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }},mode=max - - # - name: Build and push `syft-base-cpu` image to DockerHub - # id: syft-base-cpu-build - # uses: docker/build-push-action@v5 - # with: - # context: ./packages - # file: ./packages/grid/backend/worker_cpu.dockerfile - # platforms: ${{ steps.release_metadata.outputs.release_platform }} - # target: backend - # outputs: type=image,name=openmined/syft-base-cpu,push-by-digest=true,name-canonical=true,push=true - # cache-from: type=registry,ref=openmined/syft-base-cpu:cache-${{ steps.release_metadata.outputs.short_release_platform }} - # cache-to: type=registry,ref=openmined/syft-base-cpu:cache-${{ steps.release_metadata.outputs.short_release_platform }},mode=max - - - name: Export digest for grid-backend - run: | - mkdir -p /tmp/digests/grid-backend - digest="${{ steps.grid-backend-build.outputs.digest }}" - touch "/tmp/digests/grid-backend/${digest#sha256:}" - - - name: Build and push `grid-frontend` image to DockerHub - id: grid-frontend-build - uses: docker/build-push-action@v5 + outputs: type=image,name=openmined/syft-backend,push-by-digest=true,name-canonical=true,push=true + cache-from: 
type=registry,ref=openmined/syft-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }},mode=max + + - name: Export digest for syft-backend + run: | + mkdir -p /tmp/digests/syft-backend + digest="${{ steps.syft-backend-build.outputs.digest }}" + touch "/tmp/digests/syft-backend/${digest#sha256:}" + + - name: Build and push `syft-frontend` image to DockerHub + id: syft-frontend-build + uses: docker/build-push-action@v6 with: context: ./packages/grid/frontend file: ./packages/grid/frontend/frontend.dockerfile platforms: ${{ steps.release_metadata.outputs.release_platform }} - outputs: type=image,name=openmined/grid-frontend,push-by-digest=true,name-canonical=true,push=true - target: grid-ui-development - cache-from: type=registry,ref=openmined/grid-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform }} - cache-to: type=registry,ref=openmined/grid-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + outputs: type=image,name=openmined/syft-frontend,push-by-digest=true,name-canonical=true,push=true + target: syft-ui-development + cache-from: type=registry,ref=openmined/syft-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max - - name: Export digest for grid-frontend + - name: Export digest for syft-frontend run: | - mkdir -p /tmp/digests/grid-frontend - digest="${{ steps.grid-frontend-build.outputs.digest }}" - touch "/tmp/digests/grid-frontend/${digest#sha256:}" + mkdir -p /tmp/digests/syft-frontend + digest="${{ steps.syft-frontend-build.outputs.digest }}" + touch "/tmp/digests/syft-frontend/${digest#sha256:}" - - name: Build and push `grid-seaweedfs` image to DockerHub - id: grid-seaweedfs-build - uses: docker/build-push-action@v5 + - name: Build and push `syft-seaweedfs` image to DockerHub + id: syft-seaweedfs-build + uses: docker/build-push-action@v6 with: context: ./packages/grid/seaweedfs file: ./packages/grid/seaweedfs/seaweedfs.dockerfile platforms: ${{ steps.release_metadata.outputs.release_platform }} - build-args: | - SEAWEEDFS_VERSION=${{ steps.release_metadata.outputs.seaweedfs_version }} - outputs: type=image,name=openmined/grid-seaweedfs,push-by-digest=true,name-canonical=true,push=true - cache-from: type=registry,ref=openmined/grid-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform }} - cache-to: type=registry,ref=openmined/grid-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + outputs: type=image,name=openmined/syft-seaweedfs,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max - - name: Export digest for grid-seaweedfs + - name: Export digest for syft-seaweedfs run: | - mkdir -p /tmp/digests/grid-seaweedfs - digest="${{ steps.grid-seaweedfs-build.outputs.digest }}" - touch "/tmp/digests/grid-seaweedfs/${digest#sha256:}" + mkdir -p /tmp/digests/syft-seaweedfs + digest="${{ steps.syft-seaweedfs-build.outputs.digest }}" + touch "/tmp/digests/syft-seaweedfs/${digest#sha256:}" - - name: Build and push `grid-veilid` image to DockerHub - 
id: grid-veilid-build - uses: docker/build-push-action@v5 + - name: Build and push `syft-rathole` image to Dockerhub + id: syft-rathole-build + uses: docker/build-push-action@v6 with: - context: ./packages/grid/veilid - file: ./packages/grid/veilid/veilid.dockerfile + context: ./packages/grid/rathole + file: ./packages/grid/rathole/rathole.dockerfile platforms: ${{ steps.release_metadata.outputs.release_platform }} - outputs: type=image,name=openmined/grid-veilid,push-by-digest=true,name-canonical=true,push=true - cache-from: type=registry,ref=openmined/grid-veilid:cache-${{ steps.release_metadata.outputs.short_release_platform }} - cache-to: type=registry,ref=openmined/grid-veilid:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + outputs: type=image,name=openmined/syft-rathole,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-rathole:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-rathole:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + + - name: Export digest for syft-rathole + run: | + mkdir -p /tmp/digests/syft-rathole + digest="${{ steps.syft-rathole-build.outputs.digest }}" + touch "/tmp/digests/syft-rathole/${digest#sha256:}" + + # Some of the dependencies of syft-enclave-attestation are not available for arm64 + # Hence, we are building syft-enclave-attestation only for x64 (see the `if` conditional) + - name: Build and push `syft-enclave-attestation` image to DockerHub + if: ${{ endsWith(matrix.runner, '-x64') }} + id: syft-enclave-attestation-build + uses: docker/build-push-action@v6 + with: + context: ./packages/grid/enclave/attestation + file: ./packages/grid/enclave/attestation/attestation.dockerfile + platforms: ${{ steps.release_metadata.outputs.release_platform }} + outputs: type=image,name=openmined/syft-enclave-attestation,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-enclave-attestation:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-enclave-attestation:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max - - name: Export digest for grid-veilid + - name: Export digest for syft-enclave-attestation + if: ${{ endsWith(matrix.runner, '-x64') }} run: | - mkdir -p /tmp/digests/grid-veilid - digest="${{ steps.grid-veilid-build.outputs.digest }}" - touch "/tmp/digests/grid-veilid/${digest#sha256:}" + mkdir -p /tmp/digests/syft-enclave-attestation + digest="${{ steps.syft-enclave-attestation-build.outputs.digest }}" + touch "/tmp/digests/syft-enclave-attestation/${digest#sha256:}" + + - name: Build and push `syft` image to registry + id: syft-build + uses: docker/build-push-action@v6 + with: + context: ./packages/ + file: ./packages/grid/syft-client/syft.Dockerfile + outputs: type=image,name=openmined/syft-client,push-by-digest=true,name-canonical=true,push=true + platforms: ${{ steps.release_metadata.outputs.release_platform }} + cache-from: type=registry,ref=openmined/syft-client:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-client:cache-${{ steps.release_metadata.outputs.short_release_platform }},mode=max - - name: Upload digest for grid-backend, grid-frontend and grid-seaweedfs, grid-veilid + - name: Export digest for `syft` image + run: | + mkdir -p /tmp/digests/syft + digest="${{ steps.syft-build.outputs.digest 
}}" + touch "/tmp/digests/syft/${digest#sha256:}" + + - name: Upload digests uses: actions/upload-artifact@v4 with: - name: digests-${{ steps.release_metadata.outputs.grid_version }}-${{ steps.release_metadata.outputs.short_release_platform }} + name: digests-${{ steps.release_metadata.outputs.server_version }}-${{ steps.release_metadata.outputs.short_release_platform }} path: /tmp/digests/* if-no-files-found: error retention-days: 1 @@ -291,7 +295,7 @@ jobs: uses: actions/download-artifact@v4 with: path: /tmp/digests - pattern: digests-${{ needs.build-and-push-docker-images.outputs.grid_version }}-* + pattern: digests-${{ needs.build-and-push-docker-images.outputs.server_version }}-* merge-multiple: true - name: Set up Docker Buildx @@ -303,37 +307,53 @@ jobs: username: ${{ secrets.DOCKER_LOGIN }} password: ${{ secrets.DOCKER_PASSWORD }} - - name: Create manifest list and push for grid-backend - working-directory: /tmp/digests/grid-backend + - name: Create manifest list and push for syft-backend + working-directory: /tmp/digests/syft-backend + run: | + docker buildx imagetools create \ + -t openmined/syft-backend:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + -t openmined/syft-backend:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/syft-backend@sha256:%s ' *) + + - name: Create manifest list and push for syft-frontend + working-directory: /tmp/digests/syft-frontend run: | docker buildx imagetools create \ - -t openmined/grid-backend:${{ needs.build-and-push-docker-images.outputs.grid_version }} \ - -t openmined/grid-backend:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ - $(printf 'openmined/grid-backend@sha256:%s ' *) + -t openmined/syft-frontend:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + -t openmined/syft-frontend:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/syft-frontend@sha256:%s ' *) - - name: Create manifest list and push for grid-frontend - working-directory: /tmp/digests/grid-frontend + - name: Create manifest list and push for syft-seaweedfs + working-directory: /tmp/digests/syft-seaweedfs run: | docker buildx imagetools create \ - -t openmined/grid-frontend:${{ needs.build-and-push-docker-images.outputs.grid_version }} \ - -t openmined/grid-frontend:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ - $(printf 'openmined/grid-frontend@sha256:%s ' *) + -t openmined/syft-seaweedfs:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + -t openmined/syft-seaweedfs:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/syft-seaweedfs@sha256:%s ' *) - - name: Create manifest list and push for grid-seaweedfs - working-directory: /tmp/digests/grid-seaweedfs + - name: Create manifest list and push for syft-rathole + working-directory: /tmp/digests/syft-rathole run: | docker buildx imagetools create \ - -t openmined/grid-seaweedfs:${{ needs.build-and-push-docker-images.outputs.grid_version }} \ - -t openmined/grid-seaweedfs:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ - $(printf 'openmined/grid-seaweedfs@sha256:%s ' *) + -t openmined/syft-rathole:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + -t openmined/syft-rathole:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/syft-rathole@sha256:%s ' *) - - name: Create manifest list and push for grid-veilid - working-directory: /tmp/digests/grid-veilid + - name: Create 
manifest list and push for syft-enclave-attestation + working-directory: /tmp/digests/syft-enclave-attestation run: | docker buildx imagetools create \ - -t openmined/grid-veilid:${{ needs.build-and-push-docker-images.outputs.grid_version }} \ - -t openmined/grid-veilid:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ - $(printf 'openmined/grid-veilid@sha256:%s ' *) + -t openmined/syft-enclave-attestation:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + -t openmined/syft-enclave-attestation:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/syft-enclave-attestation@sha256:%s ' *) + + - name: Create manifest list and push for syft + working-directory: /tmp/digests/syft + run: | + docker buildx imagetools create \ + -t openmined/syft-client:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + -t openmined/syft-client:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/syft-client@sha256:%s ' *) deploy-syft: needs: [merge-docker-images] @@ -345,6 +365,7 @@ jobs: outputs: syft_version: ${{ steps.release_checks.outputs.syft_version }} + release_platform: ${{ github.event.inputs.release_platform }} steps: - name: Permission to home directory @@ -370,7 +391,8 @@ jobs: python-version: "3.12" - name: Install dependencies run: | - pip install --upgrade pip uv==0.1.18 tox tox-uv==1.5.1 setuptools wheel twine bump2version PyYAML + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 setuptools wheel twine bump2version PyYAML uv --version - name: Bump the Version @@ -381,7 +403,6 @@ jobs: bump2version prenum --allow-dirty --no-commit ls **/VERSION | xargs -I {} python {} cat packages/grid/devspace.yaml | grep '0\.' 
- python packages/hagrid/scripts/update_manifest.py $(python packages/grid/VERSION) - name: Generate Release Metadata id: release_checks @@ -403,7 +424,7 @@ jobs: shell: bash run: | # install k3d - K3D_VERSION=v5.6.0 + K3D_VERSION=v5.6.3 wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 mv k3d-linux-amd64 k3d chmod +x k3d @@ -411,7 +432,7 @@ jobs: k3d version #Install Devspace - DEVSPACE_VERSION=v6.3.3 + DEVSPACE_VERSION=v6.3.12 curl -sSL https://github.com/loft-sh/devspace/releases/download/${DEVSPACE_VERSION}/devspace-linux-amd64 -o ./devspace chmod +x devspace devspace version @@ -434,6 +455,13 @@ jobs: run: | python scripts/convert_to_pypi_readme.py --input-file packages/syft/README.md --output-file packages/syft/PYPI.md --version ${{ steps.release_checks.outputs.future_stable_version }} + - name: Take API Snapshot + run: | + if [[ "${{ needs.merge-docker-images.outputs.release_tag }}" == "latest" ]]; then + export STABLE_RELEASE=True + fi + SAVE_SNAP=True tox --recreate -e syft.api.snapshot + - name: Linting run: | tox -e lint || true @@ -445,7 +473,7 @@ jobs: author_name: ${{ secrets.OM_BOT_NAME }} author_email: ${{ secrets.OM_BOT_EMAIL }} message: "[syft]bump version" - add: "['.bumpversion.cfg', 'VERSION', 'packages/grid/VERSION','packages/syft/PYPI.md', 'packages/grid/devspace.yaml', 'packages/syft/src/syft/VERSION', 'packages/syft/setup.cfg', 'packages/grid/frontend/package.json', 'packages/syft/src/syft/__init__.py', 'packages/hagrid/hagrid/manifest_template.yml', 'packages/grid/helm/syft/Chart.yaml','packages/grid/helm/repo', 'packages/hagrid/hagrid/deps.py', 'packages/grid/podman/podman-kube/podman-syft-kube.yaml' ,'packages/grid/podman/podman-kube/podman-syft-kube-config.yaml', 'packages/syftcli/manifest.yml', 'packages/syft/src/syft/protocol/protocol_version.json', 'packages/syft/src/syft/protocol/releases/', 'packages/grid/backend/worker_cpu.dockerfile','packages/grid/helm/syft/values.yaml','packages/grid/helm/syft']" + add: "['.bumpversion.cfg', 'VERSION', 'packages/grid/VERSION','packages/syft/PYPI.md', 'packages/grid/devspace.yaml', 'packages/syft/src/syft/VERSION', 'packages/syft/setup.cfg', 'packages/grid/frontend/package.json', 'packages/syft/src/syft/__init__.py', 'packages/grid/helm/syft/Chart.yaml','packages/grid/helm/repo', 'packages/syftcli/manifest.yml', 'packages/syft/src/syft/protocol/protocol_version.json', 'packages/syft/src/syft/protocol/releases/', 'packages/grid/backend/grid/images/worker_cpu.dockerfile','packages/grid/helm/syft/values.yaml','packages/grid/helm/syft','packages/syft/src/syft/util/api_snapshot/syft_api_spec_beta.json']" - name: Changes to commit to Syft Repo during stable release if: needs.merge-docker-images.outputs.release_tag == 'latest' @@ -454,7 +482,7 @@ jobs: author_name: ${{ secrets.OM_BOT_NAME }} author_email: ${{ secrets.OM_BOT_EMAIL }} message: "[syft] bump protocol version" - add: "['packages/syft/src/syft/protocol/protocol_version.json', 'packages/syft/src/syft/protocol/releases/','packages/syft/PYPI.md','packages/grid/helm/repo']" + add: "['packages/syft/src/syft/protocol/protocol_version.json', 'packages/syft/src/syft/protocol/releases/','packages/syft/PYPI.md','packages/grid/helm/repo','packages/syft/src/syft/util/api_snapshot/syft_api_spec_stable.json']" - name: Scheduled Build and Publish if: github.event_name == 'schedule' @@ -524,7 +552,6 @@ jobs: files: | ./packages/syftcli/manifest.yml ./build/syftcli-config/* - ./packages/hagrid/hagrid/manifest_template.yml tag_name: v${{ 
steps.release_checks.outputs.github_release_version }} # Checkout to gh-pages and update helm repo @@ -537,15 +564,15 @@ jobs: - name: Copy helm repo files from Syft Repo run: | - rm -rf ghpages/helm/* - cp -R packages/grid/helm/repo/. ghpages/helm/ + cp packages/grid/helm/repo/syft-${{ steps.release_checks.outputs.syft_version }}.tgz ghpages/helm/ + cd ghpages/helm && helm repo index . --url https://openmined.github.io/PySyft/helm - name: Commit changes to gh-pages uses: EndBug/add-and-commit@v9 with: author_name: ${{ secrets.OM_BOT_NAME }} author_email: ${{ secrets.OM_BOT_EMAIL }} - message: "Update Helm package from Syft Repo" + message: "Add Helm package for Syft Version: ${{ steps.release_checks.outputs.syft_version }}" add: "helm/" push: "origin gh-pages" cwd: "./ghpages/" @@ -554,7 +581,7 @@ jobs: # we need to wait for PyPI to update before running the post-release tests - name: Wait for PyPI to update run: | - sleep 60 + sleep 120 # Can we remove the always flag from the below job? call-cd-post-release-tests: @@ -563,3 +590,4 @@ jobs: uses: OpenMined/PySyft/.github/workflows/cd-post-release-tests.yml@dev with: syft_version: ${{ needs.deploy-syft.outputs.syft_version }} + release_platform: ${{ github.event.inputs.release_platform}} diff --git a/.github/workflows/cd-syftcli.yml b/.github/workflows/cd-syftcli.yml index 65f2c37662e..c0229d5c777 100644 --- a/.github/workflows/cd-syftcli.yml +++ b/.github/workflows/cd-syftcli.yml @@ -66,7 +66,7 @@ jobs: if: ${{steps.get-hashes.outputs.current_hash != steps.get-hashes.outputs.previous_hash }} run: | python -m pip install --upgrade pip - pip install --upgrade tox setuptools wheel twine bump2version PyYAML + pip install --upgrade tox setuptools wheel twine bump2version==1.0.1 PyYAML==6.0.1 - name: Bump the Version if: ${{steps.get-hashes.outputs.current_hash != steps.get-hashes.outputs.previous_hash }} @@ -119,13 +119,11 @@ jobs: with: python-version: "3.12" - - name: Install build dependencies for syftcli + - name: Install pip dependencies run: | - pip install --upgrade pip - - - name: Install Tox - run: | - pip install -U tox + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version - name: Build syftcli env: diff --git a/.github/workflows/conda-install.yml b/.github/workflows/conda-install.yml new file mode 100644 index 00000000000..9498ef77b91 --- /dev/null +++ b/.github/workflows/conda-install.yml @@ -0,0 +1,57 @@ +name: Conda Install - PySyft + +on: + workflow_call: + + workflow_dispatch: + inputs: + none: + description: "Run Version Tests Manually" + required: false + +jobs: + constall-install-syft: + strategy: + max-parallel: 99 + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + python-version: ["3.12"] + fail-fast: false + + runs-on: ${{matrix.os}} + + steps: + - uses: actions/checkout@v4 + - uses: conda-incubator/setup-miniconda@v3 + with: + auto-update-conda: true + activate-environment: syft_conda_env + python-version: ${{ matrix.python-version }} + - name: Install syft (Windows) + if: matrix.os == 'windows-latest' + shell: pwsh + run: | + python -m pip install ./packages/syft + $expectedVersion = python packages/grid/VERSION + $syftVersion = python -c 'import syft; print(syft.__version__)' + # Compare the versions + if ($expectedVersion -ne $syftVersion) { + Write-Output "Expected version: $expectedVersion" + Write-Output "Actual version: $syftVersion" + Write-Output "Version mismatch." 
+ exit 1 + } + - name: Install syft (MacOS or Linux) + if: matrix.os != 'windows-latest' + shell: bash -el {0} + run: | + python -m pip install ./packages/syft + EXPECTED_VERSION=$(python packages/grid/VERSION) + SYFT_VERSION=$(python -c 'import syft; print(syft.__version__)') + # Compare the versions + if [ "$EXPECTED_VERSION" != "$SYFT_VERSION" ]; then + echo "Expected version: $EXPECTED_VERSION" + echo "Actual version: $SYFT_VERSION" + echo "Version mismatch." + exit 1 + fi diff --git a/.github/workflows/container-scan.yml b/.github/workflows/container-scan.yml index dbead4eeadd..211a8022f62 100644 --- a/.github/workflows/container-scan.yml +++ b/.github/workflows/container-scan.yml @@ -224,7 +224,7 @@ jobs: name: syft.sbom.json path: syft.sbom.json - scan-mongo-latest-trivy: + scan-postgres-latest-trivy: permissions: contents: read # for actions/checkout to fetch code security-events: write # for github/codeql-action/upload-sarif to upload SARIF results @@ -238,24 +238,24 @@ jobs: continue-on-error: true uses: aquasecurity/trivy-action@master with: - image-ref: "mongo:7.0.0" + image-ref: "postgres:16.1" format: "cyclonedx" - output: "mongo-trivy-results.sbom.json" + output: "postgres-trivy-results.sbom.json" timeout: "10m0s" #Upload SBOM to GitHub Artifacts - name: Upload SBOM to GitHub Artifacts uses: actions/upload-artifact@v4 with: - name: mongo-trivy-results.sbom.json - path: mongo-trivy-results.sbom.json + name: postgres-trivy-results.sbom.json + path: postgres-trivy-results.sbom.json #Generate sarif file - name: Run Trivy vulnerability scanner continue-on-error: true uses: aquasecurity/trivy-action@master with: - image-ref: "mongo:7.0.0" + image-ref: "postgres:16.1" format: "sarif" output: "trivy-results.sarif" timeout: "10m0s" @@ -266,7 +266,7 @@ with: sarif_file: "trivy-results.sarif" - scan-mongo-latest-snyk: + scan-postgres-latest-snyk: permissions: contents: read # for actions/checkout to fetch code security-events: write # for github/codeql-action/upload-sarif to upload SARIF results @@ -274,30 +274,30 @@ runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Set up Snyk CLI to check for security issues - # Snyk can be used to break the build when it detects security issues. - # In this case we want to upload the SAST issues to GitHub Code Scanning - uses: snyk/actions/setup@master - env: - # This is where you will need to introduce the Snyk API token created with your Snyk account - SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} - - - name: Snyk auth - shell: bash - run: snyk config set api=$SNYK_TOKEN - env: - # This is where you will need to introduce the Snyk API token created with your Snyk account - SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} - - name: Snyk Container test + uses: snyk/actions/docker@master continue-on-error: true - shell: bash - run: snyk container test mongo:7.0.0 --sarif --sarif-file-output=snyk-code.sarif env: # This is where you will need to introduce the Snyk API token created with your Snyk account SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + with: + image: postgres:16.1 + args: --sarif-file-output=snyk-code.sarif + + # Replace any "undefined" security severity values with 0. The undefined value is used in the case + # of license-related findings, which do not indicate a security vulnerability. + # See https://github.com/github/codeql-action/issues/2187 for more context. 
+ - name: Post-process sarif output + run: | + sed -i 's/"security-severity": "undefined"/"security-severity": "0"/g' snyk-code.sarif + + # Replace any "null" security severity values with 0. The undefined value is used in the case + # the NVD CVSS Score is not available. + # See https://github.com/Erikvl87/docker-languagetool/issues/90 and https://github.com/github/codeql-action/issues/2187 for more context. + - name: Post-process sarif output for security severities set to "null" + run: | + sed -i 's/"security-severity": "null"/"security-severity": "0"/g' snyk-code.sarif - # Push the Snyk Code results into GitHub Code Scanning tab - name: Upload result to GitHub Code Scanning uses: github/codeql-action/upload-sarif@v3 with: @@ -317,7 +317,7 @@ continue-on-error: true uses: aquasecurity/trivy-action@master with: - image-ref: "traefik:v2.10" + image-ref: "traefik:v2.11.0" format: "cyclonedx" output: "traefik-trivy-results.sbom.json" timeout: "10m0s" @@ -333,7 +333,7 @@ continue-on-error: true uses: aquasecurity/trivy-action@master with: - image-ref: "traefik:v2.10" + image-ref: "traefik:v2.11.0" format: "sarif" output: "trivy-results.sarif" severity: "CRITICAL,HIGH" @@ -352,29 +352,29 @@ actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - name: Set up Snyk CLI to check for security issues - # Snyk can be used to break the build when it detects security issues. - # In this case we want to upload the SAST issues to GitHub Code Scanning - uses: snyk/actions/setup@master - env: - # This is where you will need to introduce the Snyk API token created with your Snyk account - SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} - - - name: Snyk auth - shell: bash - run: snyk config set api=$SNYK_TOKEN - env: - # This is where you will need to introduce the Snyk API token created with your Snyk account - SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} - - name: Snyk Container test + uses: snyk/actions/docker@master continue-on-error: true - shell: bash - run: snyk container test traefik:v2.10 --sarif --sarif-file-output=snyk-code.sarif env: # This is where you will need to introduce the Snyk API token created with your Snyk account SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + with: + image: traefik:v2.11.0 + args: --sarif-file-output=snyk-code.sarif + + # Replace any "undefined" security severity values with 0. The undefined value is used in the case + # of license-related findings, which do not indicate a security vulnerability. + # See https://github.com/github/codeql-action/issues/2187 for more context. 
+ - name: Post-process sarif output for security severities set to "null" + run: | + sed -i 's/"security-severity": "null"/"security-severity": "0"/g' snyk-code.sarif # Push the Snyk Code results into GitHub Code Scanning tab - name: Upload result to GitHub Code Scanning @@ -391,34 +391,31 @@ jobs: steps: - uses: actions/checkout@v4 - #Generate SBOM - - name: Run Trivy vulnerability scanner - continue-on-error: true - uses: aquasecurity/trivy-action@master - with: - image-ref: "chrislusf/seaweedfs:3.55" - format: "cyclonedx" - output: "seaweedfs-trivy-results.sbom.json" - timeout: "10m0s" + - name: Remove unnecessary files + if: matrix.os == 'ubuntu-latest' + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + docker image prune --all --force + docker builder prune --all --force + docker system prune --all --force - #Upload SBOM to GitHub Artifacts - - name: Upload SBOM to GitHub Artifacts - uses: actions/upload-artifact@v4 - with: - name: seaweedfs-trivy-results.sbom.json - path: seaweedfs-trivy-results.sbom.json + # Build the docker image for testing + - name: Build a Docker image + run: DOCKER_BUILDKIT=1 docker build -f packages/grid/seaweedfs/seaweedfs.dockerfile ./packages/grid/seaweedfs -t seaweedfs:${{ github.sha }} --no-cache + # Runs Snyk Container (Container and SCA) analysis and uploads result to Snyk. - #Generate sarif file - name: Run Trivy vulnerability scanner continue-on-error: true uses: aquasecurity/trivy-action@master with: - image-ref: "chrislusf/seaweedfs:3.55" - format: "sarif" + image-ref: "seaweedfs:${{ github.sha }}" + format: "template" + template: "@/contrib/sarif.tpl" output: "trivy-results.sarif" + severity: "CRITICAL,HIGH" timeout: "10m0s" - #Upload sarif file to GitHub Security tab - name: Upload Trivy scan results to GitHub Security tab uses: github/codeql-action/upload-sarif@v3 with: @@ -440,6 +437,21 @@ jobs: # This is where you will need to introduce the Snyk API token created with your Snyk account SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + # free 10GB of space + - name: Remove unnecessary files + if: matrix.os == 'ubuntu-latest' + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + docker image prune --all --force + docker builder prune --all --force + docker system prune --all --force + + # Build the docker image for testing + - name: Build a Docker image + shell: bash + run: DOCKER_BUILDKIT=1 docker build -f packages/grid/seaweedfs/seaweedfs.dockerfile ./packages/grid/seaweedfs -t seaweedfs:${{ github.sha }} --no-cache + - name: Snyk auth shell: bash run: snyk config set api=$SNYK_TOKEN @@ -450,7 +462,7 @@ jobs: - name: Snyk Container test continue-on-error: true shell: bash - run: snyk container test seaweedfs:3.55 --sarif --sarif-file-output=snyk-code.sarif + run: snyk container test seaweedfs:${{ github.sha }} --file=packages/grid/seaweedfs/seaweedfs.dockerfile --sarif --sarif-file-output=snyk-code.sarif env: # This is where you will need to introduce the Snyk API token created with your Snyk account SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} diff --git a/.github/workflows/e2e-tests-notebook.yml b/.github/workflows/e2e-tests-notebook.yml index 4e98450a39c..d6defb126bb 100644 --- a/.github/workflows/e2e-tests-notebook.yml +++ b/.github/workflows/e2e-tests-notebook.yml @@ -7,12 +7,12 @@ on: description: "Syft version to test" required: true type: string - node_url: - description: "Node URL to use" + server_url: + description: "Server URL to use" required: true type: string - node_port: - description: 
"Node port" + server_port: + description: "Server port" required: true type: number exclude_notebooks: @@ -26,12 +26,12 @@ on: description: "Syft version to test" required: true type: string - node_url: - description: "Node URL to use" + server_url: + description: "Server URL to use" required: true type: string - node_port: - description: "Node port" + server_port: + description: "Server port" required: true type: number exclude_notebooks: @@ -40,7 +40,7 @@ on: type: string jobs: - notebook-test-hagrid: + notebook-test-e2e: strategy: max-parallel: 99 matrix: @@ -57,13 +57,11 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Upgrade pip + - name: Install pip dependencies run: | - python -m pip install --upgrade --user pip - - - name: Install Deps - run: | - pip install --upgrade pip uv==0.1.18 tox tox-uv==1.5.1 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version - name: Get pip cache dir id: pip-cache @@ -71,7 +69,7 @@ jobs: run: | echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 with: path: ${{ steps.pip-cache.outputs.dir }} @@ -86,8 +84,8 @@ jobs: - name: Run Notebook tests env: SYFT_VERSION: ${{ inputs.syft_version }} - NODE_URL: ${{ inputs.node_url }} - NODE_PORT: ${{ inputs.node_port }} + SERVER_URL: ${{ inputs.server_url }} + SERVER_PORT: ${{ inputs.server_port }} EXCLUDE_NOTEBOOKS: ${{ inputs.exclude_notebooks }} run: | tox -e e2e.test.notebook diff --git a/.github/workflows/manual-delete-buildjet-cache.yml b/.github/workflows/manual-delete-buildjet-cache.yml deleted file mode 100644 index 97370c02406..00000000000 --- a/.github/workflows/manual-delete-buildjet-cache.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Manually Delete BuildJet Cache -on: - workflow_dispatch: - inputs: - cache_key: - description: "BuildJet Cache Key to Delete" - required: true - type: string -jobs: - manually-delete-buildjet-cache: - strategy: - matrix: - os: [ubuntu-latest] - python-version: ["3.10", "3.11", "3.12"] - - runs-on: ${{ matrix.os }} - steps: - - name: Checkout - uses: actions/checkout@v4 - - uses: buildjet/cache-delete@v1 - with: - cache_key: ${{ inputs.cache_key }} diff --git a/.github/workflows/nightlies.yml b/.github/workflows/nightlies.yml index 6db4d8df53c..9a62e50301d 100644 --- a/.github/workflows/nightlies.yml +++ b/.github/workflows/nightlies.yml @@ -14,10 +14,6 @@ jobs: if: github.repository == 'OpenMined/PySyft' # don't run on forks uses: OpenMined/PySyft/.github/workflows/pr-tests-linting.yml@dev - call-pr-tests-hagrid: - if: github.repository == 'OpenMined/PySyft' # don't run on forks - uses: OpenMined/PySyft/.github/workflows/pr-tests-hagrid.yml@dev - call-pr-tests-syft: if: github.repository == 'OpenMined/PySyft' # don't run on forks uses: OpenMined/PySyft/.github/workflows/pr-tests-syft.yml@dev @@ -27,22 +23,12 @@ jobs: uses: OpenMined/PySyft/.github/workflows/pr-tests-stack.yml@dev secrets: inherit - # call-pr-tests-stack-arm64: - # if: github.repository == 'OpenMined/PySyft' # don't run on forks - # uses: OpenMined/PySyft/.github/workflows/pr-tests-stack-arm64.yml@dev - # secrets: inherit - - call-pr-tests-stack-public: - if: github.repository == 'OpenMined/PySyft' # don't run on forks - uses: OpenMined/PySyft/.github/workflows/pr-tests-stack-public.yml@dev - secrets: inherit - call-container-scan: if: github.repository == 'OpenMined/PySyft' # don't run on forks uses: OpenMined/PySyft/.github/workflows/container-scan.yml@dev secrets: inherit - 
# call-rhel-tests: - # if: github.repository == 'OpenMined/PySyft' # don't run on forks - # uses: OpenMined/PySyft/.github/workflows/rhel-tests.yml@dev - # secrets: inherit + call-conda-install: + if: github.repository == 'OpenMined/PySyft' # don't run on forks + uses: OpenMined/PySyft/.github/workflows/conda-install.yml@dev + secrets: inherit diff --git a/.github/workflows/post-merge-tasks.yml b/.github/workflows/post-merge-tasks.yml index eefed62f8a3..61f8e324c81 100644 --- a/.github/workflows/post-merge-tasks.yml +++ b/.github/workflows/post-merge-tasks.yml @@ -26,7 +26,9 @@ jobs: - name: Install pip packages run: | - python -m pip install --upgrade --user pip tox + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version - name: Check and Bump Protocol Version run: | diff --git a/.github/workflows/pr-tests-enclave.yml b/.github/workflows/pr-tests-enclave.yml deleted file mode 100644 index 48a59f789de..00000000000 --- a/.github/workflows/pr-tests-enclave.yml +++ /dev/null @@ -1,89 +0,0 @@ -name: PR Tests - Enclave - -on: - # Temporarily disabled oblv tests - # workflow_call: - - # pull_request: - # branches: - # - dev - # - main - # - "0.8" - - workflow_dispatch: - inputs: - none: - description: "Run Tests Manually" - required: false - -concurrency: - group: enclave-${{ github.event_name == 'pull_request' && format('{0}-{1}', github.workflow, github.event.pull_request.number) || github.workflow_ref }} - cancel-in-progress: true - -jobs: - pr-tests-enclave-oblv: - strategy: - max-parallel: 4 - matrix: - os: [ubuntu-latest] - python-version: ["3.12"] - - runs-on: ${{ matrix.os }} - steps: - - uses: actions/checkout@v4 - - # free 10GB of space - - name: Remove unnecessary files - if: matrix.os == 'ubuntu-latest' - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - docker image prune --all --force - docker builder prune --all --force - docker system prune --all --force - - - name: Check for file changes - uses: dorny/paths-filter@v3 - id: changes - with: - base: ${{ github.ref }} - token: ${{ github.token }} - filters: .github/file-filters.yml - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - if: steps.changes.outputs.syft == 'true' - with: - python-version: ${{ matrix.python-version }} - - - name: Upgrade pip - if: steps.changes.outputs.syft == 'true' - run: | - pip install --upgrade pip uv==0.1.18 - uv --version - - - name: Get pip cache dir - id: pip-cache - if: steps.changes.outputs.syft == 'true' - shell: bash - run: | - echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - - name: pip cache - uses: actions/cache@v4 - if: steps.changes.outputs.syft == 'true' - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-uv-py${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }} - restore-keys: | - ${{ runner.os }}-uv-py${{ matrix.python-version }}- - - - name: Install Dependencies - if: steps.changes.outputs.syft == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 - - - name: Run Enclave tests - if: steps.changes.outputs.syft == 'true' - run: | - tox -e stack.test.integration.enclave.oblv diff --git a/.github/workflows/pr-tests-frontend.yml b/.github/workflows/pr-tests-frontend.yml index e90a0eb85d5..27af2aa4e1c 100644 --- a/.github/workflows/pr-tests-frontend.yml +++ b/.github/workflows/pr-tests-frontend.yml @@ -25,7 +25,7 @@ jobs: os: [ubuntu-latest] python-version: ["3.12"] - runs-on: ${{ matrix.os }} + runs-on: ubuntu-20.04 # ${{ matrix.os }} steps: - 
uses: actions/checkout@v4 @@ -43,36 +43,32 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Upgrade pip + - name: Install pip dependencies if: steps.changes.outputs.frontend == 'true' run: | - pip install --upgrade pip uv==0.1.18 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - - name: Get pip cache dir + - name: Get uv cache dir id: pip-cache if: steps.changes.outputs.frontend == 'true' shell: bash run: | echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 if: steps.changes.outputs.frontend == 'true' with: path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-uv-py${{ matrix.python-version }}-${{ hashFiles('packages/hagrid/setup.cfg') }} + key: ${{ runner.os }}-uv-py${{ matrix.python-version }}-frontend restore-keys: | ${{ runner.os }}-uv-py${{ matrix.python-version }}- - name: Docker on MacOS if: steps.changes.outputs.frontend == 'true' && matrix.os == 'macos-latest' - uses: crazy-max/ghaction-setup-docker@v3.1.0 - - - name: Install Tox - if: steps.changes.outputs.frontend == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 + uses: crazy-max/ghaction-setup-docker@v3.3.0 - name: Remove existing containers if: steps.changes.outputs.frontend == 'true' @@ -89,107 +85,3 @@ jobs: DOCKER: true run: | tox -e frontend.test.unit - - pr-tests-frontend-e2e: - strategy: - max-parallel: 3 - matrix: - os: [ubuntu-latest] - python-version: ["3.12"] - - runs-on: ${{ matrix.os }} - steps: - - uses: actions/checkout@v4 - - # free 10GB of space - - name: Remove unnecessary files - if: matrix.os == 'ubuntu-latest' - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - docker image prune --all --force - docker builder prune --all --force - docker system prune --all --force - - - name: Check for file changes - uses: dorny/paths-filter@v3 - id: changes - with: - base: ${{ github.ref }} - token: ${{ github.token }} - filters: .github/file-filters.yml - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - if: steps.changes.outputs.stack == 'true' - with: - python-version: ${{ matrix.python-version }} - - - name: Upgrade pip - if: steps.changes.outputs.stack == 'true' - run: | - pip install --upgrade pip uv==0.1.18 - uv --version - - - name: Get pip cache dir - id: pip-cache - if: steps.changes.outputs.stack == 'true' - shell: bash - run: | - echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - - name: pip cache - uses: actions/cache@v4 - if: steps.changes.outputs.stack == 'true' - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-uv-py${{ matrix.python-version }}-${{ hashFiles('packages/hagrid/setup.cfg') }} - restore-keys: | - ${{ runner.os }}-uv-py${{ matrix.python-version }}- - - - name: Install Docker Compose - if: steps.changes.outputs.stack == 'true' && runner.os == 'Linux' - shell: bash - run: | - mkdir -p ~/.docker/cli-plugins - DOCKER_COMPOSE_VERSION=v2.21.0 - curl -sSL https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose - chmod +x ~/.docker/cli-plugins/docker-compose - - - name: Docker on MacOS - if: steps.changes.outputs.stack == 'true' && matrix.os == 'macos-latest' - uses: crazy-max/ghaction-setup-docker@v3.1.0 - - - name: Install Tox - if: steps.changes.outputs.stack == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 - - - name: Remove existing containers - if: 
steps.changes.outputs.stack == 'true' - continue-on-error: true - shell: bash - run: | - docker rm $(docker ps -aq) --force || true - docker volume prune -f || true - docker buildx use default || true - - - uses: pnpm/action-setup@v3 - with: - version: 8 - run_install: false - dest: ~/setup-pnpm - - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v4 - with: - node-version: ${{ matrix.node-version }} - # cache: "pnpm" - # cache-dependency-path: packages/grid/frontend/pnpm-lock.yaml - - - name: Run Frontend Playwright e2e Tests - if: steps.changes.outputs.stack == 'true' - env: - DOCKER: true - run: | - tox -e frontend.test.e2e diff --git a/.github/workflows/pr-tests-hagrid.yml b/.github/workflows/pr-tests-hagrid.yml deleted file mode 100644 index 0b742a4a861..00000000000 --- a/.github/workflows/pr-tests-hagrid.yml +++ /dev/null @@ -1,170 +0,0 @@ -name: PR Tests - HAGrid - -on: - workflow_call: - - pull_request: - branches: - - dev - - main - - "0.8" - -concurrency: - group: hagrid-${{ github.event_name == 'pull_request' && format('{0}-{1}', github.workflow, github.event.pull_request.number) || github.workflow_ref }} - cancel-in-progress: true - -defaults: - run: - working-directory: ./packages/hagrid - -jobs: - pr-tests-hagrid: - strategy: - max-parallel: 99 - matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - python-version: ["3.12"] - include: - - python-version: "3.11" - os: "ubuntu-latest" - - python-version: "3.10" - os: "ubuntu-latest" - - runs-on: ${{ matrix.os }} - steps: - - uses: actions/checkout@v4 - - - name: Check for file changes - uses: dorny/paths-filter@v3 - id: changes - with: - base: ${{ github.ref }} - token: ${{ github.token }} - filters: .github/file-filters.yml - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - if: steps.changes.outputs.hagrid == 'true' - with: - python-version: ${{ matrix.python-version }} - - - name: Upgrade pip - if: steps.changes.outputs.hagrid == 'true' - run: | - python -m pip install --upgrade --user pip - - - name: Get pip cache dir - id: pip-cache - if: steps.changes.outputs.hagrid == 'true' - shell: bash - run: | - echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - - - name: pip cache - uses: actions/cache@v4 - if: steps.changes.outputs.hagrid == 'true' - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-pip-py${{ matrix.python-version }}-${{ hashFiles('packages/hagrid/setup.cfg') }} - restore-keys: | - ${{ runner.os }}-pip-py${{ matrix.python-version }}- - - - name: Install Dependencies - if: steps.changes.outputs.hagrid == 'true' - run: | - pip install --upgrade tox bandit safety setuptools packaging wheel twine pytest - pip install -e . 
- - # 42923 is ansible 7.4.0 (latest 2023.04.14) - - name: Scan for security issues python 3.8+ - if: steps.changes.outputs.hagrid == 'true' - run: | - bandit -r hagrid - safety check -i 42923 -i 54229 -i 54230 -i 54230 -i 54229 -i 62044 -i 65213 - - - name: Run normal tests - if: steps.changes.outputs.hagrid == 'true' - run: | - pytest --durations=50 - - - name: Run hagrid debug - if: steps.changes.outputs.hagrid == 'true' - run: | - hagrid debug - - - name: Run hagrid quickstart - if: steps.changes.outputs.hagrid == 'true' - run: | - hagrid quickstart --reset --quiet --test - - - name: Build Wheel - if: steps.changes.outputs.hagrid == 'true' - run: | - python setup.py bdist_wheel - - - name: Twine Check - if: steps.changes.outputs.hagrid == 'true' - run: | - twine check dist/*.whl - - pr-tests-syft-hagrid-comptability: - strategy: - max-parallel: 99 - matrix: - os: [ubuntu-latest] - python-version: ["3.11"] - syft-version: ["0.8.2", "0.8.2b6", "0.8.3"] - - runs-on: ${{ matrix.os }} - steps: - - uses: actions/checkout@v4 - - - name: Check for file changes - uses: dorny/paths-filter@v3 - id: changes - with: - base: ${{ github.ref }} - token: ${{ github.token }} - filters: .github/file-filters.yml - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - if: steps.changes.outputs.hagrid == 'true' - with: - python-version: ${{ matrix.python-version }} - - - name: Upgrade pip - if: steps.changes.outputs.hagrid == 'true' - run: | - python -m pip install --upgrade --user pip - - - name: Get pip cache dir - id: pip-cache - if: steps.changes.outputs.hagrid == 'true' - shell: bash - run: | - echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - - - name: pip cache - uses: actions/cache@v4 - if: steps.changes.outputs.hagrid == 'true' - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-pip-py${{ matrix.python-version }}-${{ hashFiles('packages/syft/setup.cfg') }} - restore-keys: | - ${{ runner.os }}-pip-py${{ matrix.python-version }}-${{ hashFiles('packages/syft/setup.cfg') }} - - # https://github.com/google/jax/issues/17693 - # pinning ml-dtypes due to jax version==0.4.10 - - name: Install Syft ${{ matrix.syft-version }} - if: steps.changes.outputs.hagrid == 'true' - run: | - pip install ml-dtypes==0.2.0 - pip install syft==${{ matrix.syft-version }} - pip install . 
- - - name: Run Orchestra Command - if: steps.changes.outputs.hagrid == 'true' - run: | - python -c "import syft as sy; domain1 = sy.orchestra.launch(name='test-domain-1', dev_mode=True, reset=True)" - python -c "import syft as sy; domain2 = sy.orchestra.launch(name='test-domain-2',dev_mode=False, reset=True)" diff --git a/.github/workflows/pr-tests-helm-lint.yml b/.github/workflows/pr-tests-helm-lint.yml index 1ef21e5a5f9..3098942acfb 100644 --- a/.github/workflows/pr-tests-helm-lint.yml +++ b/.github/workflows/pr-tests-helm-lint.yml @@ -33,8 +33,8 @@ jobs: brew install kube-linter FairwindsOps/tap/polaris # Install python deps - pip install --upgrade pip - pip install tox + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 kube-linter version polaris version diff --git a/.github/workflows/pr-tests-helm-upgrade.yml b/.github/workflows/pr-tests-helm-upgrade.yml index be8bbc21996..bdaf0c717ea 100644 --- a/.github/workflows/pr-tests-helm-upgrade.yml +++ b/.github/workflows/pr-tests-helm-upgrade.yml @@ -37,8 +37,8 @@ jobs: brew update # Install python deps - pip install --upgrade pip - pip install tox + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 # Install kubernetes brew install helm k3d devspace kubectl diff --git a/.github/workflows/pr-tests-linting.yml b/.github/workflows/pr-tests-linting.yml index e94911aa8d8..53f81d4f939 100644 --- a/.github/workflows/pr-tests-linting.yml +++ b/.github/workflows/pr-tests-linting.yml @@ -29,17 +29,18 @@ jobs: - name: Install pip packages run: | - pip install --upgrade pip uv==0.1.18 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - - name: Get pip cache dir + - name: Get uv cache dir id: pip-cache shell: bash run: | echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT # TODO: change cache key from setup.cfg to something more general - - name: pip cache + - name: Load github cache uses: actions/cache@v4 with: path: ${{ steps.pip-cache.outputs.dir }} @@ -47,12 +48,12 @@ jobs: restore-keys: | ${{ runner.os }}-uv-py${{ matrix.python-version }}- - - name: Install Tox - run: | - pip install --upgrade tox tox-uv==1.5.1 - - - uses: pre-commit/action@v3.0.1 + # - uses: pre-commit/action@v3.0.1 - name: Check Protocol Version run: | tox -e syft.protocol.check + + - name: Generate API Snapshot Diff with latest beta + run: | + tox --recreate -e syft.api.snapshot diff --git a/.github/workflows/pr-tests-stack-arm64.yml b/.github/workflows/pr-tests-stack-arm64.yml deleted file mode 100644 index ddd98acef64..00000000000 --- a/.github/workflows/pr-tests-stack-arm64.yml +++ /dev/null @@ -1,102 +0,0 @@ -name: PR Tests - Stack - Arm64 - -on: - workflow_call: - - workflow_dispatch: - inputs: - none: - description: "Run Version Tests Manually" - required: false - -concurrency: - group: stackarm64-${{ github.event_name == 'pull_request' && format('{0}-{1}', github.workflow, github.event.pull_request.number) || github.workflow_ref }} - cancel-in-progress: true - -jobs: - pr-tests-stack-arm64: - strategy: - max-parallel: 3 - matrix: - os: [ubuntu-latest] - python-version: ["3.12"] - - runs-on: ${{matrix.os}} - - steps: - # - name: set permissions on work folder for self-runners - # run: | - # sudo chown -R $USER:$USER ~/actions-runner/_work/ - - - uses: actions/checkout@v4 - - # free 10GB of space - - name: Remove unnecessary files - if: matrix.os == 'ubuntu-latest' - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - docker 
image prune --all --force - docker builder prune --all --force - docker system prune --all --force - - - name: Check for file changes - uses: dorny/paths-filter@v3 - id: changes - with: - base: ${{ github.ref }} - token: ${{ github.token }} - filters: .github/file-filters.yml - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - - name: Upgrade pip - run: | - pip install --upgrade pip uv==0.1.18 - uv --version - - # - name: Get pip cache dir - # id: pip-cache - # shell: bash - # run: | - # echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - # - name: pip cache - # uses: actions/cache@v3 - # with: - # path: ${{ steps.pip-cache.outputs.dir }} - # key: ${{ runner.os }}-uv-py${{ matrix.python-version }} - # restore-keys: | - # ${{ runner.os }}-uv-py${{ matrix.python-version }} - - - name: Install tox - run: | - pip install --upgrade tox tox-uv==1.5.1 - - - name: Install Docker Compose - if: runner.os == 'Linux' - shell: bash - run: | - mkdir -p ~/.docker/cli-plugins - DOCKER_COMPOSE_VERSION=v2.21.0 - curl -sSL https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose - chmod +x ~/.docker/cli-plugins/docker-compose - - - name: Setup linux/arm64 Docker - run: | - docker rm $(docker ps -aq) --force || true - docker volume prune -f || true - docker buildx create --platform linux/arm64 --name arm64builder || true - docker buildx use arm64builder || true - docker run --privileged --rm tonistiigi/binfmt --install arm64 - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - - - name: Run integration tests - uses: nick-fields/retry@v3 - with: - timeout_seconds: 36000 - max_attempts: 3 - command: EMULATION="true" HAGRID_FLAGS="--tag=local --test --platform linux/arm64" tox -e stack.test.integration diff --git a/.github/workflows/pr-tests-stack-public.yml b/.github/workflows/pr-tests-stack-public.yml deleted file mode 100644 index 8b324469746..00000000000 --- a/.github/workflows/pr-tests-stack-public.yml +++ /dev/null @@ -1,216 +0,0 @@ -name: PR Tests - Stack - Public - -on: - workflow_call: - - workflow_dispatch: - inputs: - none: - description: "Run Stack Integration Tests Manually" - required: false - -concurrency: - group: stackpublic-${{ github.event_name == 'pull_request' && format('{0}-{1}', github.workflow, github.event.pull_request.number) || github.workflow_ref }} - cancel-in-progress: true - -jobs: - pr-tests-stack-public: - strategy: - max-parallel: 99 - matrix: - os: [ubuntu-latest, macos-latest, windows] - python-version: ["3.12"] - pytest-modules: ["frontend network"] - fail-fast: false - - runs-on: ${{matrix.os}} - - steps: - - name: "clean .git/config" - if: matrix.os == 'windows' - continue-on-error: true - shell: bash - run: | - echo "deleting ${GITHUB_WORKSPACE}/.git/config" - rm ${GITHUB_WORKSPACE}/.git/config - - - uses: actions/checkout@v4 - - - name: Check for file changes - uses: dorny/paths-filter@v3 - id: changes - with: - base: ${{ github.ref }} - token: ${{ github.token }} - filters: .github/file-filters.yml - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - if: steps.changes.outputs.stack == 'true' - with: - python-version: ${{ matrix.python-version }} - - - name: Upgrade pip - if: steps.changes.outputs.stack == 'true' - run: | - pip install --upgrade pip uv==0.1.18 - uv --version - - - name: Get pip cache dir - if: steps.changes.outputs.stack 
== 'true' - id: pip-cache - shell: bash - run: | - echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - - name: pip cache - uses: actions/cache@v4 - if: steps.changes.outputs.stack == 'true' - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-uv-py${{ matrix.python-version }} - restore-keys: | - ${{ runner.os }}-uv-py${{ matrix.python-version }} - - - name: Install tox - if: steps.changes.outputs.stack == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 - - - name: Show choco installed packages - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: list --localonly - - - name: Install git - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: install git.install --params "/GitAndUnixToolsOnPath /WindowsTerminal /NoAutoCrlf" -y - - - name: Install cmake - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: install cmake.portable --installargs 'ADD_CMAKE_TO_PATH=System' -y - - - name: Check cmake version - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - run: | - cmake --version - shell: cmd - - - name: Install visualcpp-build-tools - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: install visualstudio2019-workload-vctools -y - - - name: Install Docker Compose - if: steps.changes.outputs.stack == 'true' && runner.os == 'Linux' - shell: bash - run: | - mkdir -p ~/.docker/cli-plugins - DOCKER_COMPOSE_VERSION=v2.21.0 - curl -sSL https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose - chmod +x ~/.docker/cli-plugins/docker-compose - - - name: Docker on MacOS - if: steps.changes.outputs.stack == 'true' && matrix.os == 'macos-latest' - uses: crazy-max/ghaction-setup-docker@v3.1.0 - - - name: Docker Compose on MacOS - if: steps.changes.outputs.stack == 'true' && matrix.os == 'macos-latest' - shell: bash - run: | - brew install docker-compose - mkdir -p ~/.docker/cli-plugins - ln -sfn /usr/local/opt/docker-compose/bin/docker-compose ~/.docker/cli-plugins/docker-compose || true - docker compose version - - - name: Remove existing containers - if: steps.changes.outputs.stack == 'true' - continue-on-error: true - shell: bash - run: | - docker rm $(docker ps -aq) --force || true - docker volume prune -f || true - docker buildx use default || true - - # - name: Run integration tests - # if: steps.changes.outputs.stack == 'true' - # timeout-minutes: 60 - # env: - # HAGRID_ART: false - # PYTEST_MODULES: "${{ matrix.pytest-modules }}" - # HAGRID_FLAGS: "--tag=beta --test --build-src=dev" - # run: | - # tox -e stack.test.integration - - - name: Run integration tests - uses: nick-fields/retry@v3 - if: steps.changes.outputs.stack == 'true' - env: - HAGRID_ART: false - PYTEST_MODULES: "${{ matrix.pytest-modules }}" - HAGRID_FLAGS: "--tag=beta --test --build-src=dev" - with: - timeout_seconds: 1800 - max_attempts: 3 - command: tox -e stack.test.integration - continue-on-error: true - - - name: Reboot node - if: matrix.os == 'windows' && failure() - run: | - shutdown /r /t 1 - - - name: Run log collector - timeout-minutes: 5 - if: failure() - shell: bash - run: | - python ./scripts/container_log_collector.py - - - name: Get job name and url - id: job_name - if: failure() - shell: bash - 
run: | - echo "job_name=$(echo ${{ github.job }})" >> $GITHUB_OUTPUT - echo "url=$(echo ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_OUTPUT - - - name: Get current date - id: date - if: failure() - shell: bash - run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - - - name: Upload logs to GitHub - uses: actions/upload-artifact@master - if: failure() - with: - name: ${{ matrix.os }}-${{ steps.job_name.outputs.job_name }}-${{ matrix.pytest-modules }}-logs-${{ steps.date.outputs.date }} - path: ./logs/${{ steps.job_name.outputs.job_name}}/ - - - name: Get pull request url - id: pull_request - if: failure() - shell: bash - run: | - echo "url=$(echo ${{ github.event.pull_request.html_url }})" >> $GITHUB_OUTPUT - - - name: Job Report Status - if: github.repository == 'OpenMined/PySyft' && failure() - uses: ravsamhq/notify-slack-action@v2 - with: - status: ${{ job.status }} - notify_when: "failure" - notification_title: " {workflow} has {status_message}" - message_format: "${{matrix.os}} {emoji} *{job}* {status_message} in {run_url}" - footer: "Find the PR here ${{ steps.pull_request.outputs.url }}" - mention_users: "U01LNCACY03,U8KUAD396,UNMQ2SJSW,U01SAESBJA0" - mention_users_when: "failure,warnings" - env: - SLACK_WEBHOOK_URL: ${{ secrets.ACTION_MONITORING_SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/pr-tests-stack.yml b/.github/workflows/pr-tests-stack.yml index c36b3ee9e56..9bf7d80b483 100644 --- a/.github/workflows/pr-tests-stack.yml +++ b/.github/workflows/pr-tests-stack.yml @@ -22,44 +22,19 @@ concurrency: cancel-in-progress: true jobs: - pr-tests-stack: + pr-syft-image-test: strategy: max-parallel: 99 matrix: - # os: [ubuntu-latest, macos-latest, windows-latest, windows] - # os: [om-ci-16vcpu-ubuntu2204] os: [ubuntu-latest] python-version: ["3.12"] - pytest-modules: ["frontend network container_workload local_node"] fail-fast: false runs-on: ${{matrix.os}} steps: - # - name: Permission to home directory - # run: | - # sudo chown -R $USER:$USER $HOME - - # - name: "clean .git/config" - # if: matrix.os == 'windows' - # continue-on-error: true - # shell: bash - # run: | - # echo "deleting ${GITHUB_WORKSPACE}/.git/config" - # rm ${GITHUB_WORKSPACE}/.git/config - - uses: actions/checkout@v4 - # free 10GB of space - - name: Remove unnecessary files - if: matrix.os == 'ubuntu-latest' - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - docker image prune --all --force - docker builder prune --all --force - docker system prune --all --force - - name: Check for file changes uses: dorny/paths-filter@v3 id: changes @@ -74,177 +49,28 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Upgrade pip + - name: Install pip packages if: steps.changes.outputs.stack == 'true' run: | - pip install --upgrade pip uv==0.1.18 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - - name: Get pip cache dir - if: steps.changes.outputs.stack == 'true' - id: pip-cache - shell: bash - run: | - echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - - name: pip cache - uses: actions/cache@v4 - if: steps.changes.outputs.stack == 'true' - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-uv-py${{ matrix.python-version }} - restore-keys: | - ${{ runner.os }}-uv-py${{ matrix.python-version }} - - - name: Install tox - if: steps.changes.outputs.stack == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 - - - name: Show choco installed packages - 
if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: list --localonly - - - name: Install git - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: install git.install --params "/GitAndUnixToolsOnPath /WindowsTerminal /NoAutoCrlf" -y - - - name: Install cmake - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: install cmake.portable --installargs 'ADD_CMAKE_TO_PATH=System' -y - - - name: Check cmake version - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - run: | - cmake --version - shell: cmd - - - name: Install visualcpp-build-tools - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: install visualstudio2019-workload-vctools -y - - - name: Install Docker Compose - if: steps.changes.outputs.stack == 'true' && runner.os == 'Linux' - shell: bash - run: | - mkdir -p ~/.docker/cli-plugins - DOCKER_COMPOSE_VERSION=v2.21.0 - curl -sSL https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose - chmod +x ~/.docker/cli-plugins/docker-compose - - - name: Docker on MacOS - if: steps.changes.outputs.stack == 'true' && matrix.os == 'macos-latest' - uses: crazy-max/ghaction-setup-docker@v3.1.0 - - - name: Docker Compose on MacOS - if: steps.changes.outputs.stack == 'true' && matrix.os == 'macos-latest' - shell: bash - run: | - brew install docker-compose - mkdir -p ~/.docker/cli-plugins - ln -sfn /usr/local/opt/docker-compose/bin/docker-compose ~/.docker/cli-plugins/docker-compose || true - docker compose version - - - name: Remove existing containers - if: steps.changes.outputs.stack == 'true' - continue-on-error: true - shell: bash - run: | - docker rm $(docker ps -aq) --force || true - docker volume prune -f || true - docker buildx use default || true - - - name: Run integration tests - if: steps.changes.outputs.stack == 'true' - timeout-minutes: 60 - env: - HAGRID_ART: false - PYTEST_MODULES: "${{ matrix.pytest-modules }}" - AZURE_BLOB_STORAGE_KEY: "${{ secrets.AZURE_BLOB_STORAGE_KEY }}" - run: | - tox -e stack.test.integration - - #Run log collector python script - - name: Run log collector - timeout-minutes: 5 - if: failure() - shell: bash - run: | - python ./scripts/container_log_collector.py - - # Get Job name and url - - name: Get job name and url - id: job_name - if: failure() - shell: bash - run: | - echo "job_name=$(echo ${{ github.job }})" >> $GITHUB_OUTPUT - echo "url=$(echo ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_OUTPUT - - - name: Get current date - id: date - if: failure() - shell: bash - run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - - - name: Upload logs to GitHub - uses: actions/upload-artifact@master - if: failure() - with: - name: ${{ matrix.os }}-${{ steps.job_name.outputs.job_name }}-${{ matrix.pytest-modules }}-logs-${{ steps.date.outputs.date }} - path: ./logs/${{ steps.job_name.outputs.job_name}}/ - - - name: Mandatory Container cleanup - if: steps.changes.outputs.stack == 'true' - continue-on-error: true - shell: bash - run: | - docker rm `docker ps -aq` --force || true - docker volume prune -f || true - - # Get Job name and url - - name: Reboot node - if: matrix.os == 'windows' && failure() - run: | - shutdown 
/r /t 1 - - #Get Pull request url - - name: Get pull request url - id: pull_request - if: failure() - shell: bash - run: | - echo "url=$(echo ${{ github.event.pull_request.html_url }})" >> $GITHUB_OUTPUT - - - name: Job Report Status - # cant access secrets on forks - if: github.repository == 'OpenMined/PySyft' && failure() - uses: ravsamhq/notify-slack-action@v2 - with: - status: ${{ job.status }} - notify_when: "failure" - notification_title: " {workflow} has {status_message}" - message_format: "${{matrix.os}} {emoji} *{job}* {status_message} in {run_url}" - footer: "Find the PR here ${{ steps.pull_request.outputs.url }}" - mention_users: "U01LNCACY03,U8KUAD396,UNMQ2SJSW,U01SAESBJA0" - mention_users_when: "failure,warnings" - env: - SLACK_WEBHOOK_URL: ${{ secrets.ACTION_MONITORING_SLACK_WEBHOOK_URL }} + # - name: Run syft backend base image building test + # if: steps.changes.outputs.stack == 'true' + # timeout-minutes: 60 + # run: | + # tox -e backend.test.basecpu + # run: | + # echo "Skipping pr image test" - pr-syft-image-test: + pr-tests-syft-integration: strategy: max-parallel: 99 matrix: os: [ubuntu-latest] python-version: ["3.12"] + pytest-modules: ["local_server"] fail-fast: false runs-on: ${{matrix.os}} @@ -266,20 +92,21 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Upgrade pip + - name: Install pip dependencies if: steps.changes.outputs.stack == 'true' run: | - pip install --upgrade pip uv==0.1.18 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - - name: Get pip cache dir + - name: Get uv cache dir if: steps.changes.outputs.stack == 'true' id: pip-cache shell: bash run: | echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 if: steps.changes.outputs.stack == 'true' with: @@ -288,25 +115,23 @@ jobs: restore-keys: | ${{ runner.os }}-uv-py${{ matrix.python-version }} - - name: Install tox - if: steps.changes.outputs.stack == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 - - - name: Run syft backend base image building test + - name: Run Syft Integration Tests if: steps.changes.outputs.stack == 'true' timeout-minutes: 60 + env: + PYTEST_MODULES: "${{ matrix.pytest-modules }}" + GITHUB_CI: true + shell: bash run: | - tox -e backend.test.basecpu + tox -e syft.test.integration - pr-tests-notebook-stack: + pr-tests-integration-k8s: strategy: max-parallel: 99 matrix: - # os: [ubuntu-latest, macos-latest, windows-latest, windows] os: [ubuntu-latest] python-version: ["3.12"] - notebook-paths: ["api/0.8"] + pytest-modules: ["frontend network container_workload"] fail-fast: false runs-on: ${{matrix.os}} @@ -315,26 +140,7 @@ jobs: - name: Permission to home directory run: | sudo chown -R $USER:$USER $HOME - - name: "clean .git/config" - if: matrix.os == 'windows' - continue-on-error: true - shell: bash - run: | - echo "deleting ${GITHUB_WORKSPACE}/.git/config" - rm ${GITHUB_WORKSPACE}/.git/config - - uses: actions/checkout@v4 - - # free 10GB of space - - name: Remove unnecessary files - if: matrix.os == 'ubuntu-latest' - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - docker image prune --all --force - docker builder prune --all --force - docker system prune --all --force - - name: Check for file changes uses: dorny/paths-filter@v3 id: changes @@ -349,20 +155,41 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Upgrade pip + - name: Add K3d Registry + run: | + sudo python 
./scripts/patch_hosts.py --add-k3d-registry + + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + tool-cache: true + large-packages: false + + # free 10GB of space + - name: Remove unnecessary files + if: matrix.os == 'ubuntu-latest' + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + docker image prune --all --force + docker builder prune --all --force + docker system prune --all --force + + - name: Install pip dependencies if: steps.changes.outputs.stack == 'true' run: | - pip install --upgrade pip uv==0.1.18 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - - name: Get pip cache dir + - name: Get uv cache dir if: steps.changes.outputs.stack == 'true' id: pip-cache shell: bash run: | echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 if: steps.changes.outputs.stack == 'true' with: @@ -371,158 +198,89 @@ jobs: restore-keys: | ${{ runner.os }}-uv-py${{ matrix.python-version }} - - name: Install tox + - name: Install kubectl if: steps.changes.outputs.stack == 'true' run: | - pip install --upgrade tox tox-uv==1.5.1 - - - name: Show choco installed packages - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: list --localonly - - - name: Install git - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: install git.install --params "/GitAndUnixToolsOnPath /WindowsTerminal /NoAutoCrlf" -y - - - name: Install cmake - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: install cmake.portable --installargs 'ADD_CMAKE_TO_PATH=System' -y - - - name: Check cmake version - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - run: | - cmake --version - shell: cmd - - - name: Install visualcpp-build-tools - if: steps.changes.outputs.stack == 'true' && matrix.os == 'windows' - uses: crazy-max/ghaction-chocolatey@v3 - with: - args: install visualstudio2019-workload-vctools -y - - - name: Install Docker Compose - if: steps.changes.outputs.stack == 'true' && runner.os == 'Linux' - shell: bash - run: | - mkdir -p ~/.docker/cli-plugins - DOCKER_COMPOSE_VERSION=v2.21.0 - curl -sSL https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose - chmod +x ~/.docker/cli-plugins/docker-compose - - - name: Docker on MacOS - if: steps.changes.outputs.stack == 'true' && matrix.os == 'macos-latest' - uses: crazy-max/ghaction-setup-docker@v3.1.0 - - - name: Docker Compose on MacOS - if: steps.changes.outputs.stack == 'true' && matrix.os == 'macos-latest' - shell: bash - run: | - brew install docker-compose - mkdir -p ~/.docker/cli-plugins - ln -sfn /usr/local/opt/docker-compose/bin/docker-compose ~/.docker/cli-plugins/docker-compose || true - docker compose version + # cleanup apt version + sudo apt remove kubectl || true + # install kubectl 1.27 + curl -LO https://dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubectl + chmod +x kubectl + sudo install kubectl /usr/local/bin; - - name: Remove existing containers + - name: Install helm if: steps.changes.outputs.stack == 'true' - continue-on-error: true - shell: bash run: | - docker rm $(docker ps -aq) --force || true - docker volume prune -f || true - docker buildx use default || true 
+ # install helm + curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + chmod 700 get_helm.sh + ./get_helm.sh - - name: Run Notebook integration tests + - name: Run K8s & Helm integration tests if: steps.changes.outputs.stack == 'true' timeout-minutes: 60 env: - ORCHESTRA_DEPLOYMENT_TYPE: "container_stack" - TEST_NOTEBOOK_PATHS: "${{ matrix.notebook-paths }}" PYTEST_MODULES: "${{ matrix.pytest-modules }}" - run: | - tox -e stack.test.notebook - - #Run log collector python script - - name: Run log collector - timeout-minutes: 5 - if: failure() + GITHUB_CI: true + AZURE_BLOB_STORAGE_KEY: "${{ secrets.AZURE_BLOB_STORAGE_KEY }}" shell: bash run: | - python ./scripts/container_log_collector.py + K3D_VERSION=v5.6.3 + DEVSPACE_VERSION=v6.3.12 + # install k3d + wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 + mv k3d-linux-amd64 k3d + chmod +x k3d + export PATH=`pwd`:$PATH + k3d version + curl -sSL https://github.com/loft-sh/devspace/releases/download/${DEVSPACE_VERSION}/devspace-linux-amd64 -o ./devspace + chmod +x devspace + devspace version - # Get Job name and url - - name: Get job name and url - id: job_name - if: failure() - shell: bash - run: | - echo "job_name=$(echo ${{ github.job }})" >> $GITHUB_OUTPUT - echo "url=$(echo ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_OUTPUT + tox -e stack.test.integration.k8s + tox -e syft.build.helm + tox -e syft.package.helm + # tox -e syft.test.helm - - name: Get current date + - name: Get current timestamp id: date if: failure() shell: bash - run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - - - name: Upload logs to GitHub - uses: actions/upload-artifact@master - if: failure() - with: - name: ${{ matrix.os }}-${{ steps.job_name.outputs.job_name }}-${{ matrix.pytest-modules }}-logs-${{ steps.date.outputs.date }} - path: ./logs/${{ steps.job_name.outputs.job_name}}/ + run: echo "date=$(date +%s)" >> $GITHUB_OUTPUT - - name: Mandatory Container cleanup - if: steps.changes.outputs.stack == 'true' - continue-on-error: true + - name: Collect logs from k3d + if: steps.changes.outputs.stack == 'true' && failure() shell: bash run: | - docker rm `docker ps -aq` --force || true - docker volume prune -f || true + mkdir -p ./k8s-logs + kubectl describe all -A --context k3d-test-gateway-1 --namespace syft > ./k8s-logs/test-gateway-1-desc-${{ steps.date.outputs.date }}.txt + kubectl describe all -A --context k3d-test-datasite-1 --namespace syft > ./k8s-logs/test-datasite-1-desc-${{ steps.date.outputs.date }}.txt + kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-gateway-1 --namespace syft > ./k8s-logs/test-gateway-1-logs-${{ steps.date.outputs.date }}.txt + kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-datasite-1 --namespace syft > ./k8s-logs/test-datasite-1-logs-${{ steps.date.outputs.date }}.txt + ls -la ./k8s-logs - # Get Job name and url - - name: Reboot node - if: matrix.os == 'windows' && failure() - run: | - shutdown /r /t 1 + - name: Upload logs to GitHub + uses: actions/upload-artifact@master + if: steps.changes.outputs.stack == 'true' && failure() + with: + name: k8s-logs-integration-${{ matrix.os }}-${{ steps.date.outputs.date }} + path: ./k8s-logs/ - #Get Pull request url - - name: Get pull request url - id: pull_request - if: failure() + - name: Cleanup k3d + if: steps.changes.outputs.stack == 'true' && failure() shell: bash run: | - echo "url=$(echo ${{ 
github.event.pull_request.html_url }})" >> $GITHUB_OUTPUT - - - name: Job Report Status - # cant access secrets on forks - if: github.repository == 'OpenMined/PySyft' && failure() - uses: ravsamhq/notify-slack-action@v2 - with: - status: ${{ job.status }} - notify_when: "failure" - notification_title: " {workflow} has {status_message}" - message_format: "${{matrix.os}} {emoji} *{job}* {status_message} in {run_url}" - footer: "Find the PR here ${{ steps.pull_request.outputs.url }}" - mention_users: "U01LNCACY03,U8KUAD396,UNMQ2SJSW,U01SAESBJA0" - mention_users_when: "failure,warnings" - env: - SLACK_WEBHOOK_URL: ${{ secrets.ACTION_MONITORING_SLACK_WEBHOOK_URL }} + export PATH=`pwd`:$PATH + k3d cluster delete test-gateway-1 || true + k3d cluster delete test-datasite-1 || true + k3d registry delete k3d-registry.localhost || true - pr-tests-stack-k8s: + pr-tests-notebook-k8s: strategy: max-parallel: 99 matrix: - # os: [ubuntu-latest, macos-latest, windows-latest, windows] - # os: [om-ci-16vcpu-ubuntu2204] os: [ubuntu-latest] python-version: ["3.12"] - pytest-modules: ["frontend network"] fail-fast: false runs-on: ${{matrix.os}} @@ -566,20 +324,21 @@ jobs: docker builder prune --all --force docker system prune --all --force - - name: Upgrade pip + - name: Install pip dependencies if: steps.changes.outputs.stack == 'true' run: | - pip install --upgrade pip uv==0.1.18 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - - name: Get pip cache dir + - name: Get uv cache dir if: steps.changes.outputs.stack == 'true' id: pip-cache shell: bash run: | echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 if: steps.changes.outputs.stack == 'true' with: @@ -588,11 +347,6 @@ jobs: restore-keys: | ${{ runner.os }}-uv-py${{ matrix.python-version }} - - name: Install tox - if: steps.changes.outputs.stack == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 - - name: Install kubectl if: steps.changes.outputs.stack == 'true' run: | @@ -603,15 +357,6 @@ jobs: chmod +x kubectl sudo install kubectl /usr/local/bin; - - name: Install k9s - if: steps.changes.outputs.stack == 'true' - run: | - # install k9s - wget https://github.com/derailed/k9s/releases/download/v0.27.4/k9s_Linux_amd64.tar.gz - tar -xvf k9s_Linux_amd64.tar.gz - chmod +x k9s - sudo install k9s /usr/local/bin; - - name: Install helm if: steps.changes.outputs.stack == 'true' run: | @@ -620,17 +365,15 @@ jobs: chmod 700 get_helm.sh ./get_helm.sh - - name: Run K8s & Helm integration tests + - name: Run Notebook and Scenario Notebook Tests if: steps.changes.outputs.stack == 'true' timeout-minutes: 60 env: - HAGRID_ART: false - PYTEST_MODULES: "${{ matrix.pytest-modules }}" GITHUB_CI: true shell: bash run: | - K3D_VERSION=v5.6.0 - DEVSPACE_VERSION=v6.3.10 + K3D_VERSION=v5.6.3 + DEVSPACE_VERSION=v6.3.12 # install k3d wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 mv k3d-linux-amd64 k3d @@ -640,10 +383,7 @@ jobs: curl -sSL https://github.com/loft-sh/devspace/releases/download/${DEVSPACE_VERSION}/devspace-linux-amd64 -o ./devspace chmod +x devspace devspace version - tox -e stack.test.integration.k8s - tox -e syft.build.helm - tox -e syft.package.helm - # tox -e syft.test.helm + tox -e stack.test.notebook.k8s - name: Get current timestamp id: date @@ -656,17 +396,17 @@ jobs: shell: bash run: | mkdir -p ./k8s-logs - kubectl describe all -A --context k3d-testgateway1 --namespace syft > 
./k8s-logs/testgateway1-desc-${{ steps.date.outputs.date }}.txt - kubectl describe all -A --context k3d-testdomain1 --namespace syft > ./k8s-logs/testdomain1-desc-${{ steps.date.outputs.date }}.txt - kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-testgateway1 --namespace syft > ./k8s-logs/testgateway1-logs-${{ steps.date.outputs.date }}.txt - kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-testdomain1 --namespace syft > ./k8s-logs/testdomain1-logs-${{ steps.date.outputs.date }}.txt + kubectl describe all -A --context k3d-test-gateway-1 --namespace syft > ./k8s-logs/test-gateway-1-desc-${{ steps.date.outputs.date }}.txt + kubectl describe all -A --context k3d-test-datasite-1 --namespace syft > ./k8s-logs/test-datasite-1-desc-${{ steps.date.outputs.date }}.txt + kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-gateway-1 --namespace syft > ./k8s-logs/test-gateway-1-logs-${{ steps.date.outputs.date }}.txt + kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-datasite-1 --namespace syft > ./k8s-logs/test-datasite-1-logs-${{ steps.date.outputs.date }}.txt ls -la ./k8s-logs - name: Upload logs to GitHub uses: actions/upload-artifact@master if: steps.changes.outputs.stack == 'true' && failure() with: - name: k8s-logs-${{ matrix.os }}-${{ steps.date.outputs.date }} + name: k8s-logs-notebook-${{ matrix.os }}-${{ steps.date.outputs.date }} path: ./k8s-logs/ - name: Cleanup k3d @@ -674,6 +414,827 @@ jobs: shell: bash run: | export PATH=`pwd`:$PATH - k3d cluster delete testgateway1 || true - k3d cluster delete testdomain1 || true + k3d cluster delete test-gateway-1 || true + k3d cluster delete test-datasite-1 || true k3d registry delete k3d-registry.localhost || true + + # pr-tests-notebook-scenario-k8s: + # strategy: + # max-parallel: 99 + # matrix: + # os: [ubuntu-latest] + # python-version: ["3.12"] + # fail-fast: false + + # runs-on: ${{matrix.os}} + + # steps: + # - name: Permission to home directory + # run: | + # sudo chown -R $USER:$USER $HOME + # - uses: actions/checkout@v4 + # - name: Check for file changes + # uses: dorny/paths-filter@v3 + # id: changes + # with: + # base: ${{ github.ref }} + # token: ${{ github.token }} + # filters: .github/file-filters.yml + + # - name: Set up Python ${{ matrix.python-version }} + # uses: actions/setup-python@v5 + # if: steps.changes.outputs.stack == 'true' + # with: + # python-version: ${{ matrix.python-version }} + + # - name: Add K3d Registry + # run: | + # sudo python ./scripts/patch_hosts.py --add-k3d-registry + + # # - name: Free Disk Space (Ubuntu) + # # uses: jlumbroso/free-disk-space@main + # # with: + # # tool-cache: true + # # large-packages: false + + # # free 10GB of space + # - name: Remove unnecessary files + # if: matrix.os == 'ubuntu-latest' + # run: | + # sudo rm -rf /usr/share/dotnet + # sudo rm -rf "$AGENT_TOOLSDIRECTORY" + # docker image prune --all --force + # docker builder prune --all --force + # docker system prune --all --force + + # - name: Install pip dependencies + # if: steps.changes.outputs.stack == 'true' + # run: | + # python -m pip install --upgrade pip + # pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + # uv --version + + # - name: Get uv cache dir + # if: steps.changes.outputs.stack == 'true' + # id: pip-cache + # shell: bash + # run: | + # echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + # - name: Load github cache + # uses: actions/cache@v4 + # if: steps.changes.outputs.stack == 'true' + # with: + # 
path: ${{ steps.pip-cache.outputs.dir }} + # key: ${{ runner.os }}-uv-py${{ matrix.python-version }} + # restore-keys: | + # ${{ runner.os }}-uv-py${{ matrix.python-version }} + + # - name: Install kubectl + # if: steps.changes.outputs.stack == 'true' + # run: | + # # cleanup apt version + # sudo apt remove kubectl || true + # # install kubectl 1.27 + # curl -LO https://dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubectl + # chmod +x kubectl + # sudo install kubectl /usr/local/bin; + + # - name: Install helm + # if: steps.changes.outputs.stack == 'true' + # run: | + # # install helm + # curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + # chmod 700 get_helm.sh + # ./get_helm.sh + + # - name: Run Notebooks Scenario Tests + # if: steps.changes.outputs.stack == 'true' + # timeout-minutes: 60 + # env: + # GITHUB_CI: true + # TOX_PYTHON: python${{ matrix.python-version }} + # shell: bash + # run: | + # K3D_VERSION=v5.6.3 + # DEVSPACE_VERSION=v6.3.12 + # # install k3d + # wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 + # mv k3d-linux-amd64 k3d + # chmod +x k3d + # export PATH=`pwd`:$PATH + # k3d version + # curl -sSL https://github.com/loft-sh/devspace/releases/download/${DEVSPACE_VERSION}/devspace-linux-amd64 -o ./devspace + # chmod +x devspace + # devspace version + # export PATH="/usr/share/miniconda/bin:$PATH" + # tox -e stack.test.notebook.scenario.k8s + + # - name: Get current timestamp + # id: date + # if: failure() + # shell: bash + # run: echo "date=$(date +%s)" >> $GITHUB_OUTPUT + + # - name: Collect logs from k3d + # if: steps.changes.outputs.stack == 'true' && failure() + # shell: bash + # run: | + # mkdir -p ./k8s-logs + # kubectl describe all -A --context k3d-bigquery-high --namespace syft > ./k8s-logs/bigquery-high-desc-${{ steps.date.outputs.date }}.txt + # kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-bigquery-high --namespace syft > ./k8s-logs/bigquery-high-logs-${{ steps.date.outputs.date }}.txt + # ls -la ./k8s-logs + + # - name: Upload logs to GitHub + # uses: actions/upload-artifact@master + # if: steps.changes.outputs.stack == 'true' && failure() + # with: + # name: k8s-logs-notebook-${{ matrix.os }}-${{ steps.date.outputs.date }} + # path: ./k8s-logs/ + + # - name: Cleanup k3d + # if: steps.changes.outputs.stack == 'true' && failure() + # shell: bash + # run: | + # export PATH=`pwd`:$PATH + # k3d cluster delete test-datasite-1 || true + # k3d registry delete k3d-registry.localhost || true + + pr-tests-migrations: + strategy: + max-parallel: 99 + matrix: + os: [ubuntu-latest] + python-version: ["3.12"] + + runs-on: ${{ matrix.os }} + steps: + - name: "clean .git/config" + if: matrix.os == 'windows-latest' + continue-on-error: true + shell: bash + run: | + echo "deleting ${GITHUB_WORKSPACE}/.git/config" + rm ${GITHUB_WORKSPACE}/.git/config + + - uses: actions/checkout@v4 + + - name: Check for file changes + uses: dorny/paths-filter@v3 + id: changes + with: + base: ${{ github.ref }} + token: ${{ github.token }} + filters: .github/file-filters.yml + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + if: steps.changes.outputs.syft == 'true' + with: + python-version: ${{ matrix.python-version }} + + - name: Install pip packages + if: steps.changes.outputs.syft == 'true' + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version + + - name: Get uv cache dir + id: pip-cache + if: 
steps.changes.outputs.syft == 'true' + shell: bash + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + - name: Load github cache + uses: actions/cache@v4 + if: steps.changes.outputs.syft == 'true' + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-uv-py${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-uv-py${{ matrix.python-version }}- + + - name: Run migration tests + if: steps.changes.outputs.syft == 'true' + run: | + tox -e migration.test + pr-tests-scenarios-migrations: + strategy: + max-parallel: 99 + matrix: + os: [ubuntu-latest] + python-version: ["3.12"] + + runs-on: ${{ matrix.os }} + steps: + - name: "clean .git/config" + if: matrix.os == 'windows-latest' + continue-on-error: true + shell: bash + run: | + echo "deleting ${GITHUB_WORKSPACE}/.git/config" + rm ${GITHUB_WORKSPACE}/.git/config + + - uses: actions/checkout@v4 + + - name: Check for file changes + uses: dorny/paths-filter@v3 + id: changes + with: + base: ${{ github.ref }} + token: ${{ github.token }} + filters: .github/file-filters.yml + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + if: steps.changes.outputs.syft == 'true' + with: + python-version: ${{ matrix.python-version }} + + - name: Install pip packages + if: steps.changes.outputs.syft == 'true' + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version + + - name: Get uv cache dir + id: pip-cache + if: steps.changes.outputs.syft == 'true' + shell: bash + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + - name: Load github cache + uses: actions/cache@v4 + if: steps.changes.outputs.syft == 'true' + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-uv-py${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-uv-py${{ matrix.python-version }}- + + - name: Run migration tests + if: steps.changes.outputs.syft == 'true' + run: | + tox -e migration.scenarios.test + + pr-tests-migrations-k8s: + strategy: + max-parallel: 99 + matrix: + os: [ubuntu-latest] + python-version: ["3.12"] + fail-fast: false + + runs-on: ${{matrix.os}} + + steps: + - name: Permission to home directory + run: | + sudo chown -R $USER:$USER $HOME + - uses: actions/checkout@v4 + - name: Check for file changes + uses: dorny/paths-filter@v3 + id: changes + with: + base: ${{ github.ref }} + token: ${{ github.token }} + filters: .github/file-filters.yml + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + if: steps.changes.outputs.stack == 'true' + with: + python-version: ${{ matrix.python-version }} + + - name: Add K3d Registry + run: | + sudo python ./scripts/patch_hosts.py --add-k3d-registry + + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + tool-cache: true + large-packages: false + + # free 10GB of space + - name: Remove unnecessary files + if: matrix.os == 'ubuntu-latest' + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + docker image prune --all --force + docker builder prune --all --force + docker system prune --all --force + + - name: Install pip dependencies + if: steps.changes.outputs.stack == 'true' + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version + + - name: Get uv cache dir + if: steps.changes.outputs.stack == 'true' + id: pip-cache + shell: bash + run: | + echo "dir=$(uv cache dir)" >> 
$GITHUB_OUTPUT + + - name: Load github cache + uses: actions/cache@v4 + if: steps.changes.outputs.stack == 'true' + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-uv-py${{ matrix.python-version }} + restore-keys: | + ${{ runner.os }}-uv-py${{ matrix.python-version }} + + - name: Install kubectl + if: steps.changes.outputs.stack == 'true' + run: | + # cleanup apt version + sudo apt remove kubectl || true + # install kubectl 1.27 + curl -LO https://dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubectl + chmod +x kubectl + sudo install kubectl /usr/local/bin; + + - name: Install helm + if: steps.changes.outputs.stack == 'true' + run: | + # install helm + curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + chmod 700 get_helm.sh + ./get_helm.sh + + - name: Run Migrations Tests + if: steps.changes.outputs.stack == 'true' + timeout-minutes: 60 + env: + GITHUB_CI: true + shell: bash + run: | + K3D_VERSION=v5.6.3 + DEVSPACE_VERSION=v6.3.12 + # install k3d + wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 + mv k3d-linux-amd64 k3d + chmod +x k3d + export PATH=`pwd`:$PATH + k3d version + curl -sSL https://github.com/loft-sh/devspace/releases/download/${DEVSPACE_VERSION}/devspace-linux-amd64 -o ./devspace + chmod +x devspace + devspace version + tox -e migration.k8s.test + + - name: Get current timestamp + id: date + if: failure() + shell: bash + run: echo "date=$(date +%s)" >> $GITHUB_OUTPUT + + - name: Collect logs from k3d + if: steps.changes.outputs.stack == 'true' && failure() + shell: bash + run: | + mkdir -p ./k8s-logs + # kubectl describe all -A --context k3d-syft-migration-source --namespace syft > ./k8s-logs/syft-migration-source-desc-${{ steps.date.outputs.date }}.txt + # kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-syft-migration-source --namespace syft > ./k8s-logs/syft-migration-source-logs-${{ steps.date.outputs.date }}.txt + kubectl describe all -A --context k3d-test-datasite-1 --namespace syft > ./k8s-logs/test-datasite-1-desc-${{ steps.date.outputs.date }}.txt + kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-datasite-1 --namespace syft > ./k8s-logs/test-datasite-1-logs-${{ steps.date.outputs.date }}.txt + ls -la ./k8s-logs + + - name: Upload logs to GitHub + uses: actions/upload-artifact@master + if: steps.changes.outputs.stack == 'true' && failure() + with: + name: k8s-logs-notebook-${{ matrix.os }}-${{ steps.date.outputs.date }} + path: ./k8s-logs/ + + - name: Cleanup k3d + if: steps.changes.outputs.stack == 'true' && failure() + shell: bash + run: | + export PATH=`pwd`:$PATH + k3d cluster delete syft-migration-source || true + k3d cluster delete test-datasite-1 || true + k3d registry delete k3d-registry.localhost || true + + pr-tests-notebook-scenario-k8s-sync: + strategy: + max-parallel: 99 + matrix: + os: [ubuntu-latest] + python-version: ["3.12"] + fail-fast: false + + runs-on: ${{matrix.os}} + + steps: + - name: Permission to home directory + run: | + sudo chown -R $USER:$USER $HOME + - uses: actions/checkout@v4 + - name: Check for file changes + uses: dorny/paths-filter@v3 + id: changes + with: + base: ${{ github.ref }} + token: ${{ github.token }} + filters: .github/file-filters.yml + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + if: steps.changes.outputs.stack == 'true' + with: + python-version: ${{ matrix.python-version }} + + - name: Add K3d Registry + run: | + sudo 
python ./scripts/patch_hosts.py --add-k3d-registry + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + tool-cache: true + large-packages: false + + # free 10GB of space + - name: Remove unnecessary files + if: matrix.os == 'ubuntu-latest' + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + docker image prune --all --force + docker builder prune --all --force + docker system prune --all --force + - name: Install pip dependencies + if: steps.changes.outputs.stack == 'true' + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version + - name: Get uv cache dir + if: steps.changes.outputs.stack == 'true' + id: pip-cache + shell: bash + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + - name: Load github cache + uses: actions/cache@v4 + if: steps.changes.outputs.stack == 'true' + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-uv-py${{ matrix.python-version }} + restore-keys: | + ${{ runner.os }}-uv-py${{ matrix.python-version }} + - name: Install kubectl + if: steps.changes.outputs.stack == 'true' + run: | + # cleanup apt version + sudo apt remove kubectl || true + # install kubectl 1.27 + curl -LO https://dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubectl + chmod +x kubectl + sudo install kubectl /usr/local/bin; + - name: Install helm + if: steps.changes.outputs.stack == 'true' + run: | + # install helm + curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + chmod 700 get_helm.sh + ./get_helm.sh + - name: Run Notebook Scenario Sync Tests + if: steps.changes.outputs.stack == 'true' + timeout-minutes: 60 + env: + GITHUB_CI: true + shell: bash + run: | + K3D_VERSION=v5.6.3 + DEVSPACE_VERSION=v6.3.12 + # install k3d + wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 + mv k3d-linux-amd64 k3d + chmod +x k3d + export PATH=`pwd`:$PATH + k3d version + curl -sSL https://github.com/loft-sh/devspace/releases/download/${DEVSPACE_VERSION}/devspace-linux-amd64 -o ./devspace + chmod +x devspace + devspace version + tox -e stack.test.notebook.scenario.k8s.sync + - name: Get current timestamp + id: date + if: failure() + shell: bash + run: echo "date=$(date +%s)" >> $GITHUB_OUTPUT + + - name: Collect logs from k3d + if: steps.changes.outputs.stack == 'true' && failure() + shell: bash + run: | + mkdir -p ./k8s-logs + kubectl describe all -A --context k3d-bigquery-low --namespace syft > ./k8s-logs/bigquery-low-desc-${{ steps.date.outputs.date }}.txt + kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-bigquery-low --namespace syft > ./k8s-logs/bigquery-low-logs-${{ steps.date.outputs.date }}.txt + kubectl describe all -A --context k3d-bigquery-high --namespace syft > ./k8s-logs/bigquery-high-desc-${{ steps.date.outputs.date }}.txt + kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-bigquery-high --namespace syft > ./k8s-logs/bigquery-high-logs-${{ steps.date.outputs.date }}.txt + ls -la ./k8s-logs + - name: Upload logs to GitHub + uses: actions/upload-artifact@master + if: steps.changes.outputs.stack == 'true' && failure() + with: + name: k8s-logs-notebook-${{ matrix.os }}-${{ steps.date.outputs.date }} + path: ./k8s-logs/ + + - name: Cleanup k3d + if: steps.changes.outputs.stack == 'true' && failure() + shell: bash + run: | + export PATH=`pwd`:$PATH + k3d cluster delete bigquery-high || true + k3d cluster delete bigquery-low || true + + 
pr-tests-simulation-scenario-k8s: + strategy: + max-parallel: 99 + matrix: + os: [ubuntu-latest] + python-version: ["3.12"] + fail-fast: false + + runs-on: ${{matrix.os}} + + steps: + - name: Permission to home directory + run: | + sudo chown -R $USER:$USER $HOME + - uses: actions/checkout@v4 + - name: Check for file changes + uses: dorny/paths-filter@v3 + id: changes + with: + base: ${{ github.ref }} + token: ${{ github.token }} + filters: .github/file-filters.yml + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + if: steps.changes.outputs.stack == 'true' + with: + python-version: ${{ matrix.python-version }} + + - name: Add K3d Registry + run: | + sudo python ./scripts/patch_hosts.py --add-k3d-registry + + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + tool-cache: true + large-packages: false + + # free 10GB of space + - name: Remove unnecessary files + if: matrix.os == 'ubuntu-latest' + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + docker image prune --all --force + docker builder prune --all --force + docker system prune --all --force + + - name: Install pip dependencies + if: steps.changes.outputs.stack == 'true' + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version + + - name: Get uv cache dir + if: steps.changes.outputs.stack == 'true' + id: pip-cache + shell: bash + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + - name: Load github cache + uses: actions/cache@v4 + if: steps.changes.outputs.stack == 'true' + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-uv-py${{ matrix.python-version }} + restore-keys: | + ${{ runner.os }}-uv-py${{ matrix.python-version }} + + - name: Install kubectl + if: steps.changes.outputs.stack == 'true' + run: | + # cleanup apt version + sudo apt remove kubectl || true + # install kubectl 1.27 + curl -LO https://dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubectl + chmod +x kubectl + sudo install kubectl /usr/local/bin; + + - name: Install helm + if: steps.changes.outputs.stack == 'true' + run: | + # install helm + curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + chmod 700 get_helm.sh + ./get_helm.sh + + - name: Install just + if: steps.changes.outputs.stack == 'true' + run: | + curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin + + - name: Run scenario tests + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + env: + TOX_PYTHON: python${{ matrix.python-version }} + shell: bash + run: | + K3D_VERSION=v5.6.3 + DEVSPACE_VERSION=v6.3.12 + # install k3d + wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 + mv k3d-linux-amd64 k3d + chmod +x k3d + export PATH=`pwd`:$PATH + k3d version + curl -sSL https://github.com/loft-sh/devspace/releases/download/${DEVSPACE_VERSION}/devspace-linux-amd64 -o ./devspace + chmod +x devspace + devspace version + tox -e stack.test.scenario.k8s + + - name: Collect logs + if: steps.changes.outputs.stack == 'true' && failure() + shell: bash + run: | + mkdir -p ./output-logs + if [ -d "tests/scenariosv2/.logs" ]; then + cp -R tests/scenariosv2/.logs/* ./output-logs/ + else + echo "Log directory not found" + fi + + - name: Upload logs to GitHub + uses: actions/upload-artifact@master + if: steps.changes.outputs.stack == 'true' && failure() + with: + name: 
simulation-scenario-k8s-logs-${{ matrix.os }}-${{ steps.date.outputs.date }} + path: ./output-logs/ + + - name: Cleanup k3d + if: steps.changes.outputs.stack == 'true' && failure() + shell: bash + run: | + export PATH=`pwd`:$PATH + k3d cluster delete syft-low || true + k3d registry delete k3d-registry.localhost || true + + # pr-tests-simulation-scenario-k8s-sync: + # strategy: + # max-parallel: 99 + # matrix: + # os: [ubuntu-latest] + # python-version: ["3.12"] + # fail-fast: false + + # runs-on: ${{matrix.os}} + + # steps: + # - name: Permission to home directory + # run: | + # sudo chown -R $USER:$USER $HOME + # - uses: actions/checkout@v4 + # - name: Check for file changes + # uses: dorny/paths-filter@v3 + # id: changes + # with: + # base: ${{ github.ref }} + # token: ${{ github.token }} + # filters: .github/file-filters.yml + + # - name: Set up Python ${{ matrix.python-version }} + # uses: actions/setup-python@v5 + # if: steps.changes.outputs.stack == 'true' + # with: + # python-version: ${{ matrix.python-version }} + + # - name: Add K3d Registry + # run: | + # sudo python ./scripts/patch_hosts.py --add-k3d-registry + + # - name: Free Disk Space (Ubuntu) + # uses: jlumbroso/free-disk-space@main + # with: + # tool-cache: true + # large-packages: false + + # # free 10GB of space + # - name: Remove unnecessary files + # if: matrix.os == 'ubuntu-latest' + # run: | + # sudo rm -rf /usr/share/dotnet + # sudo rm -rf "$AGENT_TOOLSDIRECTORY" + # docker image prune --all --force + # docker builder prune --all --force + # docker system prune --all --force + + # - name: Install pip dependencies + # if: steps.changes.outputs.stack == 'true' + # run: | + # python -m pip install --upgrade pip + # pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + # uv --version + + # - name: Get uv cache dir + # if: steps.changes.outputs.stack == 'true' + # id: pip-cache + # shell: bash + # run: | + # echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + # - name: Load github cache + # uses: actions/cache@v4 + # if: steps.changes.outputs.stack == 'true' + # with: + # path: ${{ steps.pip-cache.outputs.dir }} + # key: ${{ runner.os }}-uv-py${{ matrix.python-version }} + # restore-keys: | + # ${{ runner.os }}-uv-py${{ matrix.python-version }} + + # - name: Install kubectl + # if: steps.changes.outputs.stack == 'true' + # run: | + # # cleanup apt version + # sudo apt remove kubectl || true + # # install kubectl 1.27 + # curl -LO https://dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubectl + # chmod +x kubectl + # sudo install kubectl /usr/local/bin; + + # - name: Install helm + # if: steps.changes.outputs.stack == 'true' + # run: | + # # install helm + # curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + # chmod 700 get_helm.sh + # ./get_helm.sh + + # - name: Install just + # if: steps.changes.outputs.stack == 'true' + # run: | + # curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin + + # - name: Run scenario tests + # if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + # env: + # BUMP_VERSION: "${{ matrix.bump-version }}" + # TOX_PYTHON: python${{ matrix.python-version }} + # shell: bash + # run: | + # K3D_VERSION=v5.6.3 + # DEVSPACE_VERSION=v6.3.12 + # # install k3d + # wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 + # mv k3d-linux-amd64 k3d + # chmod +x k3d + # export PATH=`pwd`:$PATH + # k3d version + # curl -sSL 
https://github.com/loft-sh/devspace/releases/download/${DEVSPACE_VERSION}/devspace-linux-amd64 -o ./devspace + # chmod +x devspace + # devspace version + # tox -e stack.test.scenario.k8s.sync + + # - name: Collect logs + # if: steps.changes.outputs.stack == 'true' && failure() + # shell: bash + # run: | + # mkdir -p ./output-logs + # if [ -d "tests/scenariosv2/.logs" ]; then + # cp -R tests/scenariosv2/.logs/* ./output-logs/ + # else + # echo "Log directory not found" + # fi + + # - name: Upload logs to GitHub + # uses: actions/upload-artifact@master + # if: steps.changes.outputs.stack == 'true' && failure() + # with: + # name: simulation-scenario-k8s-sync-logs-${{ matrix.os }}-${{ steps.date.outputs.date }} + # path: ./output-logs/ + + # - name: Cleanup k3d + # if: steps.changes.outputs.stack == 'true' && failure() + # shell: bash + # run: | + # export PATH=`pwd`:$PATH + # k3d cluster delete syft-low || true + # k3d cluster delete syft-high || true + # k3d registry delete k3d-registry.localhost || true diff --git a/.github/workflows/pr-tests-syft.yml b/.github/workflows/pr-tests-syft.yml index 9adf4a71100..4edc006984d 100644 --- a/.github/workflows/pr-tests-syft.yml +++ b/.github/workflows/pr-tests-syft.yml @@ -39,7 +39,7 @@ jobs: # run: | # sudo chown -R $USER:$USER $HOME - name: "clean .git/config" - if: matrix.os == 'windows' + if: matrix.os == 'windows-latest' continue-on-error: true shell: bash run: | @@ -62,20 +62,21 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Upgrade pip + - name: Install pip packages if: steps.changes.outputs.syft == 'true' run: | - pip install --upgrade pip uv==0.1.18 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - - name: Get pip cache dir + - name: Get uv cache dir id: pip-cache if: steps.changes.outputs.syft == 'true' shell: bash run: | echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 if: steps.changes.outputs.syft == 'true' with: @@ -86,19 +87,15 @@ jobs: # - name: Docker on MacOS # if: steps.changes.outputs.syft == 'true' && matrix.os == 'macos-latest' - # uses: crazy-max/ghaction-setup-docker@v3.1.0 + # uses: crazy-max/ghaction-setup-docker@v3.3.0 # with: # set-host: true - - name: Install Dependencies - if: steps.changes.outputs.syft == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 - - name: Run unit tests if: steps.changes.outputs.syft == 'true' run: | tox -e syft.test.unit + tox -e seaweedfs.test.unit pr-tests-syft-notebook-python: strategy: @@ -110,6 +107,7 @@ jobs: python-version: ["3.12"] deployment-type: ["python"] notebook-paths: ["tutorials"] + bump-version: ["False"] include: - python-version: "3.11" os: "ubuntu-latest" @@ -119,6 +117,11 @@ jobs: os: "ubuntu-latest" deployment-type: "python" notebook-paths: "tutorials" + - python-version: "3.12" + os: "ubuntu-latest" + deployment-type: "python" + notebook-paths: "tutorials" + bump-version: "True" runs-on: ${{ matrix.os }} steps: @@ -127,7 +130,7 @@ jobs: # run: | # sudo chown -R $USER:$USER $HOME - name: "clean .git/config" - if: matrix.os == 'windows' + if: matrix.os == 'windows-latest' continue-on-error: true shell: bash run: | @@ -150,20 +153,21 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Upgrade pip + - name: Install pip packages if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks == 'true' run: | - pip install --upgrade pip uv==0.1.18 + python -m pip install --upgrade pip + pip install 
uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - - name: Get pip cache dir + - name: Get uv cache dir id: pip-cache if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks == 'true' shell: bash run: | echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks == 'true' with: @@ -172,29 +176,404 @@ jobs: restore-keys: | ${{ runner.os }}-uv-py${{ matrix.python-version }}- - - name: Install Dependencies - if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 - - name: Run notebook tests uses: nick-fields/retry@v3 if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks == 'true' env: ORCHESTRA_DEPLOYMENT_TYPE: "${{ matrix.deployment-type }}" TEST_NOTEBOOK_PATHS: "${{ matrix.notebook-paths }}" + BUMP_VERSION: "${{ matrix.bump-version }}" with: timeout_seconds: 2400 max_attempts: 3 command: tox -e syft.test.notebook - pr-tests-syft-notebook-container: + pr-tests-syft-scenario: + strategy: + max-parallel: 99 + matrix: + # TODO try enabling on other OS + os: [ubuntu-latest] + python-version: ["3.12"] + deployment-type: ["python"] + bump-version: ["False"] + include: + - python-version: "3.11" + os: "ubuntu-latest" + deployment-type: "python" + - python-version: "3.10" + os: "ubuntu-latest" + deployment-type: "python" + - python-version: "3.12" + os: "ubuntu-latest" + deployment-type: "python" + bump-version: "True" + + runs-on: ${{ matrix.os }} + steps: + # - name: Permission to home directory + # if: matrix.os == 'ubuntu-latest' + # run: | + # sudo chown -R $USER:$USER $HOME + - name: "clean .git/config" + if: matrix.os == 'windows-latest' + continue-on-error: true + shell: bash + run: | + echo "deleting ${GITHUB_WORKSPACE}/.git/config" + rm ${GITHUB_WORKSPACE}/.git/config + + - uses: actions/checkout@v4 + + - name: Check for file changes + uses: dorny/paths-filter@v3 + id: changes + with: + base: ${{ github.ref }} + token: ${{ github.token }} + filters: .github/file-filters.yml + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + with: + python-version: ${{ matrix.python-version }} + + - name: Install pip packages + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version + + - name: Get uv cache dir + id: pip-cache + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + shell: bash + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + - name: Load github cache + uses: actions/cache@v4 + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-uv-py${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-uv-py${{ matrix.python-version }}- + + - name: Run scenario tests + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + env: + ORCHESTRA_DEPLOYMENT_TYPE: "${{ matrix.deployment-type }}" + BUMP_VERSION: "${{ matrix.bump-version }}" + TOX_PYTHON: python${{ matrix.python-version }} + shell: bash + run: | + tox -e 
syft.test.scenario + + - name: Collect logs + if: steps.changes.outputs.stack == 'true' && failure() + shell: bash + run: | + mkdir -p ./output-logs + if [ -d "tests/scenariosv2/.logs" ]; then + cp -R tests/scenariosv2/.logs/* ./output-logs/ + else + echo "Log directory not found" + fi + + - name: Upload logs to GitHub + uses: actions/upload-artifact@master + if: steps.changes.outputs.stack == 'true' && failure() + with: + name: simulation-scenario-logs-${{ matrix.os }}-${{ steps.date.outputs.date }} + path: ./output-logs/ + + pr-tests-syft-scenario-sync: + strategy: + max-parallel: 99 + matrix: + # TODO try enabling on other OS + os: [ubuntu-latest] + python-version: ["3.12"] + deployment-type: ["python"] + bump-version: ["False"] + include: + - python-version: "3.11" + os: "ubuntu-latest" + deployment-type: "python" + - python-version: "3.10" + os: "ubuntu-latest" + deployment-type: "python" + - python-version: "3.12" + os: "ubuntu-latest" + deployment-type: "python" + bump-version: "True" + + runs-on: ${{ matrix.os }} + steps: + # - name: Permission to home directory + # if: matrix.os == 'ubuntu-latest' + # run: | + # sudo chown -R $USER:$USER $HOME + - name: "clean .git/config" + if: matrix.os == 'windows-latest' + continue-on-error: true + shell: bash + run: | + echo "deleting ${GITHUB_WORKSPACE}/.git/config" + rm ${GITHUB_WORKSPACE}/.git/config + + - uses: actions/checkout@v4 + + - name: Check for file changes + uses: dorny/paths-filter@v3 + id: changes + with: + base: ${{ github.ref }} + token: ${{ github.token }} + filters: .github/file-filters.yml + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + with: + python-version: ${{ matrix.python-version }} + + - name: Install pip packages + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version + + - name: Get uv cache dir + id: pip-cache + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + shell: bash + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + - name: Load github cache + uses: actions/cache@v4 + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-uv-py${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-uv-py${{ matrix.python-version }}- + + - name: Run scenario tests + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + env: + ORCHESTRA_DEPLOYMENT_TYPE: "${{ matrix.deployment-type }}" + BUMP_VERSION: "${{ matrix.bump-version }}" + TOX_PYTHON: python${{ matrix.python-version }} + shell: bash + run: | + tox -e syft.test.scenario.sync + + - name: Collect logs + if: steps.changes.outputs.stack == 'true' && failure() + shell: bash + run: | + mkdir -p ./output-logs + if [ -d "tests/scenariosv2/.logs" ]; then + cp -R tests/scenariosv2/.logs/* ./output-logs/ + else + echo "Log directory not found" + fi + + - name: Upload logs to GitHub + uses: actions/upload-artifact@master + if: steps.changes.outputs.stack == 'true' && failure() + with: + name: simulation-scenario-sync-logs-${{ matrix.os }}-${{ steps.date.outputs.date }} + path: ./output-logs/ + + pr-tests-syft-notebook-scenario: + strategy: + 
max-parallel: 99 + matrix: + # Disable on windows until its flakyness is reduced. + # os: [ubuntu-latest, macos-latest, windows-latest] + # os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest] # mac wont start SMTPD mail server? + python-version: ["3.12"] + deployment-type: ["python"] + bump-version: ["False"] + include: + - python-version: "3.11" + os: "ubuntu-latest" + deployment-type: "python" + - python-version: "3.10" + os: "ubuntu-latest" + deployment-type: "python" + - python-version: "3.12" + os: "ubuntu-latest" + deployment-type: "python" + bump-version: "True" + + runs-on: ${{ matrix.os }} + steps: + # - name: Permission to home directory + # if: matrix.os == 'ubuntu-latest' + # run: | + # sudo chown -R $USER:$USER $HOME + - name: "clean .git/config" + if: matrix.os == 'windows-latest' + continue-on-error: true + shell: bash + run: | + echo "deleting ${GITHUB_WORKSPACE}/.git/config" + rm ${GITHUB_WORKSPACE}/.git/config + + - uses: actions/checkout@v4 + + - name: Check for file changes + uses: dorny/paths-filter@v3 + id: changes + with: + base: ${{ github.ref }} + token: ${{ github.token }} + filters: .github/file-filters.yml + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + with: + python-version: ${{ matrix.python-version }} + + - name: Install pip packages + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version + + - name: Get uv cache dir + id: pip-cache + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + shell: bash + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + - name: Load github cache + uses: actions/cache@v4 + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-uv-py${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-uv-py${{ matrix.python-version }}- + + - name: Run notebook scenario tests + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + env: + ORCHESTRA_DEPLOYMENT_TYPE: "${{ matrix.deployment-type }}" + BUMP_VERSION: "${{ matrix.bump-version }}" + TOX_PYTHON: python${{ matrix.python-version }} + shell: bash + run: | + export PATH="/usr/share/miniconda/bin:$PATH" + tox -e syft.test.notebook.scenario + + pr-tests-syft-notebook-scenario-sync: + strategy: + max-parallel: 99 + matrix: + # Disable on windows until its flakyness is reduced. + # Also disable on macos for now since SMTP server + # fails to write to emails.json in CI runner. 
+ # os: [ubuntu-latest, macos-latest, windows-latest] + os: [ubuntu-latest] + python-version: ["3.12"] + deployment-type: ["python"] + bump-version: ["False"] + include: + - python-version: "3.11" + os: "ubuntu-latest" + deployment-type: "python" + - python-version: "3.10" + os: "ubuntu-latest" + deployment-type: "python" + - python-version: "3.12" + os: "ubuntu-latest" + deployment-type: "python" + bump-version: "True" + + runs-on: ${{ matrix.os }} + steps: + # - name: Permission to home directory + # if: matrix.os == 'ubuntu-latest' + # run: | + # sudo chown -R $USER:$USER $HOME + - name: "clean .git/config" + if: matrix.os == 'windows-latest' + continue-on-error: true + shell: bash + run: | + echo "deleting ${GITHUB_WORKSPACE}/.git/config" + rm ${GITHUB_WORKSPACE}/.git/config + + - uses: actions/checkout@v4 + + - name: Check for file changes + uses: dorny/paths-filter@v3 + id: changes + with: + base: ${{ github.ref }} + token: ${{ github.token }} + filters: .github/file-filters.yml + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + with: + python-version: ${{ matrix.python-version }} + + - name: Install pip packages + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + run: | + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 + uv --version + + - name: Get uv cache dir + id: pip-cache + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + shell: bash + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + - name: Load github cache + uses: actions/cache@v4 + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-uv-py${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-uv-py${{ matrix.python-version }}- + + - name: Run notebook scenario with sync tests + uses: nick-fields/retry@v3 + if: steps.changes.outputs.syft == 'true' || steps.changes.outputs.notebooks_scenario == 'true' + env: + ORCHESTRA_DEPLOYMENT_TYPE: "${{ matrix.deployment-type }}" + BUMP_VERSION: "${{ matrix.bump-version }}" + with: + timeout_seconds: 2400 + max_attempts: 3 + command: tox -e syft.test.notebook.scenario.sync + + pr-tests-syft-notebook-single-container: strategy: max-parallel: 99 matrix: os: [ubuntu-latest] python-version: ["3.10", "3.11", "3.12"] - deployment-type: ["single_container"] + deployment-type: ["remote"] notebook-paths: ["api/0.8"] fail-fast: false @@ -231,20 +610,21 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Upgrade pip + - name: Install pip packages if: steps.changes.outputs.stack == 'true' || steps.changes.outputs.notebooks == 'true' run: | - pip install --upgrade pip uv==0.1.18 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - - name: Get pip cache dir + - name: Get uv cache dir id: pip-cache if: steps.changes.outputs.stack == 'true' || steps.changes.outputs.notebooks == 'true' shell: bash run: | echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 if: steps.changes.outputs.stack == 'true' || steps.changes.outputs.notebooks == 'true' with: @@ -253,11 +633,6 @@ jobs: restore-keys: | ${{ runner.os }}-uv-py${{ matrix.python-version }}- - - 
name: Install Dependencies - if: steps.changes.outputs.stack == 'true' || steps.changes.outputs.notebooks == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 - - name: Docker Compose on Linux if: (steps.changes.outputs.stack == 'true' || steps.changes.outputs.notebooks == 'true') && matrix.os == 'ubuntu-latest' shell: bash @@ -270,7 +645,7 @@ jobs: - name: Docker on MacOS if: (steps.changes.outputs.stack == 'true' || steps.changes.outputs.notebooks == 'true') && matrix.os == 'macos-latest' - uses: crazy-max/ghaction-setup-docker@v3.1.0 + uses: crazy-max/ghaction-setup-docker@v3.3.0 - name: Docker Compose on MacOS if: (steps.changes.outputs.stack == 'true' || steps.changes.outputs.notebooks == 'true') && matrix.os == 'macos-latest' @@ -300,7 +675,9 @@ jobs: DEV_MODE: "True" # force orchestra --build TEST_NOTEBOOK_PATHS: "${{ matrix.notebook-paths }}" run: | + tox -e single_container.launch tox -e stack.test.notebook + tox -e single_container.destroy pr-tests-syft-security: strategy: @@ -330,20 +707,21 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Upgrade pip + - name: Install pip packages if: steps.changes.outputs.syft == 'true' run: | - pip install --upgrade pip uv==0.1.18 + python -m pip install --upgrade pip + pip install uv==0.4.1 tox==4.18.0 tox-uv==1.11.2 uv --version - - name: Get pip cache dir + - name: Get uv cache dir if: steps.changes.outputs.syft == 'true' id: pip-cache shell: bash run: | echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - - name: pip cache + - name: Load github cache uses: actions/cache@v4 if: steps.changes.outputs.syft == 'true' with: @@ -352,11 +730,6 @@ jobs: restore-keys: | ${{ runner.os }}-uv-py${{ matrix.python-version }}- - - name: Install Dependencies - if: steps.changes.outputs.syft == 'true' - run: | - pip install --upgrade tox tox-uv==1.5.1 - - name: Scan for security issues if: steps.changes.outputs.syft == 'true' run: | diff --git a/.github/workflows/rhel-tests.yml b/.github/workflows/rhel-tests.yml deleted file mode 100644 index 9180635362d..00000000000 --- a/.github/workflows/rhel-tests.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Rhel Podman Stack Tests - -on: - workflow_call: - - workflow_dispatch: - inputs: - none: - description: "Run Version Tests Manually" - required: false - -jobs: - podman-tests-stack: - strategy: - max-parallel: 99 - matrix: - os: [om-ci-rhel-9] - python-version: ["3.12"] - fail-fast: false - - runs-on: ${{matrix.os}} - - steps: - - name: set permissions on work folder for self-runners - run: | - sudo chown -R $USER:$USER ~/actions-runner/_work/ - - - uses: actions/checkout@v4 - - - name: check python version - run: | - python${{matrix.python-version}} --version - - # - name: Check for file changes - # uses: dorny/paths-filter@v2 - # id: changes - # with: - # base: ${{ github.ref }} - # token: ${{ github.token }} - # filters: .github/file-filters.yml - - - name: Install tox - # if: steps.changes.outputs.stack == 'true' - run: | - pip${{matrix.python-version}} install -U tox - - - name: Run notebook tests - #if: steps.changes.outputs.stack == 'true' - run: | - tox -e stack.test.podman diff --git a/.github/workflows/test-github-arc.yml b/.github/workflows/test-github-arc.yml deleted file mode 100644 index 4f3dfacfa29..00000000000 --- a/.github/workflows/test-github-arc.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Actions Runner Controller Demo -on: - workflow_dispatch: - -jobs: - Test-Github-ARC-x64: - # You need to use the INSTALLATION_NAME from the previous step - runs-on: sh-arc-linux-x64 - steps: - 
- name: "Test Github ARC" - run: | - echo "🎉 This job uses runner scale set runners!" - - - name: "Check Architecture" - run: | - uname -a - - Test-Github-ARC-arm64: - # You need to use the INSTALLATION_NAME from the previous step - runs-on: sh-arc-linux-arm64 - steps: - - name: "Test Github ARC" - run: | - echo "🎉 This job uses runner scale set runners!" - - - name: "Check Architecture" - run: | - uname -a diff --git a/.github/workflows/vm-tests.yml b/.github/workflows/vm-tests.yml deleted file mode 100644 index be07b4a42a3..00000000000 --- a/.github/workflows/vm-tests.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: VM Tests - Stack - -on: - workflow_call: - - # pull_request: - # branches: - # - dev - # - main - # - "0.8" - - workflow_dispatch: - inputs: - none: - description: "Run Version Tests Manually" - required: false - -jobs: - vm-tests-stack: - strategy: - max-parallel: 99 - matrix: - os: [macos-12] - python-version: ["3.12"] - deployment-type: ["vm"] - fail-fast: false - - runs-on: ${{matrix.os}} - - steps: - - uses: actions/checkout@v4 - - - name: Check for file changes - uses: dorny/paths-filter@v3 - id: changes - with: - base: ${{ github.ref }} - token: ${{ github.token }} - filters: .github/file-filters.yml - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - if: steps.changes.outputs.stack == 'true' - with: - python-version: ${{ matrix.python-version }} - - - name: Get pip cache dir - if: steps.changes.outputs.stack == 'true' - id: pip-cache - shell: bash - run: | - echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - - - name: pip cache - uses: actions/cache@v4 - if: steps.changes.outputs.stack == 'true' - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-pip-py${{ matrix.python-version }} - restore-keys: | - ${{ runner.os }}-pip-py${{ matrix.python-version }} - - - name: Upgrade pip - if: steps.changes.outputs.stack == 'true' - run: | - python -m pip install --upgrade --user pip - - - name: Install tox - if: steps.changes.outputs.stack == 'true' - run: | - pip install -U tox - - - name: Run notebook tests - if: steps.changes.outputs.stack == 'true' - env: - ORCHESTRA_DEPLOYMENT_TYPE: "${{ matrix.deployment-type }}" - run: | - tox -e stack.test.vm diff --git a/.gitignore b/.gitignore index 33dc85c251c..3967bd1e34d 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,8 @@ .idea/ .mypy_cache .python-version -.vscode/ +.vscode/* +!.vscode/launch.json .tox/* .creds build @@ -28,9 +29,8 @@ build # docker compose volumes docker/data/* -# hagrid temps -packages/hagrid/syft -packages/hagrid/grid +# env files +.env # vagrant .vagrant @@ -60,7 +60,6 @@ notebooks/**/*.pkl k3d-registry .envfile -packages/hagrid/.envfile # rendered template dir @@ -71,7 +70,30 @@ js/node_modules/* #nohup nohup.out +# jupyter lsp +.virtual_documents + # notebook data notebooks/helm/scenario_data.jsonl + # tox syft.build.helm generated file -out.txt +out.* +.git-blame-ignore-revs + +# migration data +packages/grid/helm/examples/dev/migration.yaml + +# dynaconf settings file +**/settings.yaml + +# Any temporary material created for scenarios +notebooks/scenarios/bigquery/*.json +notebooks/scenarios/bigquery/sync/*.json +notebooks/scenarios/bigquery/sync/*.json.lock +notebooks/tutorials/version-upgrades/*.yaml +notebooks/tutorials/version-upgrades/*.blob +notebooks/scenarios/bigquery/sync/emails.json + +# logs dir generated by sim tests +.logs + diff --git a/.gitpod.yml b/.gitpod.yml deleted file mode 100644 index 584f776b221..00000000000 --- a/.gitpod.yml +++ 
/dev/null @@ -1,11 +0,0 @@ -tasks: - - init: pip install -e packages/hagrid - command: hagrid quickstart -ports: - - name: Jupyter - port: 8888 - visibility: public - - name: Nodes - port: 8081-8083 - onOpen: open-browser - visibility: public diff --git a/.isort.cfg b/.isort.cfg index e9749ee711d..26309a07039 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -2,22 +2,22 @@ profile=black force_single_line=True known_syft=syft -known_grid=grid +known_server=grid known_syftcli=syftcli known_first_party=src remove_redundant_aliases=True -sections=FUTURE,STDLIB,THIRDPARTY,SYFT,GRID,SYFTCLI,FIRSTPARTY,LOCALFOLDER +sections=FUTURE,STDLIB,THIRDPARTY,SYFT,SERVER,SYFTCLI,FIRSTPARTY,LOCALFOLDER lines_between_types=0 force_sort_within_sections=True import_heading_future=future import_heading_stdlib=stdlib import_heading_thirdparty=third party import_heading_syft=syft absolute -import_heading_grid=grid absolute +import_heading_server=server absolute import_heading_syftcli=syftcli absolute import_heading_firstparty=first party import_heading_localfolder=relative ignore_comments=False force_grid_wrap=True honor_noqa=True -skip_glob = packages/syft/src/syft/__init__.py,packages/grid/data/* +skip_glob=packages/syft/src/syft/__init__.py,packages/grid/data/* \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2e1ead0e3f0..8943a32efc2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,20 +3,18 @@ repos: rev: v4.5.0 hooks: - id: check-ast - exclude: ^(packages/grid/ansible/) always_run: true - id: trailing-whitespace always_run: true exclude: ^(docs/|.+\.md|.bumpversion.cfg) - id: check-docstring-first always_run: true - exclude: ^(packages/grid/ansible/) - id: check-json always_run: true - exclude: ^(packages/grid/frontend/) + exclude: ^(packages/grid/frontend/|.vscode) - id: check-added-large-files always_run: true - exclude: ^(packages/grid/backend/wheels/.*|docs/img/header.png|docs/img/terminalizer.gif) + exclude: ^(packages/grid/backend/wheels/.*|docs/img/header.png|docs/img/terminalizer.gif|^notebooks/scenarios/bigquery/upgradability/sync/migration_.*\.blob) - id: check-yaml always_run: true exclude: ^(packages/grid/k8s/rendered/|packages/grid/helm/) @@ -25,20 +23,18 @@ repos: args: ["--assume-in-merge"] - id: check-executables-have-shebangs always_run: true - exclude: ^(packages/grid/ansible/) - id: debug-statements always_run: true - exclude: ^(packages/grid/ansible/) - id: name-tests-test always_run: true - exclude: ^(.*/tests/utils/)|^(.*fixtures.py) + exclude: ^(.*/tests/utils/)|^(.*fixtures.py)|^(tests/scenariosv2/(sim|flows)) - id: requirements-txt-fixer always_run: true - id: mixed-line-ending args: ["--fix=lf"] exclude: '\.bat|\.csv|\.ps1$' - - repo: https://github.com/MarcoGorelli/absolufy-imports + - repo: https://github.com/MarcoGorelli/absolufy-imports # This repository has been archived by the owner on Aug 15, 2023. It is now read-only. 
rev: v0.3.1 hooks: - id: absolufy-imports @@ -51,8 +47,8 @@ repos: packages/syft/src/syft/proto.*| packages/syft/tests/syft/lib/python.*| packages/grid.*| - packages/hagrid.*| - packages/syft/src/syft/federated/model_serialization/protos.py + packages/syft/src/syft/federated/model_serialization/protos.py| + packages/syft/src/syft/util/test_helpers/.*| )$ - repo: https://github.com/MarcoGorelli/absolufy-imports @@ -76,14 +72,15 @@ repos: always_run: true - repo: https://github.com/nbQA-dev/nbQA - rev: 1.8.3 + rev: 1.8.5 + # nbQA has no files attribute + # files: "^notebooks/(api|tutorials|admin)" hooks: - id: nbqa-isort - - id: nbqa-black - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: "v0.3.0" + rev: "v0.4.7" hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix, --show-fixes] @@ -92,32 +89,7 @@ repos: types_or: [python, pyi, jupyter] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.8.0 - hooks: - - id: mypy - name: "mypy: hagrid" - always_run: true - files: ^packages/hagrid - args: [ - "--ignore-missing-imports", - "--scripts-are-modules", - "--disallow-incomplete-defs", - "--no-implicit-optional", - "--warn-unused-ignores", - "--warn-redundant-casts", - "--strict-equality", - "--warn-unreachable", - # "--disallow-untyped-decorators", - "--disallow-untyped-defs", - "--disallow-untyped-calls", - "--namespace-packages", - "--install-types", - "--non-interactive", - "--config-file=tox.ini", - ] - - - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.8.0 + rev: v1.10.0 hooks: - id: mypy name: "mypy: syft-cli" @@ -142,7 +114,7 @@ repos: ] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.8.0 + rev: v1.10.0 hooks: - id: mypy name: "mypy: grid" @@ -167,13 +139,12 @@ repos: ] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.8.0 + rev: v1.10.0 hooks: - id: mypy name: "mypy: syft" always_run: true files: "^packages/syft/src/syft/" - exclude: "packages/syft/src/syft/types/dicttuple.py|^packages/syft/src/syft/service/action/action_graph.py|^packages/syft/src/syft/external/oblv/" args: [ "--follow-imports=skip", "--ignore-missing-imports", @@ -191,18 +162,19 @@ repos: "--non-interactive", "--config-file=tox.ini", ] + exclude: ^(packages/syft/src/syft/util/test_helpers) - repo: https://github.com/kynan/nbstripout rev: 0.7.1 hooks: - id: nbstripout - files: "^notebooks/api|^notebooks/tutorials" + files: "^notebooks/(api|tutorials|admin|scenarios)" - - repo: https://github.com/pre-commit/mirrors-prettier + - repo: https://github.com/pre-commit/mirrors-prettier # This repository has been archived by the owner on Apr 11, 2024. It is now read-only. rev: "v3.0.0-alpha.9-for-vscode" hooks: - id: prettier - exclude: ^(packages/grid/helm|packages/grid/frontend/pnpm-lock.yaml|packages/hagrid/hagrid/manifest_template.yml) + exclude: ^(packages/grid/helm|packages/grid/frontend/pnpm-lock.yaml|.vscode) # - repo: meta # hooks: diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 00000000000..6b9e091a405 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,3 @@ +**/*.jinja2 +**/*.min.js +**/*.min.css \ No newline at end of file diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 00000000000..1ca87ab7d8a --- /dev/null +++ b/.prettierrc @@ -0,0 +1,3 @@ +{ + "singleQuote": false +} diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000000..7e30fe06537 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,32 @@ +{ + // Use IntelliSense to learn about possible attributes. 
+ // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Python Debugger: Current File", + "type": "debugpy", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal" + }, + { + "name": "Syft Debugger", + "type": "debugpy", + "request": "attach", + "connect": { + "host": "localhost", + "port": "${input:port}" + } + } + ], + "inputs": [ + { + "id": "port", + "description": "Port on which the debugger is listening", + "type": "promptString", + "default": "5678" + } + ] +} \ No newline at end of file diff --git a/README.md b/README.md index d3898f3d93b..85b9f401143 100644 --- a/README.md +++ b/README.md @@ -1,365 +1,172 @@ -
+


- - Syft Logo + + Syft Logo -Perform data science on `data` that remains in `someone else's` server +

Data Science on data you are not allowed to see

-# Quickstart +PySyft enables a new way to do data science, where you can use non-public information, without seeing or obtaining a copy of the data itself. All you need is to connect to a Datasite! -✅ `Linux` ✅ `macOS` ✅ `Windows` ✅ `Docker` ✅ `Podman` ✅ `Kubernetes` +Datasites are like websites, but for data. Designed with the principles of structured transparency, they enable data owners to control how their data is protected and data scientists to use data without obtaining a copy. -## Install Client +PySyft supports any statistical analysis or machine learning, offering support for directly running Python code - even using third-party Python libraries. -```bash -$ pip install -U syft[data_science] -``` +

Supported on:

-## Launch Server - -```python -# from Jupyter / Python -import syft as sy -sy.requires(">=0.8.5,<0.8.6") -node = sy.orchestra.launch(name="my-domain", port=8080, dev_mode=True, reset=True) -``` +✅ Linux +✅ macOS +✅ Windows +✅ Docker +✅ Kubernetes -```bash -# or from the command line -$ syft launch --name=my-domain --port=8080 --reset=True +# Quickstart -Starting syft-node server on 0.0.0.0:8080 -``` +Try out your first query against a live demo Datasite! -## Launch Client +## Install Client -```python -import syft as sy -sy.requires(">=0.8.5,<0.8.6") -domain_client = sy.login(port=8080, email="info@openmined.org", password="changethis") +```bash +pip install -U "syft[data_science]" ``` -## PySyft in 10 minutes - -📝 API Example Notebooks +More instructions are available here. -- 00-load-data.ipynb -- 01-submit-code.ipynb -- 02-review-code-and-approve.ipynb -- 03-data-scientist-download-result.ipynb -- 04-jax-example.ipynb -- 05-custom-policy.ipynb -- 06-multiple-code-requests.ipynb -- 07-domain-register-control-flow.ipynb -- 08-code-version.ipynb -- 09-blob-storage.ipynb -- 10-container-images.ipynb -- 11-container-images-k8s.ipynb +## Launch Server -## Deploy Kubernetes Helm Chart +Launch a development server directly in your Jupyter Notebook: -**Note**: Assuming we have a Kubernetes cluster already setup. +```python +import syft as sy -#### 1. Add and update Helm repo for Syft +sy.requires(">=0.9.5,<0.9.6") -```sh -helm repo add openmined https://openmined.github.io/PySyft/helm -helm repo update openmined +server = sy.orchestra.launch( + name="my-datasite", + port=8080, + create_producer=True, + n_consumers=1, + dev_mode=False, + reset=True, # resets database +) ``` -#### 2. Search for available Syft versions +or from the command line: -```sh -helm search repo openmined/syft --versions --devel -``` - -#### 3. Set your preferred Syft Chart version +```bash +$ syft launch --name=my-datasite --port=8080 --reset=True -```sh -SYFT_VERSION="" +Starting syft-datasite server on 0.0.0.0:8080 ``` -#### 4. Provisioning Helm Charts +Datasite servers can be deployed as a single container using Docker or directly in Kubernetes. Check out our deployment guide. -```sh -helm install my-domain openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className="traefik" -``` - -### Ingress Controllers +## Launch Client -For Azure AKS +Main way to use a Datasite is via our Syft client, in a Jupyter Notebook. Check out our PySyft client guide: -```sh -helm install ... --set ingress.className="azure-application-gateway" -``` +```python +import syft as sy -For AWS EKS +sy.requires(">=0.9.5,<0.9.6") -```sh -helm install ... --set ingress.className="alb" +datasite_client = sy.login( + port=8080, + email="info@openmined.org", + password="changethis" +) ``` -For Google GKE we need the [`gce` annotation](https://cloud.google.com/kubernetes-engine/docs/how-to/load-balance-ingress#create-ingress) annotation. +## PySyft - Getting started 📝 -```sh -helm install ... --set ingress.class="gce" -``` +Learn about PySyft via our getting started guide: -## Deploy to a Container Engine or Cloud +- PySyft from the ground up +- Part 1: Datasets & Assets +- Part 2: Client and Datasite Access +- Part 3: Propose the research study +- Part 4: Review Code Requests +- Part 5: Retrieving Results -1. Install our handy 🛵 cli tool which makes deploying a Domain or Gateway server to Docker or VM a one-liner: - `pip install -U hagrid` +# PySyft In-depth -2. 
Then run our interactive jupyter Install 🧙🏽‍♂️ WizardBETA: - `hagrid quickstart` -3. In the tutorial you will learn how to install and deploy: - `PySyft` = our `numpy`-like 🐍 Python library for computing on `private data` in someone else's `Domain` +📚 Check out our docs website. - `PyGrid` = our 🐳 `docker` / 🐧 `vm` `Domain` & `Gateway` Servers where `private data` lives +Quick links to PySyft components: -## Docs and Support +- Datasite Server -- 📚 Docs -- `#support` on Slack +- Syft Client -# Install Notes +- Datasets API (`.datasets`) -- HAGrid 0.3 Requires: 🐍 `python` 🐙 `git` - Run: `pip install -U hagrid` -- Interactive Install 🧙🏽‍♂️ WizardBETA Requires 🛵 `hagrid`: - Run: `hagrid quickstart` -- PySyft 0.8.1 Requires: 🐍 `python 3.10 - 3.12` - Run: `pip install -U syft` -- PyGrid Requires: 🐳 `docker`, 🦦 `podman` or ☸️ `kubernetes` - Run: `hagrid launch ...` +- Users API (`.users`) + -# Versions +- Request API (`.requests`) -`0.9.0` - Coming soon... -`0.8.6` (Beta) - `dev` branch 👈🏽 API - Coming soon... -`0.8.5` (Stable) - API +- Code API (`.code`) -Deprecated: +- Syft Policies API (`.policy`) -- `0.8.4` - API -- `0.8.3` - API -- `0.8.2` - API -- `0.8.1` - API -- `0.8.0` - API -- `0.7.0` - Course 3 Updated -- `0.6.0` - Course 3 -- `0.5.1` - Course 2 + M1 Hotfix -- `0.2.0` - `0.5.0` +- Settings API (`.settings`) -PySyft and PyGrid use the same `version` and its best to match them up where possible. We release weekly betas which can be used in each context: +- Notifications API (`.notifications`) -PySyft (Stable): `pip install -U syft` -PyGrid (Stable) `hagrid launch ... tag=latest` +- Sync API (`.sync`) -PySyft (Beta): `pip install -U syft --pre` -PyGrid (Beta): `hagrid launch ... tag=beta` +## Why use PySyft? -HAGrid is a cli / deployment tool so the latest version of `hagrid` is usually the best. +In a variety of domains across society, data owners have **valid concerns about the risks associated with sharing their data**, such as legal risks, privacy invasion (_misusing the data_), or loss of intellectual property (_copying and redistributing it_). -# What is Syft? +Datasites enable data scientists to **answer questions** without even seeing or acquiring a copy of the data, **within the data owner's definition of acceptable use**. We call this process Remote Data Science. - - - Syft - +This means that the **current risks** of sharing information with someone will **no longer prevent** the vast benefits such as innovation, insights and scientific discovery. With each Datasite, data owners can enable `1000x more accessible data` in each scientific field and, together with data scientists, lead breakthrough innovation. -`Syft` is OpenMined's `open source` stack that provides `secure` and `private` Data Science in Python. Syft decouples `private data` from model training, using techniques like [Federated Learning](https://ai.googleblog.com/2017/04/federated-learning-collaborative.html), [Differential Privacy](https://en.wikipedia.org/wiki/Differential_privacy), and [Encrypted Computation](https://en.wikipedia.org/wiki/Homomorphic_encryption). This is done with a `numpy`-like interface and integration with `Deep Learning` frameworks, so that you as a `Data Scientist` can maintain your current workflow while using these new `privacy-enhancing techniques`. +Learn more about our work on our website. -### Why should I use Syft?
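As a quick orientation to the component APIs listed above, here is a minimal, illustrative sketch of how they are reached from a logged-in client; the login values mirror the Quickstart defaults, and the availability of some namespaces depends on your role and version:

```python
import syft as sy

client = sy.login(port=8080, email="info@openmined.org", password="changethis")

client.datasets       # Datasets API - browse published datasets
client.users          # Users API - manage accounts (admin roles)
client.requests       # Request API - review and approve requests
client.code           # Code API - submitted research code
client.policy         # Syft Policies API - input/output policies
client.settings       # Settings API - server-level configuration
client.notifications  # Notifications API
client.sync           # Sync API
```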
+## Support -`Syft` allows a `Data Scientist` to ask `questions` about a `dataset` and, within `privacy limits` set by the `data owner`, get `answers` to those `questions`, all without obtaining a `copy` of the data itself. We call this process `Remote Data Science`. It means in a wide variety of `domains` across society, the current `risks` of sharing information (`copying` data) with someone such as, privacy invasion, IP theft and blackmail will no longer prevent the vast `benefits` such as innovation, insights and scientific discovery which secure access will provide. +For questions about PySyft, reach out via `#support` on Slack. -No more cold calls to get `access` to a dataset. No more weeks of `wait times` to get a `result` on your `query`. It also means `1000x more data` in every domain. PySyft opens the doors to a streamlined Data Scientist `workflow`, all with the individual's `privacy` at its heart. +## Syft Versions - +- `0.9.5` (Stable) - Docs +- Install PySyft (Stable): `pip install -U syft` -# Terminology - - - - - - - - - - - - - - - - - - - -
- -

👨🏻‍💼 Data Owners

-
- -

👩🏽‍🔬 Data Scientists

-
- - -Provide `datasets` which they would like to make available for `study` by an `outside party` they may or may not `fully trust` has good intentions. - - - - -Are end `users` who desire to perform `computations` or `answer` a specific `question` using one or more data owners' `datasets`. - -
- -

🏰 Domain Server

-
- -

🔗 Gateway Server

-
- - -Manages the `remote study` of the data by a `Data Scientist` and allows the `Data Owner` to manage the `data` and control the `privacy guarantees` of the subjects under study. It also acts as a `gatekeeper` for the `Data Scientist's` access to the data to compute and experiment with the results. - - - - -Provides services to a group of `Data Owners` and `Data Scientists`, such as dataset `search` and bulk `project approval` (legal / technical) to participate in a project. A gateway server acts as a bridge between it's members (`Domains`) and their subscribers (`Data Scientists`) and can provide access to a collection of `domains` at once.
+Find more about previous releases here. # Community - - - - - - -
- -
- - - - - +Supported by the OpenMined Foundation, the OpenMined Community is an online network of over 17,000 technologists, researchers, and industry professionals keen to _unlock 1000x more data in every scientific field and industry_. - - -
-
- - - - -
- - - - - - - - -
-
+ # Courses
- +
- +
- +
@@ -369,13 +176,20 @@ Provides services to a group of `Data Owners` and `Data Scientists`, such as dat # Contributors -OpenMined and Syft appreciates all contributors, if you would like to fix a bug or suggest a new feature, please see our [guidelines](https://openmined.github.io/PySyft/developer_guide/index.html).
+OpenMined and Syft appreciate all contributors: if you would like to fix a bug or suggest a new feature, please reach out via GitHub or Slack! Contributors +# About OpenMined + +OpenMined is a non-profit foundation creating technology infrastructure that helps researchers get answers from data without needing a copy or direct access. Our community of technologists is building Syft. + + + # Supporters @@ -433,19 +247,6 @@ OpenMined and Syft appreciates all contributors, if you would like to fix a bug
-# Open Collective - -`OpenMined` is a fiscally sponsored `501(c)(3)` in the USA. We are funded by our generous supporters on Open Collective.

- - - - Contributors - - -# Disclaimer - -Syft is under active development and is not yet ready for pilots on private data without our assistance. As early access participants, please contact us via [Slack](https://slack.openmined.org/) or email if you would like to ask a question or have a use case that you would like to discuss. - # License [Apache License 2.0](LICENSE)
diff --git a/VERSION b/VERSION index efbbf8e7c62..368606c3066 100644 --- a/VERSION +++ b/VERSION @@ -1,5 +1,5 @@ - # Mono Repo Global Version -__version__ = "0.8.6-beta.1" +# Mono Repo Global Version +__version__ = "0.9.6-beta.6" # elsewhere we can call this file: `python VERSION` and simply take the stdout # stdlib diff --git a/docs/img/personas_image/dataengineer.png b/docs/img 2/personas_image/dataengineer.png similarity index 100% rename from docs/img/personas_image/dataengineer.png rename to docs/img 2/personas_image/dataengineer.png diff --git a/docs/img/personas_image/dataowner.png b/docs/img 2/personas_image/dataowner.png similarity index 100% rename from docs/img/personas_image/dataowner.png rename to docs/img 2/personas_image/dataowner.png diff --git a/docs/img/personas_image/datascientist.png b/docs/img 2/personas_image/datascientist.png similarity index 100% rename from docs/img/personas_image/datascientist.png rename to docs/img 2/personas_image/datascientist.png diff --git a/docs/img/Syft-Logo-Light.svg b/docs/img/Syft-Logo-Light.svg new file mode 100644 index 00000000000..8d1bff88f21 --- /dev/null +++ b/docs/img/Syft-Logo-Light.svg @@ -0,0 +1,126 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/img/Syft-Logo.svg b/docs/img/Syft-Logo.svg new file mode 100644 index 00000000000..24adb15bbf7 --- /dev/null +++ b/docs/img/Syft-Logo.svg @@ -0,0 +1,126 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/requirements.txt b/docs/requirements.txt index 6f3176dae92..adc37f38d57 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,13 +1,15 @@ -certifi>=2023.7.22 # not directly required, pinned by Snyk to avoid a vulnerability +certifi>=2024.7.4 # not directly required, pinned by Snyk to avoid a vulnerability +idna>=3.7 # not directly required, pinned by Snyk to avoid a vulnerability ipython==8.10.0 -jinja2>=3.1.3 # not directly required, pinned by Snyk to avoid a vulnerability +jinja2>=3.1.4 # not directly required, pinned by Snyk to avoid a vulnerability markupsafe==2.0.1 pydata-sphinx-theme==0.7.2 pygments>=2.15.0 # not directly required, pinned by Snyk to avoid a vulnerability -requests>=2.31.0 # not directly required, pinned by Snyk to avoid a vulnerability -setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability +requests>=2.32.2 # not directly required, pinned by Snyk to avoid a vulnerability +setuptools>=70.0.0 # not directly required, pinned by Snyk to avoid a vulnerability sphinx==4.3.0 sphinx-autoapi==1.8.4 sphinx-code-include==1.1.1 sphinx-copybutton==0.4.0 sphinx-panels==0.6.0 +urllib3>=2.2.2 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/docs/source/_static/install_tutorials/pygrid_ui.png b/docs/source/_static/install_tutorials/pygrid_ui.png deleted file mode 100644 index a31db3d6111..00000000000 Binary files a/docs/source/_static/install_tutorials/pygrid_ui.png and /dev/null differ diff --git a/docs/source/_static/personas-image/data-owner/00-deploy-domain-00.gif b/docs/source/_static/personas-image/data-owner/00-deploy-datasite-00.gif similarity index 100% rename from docs/source/_static/personas-image/data-owner/00-deploy-domain-00.gif rename to docs/source/_static/personas-image/data-owner/00-deploy-datasite-00.gif diff --git a/docs/source/_static/personas-image/data-owner/00-deploy-domain-01.jpg 
b/docs/source/_static/personas-image/data-owner/00-deploy-datasite-01.jpg similarity index 100% rename from docs/source/_static/personas-image/data-owner/00-deploy-domain-01.jpg rename to docs/source/_static/personas-image/data-owner/00-deploy-datasite-01.jpg diff --git a/docs/source/api_reference/index.rst b/docs/source/api_reference/index.rst index 7d7e85d02a8..d87a01b35e9 100644 --- a/docs/source/api_reference/index.rst +++ b/docs/source/api_reference/index.rst @@ -22,7 +22,7 @@ objects, functions and methods. syft.client syft.external - syft.node + syft.server syft.serde syft.service syft.store diff --git a/docs/source/api_reference/syft.client.api.rst b/docs/source/api_reference/syft.client.api.rst index 9e7889482c6..3022a1c5086 100644 --- a/docs/source/api_reference/syft.client.api.rst +++ b/docs/source/api_reference/syft.client.api.rst @@ -27,7 +27,7 @@ syft.client.api APIEndpoint APIModule APIRegistry - NodeView + ServerView SignedSyftAPICall SyftAPI SyftAPICall diff --git a/docs/source/api_reference/syft.client.connection.rst b/docs/source/api_reference/syft.client.connection.rst index 912c3b50596..139d9eee60e 100644 --- a/docs/source/api_reference/syft.client.connection.rst +++ b/docs/source/api_reference/syft.client.connection.rst @@ -17,7 +17,7 @@ syft.client.connection .. autosummary:: - NodeConnection + ServerConnection diff --git a/docs/source/api_reference/syft.client.registry.rst b/docs/source/api_reference/syft.client.registry.rst index 57f0136d312..9b2987bfb78 100644 --- a/docs/source/api_reference/syft.client.registry.rst +++ b/docs/source/api_reference/syft.client.registry.rst @@ -17,7 +17,7 @@ syft.client.registry .. autosummary:: - DomainRegistry + DatasiteRegistry NetworkRegistry diff --git a/docs/source/api_reference/syft.external.oblv.rst b/docs/source/api_reference/syft.external.oblv.rst deleted file mode 100644 index 18eeb313704..00000000000 --- a/docs/source/api_reference/syft.external.oblv.rst +++ /dev/null @@ -1,82 +0,0 @@ -syft.external.oblv package -========================== - -.. automodule:: syft.external.oblv - :members: - :undoc-members: - :show-inheritance: - -Submodules ----------- - -syft.external.oblv.auth module ------------------------------- - -.. automodule:: syft.external.oblv.auth - :members: - :undoc-members: - :show-inheritance: - -syft.external.oblv.constants module ------------------------------------ - -.. automodule:: syft.external.oblv.constants - :members: - :undoc-members: - :show-inheritance: - -syft.external.oblv.deployment module ------------------------------------- - -.. automodule:: syft.external.oblv.deployment - :members: - :undoc-members: - :show-inheritance: - -syft.external.oblv.deployment\_client module --------------------------------------------- - -.. automodule:: syft.external.oblv.deployment_client - :members: - :undoc-members: - :show-inheritance: - -syft.external.oblv.exceptions module ------------------------------------- - -.. automodule:: syft.external.oblv.exceptions - :members: - :undoc-members: - :show-inheritance: - -syft.external.oblv.oblv\_keys module ------------------------------------- - -.. automodule:: syft.external.oblv.oblv_keys - :members: - :undoc-members: - :show-inheritance: - -syft.external.oblv.oblv\_keys\_stash module -------------------------------------------- - -.. automodule:: syft.external.oblv.oblv_keys_stash - :members: - :undoc-members: - :show-inheritance: - -syft.external.oblv.oblv\_proxy module -------------------------------------- - -.. 
automodule:: syft.external.oblv.oblv_proxy - :members: - :undoc-members: - :show-inheritance: - -syft.external.oblv.oblv\_service module ---------------------------------------- - -.. automodule:: syft.external.oblv.oblv_service - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/api_reference/syft.external.rst b/docs/source/api_reference/syft.external.rst index 0a59c9187b3..00110dba1d7 100644 --- a/docs/source/api_reference/syft.external.rst +++ b/docs/source/api_reference/syft.external.rst @@ -25,14 +25,3 @@ - - - -.. rubric:: Modules - -.. autosummary:: - :toctree: - :recursive: - - syft.external.oblv - diff --git a/docs/source/api_reference/syft.node.credentials.rst b/docs/source/api_reference/syft.node.credentials.rst index a1888491bcc..99e54dd153e 100644 --- a/docs/source/api_reference/syft.node.credentials.rst +++ b/docs/source/api_reference/syft.node.credentials.rst @@ -1,7 +1,7 @@ -syft.node.credentials +syft.server.credentials ===================== -.. automodule:: syft.node.credentials +.. automodule:: syft.server.credentials diff --git a/docs/source/api_reference/syft.node.datasite.rst b/docs/source/api_reference/syft.node.datasite.rst new file mode 100644 index 00000000000..687eb004816 --- /dev/null +++ b/docs/source/api_reference/syft.node.datasite.rst @@ -0,0 +1,29 @@ +syft.server.datasite +================ + +.. automodule:: syft.server.datasite + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + Datasite + + + + + + + + + diff --git a/docs/source/api_reference/syft.node.domain.rst b/docs/source/api_reference/syft.node.domain.rst deleted file mode 100644 index eb4ff6334a5..00000000000 --- a/docs/source/api_reference/syft.node.domain.rst +++ /dev/null @@ -1,29 +0,0 @@ -syft.node.domain -================ - -.. automodule:: syft.node.domain - - - - - - - - - - - - .. rubric:: Classes - - .. autosummary:: - - Domain - - - - - - - - - diff --git a/docs/source/api_reference/syft.node.gateway.rst b/docs/source/api_reference/syft.node.gateway.rst index 410841b9447..8b6d6335fd1 100644 --- a/docs/source/api_reference/syft.node.gateway.rst +++ b/docs/source/api_reference/syft.node.gateway.rst @@ -1,7 +1,7 @@ -syft.node.gateway +syft.server.gateway ================= -.. automodule:: syft.node.gateway +.. automodule:: syft.server.gateway diff --git a/docs/source/api_reference/syft.node.node.rst b/docs/source/api_reference/syft.node.node.rst index fe87e6e5fa6..e3f263f97d0 100644 --- a/docs/source/api_reference/syft.node.node.rst +++ b/docs/source/api_reference/syft.node.node.rst @@ -1,7 +1,7 @@ -syft.node.node +syft.server.server ============== -.. automodule:: syft.node.node +.. automodule:: syft.server.server @@ -14,12 +14,11 @@ syft.node.node .. autosummary:: create_admin_new - create_oblv_key_pair create_worker_metadata get_default_root_email get_default_root_password get_env - get_node_uid_env + get_server_uid_env get_private_key_env gipc_decoder gipc_encoder @@ -36,7 +35,7 @@ syft.node.node .. autosummary:: - Node + Server diff --git a/docs/source/api_reference/syft.node.routes.rst b/docs/source/api_reference/syft.node.routes.rst index a5bce85bd46..926814c5b62 100644 --- a/docs/source/api_reference/syft.node.routes.rst +++ b/docs/source/api_reference/syft.node.routes.rst @@ -1,7 +1,7 @@ -syft.node.routes +syft.server.routes ================ -.. automodule:: syft.node.routes +.. 
automodule:: syft.server.routes diff --git a/docs/source/api_reference/syft.node.rst b/docs/source/api_reference/syft.node.rst index a94e20d906d..183e2e7f281 100644 --- a/docs/source/api_reference/syft.node.rst +++ b/docs/source/api_reference/syft.node.rst @@ -1,7 +1,7 @@ -syft.node +syft.server ========= -.. automodule:: syft.node +.. automodule:: syft.server @@ -27,13 +27,13 @@ :toctree: :recursive: - syft.node.credentials - syft.node.domain - syft.node.gateway - syft.node.node - syft.node.routes - syft.node.run - syft.node.server - syft.node.worker - syft.node.worker_settings + syft.server.credentials + syft.server.datasite + syft.server.gateway + syft.server.server + syft.server.routes + syft.server.run + syft.server.server + syft.server.worker + syft.server.worker_settings diff --git a/docs/source/api_reference/syft.node.run.rst b/docs/source/api_reference/syft.node.run.rst index 3f8cc943294..588a53322d9 100644 --- a/docs/source/api_reference/syft.node.run.rst +++ b/docs/source/api_reference/syft.node.run.rst @@ -1,7 +1,7 @@ -syft.node.run +syft.server.run ============= -.. automodule:: syft.node.run +.. automodule:: syft.server.run diff --git a/docs/source/api_reference/syft.node.server.rst b/docs/source/api_reference/syft.node.server.rst index efcc7d9642a..1484aee73f4 100644 --- a/docs/source/api_reference/syft.node.server.rst +++ b/docs/source/api_reference/syft.node.server.rst @@ -1,7 +1,7 @@ -syft.node.server +syft.server.server ================ -.. automodule:: syft.node.server +.. automodule:: syft.server.server @@ -17,7 +17,7 @@ syft.node.server kill_process make_app run_uvicorn - serve_node + serve_server diff --git a/docs/source/api_reference/syft.node.worker.rst b/docs/source/api_reference/syft.node.worker.rst index 510732d7c39..f2da3a94e81 100644 --- a/docs/source/api_reference/syft.node.worker.rst +++ b/docs/source/api_reference/syft.node.worker.rst @@ -1,7 +1,7 @@ -syft.node.worker +syft.server.worker ================ -.. automodule:: syft.node.worker +.. automodule:: syft.server.worker diff --git a/docs/source/api_reference/syft.node.worker_settings.rst b/docs/source/api_reference/syft.node.worker_settings.rst index 0b5521ddaef..a9880847ec0 100644 --- a/docs/source/api_reference/syft.node.worker_settings.rst +++ b/docs/source/api_reference/syft.node.worker_settings.rst @@ -1,7 +1,7 @@ -syft.node.worker\_settings +syft.server.worker\_settings ========================== -.. automodule:: syft.node.worker_settings +.. automodule:: syft.server.worker_settings diff --git a/docs/source/api_reference/syft.rst b/docs/source/api_reference/syft.rst index f2bf2008e6c..0e930b08dce 100644 --- a/docs/source/api_reference/syft.rst +++ b/docs/source/api_reference/syft.rst @@ -15,7 +15,7 @@ Subpackages syft.capnp syft.client syft.external - syft.node + syft.server syft.serde syft.service syft.store @@ -25,18 +25,11 @@ Subpackages Submodules ---------- -syft.abstract\_node module +syft.abstract\_server module -------------------------- -.. automodule:: syft.abstract_node +.. automodule:: syft.abstract_server :members: :undoc-members: :show-inheritance: -syft.gevent\_patch module -------------------------- - -.. 
automodule:: syft.gevent_patch - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/api_reference/syft.service.action.action_object.rst b/docs/source/api_reference/syft.service.action.action_object.rst index 014e584d460..89424403841 100644 --- a/docs/source/api_reference/syft.service.action.action_object.rst +++ b/docs/source/api_reference/syft.service.action.action_object.rst @@ -18,7 +18,7 @@ syft.service.action.action\_object has_action_data_empty is_action_data_empty make_action_side_effect - propagate_node_uid + propagate_server_uid send_action_side_effect diff --git a/docs/source/api_reference/syft.service.context.rst b/docs/source/api_reference/syft.service.context.rst index 276c578330b..990de2487d1 100644 --- a/docs/source/api_reference/syft.service.context.rst +++ b/docs/source/api_reference/syft.service.context.rst @@ -19,7 +19,7 @@ syft.service.context AuthedServiceContext ChangeContext - NodeServiceContext + ServerServiceContext UnauthedServiceContext diff --git a/docs/source/api_reference/syft.service.policy.policy.rst b/docs/source/api_reference/syft.service.policy.policy.rst index 74b52aa14cb..4e6cb3820dc 100644 --- a/docs/source/api_reference/syft.service.policy.policy.rst +++ b/docs/source/api_reference/syft.service.policy.policy.rst @@ -29,7 +29,7 @@ syft.service.policy.policy init_policy load_policy_code new_getfile - partition_by_node + partition_by_server process_class_code retrieve_from_db submit_policy_code_to_user_code diff --git a/docs/source/api_reference/syft.store.dict_document_store.rst b/docs/source/api_reference/syft.store.dict_document_store.rst deleted file mode 100644 index 0d297482041..00000000000 --- a/docs/source/api_reference/syft.store.dict_document_store.rst +++ /dev/null @@ -1,32 +0,0 @@ -syft.store.dict\_document\_store -================================ - -.. automodule:: syft.store.dict_document_store - - - - - - - - - - - - .. rubric:: Classes - - .. autosummary:: - - DictBackingStore - DictDocumentStore - DictStoreConfig - DictStorePartition - - - - - - - - - diff --git a/docs/source/api_reference/syft.store.kv_document_store.rst b/docs/source/api_reference/syft.store.kv_document_store.rst deleted file mode 100644 index a5d91177e60..00000000000 --- a/docs/source/api_reference/syft.store.kv_document_store.rst +++ /dev/null @@ -1,31 +0,0 @@ -syft.store.kv\_document\_store -============================== - -.. automodule:: syft.store.kv_document_store - - - - - - - - - - - - .. rubric:: Classes - - .. autosummary:: - - KeyValueBackingStore - KeyValueStorePartition - UniqueKeyCheck - - - - - - - - - diff --git a/docs/source/api_reference/syft.store.mongo_client.rst b/docs/source/api_reference/syft.store.mongo_client.rst deleted file mode 100644 index a21d43700aa..00000000000 --- a/docs/source/api_reference/syft.store.mongo_client.rst +++ /dev/null @@ -1,31 +0,0 @@ -syft.store.mongo\_client -======================== - -.. automodule:: syft.store.mongo_client - - - - - - - - - - - - .. rubric:: Classes - - .. autosummary:: - - MongoClient - MongoClientCache - MongoStoreClientConfig - - - - - - - - - diff --git a/docs/source/api_reference/syft.store.mongo_codecs.rst b/docs/source/api_reference/syft.store.mongo_codecs.rst deleted file mode 100644 index 1d91b779e95..00000000000 --- a/docs/source/api_reference/syft.store.mongo_codecs.rst +++ /dev/null @@ -1,35 +0,0 @@ -syft.store.mongo\_codecs -======================== - -.. automodule:: syft.store.mongo_codecs - - - - - - - - .. rubric:: Functions - - .. 
autosummary:: - - fallback_syft_encoder - - - - - - .. rubric:: Classes - - .. autosummary:: - - SyftMongoBinaryDecoder - - - - - - - - - diff --git a/docs/source/api_reference/syft.store.mongo_document_store.rst b/docs/source/api_reference/syft.store.mongo_document_store.rst deleted file mode 100644 index 30fdb6bc6ca..00000000000 --- a/docs/source/api_reference/syft.store.mongo_document_store.rst +++ /dev/null @@ -1,40 +0,0 @@ -syft.store.mongo\_document\_store -================================= - -.. automodule:: syft.store.mongo_document_store - - - - - - - - .. rubric:: Functions - - .. autosummary:: - - from_mongo - syft_obj_to_mongo - to_mongo - - - - - - .. rubric:: Classes - - .. autosummary:: - - MongoBsonObject - MongoDocumentStore - MongoStoreConfig - MongoStorePartition - - - - - - - - - diff --git a/docs/source/api_reference/syft.store.rst b/docs/source/api_reference/syft.store.rst index b21cf230488..e83e8699025 100644 --- a/docs/source/api_reference/syft.store.rst +++ b/docs/source/api_reference/syft.store.rst @@ -32,8 +32,5 @@ syft.store.kv_document_store syft.store.linked_obj syft.store.locks - syft.store.mongo_client - syft.store.mongo_codecs - syft.store.mongo_document_store syft.store.sqlite_document_store diff --git a/docs/source/api_reference/syft.store.sqlite_document_store.rst b/docs/source/api_reference/syft.store.sqlite_document_store.rst deleted file mode 100644 index cdc7ad4f4f0..00000000000 --- a/docs/source/api_reference/syft.store.sqlite_document_store.rst +++ /dev/null @@ -1,39 +0,0 @@ -syft.store.sqlite\_document\_store -================================== - -.. automodule:: syft.store.sqlite_document_store - - - - - - - - .. rubric:: Functions - - .. autosummary:: - - thread_ident - - - - - - .. rubric:: Classes - - .. autosummary:: - - SQLiteBackingStore - SQLiteDocumentStore - SQLiteStoreClientConfig - SQLiteStoreConfig - SQLiteStorePartition - - - - - - - - - diff --git a/docs/source/api_reference/syft.types.grid_url.rst b/docs/source/api_reference/syft.types.grid_url.rst deleted file mode 100644 index 123f9f34af6..00000000000 --- a/docs/source/api_reference/syft.types.grid_url.rst +++ /dev/null @@ -1,29 +0,0 @@ -syft.types.grid\_url -==================== - -.. automodule:: syft.types.grid_url - - - - - - - - - - - - .. rubric:: Classes - - .. autosummary:: - - GridURL - - - - - - - - - diff --git a/docs/source/api_reference/syft.types.rst b/docs/source/api_reference/syft.types.rst index 67c05d63fd2..646214ca364 100644 --- a/docs/source/api_reference/syft.types.rst +++ b/docs/source/api_reference/syft.types.rst @@ -29,7 +29,7 @@ syft.types.base syft.types.datetime - syft.types.grid_url + syft.types.server_url syft.types.syft_metaclass syft.types.syft_object syft.types.transforms diff --git a/docs/source/api_reference/syft.types.server_url.rst b/docs/source/api_reference/syft.types.server_url.rst new file mode 100644 index 00000000000..c9ad2ab0625 --- /dev/null +++ b/docs/source/api_reference/syft.types.server_url.rst @@ -0,0 +1,29 @@ +syft.types.server\_url +==================== + +.. automodule:: syft.types.server_url + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + ServerURL + + + + + + + + + diff --git a/docs/source/api_reference/syft.types.transforms.rst b/docs/source/api_reference/syft.types.transforms.rst index 374052d54f7..ef575c10e7b 100644 --- a/docs/source/api_reference/syft.types.transforms.rst +++ b/docs/source/api_reference/syft.types.transforms.rst @@ -14,7 +14,7 @@ syft.types.transforms .. 
autosummary:: add_credentials_for_key - add_node_uid_for_key + add_server_uid_for_key convert_types drop generate_id diff --git a/docs/source/deployment/glossary.rst b/docs/source/deployment/glossary.rst index 1cfa7688e9c..6e5b2bf95e0 100644 --- a/docs/source/deployment/glossary.rst +++ b/docs/source/deployment/glossary.rst @@ -12,20 +12,20 @@ General terms Data Consortium ~~~~~~~~~~~~~~~~~~~~~ -A legal agreement under which multiple data owners delegate legal authority (IRB authority) to a central party, such that a data scientist need only enter into legal contract with that central party in order to perform analysis across all relevant participating domains in the ``data consortium``. +A legal agreement under which multiple data owners delegate legal authority (IRB authority) to a central party, such that a data scientist need only enter into legal contract with that central party in order to perform analysis across all relevant participating datasites in the ``data consortium``. Differential Privacy ~~~~~~~~~~~~~~~~~~~~~ While the textbook definition can be found here_, within the context of remote data science, ``differential privacy`` is a set of algorithms which empower a data owner to limit the probability that a data scientist will be able to use their statistical results to reverse engineer the data owner's def. of too much information about the underlying data that generated those results. In a nutshell, its aim is to prevent a Data Scientist from identifying any individual from the dataset through reverse-engineering. -Domain Node +Datasite Server ~~~~~~~~~~~~~~~~~~~~~ A ``computer system`` (or collection of computer systems) which manages the remote study of a data owner's data by a data scientist. It is responsible for allowing the `Data Owner` to manage the data, as well as incoming ``requests`` from data scientists and for gatekeeping the data scientist's access to data, compute, and experimental results stored within the data owner's compute infrastructure. -Network Node +Gateway Server ~~~~~~~~~~~~~~~~~~~~~ -A server which exists outside of any data owner's institution, providing services to the network of data owners and data scientists such as dataset search and bulk project approval (simultaneous legal/technical approval to participate in a project across groups of domains and data scientists at a time). A Network acts as a bridge between between its members and subscribers. The members are ``Domains`` while subscribers are the ``end users`` (e.g. Data Scientist) who explore and perform analysis on the datasets hosted by the members. -A network is used to provide access to a collection of domains at once i.e. if a user agrees to a ``Network Agreement``, then they automatically agree to the conditions to the Domains enlisted in that Network. +A server which exists outside of any data owner's institution, providing services to the network of data owners and data scientists such as dataset search and bulk project approval (simultaneous legal/technical approval to participate in a project across groups of datasites and data scientists at a time). A Network acts as a bridge between between its members and subscribers. The members are ``Datasites`` while subscribers are the ``end users`` (e.g. Data Scientist) who explore and perform analysis on the datasets hosted by the members. +A network is used to provide access to a collection of datasites at once i.e. 
if a user agrees to a ``Network Agreement``, then they automatically agree to the conditions to the Datasites enlisted in that Network. Privacy Budget ~~~~~~~~~~~~~~~~~~~~~ @@ -39,13 +39,9 @@ PySyft ~~~~~~~~~~~~~~~~~~~~~ An open-source platform that enables remote data science experiments by combining ``federated learning`` and ``differentialy privacy`` techniques. -PyGrid +Syft Server ~~~~~~~~~~~~~~~~~~~~~ -``PyGrid`` is a ``peer-to-peer network`` of data owners and data scientists who can collectively train AI models using ``PySyft``. ``PyGrid`` is also the central server for conducting both model-centric and data-centric ``federated learning``. You may control PyGrid via our user-interface, ``PyGrid Admin``. - -HaGrid -~~~~~~~~~~~~~~~~~~~~~ -``Hagrid`` (HAppy GRID!) is a ``command-line tool`` that speeds up the deployment of ``PyGrid``, the software providing a peer-to-peer network of data owners and data scientists who can collectively train models. +``Syft Server`` is also the central server for conducting both model-centric and data-centric ``federated learning``. You may control Syft Server via our user-interface, ``Syft UI``. Remote Data Science ~~~~~~~~~~~~~~~~~~~~~ @@ -58,25 +54,25 @@ Data Owner ~~~~~~~~~~~~~~~~~~~~~ Within the field of remote data science, a data owner is someone who has a (digital) dataset which they would like to make available for study by an outside party whom they may or may not fully trust to have good intentions. -Domain Owner +Datasite Owner ~~~~~~~~~~~~~~~~~~~~~ -A user of ``PyGrid`` who has deployed a domain node. +A user of ``Syft Server`` who has deployed a datasite server. Network Owner ~~~~~~~~~~~~~~~~~~~~~ -Within the field of remote data science, a network owner provides technical and legal services helping to connect data scientists with data owners (domains) by helping them find each other (dataset search) and by helping them enter into bulk legal agreements through the hosting of a network-level data consortium to which such data owners and data scientist may apply. +Within the field of remote data science, a network owner provides technical and legal services helping to connect data scientists with data owners (datasites) by helping them find each other (dataset search) and by helping them enter into bulk legal agreements through the hosting of a network-level data consortium to which such data owners and data scientist may apply. Data Scientist ~~~~~~~~~~~~~~~~~~~~~ -Within the context of remote data science, a data scientist is a persona which desires to answer a specific question using data owned by someone else. This user is required to sign a ``Data Access Agreement`` if you have required one in the ``Domain Settings Configurations``. +Within the context of remote data science, a data scientist is a persona which desires to answer a specific question using data owned by someone else. This user is required to sign a ``Data Access Agreement`` if you have required one in the ``Datasite Settings Configurations``. -Domain Compliance Officer +Datasite Compliance Officer ~~~~~~~~~~~~~~~~~~~~~~~~~~~ All the personas in an institution that are in charge of making sure that the utilization of data at an institution occurs within legal boundaries and under their supervision and with their liability/responsibility. 
Network Compliance Officer ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -All the personas in an institution that are in charge of making sure that the access and utilization of data between their network's domains and members (data scientists) fall within the bounds outlined in the network's legal agreements. +All the personas in an institution that are in charge of making sure that the access and utilization of data between their network's datasites and members (data scientists) fall within the bounds outlined in the network's legal agreements. User roles ============ @@ -86,7 +82,7 @@ Default roles Data Scientist """""""""""""" -This role is for users who will be performing computations on your datasets. They may be users you know directly or those who found your domain through search and discovery. By default this user can see a ``list of your datasets`` and can request to get results. This user will also be required to sign a ``Data Access Agreement`` if you have required one in the ``Domain Settings Configurations``. +This role is for users who will be performing computations on your datasets. They may be users you know directly or those who found your datasite through search and discovery. By default this user can see a ``list of your datasets`` and can request to get results. This user will also be required to sign a ``Data Access Agreement`` if you have required one in the ``Datasite Settings Configurations``. Default permissions: @@ -94,7 +90,7 @@ Default permissions: Compliance Officer """""""""""""""""""" -This role is for users who will help you manage requests made on your node. They should be users you trust. They are not able to change ``Domain Settings`` or edit roles but they are by default able to accept or deny ``user requests`` on behalf of the ``domain node``. +This role is for users who will help you manage requests made on your server. They should be users you trust. They are not able to change ``Datasite Settings`` or edit roles but they are by default able to accept or deny ``user requests`` on behalf of the ``datasite server``. Default permissions: @@ -104,7 +100,7 @@ Default permissions: Admin """""" -This role is for users who will help you manage your node. This should be users you trust. The main difference between this ``user`` and a ``Compliance Officer`` is that this user by default not only can manage requests but can also edit ``Domain Settings.`` This is the highest level permission outside of an Owner. +This role is for users who will help you manage your server. This should be users you trust. The main difference between this ``user`` and a ``Compliance Officer`` is that this user by default not only can manage requests but can also edit ``Datasite Settings.`` This is the highest level permission outside of an Owner. Default permissions: @@ -112,7 +108,7 @@ Default permissions: Owner """""""" -There is only one Owner account assigned to any one domain node. The owner account is the highest level permission and is a requirement for deploying a domain node. If you should ever want to transfer ownership of your domain node to someone, please contact us at support@openmined.org. +There is only one Owner account assigned to any one datasite server. The owner account is the highest level permission and is a requirement for deploying a datasite server. If you should ever want to transfer ownership of your datasite server to someone, please contact us at support@openmined.org. 
Default permissions: @@ -120,16 +116,16 @@ Default permissions: * Cannot disable permissions by default -Domain membership roles +Datasite membership roles ~~~~~~~~~~~~~~~~~~~~~~~~~~ Guest """""""""""""" -The lowest level of ``network membership``, a guest domain is listed within a network node's registry and its datasets are searchable/discoverable by all users of the network, but the network has no legal relationship to the domain nor any authority to grant data scientists access to its data. As such, upon discovering a domain on the network, such a data scientist must apply directly to the domain for access by creating an account on such a domain and signing a legal agreement (a "data-sharing agreement") directly with its corresponding data owner. +The lowest level of ``network membership``, a guest datasite is listed within a network server's registry and its datasets are searchable/discoverable by all users of the network, but the network has no legal relationship to the datasite nor any authority to grant data scientists access to its data. As such, upon discovering a datasite on the network, such a data scientist must apply directly to the datasite for access by creating an account on such a datasite and signing a legal agreement (a "data-sharing agreement") directly with its corresponding data owner. Member """""""""""""" -The highest level of ``network membership``, a full domain member is greater than a guest member because, beyond its listing within a network node's registry, the domain has entered into a legal relationship with the network owner such that the network owner can unilaterally give its full data scientists access to data hosted by the domain. Note that this does not mean that the network can control access to all potential users of the ``registered domain``, because the domain's membership in the network is non-exclusive (domains can register in multiple networks and also accept direct data-scientist users on the side). A network node only has authority to give its own full data scientists access to any full domain within its registry. +The highest level of ``network membership``, a full datasite member is greater than a guest member because, beyond its listing within a network server's registry, the datasite has entered into a legal relationship with the network owner such that the network owner can unilaterally give its full data scientists access to data hosted by the datasite. Note that this does not mean that the network can control access to all potential users of the ``registered datasite``, because the datasite's membership in the network is non-exclusive (datasites can register in multiple networks and also accept direct data-scientist users on the side). A network server only has authority to give its own full data scientists access to any full datasite within its registry. .. |image0| image:: ../_static/deployment/image2.png :width: 95% diff --git a/docs/source/deployment/index.rst b/docs/source/deployment/index.rst deleted file mode 100644 index 461ecb3734e..00000000000 --- a/docs/source/deployment/index.rst +++ /dev/null @@ -1,673 +0,0 @@ -.. _advanced_deployment: - -=========================================== -Advanced Deployment: Introduction to HaGrid -=========================================== - -.. toctree:: - :maxdepth: 3 - -Hagrid (HAppy GRID!) is a command-line tool that speeds up the -deployment of PyGrid, the software providing a peer-to-peer network of -data owners and data scientists who can collectively train AI models -using `PySyft `__. 
- -Hagrid is able to orchestrate a collection of PyGrid Domain and Network -nodes and scale them in a local development environment (based on a -docker-compose file). By stacking multiple copies of this docker, you -can simulate multiple entities (e.g countries) that collaborate over -data and experiment with more complicated data flows such as SMPC. - -Similarly to the local deployment, Hagrid can bootstrap docker on a -Vagrant VM or on a cloud VM, helping you deploy in an user-friendly way -on Azure, AWS\* and GCP*. - -*\* Deploying to AWS and GCP is still under development.* - -Working with Hagrid & Syft API versions: - -- **Development mode:** - You can experiment with your own local checked-out version of Syft - and bootstrap a local Jupyter Notebook where you can use the Syft - & Grid API to communicate with a prod/local dev system\ *.* - -- **Production mode:** - You can specify the branch and repository you want to fork (including your own fork) and Hagrid will monitor those branches in a cron job, pull new changes and restart the services to apply them, therefore your deployed system will always stay up to date. - -Prerequisites -=============== - -The following operating systems are currently supported: Linux, Windows, MacOS. Please ensure you have at least 8GB of ram if you intend to run Hagrid locally. - -Setting up virtual environment using Python 3.9 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -1. Ensure using **Python3.8+**, which should be first installed in your system. To easily handle further dependencies, we suggest using conda: - - a. Install conda `following these instructions `_ depending on your OS. - - b. Create a new env specifying the Python version (we recommend Python 3.8/3.9) in the terminal: - - .. code-block:: bash - - $ conda create -n myenv python=3.9 - $ conda activate myenv - (to exit): conda deactivate - -Using latest pip -~~~~~~~~~~~~~~~~~ - -**Pip** is required to install dependencies, so make sure you have it installed and up-to-date by running the following these `instructions `__. - -If you have it installed, please check it is the latest version: - -.. code-block:: bash - - $ pip install --upgrade pip && pip -V (Linux) - $ python -m pip install --upgrade pip (for Windows) - - -Install Jupyter Notebook -~~~~~~~~~~~~~~~~~~~~~~~~~ - -1. A very convenient way to interact with a deployed node is via Python, using a Jupyter Notebook. You can install it by running: - - .. code-block:: bash - - $ pip install jupyter-notebook - -2. If you encounter issues, you can also install it using Conda: - - .. code-block:: bash - - $ conda install -c conda-forge notebook - -3. To launch the Jupyter Notebook, you can run the following in your terminal: - - .. code-block:: bash - - $ jupyter notebook - -Installing and configuring Docker -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -1. Install **Docker** and **Docker Composite V2,** which is needed to orchestrate docker, as explained below: - - For **Linux**: - - a. Install **Docker**: - - .. code-block:: bash - - $ sudo apt-get upgrade docker & docker run hello-world - - b. Install **Docker Composite V2** as described `here `__. - - c. You should see ‘Docker Compose version v2’ when running: - - .. code-block:: bash - - $ docker compose version - - d. If not, go through the `instructions here `__ or if you are using Linux, you can try to do: - - .. 
code-block:: bash - - $ mkdir -p ~/.docker/cli-plugins - $ curl -sSL https://github.com/docker/compose-cli/releases/download/v2.0.0-beta.5/docker-compose-linux-amd64 -o ~/.docker/cli-plugins/docker-compose - $ chmod +x ~/.docker/cli-plugins/docker-compose - - e. Also, make sure you can run without sudo: - - .. code-block:: bash - - $ echo $USER //(should return your username) - $ sudo usermod -aG docker $USER - - - For **Windows**, **MacOs**: - - a. You can install Desktop Docker as explained `here for Windows `_ or `here for MacOS `_. - - b. The ``docker-compose`` should be enabled by default. If you encounter issues, you can check it by: - - - Go to the Docker menu, click ``Preferences (Settings on Windows)`` > ``Experimental features``. - - - Make sure the Use ``Docker Compose V2`` box is checked. - - c. Ensure at least 8GB of RAM are allocated in the Desktop Docker app: - - - Go to 'Preferences' -> 'Resources' - - - Drag the 'Memory' dot until it says at least 8.00GB - - - Click 'Apply & Restart' - -2. Make sure you are using the **dev** branch of the PySyft repository (branch can be found `here `__) - - -Explore locally with the PySyft API -==================================== - -1. Install **tox**: - - .. code-block:: bash - - $ pip install tox - -2. Move to the correct branch in the PySyft repository: - - .. code-block:: bash - - $ git checkout dev - -3. Check current tasks that can be run by tox: - - .. code-block:: bash - - $ tox -l - -4. Open an editable Jupyter Notebook which doesn't require to run in a container: - - .. code-block:: bash - - $ tox -e syft.jupyter - - -Local deployment using Docker -==================================== - -1. Install Hagrid: - - .. code-block:: bash - - $ pip install -U hagrid - -2. Launch a Domain Node: - - .. code-block:: bash - - $ hagrid launch domain - - - .. note:: - - First run **it might take ~5-10 mins** to build the PyGrid docker image. Afterwards, you should see something like: - - .. code-block:: bash - - Launching a domaing PyGrid node on port 8081 ! - - - TYPE: domain - - NAME: mystifying_wolf - - TAG: 035c3b6a378a50f78cd74fc641d863c7 - - PORT: 8081 - - DOCKER: v2.2.3 - - Optionally, you can provide here additional args to use a certain repository and branch, as: - - .. code-block:: bash - - $ hagrid launch domain --repo $REPO --branch $BRANCH - -3. Go to ``localhost:port/login`` in your browser (using the port specified in your CLI, here *8081*) to see the PyGrid Admin UI where you, as a data owner, can manage your PyGrid deployment. - - a. Log in using the following credentials: - - .. code-block:: python - - info@openmined.org - - changethis - - - b. Explore the interface or you can even do requests via `Postman `__. You can check all the available endpoints at http://localhost:8081/api/v1/openapi.json/ and have all the following environment variables set (a more detailed explanationcan be found in `this video section `__): - - |image0| - - The auth token can be obtained by doing a login request as follows: - - |image1| - -4. While the Domain Node is online, you can start a Jupyter Notebook as described `above <#explore-locally-with-the-pysyft-api-no-containers-involved>`__ to use PySyft to communicate to it in a Python client rather than a REST API. Connecting to it can be done as following: - - .. code-block:: python - - import syft as sy - - domain = sy.login(email='info@openmined.org', password='changethis', port=8081) - - domain.store - - domain.requests - - Domain.users - -5. To stop the node, run: - - .. 
code-block:: bash - - $ hagrid land --tag=035c3b6a378a50f78cd74fc641d863c7 (using the TAG specified in your CLI) - - -Local deployment using Vagrant and VirtualBox -=============================================== - -This is particularly useful to experiment with the Ansible scripts to test new changes. - -1. Run hagrid status and ensure all dependencies are checked to make sure you have Vagrant and VirtualBox installed. - - |image2| - -2. For installing Vagrant, check the `instructions here. `__ - -3. Additionally to Vagrant, we need to install a plugin called landrush that allows using a custom DNS that points to the IP address used in the VM: - - .. code-block:: bash - - $ vagrant plugin install landrush - -3. Move to the correct branch and directory in the PySyft repository: - - .. code-block:: bash - - $ git checkout 0.6.0 - $ cd packages/grid - - -4. Create the environment using vagrant for the first time: - - .. code-block:: bash - - $ vagrant init - $ vagrant up - - - When the VM is booted up, it starts the docker service and then the docker service starts all the containers as configured. As it is just created, provisioning is always **run** automatically\ **.** - - When deploying locally, the tasks listed in ‘main.yml’ for the node are not being run. Therefore, it does not have to do the lengthy - setup every time (installing docker, cloning PySyft and launching the cronjob to reload PySyft). - - .. note:: The tasks for the containers and nodes respectively can be found in \*.yml files defined in ``packages/grid/ansible/roles/containers`` and ``packages/grid/ansible/roles/nodes`` - -5. If you intend to run it frequently and not only once, either run ``vagrant status`` to see if the env has already been created and if yes, to ``run vagrant up --provision`` every time to launch the provisioners, otherwise it is just resuming the existing machine. - -6. To access the VM via SSh and jump to the user we are creating in vagrant: - - .. code-block:: bash - - $ vagrant ssh - $ sudo su -om - $ whoami # should return 'om' - -8. You can go to ``http://10.0.1.2/login`` which is at port 80 to access the PyGrid Admin UI, which you can explore, query via Postman or in a - local Jupyter Notebook using a Python client as described in `steps 3 and 4 here <#local-deployment-using-docker>`__. - -9. To shut down the machine currently managed by Vagrant, you can run the following after exiting this node shell: - - .. code-block:: bash - - $ vagrant halt - -10. Or alternatively to destroy it using: - - .. code-block:: bash - - $ vagrant destroy - - -Deploying on Kubernetes -======================== - -We provide an option to deploy the stack using kubernetes. To test and run this locally we use ``minikube`` and ``devspace``. - -These are the prerequisites needed further, which are explained step-by-step below: - -* docker -* hyperkit -* minikube -* devspace -* kubectl -* kubectx - -MacOS -~~~~~ - -* **Hyperkit** - -Ingress is not working on Mac and Docker and the issue is `being tracked here `_. Until then we will use the ``hyperkit`` backend. - -#. Install hyperkit by running: - -.. code-block:: bash - - $ brew install hyperkit - - -* **Docker** - -#. See above about using ``hyperkit`` on Mac until the ingress issue is fixed. - -#. We will be using Docker - however you do not need to ``enable kubernetes`` in your Docker Desktop App. If it is enabled, disable it and click `Apply & Restart`. - -#. 
This is because we will use ``minikube`` which will create and manage all the k8s resources we require as a normal container in docker engine. We install it by running: - -.. code-block:: bash - - $ brew install minikube - - - -* **Minikube** - -1. ``minikube`` is a mini master k8s node that you can run on your local machine in a similar manner to Docker. To use minikube you need it to be running: - -.. code-block:: bash - - $ minikube config set driver hyperkit - $ minikube start --disk-size=40g - $ minikube addons enable ingress - -2. If you ever need to reset ``minikube`` you can do: - -.. code-block:: bash - - $ minikube delete --all --purge - -3. Once ``minikube`` is running, you should see the container in Docker by running: - -.. code-block:: bash - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 57f73851bf08 gcr.io/k8s-minikube/kicbase:v0.0.25 "/usr/local/bin/entr…" 46 hours ago Up About a minute 127.0.0.1:57954->22/tcp, 127.0.0.1:57955->2376/tcp, 127.0.0.1:57957->5000/tcp, 127.0.0.1:57958->8443/tcp, 127.0.0.1:57956->32443/tcp minikube - - - -* **Kubectl** - -``kubectl`` is the CLI tool for kubernetes. If you have ran ``minikube``, it should have configured your kubectl to point to the local minikube cluster by default. - -You should be able to see this if you run the following command: - -.. code-block:: bash - - $ kubectl get all - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/kubernetes ClusterIP 10.96.0.1 443/TCP 45h - -* **k8s Namespaces** - -To understand the usage of ``k8s Namespaces``, think of a namespace as a grouping of resources and permissions which lets you easily create and destroy everything related to a single keyword. - -.. code-block:: bash - - $ kubectl get namespaces - NAME STATUS AGE - default Active 45h - kube-node-lease Active 45h - kube-public Active 45h - kube-system Active 45h - kubernetes-dashboard Active 45h - -All k8s have a default namespace and the other ones here are from kubernetes and minikube. - -We will use the namespace ``openmined`` to make it clear what belongs to the Grid stack and what is something else. To create it, we can run: - -.. code-block:: bash - - $ kubectl create namespace openmined - -.. code-block:: bash - - $ kubectl get all -n openmined - No resources found in openmined namespace. - - -* **Kubectx** - -``kubectx`` is a package of helpful utilities which can help you do things like set a default namespace. - -.. code-block:: bash - - $ brew install kubectx - -Now we can use a tool like ``kubens`` to change the default namespace to openmined. - -.. code-block:: bash - - $ kubens openmined - Context "minikube" modified. - Active namespace is "openmined". - -Now when we use commands without `-n` we get openmined by default. - -.. code-block:: bash - - $ kubectl get all - No resources found in openmined namespace. - -* **Helm Charts** - -The most popular way to deploy applications to k8s is with a tool called Helm. What helm aims to do is to provide another layer of abstraction over kubernetes yaml configuration with hierarchical variables, templates and a package definition which can be hosted over HTTP allowing custom applications to depend on other prefabricated helm charts or to provide consumable packages of your code as a helm chart itself. - -* **devspace** - -To make development and deployment of our kubernetes code easier, we use a tool called ``devspace`` which aims to be like a hot reloading dev optimised version of `docker compose` but for kubernetes. 
More documentation can be `found here `_. - -Additionally ``devspace`` allows us to deploy using helm by auto-generating the values and charts from the ``devspace.yaml`` which means the single source of truth can be created which includes both production helm charts and kubernetes yaml configuration as well as local dev overrides. - -.. code-block:: bash - - $ brew install devspace - - -Deploy to local dev -~~~~~~~~~~~~~~~~~~~ - -1. Check that you have the right namespace: - -.. code-block:: bash - - $ devspace list namespaces - Name Default Exists - default false true - kube-node-lease false true - kube-public false true - kube-system false true - kubernetes-dashboard false true - openmined *true* true - -2. Run the ``dev`` command with ``devspace``: - -* To run a network with headscale VPN: - -.. code-block:: bash - - $ cd packages/grid - $ devspace dev -b -p network - -* To run a domain without the headscale VPN: - -.. code-block:: bash - - $ cd packages/grid - $ devspace dev -b -p domain - -3. Connect VPN in dev: - -You can run the connect VPN settings using all the opened ports with: - -.. code-block:: bash - - $ cd packages/grid - $ python3 vpn/connect_vpn.py http://localhost:8088 http://localhost:8087 http://headscale:8080 - -4. Destroy the local deployment - -.. code-block:: bash - - $ devspace purge - -5. Delete persistent volumes - -The database and the VPN containers have persistent volumes. - -* You can check them with: - -.. code-block:: bash - - $ kubectl get persistentvolumeclaim - -* Then delete PostgreSQL as it follows: - -.. code-block:: bash - - $ kubectl delete persistentvolumeclaim app-db-data-db-0 - -6. Check which images / tags are being used - -This will show all the unique images and their tags currently deployed which is useful -when debugging which version is actually running in the cluster. - -.. code-block:: bash - - $ kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort | uniq -c - - -7. Restart a container / pod / deployment - -* To get all the deployments: - -.. code-block:: bash - - $ kubectl get deployments - NAME READY UP-TO-DATE AVAILABLE AGE - backend 1/1 1 1 18m - backend-stream 1/1 1 1 18m - backend-worker 1/1 1 1 18m - frontend 1/1 1 1 18m - queue 1/1 1 1 19m - -* Restart the backend-worker - -.. code-block:: bash - - $ kubectl rollout restart deployment backend-worker - - -Deploy to Google Kubernetes Engine (GKE) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -1.Configure kubectl context with GKE: - -.. code-block:: bash - - $ gcloud container clusters get-credentials --region us-central1-c staging-cluster-1 - -2. Check that you have the correct context - -.. code-block:: bash - - $ kubectx - -3. Configure your Google Container Registry (GCR): - -.. code-block:: bash - - $ gcloud auth configure-docker - -4. Check your settings with print - -.. code-block:: bash - - $ devspace print -p domain --var=CONTAINER_REGISTRY=gcr.io/reflected-space-315806/ - -5. You should see that you are creating a domain and that the container registry variable changes the image name to: - -.. code-block:: bash - - images: - backend: - image: gcr.io/reflected-space-315806/openmined/grid-backend - -.. note:: This will tell ``devspace`` to publish to the GCR for your active GCP project. - -6. Create the openmined namespace - -.. code-block:: bash - - $ kubectl create namespace openmined - -7. Tell devspace to use the openmined namespace - -.. code-block:: bash - - $ devspace use namespace openmined - -8. 
Deploy to GKE: - -.. code-block:: bash - - $ devspace deploy -p domain --var=CONTAINER_REGISTRY=gcr.io/reflected-space-315806/ - -9. Access a container directly: - -.. code-block:: bash - - $ devspace enter - -10. Attach to container stdout: - -.. code-block:: bash - - $ devspace attach - -11. Use port forwarding to access an internal service: - -.. code-block:: bash - - $ kubectl port-forward deployment/tailscale :4000 - - -Deploying to Azure -==================================== - -1. Get your virtual machine on Azure ready - - a. To create one, you can either go to `portal.azure.com `__ or use `this 1-click template `__ available off-the-shelves. - - b. If you proceed to create it yourself, make sure you respect the following: - - i. Use ``Ubuntu Server 20.04`` or newer - - ii. Select ``SSH``, ``HTTP``, ``HTTPS`` as inbound ports - - iii. Have at least ``2x CPU``, ``4GB RAM``, ``40GB HDD``. - - .. note:: - During creation, write down the username used and save the key locally. In case warnings arise regarding having an unprotected key, you can run: - - .. code-block:: bash - - $ sudo chmod 600 key.pem - -2. To deploy to Azure, the following can be run: - - .. code-block:: bash - - $ hagrid launch node --username=azureuser --key-path=~/hagriddeploy_key.pem domain to 51.124.153.133 - - - Additionally, you are being asked if you want to provide another repository and branch to fetch and update HAGrid, which you can skip by pressing ``Enter``. - -3. If successful, you can now access the deployed node at the specified IP address and interact with it via the PyGrid Admin UI at http://51.124.153.133/login (change IP with yours) or use Postman to do API requests. - -.. |image0| image:: ../_static/deployment/image2.png - :width: 95% - -.. |image1| image:: ../_static/deployment/image1.png - :width: 95% - -.. |image2| image:: ../_static/deployment/image3.png - :width: 95% diff --git a/docs/source/getting_started/index.rst b/docs/source/getting_started/index.rst index 4633130e472..e2264459c00 100644 --- a/docs/source/getting_started/index.rst +++ b/docs/source/getting_started/index.rst @@ -7,9 +7,9 @@ Getting Started .. toctree:: :maxdepth: 3 -Welcome to the domain deployment installation tutorials! +Welcome to the datasite deployment installation tutorials! This section of our documentation is designed to be the -simplest way to get you started deploying your data to a domain node +simplest way to get you started deploying your data to a datasite server on an OSX, Linux, or Windows machine and interacting with it as a data scientist using PySyft. @@ -20,7 +20,7 @@ as a data scientist using PySyft. `advanced deployment documentation `__. The purpose of these tutorials is to help you install everything -you need to run a Domain node from your personal machine (such +you need to run a Datasite server from your personal machine (such as if you're running through OpenMined `courses `__ or @@ -30,7 +30,7 @@ notebooks with PySyft installed, such as if you're pretending to be both Data Owner and Data Scientist as a part of a tutorial or course. 
-**We will be setting up the following dependencies before PySyft and PyGrid:** +**We will be setting up the following dependencies before PySyft and Syft Server:** * Python >=3.9 * pip diff --git a/docs/source/guides/data-owner/00-deploy-domain.rst b/docs/source/guides/data-owner/00-deploy-domain.rst deleted file mode 100644 index 0d11a065ce8..00000000000 --- a/docs/source/guides/data-owner/00-deploy-domain.rst +++ /dev/null @@ -1,190 +0,0 @@ -Deploying your own Domain Server -=============================================== - -**Data Owner Tutorials** - -◻️ 00-deploy-domain 👈 - -◻️ 01-upload-data - -.. note:: - **TIP:** To run this tutorial interactively in Jupyter Lab on your own machine type: - -:: - - pip install -U hagrid - hagrid quickstart data-owner - - - -Data owners are defined by those with ``datasets`` 💾 they want to make available for -study by an outside party. - -This tutorial will help you understand how a Data Owner can -``launch`` their own Domain Server to securely host private datasets. - - **Note:** Throughout the tutorials, we also mean Domain Servers whenever we refer to Domain Node. Both mean the same and are used interchangeably. - -Why do Data Owners Deploy Domain Servers? ------------------------------------------ - -The concept of Remote Data Science starts with a server-based model -that we call ``Domain Server``. It allows people/data owners 👨 to load -their ``private`` data into these servers and create an account with -a username and password for Data Scientists💻. - -The advantage of using a Domain Server is that you can catalyze the impact your dataset can have by allowing... - -#. a Data Scientist to only get ``answers`` to the types of ``questions`` you allow them to -#. and by allowing them to get those answers without needing to directly ``access`` or have a copy of your data - - -|00-deploy-domain-00| - - -This means that by having your organization retain governance over the information they steward without -needing to share direct ``copies`` of data to collaborators, domain servers create an opportunity for more -collaboration and more research to happen without losing ``control`` of your data and risking things like IP. - -Steps To Deploy a Domain ------------------------- - -How collaboration gets streamlined will be covered in our tutorials about connecting to a ``"Network Node."`` We will discuss -how control is maintained in our tutorials about ``"How to assign a Privacy Budget."`` For this tutorial, however, -let's start by learning how to deploy a domain server. - -📒 Overview of this tutorial: - -* **Installing** the required software -* **Running** the servers -* **Checking** the status of deployed server - -|00-deploy-domain-01| - -Few things to make a note of before starting: - -- **PySyft** = Privacy-Preserving Library -- **PyGrid** = Networking and Management Platform -- **HAGrid** = Deployment and Command Line Tool - -Step 1: Install wizard -~~~~~~~~~~~~~~~~~~~~~~~ - -To simplify the installation process, we have an `install wizard` that will help you -setup the latest versions of `hagrid` and `syft` on your machine. - -You can go to the install wizard at any time by running the below command: - -:: - - hagrid quickstart - - -.. warning:: - The next step will show you how to launch a domain node. If - you run into any ``issue`` running the above installation wizard, consider - looking for the ``error`` you are getting on our - `GitHub-Issue `__ page. - Still not able to figure out the problem, don’t worry. We are here to - help you. 
Join the OpenMined - `slack `__ - community and explain your problem in the ``#general`` channel, and - any one of us might be able to help you. - - -Step 2: Launching a Domain Server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Great work, people!! Once you have installed all the dependencies, it is -time to use ``HAGrid`` to launch your Domain Node. - -To launch a domain node, there are three things that you -need to know: - -1. **What type of node do you need to deploy?** -There are two different types of nodes: Domain Node and Network Node. By -default, HAGrid launches the ``primary`` node that is our Domain Node. - -2. **Where are you going to launch this node to?** -We need to specify that we want to launch it to the ``docker container`` at -port ``8081``. - -3. **What is the name of your Domain Node going to be?** -For that, don’t forget to specify the ``DOMAIN_NAME`` to your -preference. - -After completing the Install Wizard, run the cell below to launch your very first domain node. - -:: - - In: - - # edit DOMAIN_NAME and run this cell - - DOMAIN_NAME = "My Domain" - - !hagrid launch {DOMAIN_NAME} to docker:8081 --tag=latest - -While this command runs, you will see various ``volumes`` and -``containers`` being created. Once this step is complete, move on to -the next step, where we will learn to monitor the health of -our Domain Node. - -Step 3: Checking your Domain Server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -One exciting benefit of HAGrid is that it makes it easier for your organization/ IT department -to ``monitor`` & ``maintain`` the status of your system as you move forward with other steps. -Let's do a quick health check to ensure the Domain is up and running. - - -:: - - In: - - # run this cell - !hagrid check localhost:8081 - - Out: - - Detecting External IP... - ┏━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━┓ - ┃ PyGrid ┃ Info ┃ ┃ - ┡━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━┩ - │ host │ 20.31.143.254 │ ✅ │ - │ UI (βeta) │ http://20.31.143.254/login │ ✅ │ - │ api │ http://20.31.143.254/api/v1 │ ✅ │ - │ ssh │ hagrid ssh 20.31.143.254 │ ✅ │ - │ jupyter │ http://20.31.143.254:8888 │ ✅ │ - └───────────┴─────────────────────────────┴────┘ - -If your output is similar to the above image, voila!! A -``Domain`` ``Node`` was just ``born``. When it’s ready, you will see the -following in the output: - -- **host:** ``IP address`` of the launched Domain Node. -- **UI (Beta):** Link to an ``admin portal`` that allows you to - control Domain Node from a web browser. -- **api:** ``Application layer`` that we run in our notebooks to make - the experience more straightforward and intuitive. -- **Ssh:** ``Key`` to get into virtual machine. -- **jupyter:** Notebook ``environment`` you will use to upload your - datasets. - -Congratulations 👏 You have now successfully deployed a Domain Server! -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Now what? ---------- - -Once you, as a Data Owner, have deployed your Domain Node representing your theoretical organization's -private data server, the next step is to :doc:`Upload Private Data to a Domain Server <01-upload-data>` for research or project use. - - In our following tutorial, we will see how you as a Data Owners can preprocess the data, mark it with correct - metadata and upload it to the Domain Node you've just deployed. - -.. |00-deploy-domain-00| image:: ../../_static/personas-image/data-owner/00-deploy-domain-00.gif - :width: 95% - -.. 
|00-deploy-domain-01| image:: ../../_static/personas-image/data-owner/00-deploy-domain-01.jpg - :width: 95% diff --git a/docs/source/guides/data-owner/01-upload-data.rst b/docs/source/guides/data-owner/01-upload-data.rst deleted file mode 100644 index 3eea4bda6ca..00000000000 --- a/docs/source/guides/data-owner/01-upload-data.rst +++ /dev/null @@ -1,252 +0,0 @@ -Uploading Private Data to a Domain Server -============================================================ - -**Data Owner Tutorials** - -☑️ 00-deploy-domain - -◻️ 01-upload-data👈 - -.. note:: - **TIP:** To run this tutorial interactively in Jupyter Lab on your own machine type: - -:: - - pip install -U hagrid - hagrid quickstart data-owner - - - -Welcome back to another Data Owner tutorial. In the last tutorial, -you learned :doc:`How to Deploy a Domain Server <00-deploy-domain>` that represents -your organization’s private data servers. But right now, -the node you just deployed is empty. - -After today’s tutorial, you will learn how to ``upload data`` to your new -``domain node``, which involves annotating and doing ETL before -uploading it to our Domain Node/server. - - **Note:** Throughout the tutorials, we also mean Domain Servers - whenever we refer to Domain Node. Both mean the same and are used - interchangeably. - -Step to Upload Private Data ---------------------------- - -📒 Overview of this tutorial: - -#. **Preprocessing** of Data -#. **Marking** it with correct metadata -#. **Uploading** data to Domain Server - -|01-upload-data-00| - -Step 1: Import Syft -~~~~~~~~~~~~~~~~~~~ - -To utilize the privacy-enhancing features offered in PyGrid and to -communicate with your domain node, you must first ``import`` OpenMined's -``private`` deep learning library: PySyft. - -Lets import Syft by running the below cell: - -:: - - In: - - # run this cell - try: - import syft as sy - print("Syft is imported") - except: - print("Syft is not installed. Please use the 🧙🏽‍♂️ Install Wizard above.") - - Out: Syft is imported - -.. _step2: - -Step 2: Log into Domain -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -By default, only the Domain node ``Admin`` can upload data, -so to upload your data, you will need to first login as the admin. -(*Upload data permissions can be customized after logging into the domain node.*) - -To login to your Domain node, you will need to define which Domain you are logging into and who you are. In this case, it will take the form of: - -* IP Address of the domain host -* Your user account Email and Password - - **WARNING:** Change the default username and password below to a more secure and private combination of your preference. - -:: - - In: - - # run this cell - try: - domain_client = sy.login( - port=8081, - email="info@openmined.org", - password="changethis" - ) - except Exception as e: - print("Unable to login. Please check your domain is up with `!hagrid check localhost:8081`") - - Out: - - Connecting to 20.253.155.183... done! Logging into openmined... done! - -Lovely :) You have just logged in to your Domain. - -.. note:: - Steps to change the default admin credentials for Domain Owner are shown below 👇 - -|01-upload-data-01| - - -Step 3: Prepare Dataset -~~~~~~~~~~~~~~~~~~~~~~~ - -For this tutorial, we will use a simple dataset of four peoples ``ages``. 
- - -:: - - In: - - # run this cell - try: - import pandas as pd - data = {'ID': ['011', '015', '022', '034'], - 'Age': [40, 39, 9, 8]} - - dataset = pd.DataFrame(data) - print(dataset.head()) - except Exception: - print("Install the latest version of Pandas using the command: %pip install pandas") - - Out: - - ID Age - 011 40 - 015 39 - 022 9 - 034 8 - -.. _step4: - -Step 4: Annotate Data for Automatic DP -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Now that we have our dataset, we can begin annotating it with -privacy-specific metadata called Auto DP metadata. Auto DP -metadata allows the PySyft library to protect and adjust the -visibility different Data Scientists will have into any one of -our data subjects. ``Data Subjects`` are the entities whose privacy -we want to protect. So, in this case, they are the individual -family members. - -.. note:: - In order to protect the ``privacy`` of the people within our dataset we - first need to specify who those people are. In this example we have - created a column with unique ``ID’s`` for each person in this dataset. - -Important steps: -^^^^^^^^^^^^^^^^ - -- ``data subjects`` are entities whose privacy we want to protect -- each feature needs to define the appropriate ``minimum`` and - ``maximum`` ranges -- when defining min and max values, we are actually defining the - ``theoretical`` amount of values that could be learned about that - aspect. -- To help obscure the variables someone may learn about these datasets - we then need to set an appropriate ``lower_bound`` to the ``lowest`` possible persons age ``(0)``, - and the ``upper_bound`` to the ``highest`` possible (mostly) persons age ``(100)``. - - -:: - - In: - - # run this cell - data_subjects = sy.DataSubjectArray.from_objs(dataset["ID"]) - - age_data = sy.Tensor(dataset["Age"]).annotate_with_dp_metadata( - lower_bound=0, upper_bound=100, data_subjects=data_subjects - ) - -.. - - **Note:** If your project has a training set, validation set and test - set, you must annotate each data set with Auto DP metadata. - -.. _step5: - -Step 5: Upload the Dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once you have prepared your data, it’s time to upload it to the Domain -node. To help Data Scientists later ``search`` and ``discover`` our -datasets, we will add details like a ``name`` and a ``description`` of -what this dataset represents. - - **Note:** If your project has a train, validation and test set, you - need to add them as assets. In this case, it is just our age column. - -:: - - In: - - # run this cell - domain_client.load_dataset( - name="Family_Age_Dataset", - assets={ - "Age_Data": age_data, - }, - description="Our dataset contains the Ages of our four Family members with unique ID's. There are 2 columns and 4 rows in our dataset." - ) - - Out: - - Dataset is uploaded successfully !!! - - -Step 6: Check the Dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To ``check`` the dataset you uploaded to the Domain Node, go ahead and -run the below command, and it will list ``all`` the datasets on this -Domain with their Names, Descriptions, Assets, and Unique IDs. - -:: - - In: - - # run this cell - domain_client.datasets - -Awesome 👏 !! You have uploaded the dataset onto your Domain Server! 
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -By uploading the dataset onto the Domain Node, Data Owners are opening -up the possibilities of different Data Scientists being able to study it -without downloading it and without the Data Owners doing any -experiment-specific work while Data Scientists are studying their -private data. - -What’s Next? ------------- -Alright, so we have walked through :doc:`How to deploy a -Domain Node <00-deploy-domain>` and :doc:`How to prepare and upload a dataset to that Domain -Node <01-upload-data>` so that Data Scientists can study our datasets without being -able to download them. - - In the following tutorial, we will see how Data Scientists can find - datasets and work across all the different Domain nodes. - -.. |01-upload-data-00| image:: ../../_static/personas-image/data-owner/01-upload-data-00.jpg - :width: 95% - -.. |01-upload-data-01| image:: ../../_static/personas-image/data-owner/01-upload-data-01.gif - :width: 95% \ No newline at end of file diff --git a/docs/source/guides/data-owner/02-create-account-configure-pb.rst b/docs/source/guides/data-owner/02-create-account-configure-pb.rst deleted file mode 100644 index 9d98384d4a8..00000000000 --- a/docs/source/guides/data-owner/02-create-account-configure-pb.rst +++ /dev/null @@ -1,328 +0,0 @@ -Creating User Accounts on your Domain Server -=============================================== - -**Data Owner Tutorials** - -☑️ :doc:`00-deploy-domain <00-deploy-domain>` - -☑️ :doc:`01-upload-data <01-upload-data>` - -◻️ 02-create-account👈 - -HAGrid Quickstart Setup ---------------------------- - -To run this tutorial interactively in Jupyter Lab on your own machine type, -you need to start a ``HAGrid Quickstart environment`` as follows: - -:: - - pip install -U hagrid - hagrid quickstart data-owner - - -If you already have a HAGrid Quickstart environment operating, run the following to download the tutorials notebooks: - -:: - - from hagrid import quickstart - quickstart.download(“data-owner”) - - ------ - - -Domain Owners can directly ``create`` user accounts for Data Scientists to use their -domain nodes. When the domain owner creates a new user account, by default that user -will have the lowest level of permissions to access that data (means data is highly private) -and will be assigned ``0`` Privacy Budget. - -In today's tutorial we will learn how to create a user account, how to check permissions, -and how to assign a privacy budget to that user. Then we'll touch on why setting a privacy -budget is important later in your workflow. - - -🚨 Pre-Requisites Steps ---------------------------- - -Before you can create user accounts on your domain, you have to first: - -#. :ref:`Annotate your dataset with the appropriate DP metadata ` -#. :ref:`Upload your dataset to Domain Server ` - -.. note:: - The above prerequisite steps are covered in the previous tutorial :doc:`How to upload private data to the Domain - Node <01-upload-data>`. Please execute those steps before implementing this tutorial. - -📒 Overview of this tutorial ------------------------------- - -#. **Import** Syft & **Login** to Domain Server -#. **Define** account credentials -#. 
**Check** account permissions - -|02-create-account-configure-pb-00| - -Step 1: Import Syft & Login to Domain Server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To utilize the privacy-enhancing features offered in PyGrid and to -create an account for the user, you must first ``import`` OpenMined's -``private`` deep learning library: PySyft. - -Let's import Syft by running the below cell: - -:: - - In: - - # run this cell - try: - import syft as sy - print("Syft is imported") - except: - print("Syft is not installed. Please use the 🧙🏽‍♂️ Install Wizard above.") - - Out: Syft is imported - -To login to your Domain node, you will need to define which Domain you are logging into and who you are. In this case, it will take the form of: - -* IP Address of the domain host -* Your user account Email and Password - -.. WARNING:: - ``info@openmined.org`` and ``changethis`` are the default admin credentials for any domain node that is launched by - the user in the documentation. Change the default email and password below to a more secure and - private combination of your preference. - -:: - - In: - - # run this cell - try: - domain_client = sy.login( - port=8081, - email="info@openmined.org", - password="changethis" - ) - except Exception as e: - print("Unable to login. Please check your domain is up with `!hagrid check localhost:8081 --silent`") - - Out: - - Connecting to 20.253.155.183... done! Logging into openmined... done! - -Lovely :) You have just logged in to your Domain. - - -Step 2: Create a User Account -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -After you have launched and logged into your domain as an ``admin``, you can create user accounts for others to use. - -.. WARNING:: - In this case, we will create an account for a Data Scientist from within our own team or organization. - -.. note:: - You should only create direct user accounts on your domain node for those who have been - appropriately vetted and verified by your organization. To expand research done on your - datasets to those not directly within or verified by your organization, you should ``connect`` - your ``domain`` to one or more networks so that proper verification measures have been taken. - You can learn more about this in our "Connect Your Domain to a Network" tutorial. - -There are ``three`` different ways for a new user account to be created on your domain. - -* **Option A**, by a Domain Owner creating a new user account and specifying their - credentials directly through the notebook API. -* **Option B**, by a Domain Owner creating a new user account and specifying their credentials - through PyGrid’s default UI interface. -* **Option C**, by a potential user finding or being given the Domain node’s profile URL and - submitting an application that a Domain Owner can triage. (This functionality is currently in Beta). - -.. note:: - In all three cases, the user of your domain will be assigned the role of Data Scientist by default. - -A. Using PySyft: Create account from Domain Client -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To create a Data Scientist account for someone within your team or organization, you need to tell your Domain 4 things: - -#. **Name**: Name of the individual -#. **Email**: Associated email address of the individual -#. **Password**: Password they would need to login into your domain (this can be changed later when they customize their ``account settings``) -#. **Budget**: When you specify a ``budget``, you assign this account with a ``privacy budget`` of ``0``. 
This privacy budget, set in units of ``epsilon``, is the limiter that blocks a data scientist from knowing too much about any one data subject in your dataset. - - **Note:** In future exercises, we will explore how privacy budget limits affect data subject visibility. - Still, for now, we will set the ``privacy budget`` to its default of ``0`` (means data is highly private), - the lowest level of permission to access the data. - Also, by default, the role assigned to a user is a Data Scientist. - -:: - - In: - - # run this cell - data_scientist_details = domain_client.create_user( - name="Jane Doe", - email="jane@email.com", - password="supersecurepassword", - budget=0 - ) - - Out: - - User created successfully! - -Once you have created an account, you can ``verify`` if the user account was made successfully. - -:: - - In: - - # list the users that have registered to the domain - domain_client.users - -Print the details of the account you created and share the ``credentials`` with the Data Scientists. - -:: - - In: - - # run the cell then copy the output - print("Please give these details to the Data Scientists ⬇️") - print(data_scientist_details) - - Out: - - Please give these details to the Data Scientists ⬇️ - {'name': 'Jane Doe', 'email': 'jane@email.com', 'password': 'supersecurepassword', 'url': '20.253.155.183'} - - -B. Using PySyft: Create account from Domain URL -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A user can also ``sign-up`` or create an account on a Domain node if they have access to the ``URL`` to the Domain. -Instead of creating an account individually for each Data Scientist, a Data Owner can ``share`` the URL to their -Domain node and ask their team members to ``register`` to the Domain. - -To register to a Domain, you need the following details: - -#. **Name**: Name of the individual -#. **Email**: Email of the individual that will be used to log into the Domain -#. **Password**: A secured password to log into the Domain -#. **Url**: Url to the domain node. -#. **Port**: Port number - -:: - - In: - - # run this cell - import syft as sy - domain_client = sy.register( - name="Jane Doe", - email="jane@email.com", - password="supersecurepassword", - url="localhost", - port=8081 - ) - -On successful registration, the user is auto-logged into the domain. - -.. note:: - By default the role assigned to the registered user is of a ``Data Scientist`` and the assigned ``privacy budget`` is ``0``. The future tutorial series will cover a better explanation of `setting the privacy budget`. - -C. Using PyGrid UI: Create account as a Domain Admin -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -PyGrid's UI is meant to help Domain Owners get a bigger picture view of their domains and manage them. - -When we use the ``hagrid launch`` command to start our private data server, we define the ``port`` where -we want to launch the server. By default, the port is launched at ``8081``. - - **Note:** Make sure your docker application is up and running in the background. - -We will use this ``port number`` to visit the following UI interface at the URL: - -:: - - http://localhost: - - e.g. - - http://localhost:8081 - - -Once you are on PyGrid's web page, execute following steps to create an account for Data Scientist: - -.. WARNING:: - ``info@openmined.org`` and ``changethis`` are the default admin credentials for any domain node that is launched by - the user in the documentation. 
Change the default email and password below to a more secure and - private combination of your preference. - -#. Login using your admin credentials (**Email:** info@openmined.org | **Password:** changethis) -#. Create a new user account by clicking on the ``+ Create User`` button -#. Specify the following fields - * **Name**: Name of the individual - * **Email**: Email of the individual that will be used to log into the Domain - * **Password**: A secured password to log into the Domain - * **Role**: Assign them the role of Data Scientist (By default user account will take the role with the lowest amount of permission which in this case is the **Data Scientist** role.) -#. Set appropriate Privacy Budget (By default, they have ``0e`` privacy budget) - -|02-create-account-configure-pb-04| - - -Step 3: Check Permissions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Now that we have created an account for our Data Scientist, let's check to see if it -was made and if we need to change any permissions. - -.. note:: - Permissions are determined by the ``role`` a user has been assigned by the Data Owner. - By default a user will be created with the role with the ``lowest`` set of ``permissions``. - To simplify the concepts, let us consider the below scenario. - -Scenario ------------ - -Let's login to our PyGrid's UI as we did earlier when we had to create an account -for the user in the prior steps. On the homepage, go to the ``Permissions`` tab, -where you will notice the different roles and associated permissions with them. - -.. note:: - Each role has a set of default ``permissions``, but they can be changed according to the norms of each organization. - -|02-create-account-configure-pb-01| - -#. **Data Scientist (default)**: This role is for users who will be performing computations on your datasets. They may be known users or those who found your domain through search and discovery. By default, this user can see a list of your datasets and can request to get results. This user will also be required to sign a Data Access Agreement if you have required one in the Domain Settings Configurations. -#. **Compliance Officer**: This role is for users who will help you manage requests made on your node. They should be users you trust. They cannot change domain settings or edit roles but are, by default, able to accept or deny user requests on behalf of the domain node. -#. **Administrator**: This role is for users who will help you manage your node. These should be users you trust. The main difference between this user and a Compliance Officer is that this user, by default, not only can manage requests but can also edit Domain Settings. This is the highest level of permission outside of an Owner. -#. **Owner**: Only one Owner account is assigned to any domain node. The owner account is the highest level permission and is a requirement for deploying a domain node. If you ever want to transfer ownership of your domain node to someone else, you can do so by following these steps. - -Suppose you created a user account for a person named ``John Smith``; by default, -the role assigned to John will be a ``Data Scientist``. But you want to change the -role of John to ``Data Protection Officer`` instead of a Data Scientist. - -#. Select the user and click on its name. -#. Go to ``Change role``, and in the drop-down option, select ``Compliance Officer``. -#. You can see the permissions given to the Compliance Officer below their role. 
The default permissions can be changed in the ``Permissions`` tab, as shown in the above image. -#. Click ``Change Role``, and the role of John Smith has now successfully changed to the Compliance Officer. - -|02-create-account-configure-pb-02| - - -Now our domain node is available for the data scientists to use 👏 ---------------------------------------------------------------------- - -.. |02-create-account-configure-pb-00| image:: ../../_static/personas-image/data-owner/02-create-account-configure-pb-00.jpg - :width: 95% - -.. |02-create-account-configure-pb-01| image:: ../../_static/personas-image/data-owner/02-create-account-configure-pb-01.gif - :width: 95% - -.. |02-create-account-configure-pb-02| image:: ../../_static/personas-image/data-owner/02-create-account-configure-pb-02.gif - :width: 95% - -.. |02-create-account-configure-pb-04| image:: ../../_static/personas-image/data-owner/02-create-account-configure-pb-04.gif - :width: 95% diff --git a/docs/source/guides/data-owner/03-join-network.rst b/docs/source/guides/data-owner/03-join-network.rst deleted file mode 100644 index 89a227a7f62..00000000000 --- a/docs/source/guides/data-owner/03-join-network.rst +++ /dev/null @@ -1,169 +0,0 @@ -Joining a Network -=============================================== - -**Data Owner Tutorials** - -☑️ 00-deploy-domain - -☑️ 01-upload-data - -☑️ 02-create-account - -◻️ 03-join-network👈 - -.. note:: - **TIP:** To run this tutorial interactively in Jupyter Lab on your own machine type: - -:: - - pip install -U hagrid - hagrid quickstart data-owner - - -A Network Node is a node that connects different domains to a broader base of data scientists (also known as a network's members). It is a server which exists outside of any data owner's institution, providing search & discovery, VPN, and authentication services to the network of data owners and data scientists. - -.. note:: - Data is only stored on the separate Domain Servers. Network Nodes do not contain data, they simply provide an extra layer of services to Domain Nodes and Data Science users. - -Let us give an example: assume you are in a hospital and the hospital has different cancer-related datasets hosted on their domain. The hospital now wants to increase the research impact their datasets can have but does not want to do so at the cost of risking a privacy leak nor at the risk of moving their data. By joining a network (for example one hosted by WHO) a Domain Owner can increase the searchability of their datasets to appropriate audiences without those datasets needing to leave the Domain servers. - -In today's tutorial we will learn how to join a network and apply our domain to it. - - -🚨 Pre-Requisites Steps ---------------------------- - -Before you can create user accounts on your domain, you have to first: - -* `Login to your Domain Node` - -.. note:: - The above prerequisite step is covered in an existing tutorial `How to deploy a Domain Node `_. Please execute those steps before implementing this tutorial. - -📒 Overview of this tutorial --------------------------------- - -#. **Login** to your Domain Server -#. **Finding** a Network -#. **Applying** our Domain to the Network -#. **Verifying** our Domain on the Network - -Step 1: Import Syft -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Syft is the main library our Domain servers run off of, so to start we will need to import Syft so that our methods in later steps will work. 
-:: - - In: - - # run this cell - import syft as sy - - -Step 2: Login to your domain -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Once you have imported syft, and have your domain node up along with it's credentials available to you, connect and login to the domain hosted at the URL generated on the Step 4 of the Deploy Domain notebook. - -.. WARNING:: - The below cell has default credentials, please change accordingly. - -:: - - In: - - # run this cell - domain_client = sy.login( - url="http://localhost:8081/", email="info@openmined.org", password="changethis" - ) - -Step 3: Fetch all Available Networks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Now we’ve come to the main part, let’s take a look at what networks are available for us to join. -The command below will fetch all of the currently available networks, this list may change as more networks get created or as they go on and offline. - -:: - - In: - - # run this cell - sy.networks - -You can now choose the network that suits best your needs. After looking at the available networks, let’s choose a network that best fits our domain. For this tutorial we are going to choose the **OpenMined** network. - -Step 4: Connect to the Network -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In future iterations of PyGrid Network nodes will be able to have domains join as Members or as Guests, but in today’s current iteration of PyGrid all domains start out by joining as Guests. To apply to a network as a guest we first need to connect to the network server. - -Connecting to a network can be done via it's name/URL/index in the above list. - -:: - - In: - - # run this cell - network_client = sy.networks[0] - -On successful login, the `network_client` will contain an authenticated client to the network. - -Step 5: Fetch all Domains on the Network -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Now that we have an authenticated client with the network, let's fetch and see the currently connected domains on the network. - -We can list all of them with the below command: - -:: - - In: - - # run this cell - network_client.domains - -Since we have not applied our domain yet, it should not be visible on the output of the above command. - -Step 6: Apply our Domain to the Network -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this step, we will be joining the OpenMined network. If our application to join gets accepted, our domain will then be listed among the available domains on this network which will help Data Scientists find and work from our datasets. - -.. note:: - This step might have multiple retries before actually getting connected, so please don’t worry! - -The below command will apply our domain node to the network we just authenticated with - -:: - - In: - - # run this cell - domain_client.apply_to_network(network_client) - - -Step 7: Verify our Domain on the same Network -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this step, we will verify whether we have successfully joined the network node or not. We will do this by listing the domains available on this network and seeing whether our domain appears. - -:: - - In: - - # run this cell - network_client.domains - -If you can see your domain's name here, then hoorah! - -If you haven't, don’t worry, go through the above steps and see if you missed anything. 
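
If you would rather re-run the whole flow in one go, the sketch below strings Steps 2-7 together and wraps the application call in a simple retry loop, since the note under Step 6 mentions that joining can take a few attempts. It only reuses the calls shown above; the retry count, the sleep interval, and the assumption that a failed application surfaces as an exception are illustrative choices, not guarantees of the Syft API.

::

    In:

        # run this cell
        import time
        import syft as sy

        # login to our own domain (default credentials -- change accordingly)
        domain_client = sy.login(
            url="http://localhost:8081/", email="info@openmined.org", password="changethis"
        )

        # pick the network we chose in Step 4
        network_client = sy.networks[0]

        # apply to the network, retrying a few times as suggested in Step 6
        for attempt in range(5):
            try:
                domain_client.apply_to_network(network_client)
                break
            except Exception as e:
                print(f"Attempt {attempt + 1} failed ({e}), retrying in 10s...")
                time.sleep(10)

        # our domain should now appear among the network's domains
        network_client.domains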
- -Step 8: Verify the VPN status -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Now, let us verify that our domain is succesfully connected to the Network node via VPN. - -Run the cell below as mentioned: - -:: - - In: - - # run this cell - domain_client.vpn_status() - -You should receive the domain ID in the `peers list` in the connected field. This confirms our connection to the network, Yay! - -Now our domain node applied on the network and we have succesfully joined it!👏 \ No newline at end of file diff --git a/docs/source/guides/data-owner/04-configure-pb.rst b/docs/source/guides/data-owner/04-configure-pb.rst deleted file mode 100644 index 3fbbde4ed70..00000000000 --- a/docs/source/guides/data-owner/04-configure-pb.rst +++ /dev/null @@ -1,369 +0,0 @@ -Configuring Privacy Budget on your Domain Server -================================================== - -**Data Owner Tutorials** - -☑️ 00-deploy-domain - -☑️ 01-upload-data - -☑️ 02-create-account - -☑️ 03-join-network - -◻️ 04-configure-pb👈 - -.. note:: - **TIP:** To run this tutorial interactively in Jupyter Lab on your own machine type: - -:: - - pip install -U hagrid - hagrid quickstart data-owner - - -A privacy budget is a collection of quantitative measures through which a Data Owner can -pre-determine the degree of information access they grant to a user using their domain server. -For PyGrid, you can think of a privacy budget as a specified limit to the ``visibility`` a user -can have into any one data subject on your domain server. -As we saw in the :doc:`creating user accounts tutorial <02-create-account-configure-pb>`, when you -create a user account in PyGrid, by default that user is assigned the lowest level of ``permissions`` -and is given a privacy budget of ``0`` which means that they have ``0`` visibility into your domain’s data subjects. - -In today's tutorial, you will discover the underlying concept behind Differential Privacy and -how setting a privacy budget for a user determines how much can be learned from any data subject - - -🚨 Pre-Requisites Steps ---------------------------- -Before you can specify a privacy budget for your domain users, you must first ``prepare`` the dataset, ``upload`` it, and -``create`` a user account for your team members or Data Scientists. -The prerequisite steps are covered in the previous -tutorial :doc:`Creating User Accounts on your Domain Server <02-create-account-configure-pb>` and -:ref:`Uploading Private Data to a Domain Server `. -Please execute those steps before implementing this tutorial. - -📒 Overview of this tutorial ---------------------------- - -#. **Introduction** to Differential Privacy -#. **Login** to PyGrid UI as a Domain Admin -#. **Explore** different Privacy Budgets - -Step 1: Introduction to Differential Privacy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this step, lets understand the concept behind differential privacy and privacy budget by considering a simple scenario. - -A. Scenario -############## -Consider there are ``500`` patients represented in ``2`` different datasets. One dataset is -about general ``medical history``; the other has some but not all of the ``500`` patients -and is focused on patients who have had ``mammography`` images taken in the past year. Now -let's say that ``Jane Doe`` is a patient in both and is open to being studied for -``breast cancer research`` as long as she can remain unidentifiable in the study. - -B. 
Quick Definition: Differential Privacy -############################################ -A core feature of Syft is that Syft allows you to use a ``PET(Privacy Enhancing technology)`` called -Differential Privacy to protect the ``Privacy`` of the individuals or data subjects -within your datasets. In this case, Differential Privacy is maintained when a -query across both datasets ``with`` Jane Doe in it versus that same query on both -datasets ``without`` Jane Doe creates the ``same output``. Noise is added to help average -out and make up the difference between having Jane there versus not. In other words, Jane Doe becomes a very -difficult, if not impossible, straw to find within the haystack. - -From a top-level view, this means a couple of things: - -* Differential Privacy can help a Data Scientist see trends in data ``without`` being able to ``identify`` the participants. -* The more a specific data subject involved in the query ``stands out`` in a dataset, the more noise has to be added to ``obfuscate`` them. -* There is a natural ``tradeoff`` between how much ``Privacy`` is preserved versus how much ``Accuracy`` is given to the Data Scientist.. -* You can set a privacy limit in PyGrid and trust that a Data Scientist will not be able to get answers to a query that surpasses that limit on any one ``Data Subject``. (see the image 👇 for reference) -* Data scientists can download answers that remain within specified ``privacy limits``, creating a streamlined flow where answering questions using an org's Domain Server will be as easy as going to the organization's public website. (see the image 👇 for reference) - -|04-configure-pb-02| - -C. Quick Definition: Epsilon or Privacy Budget -################################################ -Differential Privacy in practice is an algorithm that obscures an individual data subject's -contributions to the given ``results`` of a ``query``. Privacy Budget measured in units of ``Epsilon`` -is a way to measure the potential ``privacy loss`` or ``visibility`` you are allowing into any one of those data subjects. - -.. note:: - Syft specifically ``tracks`` privacy budgets against individual data subjects instead - of the ``dataset`` as a whole. This may be different from other tools that use - Differential Privacy. This allows more ``utility`` on the dataset. - -D. Takeaway -############### -When you assign a ``privacy budget`` in Syft, you specify a ``risk tolerance`` on what -level of ``visibility`` you feel comfortable having that Data Scientist have on your -data subjects. You are balancing this with keeping the ``accuracy`` they get on a -helpful level and maximizing the benefit of your dataset(s). - -Let's say, in the above scenario, you allow your ``Data Scientist`` to have ``0.5e`` to -conduct their Breast Cancer Research. You can interpret ``e`` to mean: - -* That this Data Scientist will have ``0.5x`` more ``visibility`` into any one data subject like Jane Doe -* That this Data Scientist is ``0.5x`` more likely to ``learn`` something unique about Jane Doe -* That this Data Scientist can ``learn no more than 0.5e`` on Jane Doe - -.. note:: - If a query would expose more than ``0.5e`` about ``Jane Doe``, then Jane Doe would get - dropped from the result, and noise would be used to mitigate the difference. - -Step 2: Login to PyGrid UI as a Domain Admin -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When we use the ``hagrid launch`` command to start our private data server, we define -the ``port`` where we want to launch the server. - -.. 
note:: - By default, the port is launched at ``8081``. - -|04-configure-pb-00| - -We will use this port number to visit the following UI interface at the ``URL``: - -:: - - http://localhost: - - e.g. - - http://localhost:8081 - -|04-configure-pb-01| - -The default email and password for the domain are: - -* **email:** info@openmined.org -* **password:** changethis - -Once we're logged in, you can move to the next section, which explores setting a privacy budget. - -Step 3: Explore Different Privacy Budget -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. _step3a: - -A. Assign Data Scientist Account with 0.5e Privacy Budget -############################################################## -When you create a user account on your domain server, the privacy budget assigned to the -user is ``0e``, and the role assigned will be a data scientist by default. - -Follow the steps in the image below to change the privacy budget of our data scientist to ``0.5e``. - -.. note:: - John Smith is a Data Scientist whose account we created for demonstration purposes - in the :doc:`create user accounts tutorial <02-create-account-configure-pb>`. - -|04-configure-pb-03| - - -B. Make a Query With 0.5e Privacy Budget As a Data Scientist -################################################################# - -After you have changed the privacy budget to ``0.5e``, it's time for Domain Owners to -wear the hat of a Data Scientist. Let's make a ``query`` using 0.5e and then analyze the ``results`` -to compare how close the value of the results is to the actual value. - -Firstly, we should ``login`` to the domain as a data scientist using the same credentials through which -we created a data scientist account in :doc:`creating user accounts tutorial <02-create-account-configure-pb>`. - -The credentials to login as a Data Scientist are: - -* **Email:** janedoe@email.com -* **Password:** supersecretpassword - -.. WARNING:: - We will use the same ``age dataset`` defined in the previous tutorial to keep things simple. - So, before Data Scientists can make a ``query``, Domain Owners have - to :ref:`prepare the dataset and upload it to the Domain Servers`. -:: - - In: - - # run this cell - import syft as sy - - ds_domain_client = sy.login( - email="janedoe@email.com", - password="supersecretpassword", - port=8081, - url="localhost" - ) - -Now, as a Data Scientist, you can ``verify`` the privacy budget using the below command ⬇️ - -:: - - In: - - # run this cell - print("Allotted PB: ", ds_domain_client.privacy_budget) - - Out: - - Allotted PB: 0.5 - -Let's grab the age data from the domain and define a simple query to calculate the ``mean age``. - -:: - - In: - - age_data = ds_domain_client.datasets[0]["Age_Data"] - age_mean = age_data.mean() - age_mean_public = age_mean.publish(sigma=20) - - # Check if mean data exists - age_mean_public.exists - - # Download/Get mean age - age_mean_public.get(delete_obj=False) - - print("Remaining PB: ", ds_domain_client.privacy_budget) - - Out: - - Remaining PB: 0.000120578321 - -.. note:: - Remember, sigma represents how much noise the user wants added to the result. - The noise is selected randomly from a Gaussian distribution with sigma as the - standard deviation and zero mean. - -So the first thing we need to remember while setting ``sigma`` is that if we set a very low sigma -compared to the published value, it might not add enough noise, and the user would require a -large ``privacy budget`` to get the accurate result. 
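
To build some intuition for how much protection a given ``sigma`` provides, here is a tiny standalone sketch in plain NumPy (deliberately not the Syft API) that draws zero-mean Gaussian noise with ``sigma=20`` and adds it to the two example magnitudes discussed just below; the values and the fixed seed are only for illustration.

::

    In:

        # run this cell
        import numpy as np

        sigma = 20  # standard deviation of the zero-mean Gaussian noise
        rng = np.random.default_rng(42)

        for true_value in (100000, 0.1):
            noisy = true_value + rng.normal(loc=0.0, scale=sigma)
            print(f"true value: {true_value:>9} | noisy result: {noisy:.2f}")

        # For 100000 a +/-20 perturbation is barely noticeable (little obfuscation,
        # so a large privacy budget is spent), whereas for 0.1 the same noise
        # completely swamps the true value (cheap in budget, poor accuracy).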
- -Now we want the noise to be picked randomly with a standard deviation of ``20``. -Thus decreasing the value of ``sigma`` will result in more accurate results but at -the expense of a more privacy budget being spent and leaking more information -about private data. - -**Example:** Let's assume the value being published is ``100000``, then adding a slight noise of ``20`` -will result in ``100020``, which isn't significant noise comparatively and thus would require a large -budget to be spent. Similarly, if the value being published is ``0.1`` and you add noise of ``20``, then -the result value is ``20.1`` which is way off from the actual result and thus affects the accuracy of -the result, although having spent low PB. - -C. Make a Query With 7.5e Privacy Budget As a Data Scientist -################################################################# - -The privacy budget is cumulative and doesn't represent the actual spent value. Once something is -known, you can't remove that knowledge. Let us ``increase`` the ``privacy budget`` and perform again with -the same query as above and compare the accuracy of the result and the privacy budget spent. - -.. WARNING:: - You need to go to :ref:`Step 3.A ` and change the privacy budget to ``7.5e`` this time, as shown in the image. - -After you have changed the privacy budget to ``7.5e``, we will again make a ``query`` and then ``analyze`` the results. - -:: - - In: - - import syft as sy - - ds_domain_client = sy.login( - email="janedoe@email.com", - password="supersecretpassword", - port=8081, - url="localhost" - ) - - print("Allotted PB: ", ds_domain_client.privacy_budget) - - age_data = ds_domain_client.datasets[0]["Age_Data"] - age_mean = age_data.mean() - age_mean_public = age_mean.publish(sigma=20) - - # Check if mean data exists - age_mean_public.exists - - # Download/Get mean age - age_mean_public.get(delete_obj=False) - - print("Remaining PB: ", ds_domain_client.privacy_budget) - - Out: - - Allotted PB: 7.5 - Remaining PB: 1.0740261245118496 - -Now, if you try to view the variable `age_mean` in a new cell, you will notice three things about this pointer: - -#. **PointerID:** ID of the pointer -#. **Status [Ready/ Processing]:** Tells if the results to the pointer have been calculated or not on the server side -#. **Representation:** This shows synthetic data/ values that the pointer could represent. - -:: - - In: - - print(age_mean) - - Out: - - PointerId: da75693b1fd0439ab0a623dd183ff8ce - Status: Ready - Representation: array([64.31603086]) - - (The data printed above is synthetic - it is an imitation of the real data.) - -D. Make a Query With 10e Privacy Budget As a Data Scientist -################################################################# -For the last time, let us change the value of the ``privacy budget`` to ``10e``, perform again with the -same query as above, and compare the accuracy of the result and the privacy budget spent. - -.. WARNING:: - You need to go to :ref:`Step 3.A ` and change the privacy budget to ``10e`` this time, as shown in the image. - -After you have changed the privacy budget to ``10e``, we will again make a ``query`` and then ``analyze`` the results. 
- -:: - - In: - - import syft as sy - - ds_domain_client = sy.login( - email="janedoe@email.com", - password="supersecretpassword", - port=8081, - url="localhost" - ) - - print("Allotted PB: ", ds_domain_client.privacy_budget) - - age_data = ds_domain_client.datasets[0]["Age_Data"] - age_mean = age_data.mean() - age_mean_public = age_mean.publish(sigma=20) - - # Check if mean data exists - age_mean_public.exists - - # Download/Get mean age - age_mean_public.get(delete_obj=False) - - print("Remaining PB: ", ds_domain_client.privacy_budget) - - Out: - - Allocated PB: 10.0 - Remaining PB: 3.5740261245118496 - -Congratulations 👏 You have learned to configure your Privacy Budget on your Domain Server!! ----------------------------------------------------------------------------------------------- - -.. |04-configure-pb-00| image:: ../../_static/personas-image/data-owner/04-configure-pb-00.png - :width: 95% - -.. |04-configure-pb-01| image:: ../../_static/personas-image/data-owner/04-configure-pb-01.png - :width: 50% - -.. |04-configure-pb-02| image:: ../../_static/personas-image/data-owner/04-configure-pb-02.gif - :width: 95% - -.. |04-configure-pb-03| image:: ../../_static/personas-image/data-owner/04-configure-pb-03.gif - :width: 95% \ No newline at end of file diff --git a/docs/source/guides/data-owner/04-create-network.rst b/docs/source/guides/data-owner/04-create-network.rst deleted file mode 100644 index 7358d9a7932..00000000000 --- a/docs/source/guides/data-owner/04-create-network.rst +++ /dev/null @@ -1,53 +0,0 @@ -Creating a Network -=============================================== - - -What is a Network Node? ------------------------------------------------------ - -A Network Node is a node that connects different domains to a broader base of data scientists (also known as a network's members). It is a server which exists outside of any data owner's institution, providing services to the network of data owners and data scientists. - -In short, a Network node provides a secure interface between its cohorts or Domains and its members or Data Scientists. - -Let us give an example: assume you are in a hospital and the hospital has different cancer related datasets hosted on their domain. The hospital's data owners now want to increase the visibility and searchability of these datasets, so that more and more researches and doctors can utilise these datasets and advance our understanding and diagnosis of cancer. - -However, due to privacy concerns, they do not want to provide access to random actors, such as sharing the URL of the domain with everyone. In order to tackle this privacy issue and make the dataset still accessible, the domain owner can join a Network Node (for example the one hosted by WHO) hence opening the accessibility of their datasets to a much larger audience in a private and secure manner. - - -Why do you need a new Network Node? ---------------------------------------------------------------------------------- -Before requesting a Network Creation, please read the following carefully: - - -Ask yourself the below questions based on your use-case: - -* Do you want to enable data owners to host their dataset without sharing their domain URL? -* Do you have similar-purpose serving datasets? -* Do you want to improve the visibility and searchability of the datasets hosted on your Network Node? -* Do you want data scientists and researchers connect to your Network Node to perform remote Data Science? 
- - -If you answer the above questions with a **Yes**, then you might be looking to create your own network. Fill up the form below and we will get back to you with further instructions on how to proceed. - -.. note:: - We will be using the email you provide here for further communication. - - - -.. raw:: html - - - - - - diff --git a/docs/source/guides/data-scientist/00-connect-to-domain.rst b/docs/source/guides/data-scientist/00-connect-to-domain.rst deleted file mode 100644 index c912aefd341..00000000000 --- a/docs/source/guides/data-scientist/00-connect-to-domain.rst +++ /dev/null @@ -1,136 +0,0 @@ -Connecting to a Domain Server -==================================== - -**Data Scientist Tutorials** - -◻️ 00-connect-to-domain👈 - -◻️ 01-search-for-datasets - -.. note:: - **TIP:** To run this tutorial interactively in Jupyter Lab on your own machine type: - -:: - - pip install -U hagrid - hagrid quickstart data-scientist - - -Data Scientists are end users who want to perform ``computations`` or ``answer`` specific questions using -the dataset(s) of one or more data owners. The very first thing Data Scientists have to do in order -to submit their requests is ``login`` and ``connect`` to the Domain Server that hosts the data they would -like to make requests off of or to connect to a network by which they can search for different -datasets. Today's tutorial will show you how you as a Data Scientist can connect to an -organization's domain server using PySyft. - -For connecting to a Domain Server, we will use the login credentials assigned to us by -the Domain Owner. By default, we as Data Scientists have the lowest level of ``permission`` -to access the data (which means data is highly private) and will be assigned a Privacy Budget of ``0``. - -.. note:: - Check out this tutorial to understand how Domain Owners - can :doc:`create a user account <../data-owner/02-create-account-configure-pb>` on their Domain Servers. - - Throughout the tutorials, we also mean Data Scientists - whenever we refer to users. Both are used interchangeably. - -Steps to Connect to a Domain Server -------------------------------------- - -📒 Overview of this tutorial: - -#. **Obtain** Login Credentials -#. **Login** to the Domain as a Data Scientist -#. **Explore** some useful starting commands - - -.. note:: - PyGrid Admin (the UI) is only meant to be used by domain or data owners so a data scientist - would never login to the domain node via the UI. - -.. _step-ds-1: - -Step 1: Obtain Login Credentials -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To utilize the ``privacy-enhancing`` features and play around with your ``privacy budget``, as a -Data Scientist you must first get your login ``credentials`` from the domain owner. -What you will need to login to the domain server is the following information: - -* email -* password -* URL of the domain -* port of the domain - -.. WARNING:: - Change the default username and password below to a more secure and private combination of your preference. - -:: - - In: - - # run this cell - import syft as sy - domain_client = sy.register( - name="Alice", - email="alice@email.com", - password="supersecurepassword", - url="localhost", - port=8081 - ) - -.. note:: - By default, the role assigned to the registered user is of a Data Scientist, and the assigned privacy budget is 0. 
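To double-check what you have been assigned, a minimal sketch is shown below; it assumes the client object returned by ``sy.register`` above exposes the same ``privacy_budget`` attribute that the Data Owner tutorials use on a logged-in client.

::

    In:

    # Assumes `domain_client` is the client returned by sy.register above and that it
    # exposes the same `privacy_budget` attribute used in the Data Owner tutorials.
    print("Assigned privacy budget:", domain_client.privacy_budget)

A value of ``0`` simply reflects the default described in the note above; the Domain Owner can raise it later.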
- - -Step 2: Login to the Domain as a Data Scientist -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once you have the above information you can open a ``Jupyter Notebook`` and begin ``logging`` into the domain server. - -To start you will need to install syft - -:: - - In: - - import syft as sy - -Then you can provide your login credentials by typing: - -:: - - In: - - domain = sy.login(email="____", password="____", url="____",port=8081) - - -Step 3: Explore some useful starting commands -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -As a Data Scientist, you can ``explore`` the Domain Server using the Python ``Syft`` library. - -.. note:: - We will explore more about each command in the next series of tutorials. - -:: - - In: - - # name of the domain - domain.name - - # View datasets on the domain - domain.datasets - - # View store on the domain - domain.store - -Awesome 👏 You have now successfully connected to a Domain Node !! -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -What's Next? ---------------- -Alright, now that you are connected to a Domain node, we would first like to look for the -available datasets on the public network which users can join. - - The following tutorial will show how Data Scientists can search for a dataset on the Domain Node. diff --git a/docs/source/guides/data-scientist/01-search-for-datasets.rst b/docs/source/guides/data-scientist/01-search-for-datasets.rst deleted file mode 100644 index af8b10abcd5..00000000000 --- a/docs/source/guides/data-scientist/01-search-for-datasets.rst +++ /dev/null @@ -1,169 +0,0 @@ -Search for Datasets a Domain Server -============================================================ - -**Data Scientist Tutorials** - -☑️ 00-connect-to-domain - -◻️ 01-search-for-datasets👈 - -.. note:: - **TIP:** To run this tutorial interactively in Jupyter Lab on your own machine type: - -:: - - pip install -U hagrid - hagrid quickstart data-scientist - - - -In the last tutorial, you learned :doc:`How to Connect to a Domain Server <00-deploy-domain>` -that allows us to connect to your organization’s private data servers. - -Once we are connected to the data servers, the first thing that we -would like to do is to look for the available datasets on it. This -is exactly what we are going to cover in this tutorial. - -After today’s tutorial, you will learn how to ``search for datasets`` -on the ``domain node`` you are connected to. - - **Note:** Throughout the tutorials, we mean Domain Servers - whenever we refer to Domain Node. Both point to the same and are used - interchangeably. - -Steps to Search for Datasets on a Domain ---------------------------- - -📒 Overview of this tutorial: - -#. **Login** to the Domain -#. **List** the Datasets on the Domain -#. **Choose** a Dataset -#. **Preview** the Description of the chosen Dataset - -|01-upload-data-00| - -Step 1: Import Syft -~~~~~~~~~~~~~~~~~~~ - -To utilize the privacy-enhancing features offered in PyGrid and to -communicate with your domain node, you must first ``import`` OpenMined's -``private`` deep learning library: PySyft. - -Let's import Syft by running the below cell: - -:: - - In: - # run this cell - - import syft as sy - print("Syft is imported") - - # If Syft is not installed. Please use the 🧙🏽‍♂️ Install Wizard above - - Out: Syft is imported - -Step 2: Log into Domain -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Let's login to our Domain with the credentials provided to you by a domain owner. 
-If you do not have a domain owner, you can create one locally for yourself following -the tutorials starting here: `data-owner/00-deploy-domain <../data-owner/00-deploy-domain.html>`_. - -To login to your Domain node, you will need to define which Domain you are logging into and who you are. -In this case, it will take the form of: - -* IP Address and Port of the domain host -* Your user account Email and Password - -.. warning:: - Make sure to use the Data Scientist credentials provided to you. - -:: - - In: - - # Modify the port, email, and password accordingly! We are using the ones that will be generated for those who followed the Data-Owner tutorials and are now here. - domain_client = sy.login( - url="localhost", - port=8081, - email="jane@email.com", - password="supersecurepassword" - ) - - Out: - Connecting to ... done! Logging into ... done! - -Amazing :) You have just logged in to your Domain and have a domain client with us to explore further. - -Step 3: Search for Datasets on the Domain -~~~~~~~~~~~~~~~~~~~~~~~ - -Now that we have an authenticated domain client with -us, we will look out for the datasets available -on this domain with the following command: - -:: - - In: - domain_client.datasets - - -|01-upload-data-01-datasets| - - -This should show you all the available datasets -on the domain node along with its own metadata for -each of the datasets. - -Step 4: Select a Dataset and Preview It -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Now that we can view the available datasets, we -can fetch a dataset using the index within the -datatsets list and store a pointer to (here -called family_age_dataset) to refer to it easily afterwords. - -:: - - In: - - family_age_dataset=domain_client.datasets[0] - family_age_dataset - - -|01-upload-data-02-pointer-to-dataset| - -.. note:: - Note: We are assuming that you are following the - data-owner tutorial hence we are naming as well - as selecting the family-age dataset. Feel free to - change the variable accordingly for easier - readability based on your use case. - - -Awesome 👏 !! -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You have fetched all the available datasets, created a pointer for one of them and preview it! - -Now that we have a pointer to a dataset on the domain, we are one step -close to performing remote data science and perform various methods. - -What’s Next? ------------- -Alright, so now is the perfect time to utilize the pointer we just created -to a dataset and explore it in detail and see the amazing operations that we -can perfrom on it. - - In the following tutorial, we will see how Data Scientists can explore - a dataset securely. - -.. |01-upload-data-00| image:: ../../_static/personas-image/data-scientist/01-search-for-datasets-00.png - :width: 95% - -.. |01-upload-data-01-datasets| image:: ../../_static/personas-image/data-scientist/01-search-for-datasets-01-datasets.png - :width: 95% - -.. |01-upload-data-02-pointer-to-dataset| image:: ../../_static/personas-image/data-scientist/01-search-for-datasets-02-pointer-to-dataset.png - :width: 95% diff --git a/docs/source/guides/index.rst b/docs/source/guides/index.rst index a302342a005..7f60ef9e966 100644 --- a/docs/source/guides/index.rst +++ b/docs/source/guides/index.rst @@ -22,9 +22,6 @@ while using these new ``privacy-enhancing techniques``. 
**TIP:** To run all the tutorials interactively in Jupyter Lab on your own machine, type: :: - - pip install -U hagrid - hagrid quickstart Once you have the installation completed, the best place to start is by ``identifying`` your role. @@ -37,12 +34,12 @@ an ``outside party`` they may or may not ``fully trust`` has good intentions. You Will Learn ⬇️ """""""""""""""""""" -| :doc:`Part 1: Deploying your own Domain Server ` -| :doc:`Part 2: Uploading Private Data to a Domain Server ` -| :doc:`Part 3: Creating User Accounts on your Domain Server ` +| :doc:`Part 1: Deploying your own Datasite Server ` +| :doc:`Part 2: Uploading Private Data to a Datasite Server ` +| :doc:`Part 3: Creating User Accounts on your Datasite Server ` | :doc:`Part 4: Joining a Network ` | :doc:`Part 5: Creating a Network <04-create-network>` -| :doc:`Part 6: Configuring Privacy Budget on your Domain Server <05-configure-pb>` +| :doc:`Part 6: Configuring Privacy Budget on your Datasite Server <05-configure-pb>` B. Getting Started with Data Scientist 👩🏽‍🔬 @@ -53,9 +50,9 @@ specific ``question`` using one or more data owners' ``datasets``. You Will Learn ⬇️ """""""""""""""""""" -| :doc:`Part 7: Connect to a Domain` -| :doc:`Part 8: Searching for Datasets on the Domain` -| :doc:`Part 9: Exploring a Dataset in the Domain` +| :doc:`Part 7: Connect to a Datasite` +| :doc:`Part 8: Searching for Datasets on the Datasite` +| :doc:`Part 9: Exploring a Dataset in the Datasite` | :doc:`Part 10: Training a Model` | :doc:`Part 11: Retrieving Secure Results <>` diff --git a/docs/source/index.rst b/docs/source/index.rst index 4f2244cd612..3386ad515cd 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -75,7 +75,7 @@ use and development of PySyft! syft.client syft.external - syft.node + syft.server syft.serde syft.service syft.store diff --git a/docs/source/install_tutorials/have_prerequisites.rst b/docs/source/install_tutorials/have_prerequisites.rst deleted file mode 100644 index 736f909d960..00000000000 --- a/docs/source/install_tutorials/have_prerequisites.rst +++ /dev/null @@ -1,56 +0,0 @@ -.. _have_prerequisites: - -================================== -I have all the dependencies -================================== - -.. toctree:: - :maxdepth: 3 - - -1. **Create a new env specifying the Python version (we recommend Python 3.8/3.9) in the terminal:** - - .. code-block:: bash - - conda create -n syft_env python=3.9 - conda activate syft_env - -2. **Install PySyft and Hagrid** - -To install the OpenMined stack that you need in order to deploy a node, please run: - -.. code-block:: bash - - pip install -U syft hagrid - - -PySyft is a library which contains the tools to run privacy preserving machine learning. -Hagrid is a commandline tool that speeds up the deployment of PyGrid, the provider of a peer-to-peer network of -data owners and data scientists who can collectively train AI model using Syft. - -3. **Launch the Doman Node** - -You only have one final step remaining now, before you unleash the power of Hagrid! -The final step is to launch a domain node, which is as easy as: - -.. code-block:: bash - - hagrid launch - -To stop the running domain, run: - -.. code-block:: bash - - hagrid land - -But before stopping it, you can go to ``localhost:8081`` in your `browser `_ to actually interact with the PyGrid Admin UI, where you can manage as a Data Owner your datasets, as well as incoming requests from data scientist. -You can log in using the following credentials: - -.. 
code-block:: python - - info@openmined.org - - - changethis - -Now you're all set up to fully start using PySyft! diff --git a/docs/source/install_tutorials/linux.rst b/docs/source/install_tutorials/linux.rst deleted file mode 100644 index 694b38be0a9..00000000000 --- a/docs/source/install_tutorials/linux.rst +++ /dev/null @@ -1,187 +0,0 @@ -.. _linux_install: - -================================== -Installation on Linux -================================== - -.. toctree:: - :maxdepth: 3 - -This documentation is to help you install and be able to deploy a Domain Node on Ubuntu Linux, with a version of ``20.04.03`` or newer, in the simplest way possible. - -.. note:: - Do you use a different distribution other than Ubuntu? Don't worry, just replace the ``apt`` & ``apt-get`` with your package manager. - -.. seealso:: - - For more advanced tutorials, such as cloud deployment, ansible, vagrant, kubernetes, or virtualbox deployment, please check - `advanced deployment documentation `__. - - - -1. **Launching a Terminal Instance** - -We will use the Linux Terminal to install all the prerequisites and launch the domain. A quick way to launch the terminal is by pressing ``Ctrl+Alt+T``. Let's go! - -2. **Installing Python 3.9** - -We'll be working with Python 3.9 or newer. To check if you have it installed, you may run: - -.. code-block:: bash - - python3 --version - -Your output should looks something like ``Python 3.x.y`` where x>=9. - -If you don't have the correct version of Python, installing it is as easy as running the following: - -.. code-block:: bash - - sudo apt update - sudo apt install python3.9 - python3 --version - -3. **Installing and using Pip** - -`Pip `__ is the most widely used package installer for Python and will help us to install the required dependencies MUCH easier. -You can install it by running the following: - -.. code-block:: bash - - python -m ensurepip --upgrade - -If you already have it installed, you can check to make sure it's the latest version by running: - -.. code-block:: bash - - python -m pip install --upgrade pip - -Your output should looks something like ``Requirement already satisfied: pip in ``. - -4. **Conda and setting up a virtual environment** - -Conda is a package manager that helps you to easily install a lot of data science and machine learning packages, but also to create a separated environment when a certain set of dependencies need to be installed. -To install Conda, you can: - -a. Download the `Anaconda installer `__. - -b. Run the following code, modifying it depending on where you downloaded the installer (e.g. `~/Downloads/`): - - .. code-block:: bash - - bash ~/Downloads/Anaconda3-2020.02-Linux-x86_64.sh - - .. note:: - - Please note that the naming might be different given it could be a newer version of Anaconda. - -c. Create a new env specifying the Python version (we recommend Python 3.8/3.9) in the terminal: - - .. code-block:: bash - - conda create -n syft_env python=3.9 - conda activate syft_env - - -d. To exit, you can run: - - .. code-block:: bash - - conda deactivate - -5. **Install Jupyter Notebook** - -A very convenient way to interact with a deployed node is via Python, using a Jupyter Notebook. You can install it by running: - -.. code-block:: bash - - pip install jupyterlab - -If you encounter issues, you can also install it using Conda: - -.. code-block:: bash - - conda install -c conda-forge notebook - -To launch the Jupyter Notebook, you can run the following in your terminal: - -.. 
code-block:: bash - - jupyter notebook - -6. **Installing and configuring Docker** - -`Docker `__ is a framework which allows us to separate the infrastructure needed to run PySyft in an isolated environment called a ``container`` which you can use off the shelf, without many concerns. -If it sounds complicated, please don't worry, we will walk you through all steps, and you'll be done in no time! -Additionally, we will also use `Docker Composite V2 `_, which allows us to run multi-container applications. - - -a. Install **Docker**: - - .. code-block:: bash - - sudo apt-get upgrade docker & docker run hello-world - -b. Install **Docker Composite V2** as described `here `__. - -c. Run the below command to verify the install: - - .. code-block:: bash - - docker compose version - - You should see somthing like ``Docker Compose version 2.x.y`` in the output when runnning the above command. - -d. If you see something else, go through the `instructions here `__ or if you are using Linux, you can try to do: - - .. code-block:: bash - - mkdir -p ~/.docker/cli-plugins - curl -sSL https://github.com/docker/compose/releases/download/v2.2.3/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose - chmod +x ~/.docker/cli-plugins/docker-compose - -e. Also, make sure you can run without sudo: - - .. code-block:: bash - - echo $USER //(should return your username) - sudo usermod -aG docker $USER - -6. **Install PySyft and Hagrid** - -The hardest part is done! To install the OpenMined stack that you need in order to deploy a node, please run: - -.. code-block:: bash - - pip install -U syft hagrid - - -PySyft is a library which contains the tools to run privacy preserving machine learning. -Hagrid is a commandline tool that speeds up the deployment of PyGrid, the provider of a peer-to-peer network of -data owners and data scientists who can collectively train AI model using Syft. - -7. **Launch the Domain Node** - -Congrats for making it this far! You only have one final step remaining, before you unleash the power of Hagrid! -The final step is to launch a domain node, which is as easy as: - -.. code-block:: bash - - hagrid launch - -To stop the running domain, run: - -.. code-block:: bash - - hagrid land - -But before stopping it, you can go to ``localhost:8081`` in your `browser `_ to actually interact with the PyGrid Admin UI, where you can manage as a Data Owner your datasets, as well as incoming requests from data scientist. -You can log in using the following credentials: - -.. code-block:: python - - info@openmined.org - - changethis - -Now you're all set up to fully start using PySyft! diff --git a/docs/source/install_tutorials/osx_11_5_1.rst b/docs/source/install_tutorials/osx_11_5_1.rst deleted file mode 100644 index d7af44e999f..00000000000 --- a/docs/source/install_tutorials/osx_11_5_1.rst +++ /dev/null @@ -1,466 +0,0 @@ -.. _macOS_install: - -================================= -macOS Tutorial (Big Sur - 11.5.1) -================================= - -Welcome to the beginner installation tutorial for domain deployment on your personal macOS machine! - -If your macOS machine runs on M1, follow accurately the special steps listed for your machine in the tutorial. - -Step 1: Double check macOS version (optional) -============================================= -Before you start this tutorial, let's make sure you're running the right version of -macOS. 
Click the Apple logo at the top left corner, then click "About this Mac" and you'll -see something like: - -|find_osx_version| - -See where this image says "11.5.1"? Yours should say the same! If it does, then you're -ready to begin! - - -Step 2: Open Terminal -===================== - -Almost every step of this tutorial will be conducted within the Terminal app of macOS. Start by -opening up the Terminal application by typing and typing "Terminal". Then hit . -When Terminal opens, it should look something like this (colors may differ). - -|osx_terminal| - -If you see something like this (again... colors my differ), then you're all set to proceed to the next step! - -Step 3: Install Conda -===================== - -(These steps are from https://docs.anaconda.com/anaconda/install/mac-os/ and are copied here -for your convenience and clarity. If any part of your installation doesn't work, please fall -back on the official documentation page.). - -* Step 3.1: Open the Anaconda Installer download page by clicking `here `__. -* Step 3.2: Find the big green "Download" button and click it. It looks like this: - - |conda_button| - -* Step 3.3: When prompted with the download, click 'Save' (saving to your Desktop is fine) - - |click_save| - -* Step 3.4: Navigate to where you saved the file (probably either your Desktop or Downloads folder), and double click the icon. - - When you do so, you might see a warning like the following: - - |conda_icon| - - If so, just click 'Allow' and then you'll see a screen like: - - |conda_install_1| - -* Step 3.5: Click "Continue" and you'll see a screen like this: - - |conda_install_2| - -* Step 3.6: Click "Continue" and you'll see a screen like this: - - |conda_install_3| - -* Step 3.6: Click "Continue" and you'll see a screen like this: - - |conda_install_4| - -* Step 3.6: Click "Accept" and you'll see a screen like this: - - |conda_install_5| - -* Step 3.6: Click "Install" and you'll see a screen like this: - - |conda_install_6| - - After a moment or two a popup will appear like this: - - |conda_install_6_popup| - - Click "OK" and keep waiting... - - After a moment or two a popup *might* appear like this: - - - - Click "OK" and keep waiting... - - While you wait... if you see a dialog like this... - - |conda_install_6_popup_already_installed| - - Then you already have conda installed. Click "OK" and then click "Continue" - until the installation dialog finishes (It'll tell you the installation "Failed" - but that's only because you already have conda installed. ) and then proceed to - Step 4 of this tutorial. - - If, however, you didn't get a warning saying that conda was already installed, - proceed to step 3.7. - -* Step 3.7: Keep waiting until the window changes to this: - - |conda_install_7| - -* Step 3.8: Click "Continue" and you'll see a screen like this: - - - - CONGRATULATIONS!!!! You installed Anaconda!!! You may click the "Close" button and - proceed to Step 4. - -Step 4: Activate Conda Environment -================================== - -* Step 4.1: If you have the 'Terminal' app open from Step 2, quit it (CMD-Q) and -re-open it using the same technique you used in Step 2 to open the application. - (This is to ensure that Terminal is aware of your new conda installation.) - -* Step 4.2: Check to make sure conda is properly installed - - In your freshly opened Terminal window, type the following: - - .. code-block:: bash - - conda --v - - This should print something like "conda 4.10.1". 
If instead says "conda not found", - return to Step 3 and re-install conda. - -* Step 4.3: Update Conda - - .. code-block:: bash - - conda update conda --y - -* Step 4.4: Create conda virtual environment with Python 3.9 - - .. code-block:: bash - - conda create -n syft_env python=3.9 --y - -* Step 4.5: Activate conda environment - - .. code-block:: bash - - conda activate syft_env - - When you run this command, you'll see the word 'syft_env' in your terminal to indicate that you're - now in the syft virtual environment. For the rest of this tutorial, enter all of your commands - into this particular terminal. If ever you close this window, when you re-open a new Terminal - window, just re-run this step (4.5) and you'll be ready to start again! - - -Step 5: Install Necessary Python Packages -========================================= - -* Step 5.0: If you closed your Terminal window since Step 4, open a new Terminal application window and run the following. - - .. code-block:: bash - - conda activate syft_env - - If your Terminal window is still open from Step 4, you can skip this step and proceed directly to step 5.1. - -* Step 5.1: Update Pip - - Within our virtual environment, we're going to use the 'pip' package manager to install all of our - necessary python libraries. But before we do, we need to make sure we're running the latest version of pip. - You can do so by running the following command. - - .. code-block:: bash - - pip install --upgrade pip - -* Step 5.2: Install Jupyter Lab - - .. code-block:: bash - - pip install jupyterlab - - If you encounter an error when running this command, try the following instead: - - .. code-block:: bash - - conda install -c conda-forge jupyterlab - -* Step 5.3: Confirm you have git installed - - For the python package in step 5,4, you'll need to have git installed. - Most modern macOS machines come with git already installed, but if the following - command doesn't work for you... - - .. code-block:: bash - - git --version - - ...then follow git's installation instructions for macOS here: https://git-scm.com/book/en/v2/Getting-Started-Installing-Git - -* Step 5.4: Install Hagrid - - .. code-block:: bash - - pip install -U hagrid - -* Step 5.5: Install Syft - - .. code-block:: bash - - pip install -U syft - - -Step 6: Install Docker -====================== - -* Step 6.0: If you are using Apple MacOS M1 device, install Rosetta2 prior to installing Docker: - - .. code-block:: bash - - softwareupdate --install-rosetta - -* Step 6.1: Open the macOS Docker Install Page: https://docs.docker.com/desktop/mac/install/ - - |docker_install_1| - -* Step 6.2: Click whichever button corresponds to the chip in your macOS ('Mac with Intel chip' if you're not sure). When you do so you'll see something that looks like this: - - |docker_install_2| - -* Step 6.3: Once you hit Save, Docker.dmg will save onto your hard disk (likely in Desktop or Downloads). Find it and double click it. - - |docker_install_3| - -* Step 6.4: Once you double clicked Docker.dmg, a window should come up that looks like: - - |docker_install_4| - -* Step 6.5: In the window that appeared, drag the Docker logo into the Applications folder. A dialog might appear which takes a few minutes to disappear as files are transferred. 
- -* Step 6.6: Once the dialog closes, find the 'Docker' application in your Applications folder: - - |docker_install_5| - -* Step 6.7: When you double-click it, you'll see a dialog like the following: - - - -* Step 6.8: Click "Open" and after a few moments the following screen will appear: - - |docker_install_6| - -* Step 6.9: The yellow color in the bottom right means Docker is still booting. Wait until it's green to proceed. It will look like: - - |docker_install_12| - - Do not close docker. Proceed to the next step. - - -Step 7: Increase the RAM Docker uses to 8GB -=========================================== - -* Step 7.0: If the Docker window has been closed, look at teh top bar of your screen on the right for a small whale logo that looks like this: - - |docker_logo| - - Click the logo and then click "Dashboard" to bring up the Docker window you may recognize from Step 6. - - |docker_install_7| - -* Step 7.1: Click the Gear icon in the top right corner of the Docker window and you'll see a screen like so: - - |docker_install_8| - -* Step 7.2: Click "Resources" and the window will change to: - - |docker_install_9| - -* Step 7.3: Drag the small blue circle next to "Memory" until 8GB of memory have been allocated. Your window will look like this. - - |docker_install_10| - -* Step 7.4: Click "Apply & Restart" and then wait until the bottom left tab returns from Yellow to Green. - - - -* Step 7.5: You are done! You may now proceed back to the main Docker dashboard by clicking the "X" in the top right corner of the docker window, taking you to a dashboard that looks like: - - |docker_install_11| - -Congratulations! You're now fully installed and ready to go!!! You may now close your Terminal! - -Step 8: Test Hagrid (optional) -============================== - -* Step 8.1: Launch Hagrid - - Just to make sure our installation is correct and working. Open a new terminal and run the following: - - .. code-block:: bash - - conda activate syft_env - hagrid launch test - - Wait several minutes. You should see LOTS of logging. The logging will occasionally hang during downloads. If your - internet is slow you'll need to be patient. The logging should eventually stop with the message "Application startup complete." - - |hagrid_startup_complete| - - You can then load "http://localhost:8081" to see a UI deployed which looks like: - - |pygrid_ui| - - Congratulations! Looks like everything was installed properly! - -* Step 8.2: Launch Juptyer Lab - - With hagrid still running, open a new terminal (Command-N if you have Terminal selected) and run the following: - - .. code-block:: bash - - conda activate syft_env - jupyter lab - - A new browser window should open up. - - |syft_1| - -* Step 8.3: Open a new Jupyter Notebook by clicking the "Python 3" square icon (with the python logo). The window will change to: - - |syft_2| - -* Step 8.4: Enter the following code into the top cell and then hit "Shift Enter". - - - .. code-block:: python - - import syft as sy - domain = sy.login(email="info@openmined.org", password="changethis", port=8081) - - - After typing , you should see the following output (or something similar): - - |syft_3| - - And if so, Congratulations!!! You're 100% setup and we've tested to make sure! - -* Step 8.5: Close Jupyter Lab - - Close the jupyter lab browser tab. Then find the terminal window where we ran 'jupyter lab' and close the terminal window. 
If - a dialog box pops up saying "Do you want to terminate running processes in this window?", click "Terminate" - -* Step 8.6: Land Hagrid - - Open a new terminal window and run: - - .. code-block:: bash - - conda activate syft_env - hagrid land test - conda deactivate syft_env - -Well done! - -.. |osx_terminal| image:: ../_static/install_tutorials/osx_terminal.png - :width: 50% - -.. |find_osx_version| image:: ../_static/install_tutorials/find_osx_version.png - :width: 50% - -.. |conda_button| image:: ../_static/install_tutorials/conda_button.png - :width: 50% - -.. |click_save| image:: ../_static/install_tutorials/click_save.png - :width: 50% - -.. |conda_icon| image:: ../_static/install_tutorials/conda_icon.png - :width: 50% - -.. |conda_install_1| image:: ../_static/install_tutorials/conda_install_1.png - :width: 50% - -.. |conda_install_2| image:: ../_static/install_tutorials/conda_install_2.png - :width: 50% - -.. |conda_install_3| image:: ../_static/install_tutorials/conda_install_3.png - :width: 50% - -.. |conda_install_4| image:: ../_static/install_tutorials/conda_install_4.png - :width: 50% - -.. |conda_install_5| image:: ../_static/install_tutorials/conda_install_5.png - :width: 50% - -.. |conda_install_6| image:: ../_static/install_tutorials/conda_install_6.png - :width: 50% - -.. |conda_install_6_popup| image:: ../_static/install_tutorials/conda_install_6_popup.png - :width: 50% - -.. |conda_install_6_popup_already_installed| image:: ../_static/install_tutorials/conda_install_6_popup_already_installed.png - :width: 50% - -.. |conda_install_6_popup_access| image:: ../_static/install_tutorials/conda_install_6_popup_access.png - :width: 50% - -.. |conda_install_7| image:: ../_static/install_tutorials/conda_install_7.png - :width: 50% - -.. |conda_install_8| image:: ../_static/install_tutorials/conda_install_8.png - :width: 50% - -.. |docker_install_1| image:: ../_static/install_tutorials/docker_install_1.png - :width: 50% - -.. |docker_install_2| image:: ../_static/install_tutorials/docker_install_2.png - :width: 50% - -.. |docker_install_3| image:: ../_static/install_tutorials/docker_install_3.png - :width: 50% - -.. |docker_install_4| image:: ../_static/install_tutorials/docker_install_4.png - :width: 50% - -.. |docker_install_5| image:: ../_static/install_tutorials/docker_install_5.png - :width: 50% - -.. |docker_install_6| image:: ../_static/install_tutorials/docker_install_6.png - :width: 50% - -.. |docker_install_7| image:: ../_static/install_tutorials/docker_install_7.png - :width: 50% - -.. |docker_install_8| image:: ../_static/install_tutorials/docker_install_8.png - :width: 50% - -.. |docker_install_9| image:: ../_static/install_tutorials/docker_install_9.png - :width: 50% - -.. |docker_install_10| image:: ../_static/install_tutorials/docker_install_10.png - :width: 50% - -.. |docker_install_11| image:: ../_static/install_tutorials/docker_install_11.png - :width: 50% - -.. |docker_install_12| image:: ../_static/install_tutorials/docker_install_12.png - :width: 50% - -.. |docker_logo| image:: ../_static/install_tutorials/docker_logo.png - :width: 50% - -.. |hagrid_startup_complete| image:: ../_static/install_tutorials/hagrid_startup_complete.png - :width: 50% - -.. |pygrid_ui| image:: ../_static/install_tutorials/pygrid_ui.png - :width: 50% - -.. |syft_1| image:: ../_static/install_tutorials/syft_1.png - :width: 50% - -.. |syft_2| image:: ../_static/install_tutorials/syft_2.png - :width: 50% - -.. 
|syft_3| image:: ../_static/install_tutorials/syft_3.png - :width: 50% diff --git a/docs/source/install_tutorials/overview.rst b/docs/source/install_tutorials/overview.rst deleted file mode 100644 index 895ff982cee..00000000000 --- a/docs/source/install_tutorials/overview.rst +++ /dev/null @@ -1,103 +0,0 @@ -Beginner-level PySyft and PyGrid Installation Tutorials -******************************************************* - -.. toctree:: - :maxdepth: 3 - -Welcome to the domain deployment installation tutorials! -This section of our documentation is designed to be the -simplest way to get you started deploying a PyGrid Domain -to an OSX, Linux, or Windows machine and interacting with it -as a data scientist using PySyft. If you're looking -for cloud deployment, or more advanced tutorials such as -ansible, vagrant, kubernetes, or virtualbox deployment, please see the -`advanced deployment documentation `__. - -The purpose of these tutorials is to help you install everything -you need to run a Domain node from your personal machine (such -as if you're running through OpenMined -`courses `__ -or -`tutorials `__). -To that end, we will also be installing everything you might need to run Jupyter -notebooks with PySyft installed, such as if you're pretending to be -both Data Owner and Data Scientist as a part of a tutorial or course. - -Step 1: Are you on OSX, Windows, or Linux? -========================================== - -Installation differs greatly depending on whether your personal machine is -running OSX, Linux, or Windows. PySyft and PyGrid are relatively new pieces -of software so not all versions of these are supported. However, the first -step of your journey is to figure out which operating system you are running -and choose the right tutorial for installation. Then within the dropdowns below, -choose which version is right for you. Once you've found the right version, -and completed the tutorial for that version, you'll be all done!!! Good luck! - -There are 3 types of operating systems for you to choose from: OSX, Linux, and Windows. - -OSX Tutorials -~~~~~~~~~~~~~ - -If you know you're running OSX but you're not sure what version you're running, -click the Apple logo at the top left corner, then click "About this Mac" and you'll -see something like: - -|find_osx_version| - -See where this image says "11.5.1"? Figure out what number yours says in that place -and use that number to determine which of these installation tutorials you should -follow to complete your installation. If you don't see your number, choose the -closest that you can. - -#. `Big Sur (11.5.1) `__. - -Linux Tutorials -~~~~~~~~~~~~~~~ - -If you know that you're running Linux but you're not sure what version you're running, -open up a command line and type: - -.. code-block:: bash - - $ lsb_release -a - -Which should print something like the following: - -|find_ubuntu_version| - -See where this image says "20.04.3"? Figure out what number yours says in that place - -#. `Ubuntu (20.04.3 - Focal Fossa) `__. - -Windows Tutorials -~~~~~~~~~~~~~~~~~ - -If you know that you're running Windows but you're not sure what version you're running, -press (Windows Key + R) and then in the text box that appears type: - -.. code-block:: bash - - $ winver - -and hit (Enter)! This should print something like the following: - -|find_windows_version| - -See where this image says "Windows 10" and "20H2"? 
Figure out what numbers yours says in those place -and use those number to determine which of these installation tutorials you should -follow to complete your installation. If you don't see one of your numbers, choose the -closest that you can. - -#. `Windows 10 (20H2) `__. - -Best of luck on your journey! - -.. |find_osx_version| image:: ../_static/install_tutorials/find_osx_version.png - :width: 50% - -.. |find_ubuntu_version| image:: ../_static/install_tutorials/find_ubuntu_version.png - :width: 50% - -.. |find_windows_version| image:: ../_static/install_tutorials/find_windows_version.png - :width: 50% diff --git a/docs/source/install_tutorials/windows.rst b/docs/source/install_tutorials/windows.rst deleted file mode 100644 index 708ebca4b4b..00000000000 --- a/docs/source/install_tutorials/windows.rst +++ /dev/null @@ -1,207 +0,0 @@ -.. _windows_install: - -================= -Windows Tutorials -================= - -The following instructions are for Windows 10 version 2004 or higher. - -Now, traditionally, getting things as big and imposing as PySyft to work on Windows is... really, really challenging. -Luckily for us, we've got a few tricks up our sleeves to make the process super easy. - -So sit back, relax, grab a few cookies, and *enjoy!* - -Step 1: Enabling WSL2 -===================== - -Our first and most important step is going to be to enable the Windows Subsystem for Linux (WSL). -This lets you run a Linux-based environment (including most command line tools and applications!) directly on Windows, -unmodified, and without any of the drawbacks of more traditional solutions like virtual machines or dual-booting. - - -Installing this incredible piece of software is as easy as opening PowerShell or Command Prompt in the Start Menu, and entering:: - - wsl --install - -And that's it! It'll start installing all the dependencies and getting things in order. -If you run into any issues here, please refer to `this link `_, which covers common WSL installation issues. - -.. Specifying an alternate way to install wsl along with distro from microsoft store start -**Alternate way** -================= - -**Install WSL from Microsoft Store** -If the command line has you feeling confused, fear not! There's a more user-friendly approach to installing WSL on Windows. We can bypass the command line altogether and download a package of all the components from the Microsoft Store. Not only that, but this method runs WSL isolated from Windows 11 and updates will be available through the Microsoft Store, so you won't have to wait for the next version of the operating system to install the newest version. - -To install WSL from the Microsoft Store, use these steps: - - -1. Enable Virtual Machine Platform -================================== - - - Open **Start** - - Search for **Turn Windows Features on or off** and click the - top result to open the app - - Check the **Virtual Machine Platform** - - Click the **OK** button - - Click the **Restart button** - -After completing these steps, you can download the app from the Microsoft Store. - - 2. Install Windows Subsystem for Linux app - ========================================== - -- Open the `Windows Subsystem for Linux Store Page `_ -- Click the **Get** button -- Click the **Open** button -- Click the **Get** button again - - 3. Install Linux Distro - ======================= -- Open **Microsoft Store** app. -- Search for Linux distro. For example `Ubuntu `_` -- Click the **Get** button. -- Click the **Open** button. - -*Congratulations! 
Once you complete the steps, WSL will install on Windows 11, including the support for Linux GUI apps and the Linux distribution.* - -*To access the command line for your Linux distribution, search for "wsl" in the search bar and select the top result, which should be a penguin logo* - - .. end - -Step 2: Setting up Linux User Info -================================== - -Well done! You've *almost* got an entire Linux kernel and distribution on your machine, and you did this with **barely one line of code!** -There's just one last step needed. And luckily for us, it's an easy one... - -We now have to add a new User to our brand new and shiny Linux distro. To do this, we'll have to pick a username and password. -Please note- this account, this password- doesn't have any relation with your regular Windows username or password. It's specific to the Linux -distro that you just installed. - -Once you provide a username and password, **congratulations!** You have a fully fledged Linux distro. You may not have realized it, but you've just unlocked -a whole new universe of possibilities and interesting tools. - -Step 3: Updating & Upgrading -============================ - -Now that you have a shiny new copy of Linux, your next step will be to update and upgrade it. -This is pretty easy to do in Linux, and it's something we can do with *just one command!* - -In your new Ubuntu terminal, enter the following command:: - - sudo apt update && sudo apt upgrade - -You might need to enter the password of the account you created in Step 2. You might also need to press Y and hit enter to allow the updates. -But you're on a roll- nothing will stop you from getting the most up-to-date, and secure version of your Linux distro! - -Note: We'd actually recommend doing this reasonably often (once every few days) to maintain a safe and up-to-date distro. - -Optional: Installing Windows Terminal -===================================== - -We'd recommend installing the Windows Terminal, and using that to launch your Linux Distribution instead of PowerShell, Command Prompt, or the default -Ubuntu shell that comes bundled in. - -This isn't strictly necessary, but it doesn't take too long, improves the command line experience, and will probably make you happier. - -Please go `here `_ if you're interested. - -Step 4: Installing Conda -======================== - -Wow! We've made it pretty far together in a pretty short amount of time. - -We've already installed a Linux distribution, (and if you followed the Optional step, have a *swanky* new terminal!) and we're getting *really* close to installing our software. -Our next step is an important one. It'll help us make sure our software can install without any conflicts, and once installed, that it will be stable, and work as intended! - -We're going to use a tool called Anaconda to do this. It'll help us create something called a "Virtual Environment." - -To install Anaconda, please follow the yellow brick road I lay down here below: - -- `Head to the Anaconda website `_, and find the latest Linux installer. -- Right click the installer, and select **"Copy Link Address"** -- Head back to your WSL terminal, and type "wget " and then right click next to it. This should paste the link you copied, which should produce something like:: - - wget https://repo.anaconda.com/archive/Anaconda3-2022.05-Linux-x86_64.sh - -- You got it! Not only did you get it, you made it look **easy.** Now just hit enter. -- At this point, Conda will start installing. 
Type "yes" and hit Enter for all the various prompts that follow (Accepting the Terms and Conditions, Running Conda Init, etc) -- Once this is done, close and restart your WSL terminal. -- Once restarted, verify that conda is working using the following command:: - - conda env list - -Wait wait wait wait just a second. -Do you realize what just happened? - -You've just successfully installed Anaconda!! Hooray! -Trust me, your life is about to become a LOT easier. - - -- Let's now tap into your newfound powers with Anaconda and create a new virtual environment called "syft_env" by running the following in your WSL shell:: - - conda create -n syft_env python=3.9 --y - -- Let's verify that we created our "syft_env" successfully with the following command (Deja Vu, anyone?):: - - conda env list - -- You should see two environments in the output. Hooray! Now let's activate the syft virtual env, and let the fun *really* begin:: - - conda activate syft_env - -- Now let's use it to conveniently install a few packages:: - - sudo apt install python3-pip - pip3 install pandas matplotlib numpy - pip3 install jupyterlab - -- If the last command fails, try the following instead:: - - conda install -c conda-forge jupyterlab - - -Step 5: Become the Docker Doctor -================================ - -The last tool needed to complete your arsenal is called Docker. -You can install it by following the instructions `here `_. - -Note: The windows user account that launches wsl 2 has to be added to the local group "docker-users". On Windows 10 Home, run netplwiz to add the Windows user to the group "docker-users". - -Once you have it running, you just have to ensure the following: -- You've allocated a sufficient amount of RAM (we recommend atleast 8GB, but you can get by with less) -- You're using the WSL2 backend - -Congratulations, you have reached the end of your journey. Now it is time for your **ultimate test!** Deploying a domain node. - -Note that your ultimate test is **optional**- you can do this part later. - - -Step 6: Install Hagrid and PySyft -================================= - -- With the power of WSL and Anaconda, installing our software is as easy as:: - - pip3 install syft - pip3 install hagrid - - -Optional: Deploy a Domain Node! -=============================== - -Everything we've done so far has been to make this next part as easy as possible. This is the moment we've all been waiting for. - -To launch a domain node called "test_domain", ensure your Virtual Environment ("syft_env" in the steps above) is active, that Docker Desktop is running, and run the command below on your WSL terminal:: - - hagrid launch test_domain - -Note: If you get the error message "test_domain is not valid for node_type please use one of the following options: ['domain', 'network']" then rerun the command by changing test_domain to domain. - -You should see the containers begin to appear on Docker! - -**CONGRATULATIONS!!!** - -You have reached the promise land. You're ready to begin remote data science. -It was a pleasure walking you through the installation process. Now be sure to use your newfound powers and abilities for good! diff --git a/justfile b/justfile new file mode 100644 index 00000000000..ed7a5c6bb6d --- /dev/null +++ b/justfile @@ -0,0 +1,550 @@ +# Rules for new commands +# - Start with a verb +# - Keep it short (max. 3 words) +# - Group commands by context. Include group name in the command name. 
+# - Mark things private that are util functions with [private] or _var +# - Don't over-engineer, keep it simple. +# - Don't break existing commands + +set dotenv-load + +# --------------------------------------------------------------------------------------------------------------------- +# K3D cluster names +# Note: These are private (_ prefix) because we don't want it to be editable from CLI. +_name_default := "syft-dev" +_name_high := "syft-high" +_name_low := "syft-low" +_name_gw := "syft-gw" +_name_signoz := "signoz" + +# K3D Registry name is used only in k3d. +_name_registry := "registry.localhost" + +# Kubernetes namespaces for the deployments +# Note: These are private (_ prefix) because we don't want it to be editable from CLI. +_ns_default := "syft" +_ns_high := "high" +_ns_low := "low" +_ns_gw := "gw" + +# Kubernetes context names generated for the K3D clusters +# Note: These are private (_ prefix) because we don't want it to be editable from CLI. +_ctx_default := "k3d-" + _name_default +_ctx_high := "k3d-" + _name_high +_ctx_low := "k3d-" + _name_low +_ctx_gw := "k3d-" + _name_gw +_ctx_signoz := "k3d-" + _name_signoz + +# --------------------------------------------------------------------------------------------------------------------- + +# Static Ports for the clusters +port_default := "8080" +port_high := port_default +port_low := "8081" +port_gw := "8082" +port_signoz_ui := "3301" +port_signoz_otel := "4317" +port_registry := "5800" + +# Registry URL is used for +# - setting up the registry for k3d clusters +# - setting up the --var CONTAINER_REGISTRY for devspace deployments +# Note: Do not add http:// or https:// prefix +registry_url := "k3d-" + _name_registry + ":" + port_registry + +# Signoz OTel endpoint is used for setting up the Otel collector +signoz_otel_url := "http://host.k3d.internal:" + port_signoz_otel + +# --------------------------------------------------------------------------------------------------------------------- +# devspace profiles (comma-separated) +profiles := "" + +# enable tracing by adding "tracing" profile in devspace +tracing := "true" + +# add tracing profile if enabled +# This is private ( _prefix) to have a simple `just tracing=true ...` +_g_profiles := if tracing == "true" { profiles + ",tracing" } else { profiles } + +# --------------------------------------------------------------------------------------------------------------------- +# this might break if you have alias python = python3 or either of the executable not pointing to the correct one +# just fix your system instead of making of fixing this +python_path := `which python || which python3` + +# --------------------------------------------------------------------------------------------------------------------- + +@default: + just --list + +# --------------------------------------------------------------------------------------------------------------------- + +# Start a local registry on http://k3d-registry.local:5800 (port_registry=5800 or registry_url="gcr.io/path/to/registry") +[group('registry')] +start-registry: + k3d --version + @-docker volume create k3d-registry-vol + @-k3d registry create {{ _name_registry }} --port {{ port_registry }} -v k3d-registry-vol:/var/lib/registry --no-help + + if ! 
grep -q {{ _name_registry }} /etc/hosts; then \ + sudo {{ python_path }} scripts/patch_hosts.py --add-k3d-registry --fix-docker-hosts; \ + fi + + @curl --silent --retry 5 --retry-all-errors http://{{ registry_url }}/v2/_catalog | jq + @echo "\033[1;32mRegistring running at http://{{ registry_url }}\033[0m" + +[group('registry')] +delete-registry: + -k3d registry delete {{ _name_registry }} + -docker volume rm k3d-registry-vol + +# --------------------------------------------------------------------------------------------------------------------- + +# Launch a Datasite high-side cluster on http://localhost:8080 (port_high=8080) +[group('highside')] +start-high: (create-cluster _name_high port_high) + +# Stop the Datasite high-side cluster +[group('highside')] +delete-high: (delete-cluster _name_high) + +# Deploy Syft to the high-side cluster +[group('highside')] +deploy-high: (deploy-devspace _ctx_high _ns_default) + +# Reset Syft DB state in the high-side cluster +[group('highside')] +reset-high: (reset-syft _ctx_high _ns_default) + +# Remove namespace from the high-side cluster +[group('highside')] +cleanup-high: (yank-ns _ctx_high _ns_default) + +[group('highside')] +wait-high: (wait-pods _ctx_high _ns_default) + +# K9s into the Datasite High cluster +[group('highside')] +k9s-high: + k9s --context {{ _ctx_high }} + +# --------------------------------------------------------------------------------------------------------------------- + +# Launch a Datasite low-side cluster on http://localhost:8081 (port_low=8081) +[group('lowside')] +start-low: (create-cluster _name_low port_low) + +# Stop the Datasite low-side cluster +[group('lowside')] +delete-low: (delete-cluster _name_low) + +# Deploy Syft to the low-side cluster +[group('lowside')] +deploy-low: (deploy-devspace _ctx_low _ns_default "-p datasite-low") + +# Reset Syft DB state in the low-side cluster +[group('lowside')] +reset-low: (reset-syft _ctx_low _ns_default) + +# Remove namespace from the low-side cluster +[group('lowside')] +cleanup-low: (yank-ns _ctx_low _ns_default) + +[group('lowside')] +wait-low: (wait-pods _ctx_low _ns_default) + +# K9s into the Datesite Low cluster +[group('lowside')] +k9s-low: + k9s --context {{ _ctx_low }} + +# --------------------------------------------------------------------------------------------------------------------- + +# Launch a Gateway cluster on http://localhost:8083 (port_gw=8083) +[group('gateway')] +start-gw: (create-cluster _name_gw port_gw) + +# Delete the Gateway cluster +[group('gateway')] +delete-gw: (delete-cluster _name_gw) + +# Deploy Syft to the gateway cluster +[group('gateway')] +deploy-gw: (deploy-devspace _ctx_gw _ns_default "-p gateway") + +# Reset Syft DB state in the gateway cluster +[group('gateway')] +reset-gw: (reset-syft _ctx_gw _ns_default) + +# Remove namespace from the gateway cluster +[group('gateway')] +cleanup-gw: (yank-ns _ctx_gw _ns_default) + +[group('gateway')] +wait-gw: (wait-pods _ctx_gw _ns_default) + +# K9s into the Gateway cluster +[group('gateway')] +k9s-gw: + k9s --context {{ _ctx_gw }} + +# --------------------------------------------------------------------------------------------------------------------- + +# Launch SigNoz. 
UI=http://localhost:3301 OTEL=http://localhost:4317 (port_signoz_ui=3301 port_signoz_otel=4317) +[group('signoz')] +start-signoz: && (apply-signoz _ctx_signoz) (setup-signoz _ctx_signoz) + k3d cluster create {{ _name_signoz }} \ + --port {{ port_signoz_ui }}:3301@loadbalancer \ + --port {{ port_signoz_otel }}:4317@loadbalancer \ + --k3s-arg "--disable=metrics-server@server:*" + +# Remove SigNoz from the cluster +[group('signoz')] +delete-signoz: (delete-cluster _name_signoz) + +# Remove all SigNoz data without deleting +[group('signoz')] +reset-signoz: + @kubectl exec --context {{ _ctx_signoz }} -n platform chi-signoz-clickhouse-cluster-0-0-0 --container clickhouse -- \ + clickhouse-client --multiline --multiquery "\ + TRUNCATE TABLE signoz_analytics.rule_state_history_v0; \ + TRUNCATE TABLE signoz_logs.logs_v2; \ + TRUNCATE TABLE signoz_logs.logs; \ + TRUNCATE TABLE signoz_logs.usage; \ + TRUNCATE TABLE signoz_metrics.usage; \ + TRUNCATE TABLE signoz_traces.durationSort; \ + TRUNCATE TABLE signoz_traces.signoz_error_index_v2; \ + TRUNCATE TABLE signoz_traces.signoz_index_v2; \ + TRUNCATE TABLE signoz_traces.signoz_spans; \ + TRUNCATE TABLE signoz_traces.top_level_operations; \ + TRUNCATE TABLE signoz_traces.usage_explorer; \ + TRUNCATE TABLE signoz_traces.usage;" + + @echo "Done. Traces & logs are cleared, but graphs may still show old content." + +# K9s into the Signoz cluster +[group('signoz')] +k9s-signoz: + k9s --context {{ _ctx_signoz }} + +[group('signoz')] +[private] +apply-collector kube_context: + @echo "Installing SigNoz OTel Collector in kubernetes context {{ kube_context }}" + helm install k8s-infra k8s-infra \ + --repo https://charts.signoz.io \ + --kube-context {{ kube_context }} \ + --set global.deploymentEnvironment=local \ + --set clusterName={{ kube_context }} \ + --set otelCollectorEndpoint={{ signoz_otel_url }} \ + --set otelInsecure=true \ + --set presets.otlpExporter.enabled=true \ + --set presets.loggingExporter.enabled=true + +# Remove SigNoz from the cluster +[group('signoz')] +delete-collector: + helm uninstall k8s-infra + +[group('signoz')] +[private] +apply-signoz kube_context: + @echo "Installing SigNoz in kube context {{ kube_context }}" + helm install signoz signoz \ + --repo https://charts.signoz.io \ + --kube-context {{ kube_context }} \ + --namespace platform \ + --create-namespace \ + --version 0.52.0 \ + --set frontend.service.type=LoadBalancer \ + --set otelCollector.service.type=LoadBalancer \ + --set otelCollectorMetrics.service.type=LoadBalancer + +[group('signoz')] +[private] +setup-signoz kube_context: + #!/bin/bash + set -euo pipefail + + SIGNOZ_URL="http://localhost:3301" + USERNAME="admin@localhost" + PASSWORD="password" + DASHBOARDS=( + "https://raw.githubusercontent.com/SigNoz/dashboards/refs/heads/main/k8s-infra-metrics/kubernetes-pod-metrics-detailed.json" + "https://raw.githubusercontent.com/SigNoz/dashboards/refs/heads/main/k8s-infra-metrics/kubernetes-node-metrics-detailed.json" + "https://raw.githubusercontent.com/SigNoz/dashboards/refs/heads/main/k8s-infra-metrics/kubernetes-cluster-metrics.json" + ) + + echo "Waiting for SigNoz frontend to be available..." + bash ./packages/grid/scripts/wait_for.sh service signoz-frontend \ + --namespace platform --context {{ kube_context }} &> /dev/null + + echo "Setting up SigNoz account..." 
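+    # Create the initial SigNoz admin account via its /api/v1/register endpoint, using the
+    # USERNAME/PASSWORD defined above; the /api/v1/login call further below exchanges the same
+    # credentials for a JWT that is used to import the dashboards.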
+ curl -s --retry 5 --retry-all-errors -X POST \ + -H "Content-Type: application/json" \ + --data "{\"email\":\"$USERNAME\",\"name\":\"admin\",\"orgName\":\"openmined\",\"password\":\"$PASSWORD\"}" \ + "$SIGNOZ_URL/api/v1/register" + + echo "Adding some dashboards..." + AUTH_TOKEN=$(curl -s -X POST \ + -H "Content-Type: application/json" \ + -d "{\"email\":\"$USERNAME\",\"password\":\"$PASSWORD\"}" \ + "$SIGNOZ_URL/api/v1/login" | jq -r .accessJwt) + + if [ -z "$AUTH_TOKEN" ] || [ "$AUTH_TOKEN" = "null" ]; then + echo "Could not set up dashboards. But you can do it manually from the dashboard." + exit 0 + fi + + for URL in "${DASHBOARDS[@]}"; do + curl -s -X POST \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $AUTH_TOKEN" \ + -d "$(curl -s --retry 3 --retry-all-errors "$URL")" \ + "$SIGNOZ_URL/api/v1/dashboards" &> /dev/null + done + + printf "\nSignoz is ready and running on %s\n" "$SIGNOZ_URL" + printf "Email: \033[1;36m%s\033[0m\n" "$USERNAME" + printf "Password: \033[1;36m%s\033[0m\n" "$PASSWORD" + +# --------------------------------------------------------------------------------------------------------------------- + +# List all clusters +[group('cluster')] +list-clusters: + k3d cluster list + +# Stop all clusters +[group('cluster')] +delete-clusters: + k3d cluster delete --all + +[group('cluster')] +[private] +create-cluster cluster_name port *args='': start-registry && (apply-coredns "k3d-" + cluster_name) (apply-collector "k3d-" + cluster_name) + k3d cluster create {{ cluster_name }} \ + --port {{ port }}:80@loadbalancer \ + --k3s-arg "--disable=metrics-server@server:*" \ + --registry-use {{ registry_url }} {{ args }} + +[group('cluster')] +[private] +delete-cluster *args='': + #!/bin/bash + set -euo pipefail + + # remove the k3d- prefix + ARGS=$(echo "{{ args }}" | sed -e 's/k3d-//g') + k3d cluster delete $ARGS + +[group('cluster')] +[private] +apply-coredns kube_context: + @echo "Applying custom CoreDNS config" + + kubectl apply -f ./scripts/k8s-coredns-custom.yml --context {{ kube_context }} + kubectl delete pod -n kube-system -l k8s-app=kube-dns --context {{ kube_context }} + +# --------------------------------------------------------------------------------------------------------------------- + +[group('devspace')] +[private] +deploy-devspace kube_context namespace *args='': + #!/bin/bash + set -euo pipefail + + cd packages/grid + + PROFILE="{{ _g_profiles }}" + PROFILE=$(echo "$PROFILE" | sed -E 's/^,*|,*$//g') + if [ -n "$PROFILE" ]; then + PROFILE="-p $PROFILE" + fi + + echo "Deploying to kube context {{ kube_context }}" + + devspace deploy -b \ + --no-warn \ + --kube-context {{ kube_context }} \ + --namespace {{ namespace }} \ + $PROFILE \ + {{ args }} \ + --var CONTAINER_REGISTRY={{ registry_url }} + +[group('devspace')] +[private] +purge-devspace kube_context namespace: + #!/bin/bash + set -euo pipefail + + cd packages/grid + devspace purge --force-purge --kube-context {{ kube_context }} --no-warn --namespace {{ namespace }} + sleep 3 + +# --------------------------------------------------------------------------------------------------------------------- + +[group('cloud')] +[private] +check-platform: + #!/bin/bash + set -euo pipefail + + OSTYPE=$(uname -sm) + MSG="==================================================================================================\n\ + Deploying dev->cloud k8s (x64 nodes) requires images to be built with --platform=linux/amd64\n\ + On Apple Silicon, cross-platform image is unstable on different providers\n\n\ + 
Current status:\n\ + ✅ | Docker Desktop | 4.34.0+ | *Enable* containerd and *uncheck* 'Use Rosetta for x86_64/amd64...'\n\ + ❌ | OrbStack | 1.7.2 | Rosetta: gets stuck & qemu: errors with 'illegal instruction'\n\ + ❌ | Lima VM/Colima | 0.23.2 | Rosetta: gets stuck & qemu: errors with 'illegal instruction'\n\ + ==================================================================================================" + + if [[ "$OSTYPE" == "Darwin arm64" ]]; then + echo -e $MSG + fi + +[group('cloud')] +[private] +deploy-cloud kube_context registry_url namespace profile: check-platform + #!/bin/bash + + CONTEXT_NAME=$(kubectl config get-contexts -o=name | grep "{{ kube_context }}") + + if [ -z "$CONTEXT_NAME" ]; then + echo "Context not found: {{ kube_context }}. Authorize with your cloud provider to get the relevant K8s cluster contexts" + exit 1 + fi + + set -euo pipefail + + # cloud deployments always have tracing false + platform=amd64 + just tracing=false registry_url={{ registry_url }} \ + deploy-devspace $CONTEXT_NAME {{ namespace }} "-p {{ profile }} --var PLATFORM=amd64" + +[group('cloud')] +[private] +purge-cloud kube_context namespace: + #!/bin/bash + + CONTEXT_NAME=$(kubectl config get-contexts -o=name | grep "{{ kube_context }}") + + if [ -z "$CONTEXT_NAME" ]; then + echo "Context not found: {{ kube_context }}. Authorize with your cloud provider to get the relevant K8s cluster contexts" + exit 1 + fi + + set -euo pipefail + + just purge-devspace $CONTEXT_NAME {{ namespace }} + kubectl delete ns {{ namespace }} --force --grace-period=0 --context $CONTEXT_NAME + +# --------------------------------------------------------------------------------------------------------------------- + +# Auth all components required for deploying Syft to Google Cloud +[group('cloud-gcp')] +auth-gcloud: + #!/bin/bash + set -euo pipefail + + # login to gcloud + ACCOUNT=$(gcloud config get-value account) + if [ -z "$ACCOUNT" ]; then + gcloud auth login + fi + + echo "Logged in as \"$(gcloud config get-value account)\"" + + # install gke-gcloud-auth-plugin + gke_installed=$(gcloud components list --only-local-state --filter gke-gcloud-auth-plugin --format=list 2>/dev/null) + if [ -z "$gke_installed" ]; then + gcloud components install gke-gcloud-auth-plugin + echo "Installed gke-gcloud-auth-plugin" + fi + +# Deploy local code as datasite-high to Google Kubernetes Engine +[group('cloud-gcp')] +deploy-gcp-high gcp_cluster gcp_registry_url namespace="syft": (deploy-cloud gcp_cluster gcp_registry_url namespace "gcp") + +# Deploy local code as datasite-low to Google Kubernetes Engine +[group('cloud-gcp')] +deploy-gcp-low gcp_cluster gcp_registry_url namespace="syft": (deploy-cloud gcp_cluster gcp_registry_url namespace "gcp-low") + +# Purge deployment from a cluster +[group('cloud-gcp')] +purge-gcp gcp_cluster namespace="syft": (purge-cloud gcp_cluster namespace) + +# --------------------------------------------------------------------------------------------------------------------- + +[group('cloud-az')] +auth-az tenant="creditsopenmined.onmicrosoft.com": + #!/bin/bash + + # login to azure + ACCOUNT=$(az account show --query user.name) + if [ -z "$ACCOUNT" ]; then + az login --tenant {{ tenant }} + fi + + echo "Logged in as $(az account show --query user.name)" + +# Deploy local code as datasite-high to Azure Kubernetes Service +[group('cloud-az')] +deploy-az-high aks_cluster az_registry namespace="syft": (deploy-cloud aks_cluster az_registry namespace "azure") + +# 
--------------------------------------------------------------------------------------------------------------------- + +# Reset Syft state in a cluster +[group('utils')] +reset-syft kube_context namespace: + scripts/reset_k8s.sh --context {{ kube_context }} --namespace {{ namespace }} + +# Delete all local clusters and registry +[group('utils')] +delete-all: delete-clusters delete-registry + +# Prune local docker cache. Run at least once a month. +[group('utils')] +prune-docker: + -docker container prune -f + -docker volume prune -af + -docker image prune -af + -docker system prune -af --volumes + +# Delete all resources in a namespace +[group('utils')] +yank-ns kube_context namespace: + # delete pods 𝙛 𝙖 𝙨 𝙩 + -kubectl delete statefulsets --all --context {{ kube_context }} --namespace {{ namespace }} --now + -kubectl delete deployments --all --context {{ kube_context }} --namespace {{ namespace }} --now + -kubectl delete pods --all --namespace {{ namespace }} --grace-period=0 --force + + # delete resources 𝙛 𝙖 𝙨 𝙩 + -kubectl delete configmap --all --context {{ kube_context }} --namespace {{ namespace }} --now + -kubectl delete secrets --all --context {{ kube_context }} --namespace {{ namespace }} --now + -kubectl delete ingress --all --context {{ kube_context }} --namespace {{ namespace }} --now + + # delete namespace NOT 𝙛 𝙖 𝙨 𝙩 :( + -kubectl delete ns {{ namespace }} --context {{ kube_context }} --grace-period=0 --force --timeout=5s + + # Too slow... yanking it + -kubectl get ns {{ namespace }} --context {{ kube_context }} -o json | jq '.spec.finalizers = []' | \ + kubectl replace --context {{ kube_context }} --raw /api/v1/namespaces/{{ namespace }}/finalize -f - + + @echo "Done" + +# Wait for all pods to be ready in a namespace +[group('utils')] +@wait-pods kube_context namespace: + echo "Waiting for all pods to be ready in cluster={{ kube_context }} namespace={{ namespace }}" + # Wait for at least one pod to appear (timeout after 5 minutes) + timeout 300 bash -c 'until kubectl get pods --context {{ kube_context }} --namespace {{ namespace }} 2>/dev/null | grep -q ""; do sleep 5; done' + + kubectl wait --for=condition=ready pod --all --timeout=300s --context {{ kube_context }} --namespace {{ namespace }} + + # if the above doesn't wait as we expect, then drop the above and use the below + # @bash packages/grid/scripts/wait_for.sh service proxy --context {{ kube_context }} --namespace {{ namespace }} + # @bash packages/grid/scripts/wait_for.sh service frontend --context {{ kube_context }} --namespace {{ namespace }} + # @bash packages/grid/scripts/wait_for.sh service postgres --context {{ kube_context }} --namespace {{ namespace }} + # @bash packages/grid/scripts/wait_for.sh service seaweedfs --context {{ kube_context }} --namespace {{ namespace }} + # @bash packages/grid/scripts/wait_for.sh service backend --context {{ kube_context }} --namespace {{ namespace }} + echo "All pods are ready" diff --git a/notebooks/.gitignore b/notebooks/.gitignore new file mode 100644 index 00000000000..c19599ba18f --- /dev/null +++ b/notebooks/.gitignore @@ -0,0 +1,2 @@ +**/secrets.json +**/settings.yaml \ No newline at end of file diff --git a/notebooks/Testing/Veilid/Alice-Python-Server.ipynb b/notebooks/Testing/Veilid/Alice-Python-Server.ipynb deleted file mode 100644 index 3e1b7065c2c..00000000000 --- a/notebooks/Testing/Veilid/Alice-Python-Server.ipynb +++ /dev/null @@ -1,281 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "c74990eb-d769-4117-8c88-e9210136606e", - "metadata": {}, -
"source": [ - "## Alice Python Server" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20df98d8-de6c-496c-b30e-6421ac99401c", - "metadata": {}, - "outputs": [], - "source": [ - "# third party\n", - "import requests" - ] - }, - { - "cell_type": "markdown", - "id": "54885cd0-f803-4911-8423-e595dc4cd7c3", - "metadata": {}, - "source": [ - "### 1. Create DHT Key and Private Route" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "41d82ff3-ceda-4569-8178-8758ef635cb0", - "metadata": {}, - "outputs": [], - "source": [ - "host = \"localhost\"\n", - "port = 4000" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d9f3cca-66a7-4e6c-a332-b38a8f5c02db", - "metadata": {}, - "outputs": [], - "source": [ - "res = requests.post(f\"http://{host}:{port}/generate_vld_key\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "81c6aa9d-26b4-4672-a059-643edfeeed95", - "metadata": {}, - "outputs": [], - "source": [ - "res.content" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4a9487e3-f5c8-468e-acd0-261e21bc3e14", - "metadata": {}, - "outputs": [], - "source": [ - "res = requests.get(f\"http://{host}:{port}/retrieve_vld_key\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5b87e9e6-244f-47f7-a31a-fa7cbce65b88", - "metadata": {}, - "outputs": [], - "source": [ - "self_vld_key = res.json()[\"message\"]\n", - "print(\"=\" * 30)\n", - "print(self_vld_key)\n", - "print(\"=\" * 30)" - ] - }, - { - "cell_type": "markdown", - "id": "a8c70d99-6814-453d-80bf-d141c40ba24e", - "metadata": {}, - "source": [ - "### Send AppMessage using VLD Key to Self" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a7495805-817d-44d9-ad62-32407b42316c", - "metadata": {}, - "outputs": [], - "source": [ - "# Cannot send messages to self, due to local routing feature not\n", - "# available in direct routing" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aca01ec6-1bbe-44b5-ad4a-053ba1edcfe6", - "metadata": {}, - "outputs": [], - "source": [ - "# json_data = {\"dht_key\": self_dht_key, \"message\": \"Hello to me again\"}\n", - "# app_message = requests.post(f\"http://{host}:{port}/app_message\", json=json_data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ff09ab92-3423-483a-abf3-51e8c2448cf9", - "metadata": {}, - "outputs": [], - "source": [ - "# app_message.content" - ] - }, - { - "cell_type": "markdown", - "id": "4d0d9e39-bf05-4ef3-b00a-2bb605f041ee", - "metadata": {}, - "source": [ - "### Send AppCall using VLD Key to Self" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b8bc9f54-b2f0-4f88-8897-f640866ba2ed", - "metadata": {}, - "outputs": [], - "source": [ - "# json_data = {\"dht_key\": self_dht_key, \"message\": \"Hello to app call\"}\n", - "# app_call = requests.post(f\"http://{host}:{port}/app_call\", json=json_data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2c1c4148-461a-459e-846a-fad332a7ce3a", - "metadata": {}, - "outputs": [], - "source": [ - "# app_call.json()" - ] - }, - { - "cell_type": "markdown", - "id": "ddba6e22-96ee-46d7-8251-fcaa4140253b", - "metadata": {}, - "source": [ - "### Ping Peer " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3de4b843-f3a2-4d96-bd48-121ae2b6f197", - "metadata": {}, - "outputs": [], - "source": [ - "peer_vld_key = str(input(\"Enter Peer VLD Key\"))" - ] - }, - { - "cell_type": "code", - "execution_count": 
null, - "id": "575c3441-cd11-4a42-ab4e-0bde3e5d5c72", - "metadata": {}, - "outputs": [], - "source": [ - "peer_vld_key" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "64d0b338-a439-4982-b739-24c056833be1", - "metadata": {}, - "outputs": [], - "source": [ - "res = requests.post(f\"http://{host}:{port}/ping/{peer_vld_key}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3ce13553-dae5-442e-bd56-2dddb526c0f2", - "metadata": {}, - "outputs": [], - "source": [ - "res.json()" - ] - }, - { - "cell_type": "markdown", - "id": "fd824cca-2a7f-4ea9-9e67-1c06d1f8bec2", - "metadata": {}, - "source": [ - "### Send AppMessage using VLD Key to Peer" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2e2c1341-d840-4429-b3e5-093d8e90365e", - "metadata": {}, - "outputs": [], - "source": [ - "json_data = {\"vld_key\": peer_vld_key, \"message\": \"How are you doing , Bob\"}\n", - "app_message = requests.post(f\"http://{host}:{port}/app_message\", json=json_data)" - ] - }, - { - "cell_type": "markdown", - "id": "153377f6-698e-4013-9be3-0833b71ee0c4", - "metadata": {}, - "source": [ - "### Send Proxy Message " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "271d7316-eaab-438c-9192-55a4e44b9dea", - "metadata": {}, - "outputs": [], - "source": [ - "res = requests.get(\n", - " f\"http://{host}:{port}/proxy\",\n", - " json={\"url\": \"https://www.google.com\", \"method\": \"GET\", \"vld_key\": self_vld_key},\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "77e1ad1d-379a-4899-8805-c703ad437c0d", - "metadata": {}, - "outputs": [], - "source": [ - "res.content" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "73c1f0b0-d240-4964-a88b-365ea89b1bdd", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.8" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/Testing/Veilid/Alice-Veilid-Peer.ipynb b/notebooks/Testing/Veilid/Alice-Veilid-Peer.ipynb deleted file mode 100644 index 0541d90a921..00000000000 --- a/notebooks/Testing/Veilid/Alice-Veilid-Peer.ipynb +++ /dev/null @@ -1,302 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "3ab7de7e-d23e-4cfc-895d-0bd02d9bc17f", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "\n", - "# stdlib\n", - "import asyncio\n", - "\n", - "# third party\n", - "import veilid\n", - "from veilid import KeyPair" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "461ef888-76c1-4256-ad69-cbf405d830e0", - "metadata": {}, - "outputs": [], - "source": [ - "app_message_queue: asyncio.Queue = asyncio.Queue()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ad857491-4cad-4a21-bf10-8035f05ef52a", - "metadata": {}, - "outputs": [], - "source": [ - "host = \"localhost\"\n", - "port = 5959" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8d5984fd-30eb-4c9f-8bb7-e0a827b6de2c", - "metadata": {}, - "outputs": [], - "source": [ - "async def noop_callback(update: veilid.VeilidUpdate):\n", - " if update.kind == veilid.VeilidUpdateKind.APP_MESSAGE:\n", - 
" print(\"Received App Message\")\n", - " await app_message_queue.put(update)\n", - "\n", - "\n", - "async def connect(host: str, port: int):\n", - " conn = await veilid.json_api_connect(host, port, noop_callback)\n", - " return conn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1296f362-60ff-471a-807b-6d96dbf36403", - "metadata": {}, - "outputs": [], - "source": [ - "conn = await connect(host, port)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "418b05e9-eaac-4bb5-b61a-e300221b10ba", - "metadata": {}, - "outputs": [], - "source": [ - "# route_id, blob = await conn.new_private_route()\n", - "# Stable and reliable route\n", - "# Creating a new one\n", - "route_id, blob = await conn.new_custom_private_route(\n", - " [veilid.CryptoKind.CRYPTO_KIND_VLD0],\n", - " veilid.Stability.RELIABLE,\n", - " veilid.Sequencing.ENSURE_ORDERED,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dade6f0b-27f7-4317-84e0-83f5db2f93ea", - "metadata": {}, - "outputs": [], - "source": [ - "route_id, blob" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "de7eb3e2", - "metadata": {}, - "outputs": [], - "source": [ - "# Creating a new routing context\n", - "# Since it is safe by default , we could remove default safety\n", - "router = await (await conn.new_routing_context()).with_default_safety()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f76a7403", - "metadata": {}, - "outputs": [], - "source": [ - "router" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ad3b3dbc", - "metadata": {}, - "outputs": [], - "source": [ - "alice_record = await router.create_dht_record(veilid.DHTSchema.dflt(1))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3922a41b", - "metadata": {}, - "outputs": [], - "source": [ - "# Creation of a Record in DHT DFLT schema , creates a new public and private key pair for the owner\n", - "# that is different from the NodeID public key\n", - "alice_private_key = alice_record.owner_secret\n", - "alice_public_key = alice_record.owner" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "77dd5ed2", - "metadata": {}, - "outputs": [], - "source": [ - "alice_private_key, alice_public_key, alice_record.key" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fb3c60bb", - "metadata": {}, - "outputs": [], - "source": [ - "# Close the record\n", - "await router.close_dht_record(alice_record.key)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "73249f4a", - "metadata": {}, - "outputs": [], - "source": [ - "key_pair = KeyPair.from_parts(key=alice_public_key, secret=alice_private_key)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b63cb28c", - "metadata": {}, - "outputs": [], - "source": [ - "key_pair" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3afe0d6a", - "metadata": {}, - "outputs": [], - "source": [ - "record_open = await router.open_dht_record(alice_record.key, key_pair)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b12b2281", - "metadata": {}, - "outputs": [], - "source": [ - "record_open.key" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11d2d05d", - "metadata": {}, - "outputs": [], - "source": [ - "await router.set_dht_value(record_open.key, 0, blob)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"833bf7e0-c3f9-4280-aa8d-5b1302b00f0f", - "metadata": {}, - "outputs": [], - "source": [ - "record_open.key[5::]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "df056893-c98d-45b4-9eff-c3e6301ce7e4", - "metadata": {}, - "outputs": [], - "source": [ - "self_prr = await conn.import_remote_private_route(blob)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "75b7efff-3b20-4446-ad8d-94ea0f5f4f26", - "metadata": {}, - "outputs": [], - "source": [ - "message_send = await router.app_message(self_prr, b\"Hello to me\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b6764bf2-30c7-4baa-81e8-c097be06dbea", - "metadata": {}, - "outputs": [], - "source": [ - "value = await app_message_queue.get()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2d52874c-ad2e-46ef-a9c9-7447661bc7fa", - "metadata": {}, - "outputs": [], - "source": [ - "assert value.kind == veilid.VeilidUpdateKind.APP_MESSAGE" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b690487b-20fb-4b36-9076-33915a184354", - "metadata": {}, - "outputs": [], - "source": [ - "value.detail.message" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9e0453a0-4c6d-4d83-aa01-232cab545653", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/Testing/Veilid/Bob-Python-Server.ipynb b/notebooks/Testing/Veilid/Bob-Python-Server.ipynb deleted file mode 100644 index 35deb460032..00000000000 --- a/notebooks/Testing/Veilid/Bob-Python-Server.ipynb +++ /dev/null @@ -1,202 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "a003292f-d8f6-4888-b47d-9e0e9b1309ec", - "metadata": {}, - "source": [ - "## Bob Python Server" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "338b22f9-938e-4628-9636-14c192e42e49", - "metadata": {}, - "outputs": [], - "source": [ - "# third party\n", - "import requests" - ] - }, - { - "cell_type": "markdown", - "id": "f1279a42-f391-4ec8-b711-e9a05d601ce2", - "metadata": {}, - "source": [ - "### 1. 
Create DHT Key and Private Route" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "755d48fe-9471-4474-b47f-d344d31604aa", - "metadata": {}, - "outputs": [], - "source": [ - "host = \"localhost\"\n", - "port = 4001" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f14915f1-2535-424b-bdd9-23efab16bb43", - "metadata": {}, - "outputs": [], - "source": [ - "res = requests.post(f\"http://{host}:{port}/generate_vld_key\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29aa597d-660e-4524-82ac-62c119e10fdf", - "metadata": {}, - "outputs": [], - "source": [ - "res.content" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "632ccceb-f742-4c8a-b00f-c55e6333fdc1", - "metadata": {}, - "outputs": [], - "source": [ - "res = requests.get(f\"http://{host}:{port}/retrieve_vld_key\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a7b8581a-a73d-4d15-97ec-2869aff00e90", - "metadata": {}, - "outputs": [], - "source": [ - "self_vld_key = res.json()[\"message\"]\n", - "print(\"=\" * 30)\n", - "print(self_vld_key)\n", - "print(\"=\" * 30)" - ] - }, - { - "cell_type": "markdown", - "id": "616f208c-fead-40cc-9391-416b59d7dc15", - "metadata": {}, - "source": [ - "### Send AppMessage using DHT Key to Self" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3e810776-491d-4170-a9c5-bf7eaf2995bd", - "metadata": {}, - "outputs": [], - "source": [ - "# Cannot send messages to self, due to local routing feature not\n", - "# available in direct routing" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "538913ae-29be-41a5-9608-4c694ccb392b", - "metadata": {}, - "outputs": [], - "source": [ - "# json_data = {\"dht_key\": self_dht_key, \"message\": \"Hello to me\"}\n", - "# app_message = requests.post(f\"http://{host}:{port}/app_message\", json=json_data)" - ] - }, - { - "cell_type": "markdown", - "id": "3ed2c114-eab7-4be7-bd89-d5ec3a7ec4c2", - "metadata": {}, - "source": [ - "### Send AppCall using DHT Key to Self" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "db49c78d-9767-4358-aa00-e740ce04e000", - "metadata": {}, - "outputs": [], - "source": [ - "# json_data = {\"dht_key\": self_dht_key, \"message\": \"Hello to app call\"}\n", - "# app_call = requests.post(f\"http://{host}:{port}/app_call\", json=json_data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9bc0a69e-7cff-42fc-8859-e5de6edacdeb", - "metadata": {}, - "outputs": [], - "source": [ - "# app_call.json()" - ] - }, - { - "cell_type": "markdown", - "id": "73eee970-bb61-4014-9380-1944587b929a", - "metadata": {}, - "source": [ - "### Send AppMessage using DHT Key to Peer" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9e5671f6-1ffd-410c-b72a-6fb39f68fe93", - "metadata": {}, - "outputs": [], - "source": [ - "peer_vld_key = input(\"Enter Peer VLD Key\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8a753450-19e3-4603-ae93-a48bfbc7f829", - "metadata": {}, - "outputs": [], - "source": [ - "json_data = {\"vld_key\": peer_vld_key, \"message\": \"Hello Alice\"}\n", - "app_message = requests.post(f\"http://{host}:{port}/app_message\", json=json_data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0cf79332-1a88-4d02-87b7-53c19d4fd1ad", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - 
"name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/Testing/Veilid/Bob-Veilid-Peer.ipynb b/notebooks/Testing/Veilid/Bob-Veilid-Peer.ipynb deleted file mode 100644 index 4c974d0bea4..00000000000 --- a/notebooks/Testing/Veilid/Bob-Veilid-Peer.ipynb +++ /dev/null @@ -1,211 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "3ab7de7e-d23e-4cfc-895d-0bd02d9bc17f", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "\n", - "# third party\n", - "from utils import get_typed_key\n", - "import veilid" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ad857491-4cad-4a21-bf10-8035f05ef52a", - "metadata": {}, - "outputs": [], - "source": [ - "host = \"localhost\"\n", - "port = 5960" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8d5984fd-30eb-4c9f-8bb7-e0a827b6de2c", - "metadata": {}, - "outputs": [], - "source": [ - "async def noop_callback(*args, **kwargs):\n", - " return\n", - "\n", - "\n", - "async def connect(host: str, port: int):\n", - " conn = await veilid.json_api_connect(host, port, noop_callback)\n", - " return conn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1296f362-60ff-471a-807b-6d96dbf36403", - "metadata": {}, - "outputs": [], - "source": [ - "conn = await connect(host, port)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "de7eb3e2", - "metadata": {}, - "outputs": [], - "source": [ - "# Creating a new routing context\n", - "router = await (await conn.new_routing_context()).with_default_safety()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cbb26e88-6935-471a-a248-a46fdcba8e18", - "metadata": {}, - "outputs": [], - "source": [ - "conn.new_routing_context?" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f76a7403", - "metadata": {}, - "outputs": [], - "source": [ - "router" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "40370b01", - "metadata": {}, - "outputs": [], - "source": [ - "# Get this DHT Key from the Previous Notebook\n", - "# paste only the string party without VLD0: prefix\n", - "alice_dht_key_str = input(\"Enter Alice's DHT Key: \")\n", - "dht_key = get_typed_key(alice_dht_key_str)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ad3b3dbc", - "metadata": {}, - "outputs": [], - "source": [ - "alice_record = await router.open_dht_record(key=dht_key, writer=None)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12fdf804", - "metadata": {}, - "outputs": [], - "source": [ - "alice_record_value = await router.get_dht_value(\n", - " key=dht_key, subkey=0, force_refresh=True\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e4e44fc7", - "metadata": {}, - "outputs": [], - "source": [ - "alice_record_value.data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c4db5bf7", - "metadata": {}, - "outputs": [], - "source": [ - "# Import the private route sent by Alice:\n", - "prr_alice = await conn.import_remote_private_route(alice_record_value.data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e249e970-30f1-4121-98e0-e1df0db37e4b", - "metadata": {}, - "outputs": [], - "source": [ - "prr_alice" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6a268e77-84d0-43ad-a9b9-21582992ea64", - "metadata": {}, - "outputs": [], - "source": [ - "message = b\"Hello Alice\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e76b3680-7e84-4e7f-8c8e-450a6de6786d", - "metadata": {}, - "outputs": [], - "source": [ - "message_send = await router.app_message(prr_alice, message)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "62bd12ec-1aef-4459-90a9-a4402a8b3a68", - "metadata": {}, - "outputs": [], - "source": [ - "message_send" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13b87c97-0c10-4112-82c7-6b283a31cc28", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/Testing/Veilid/Large-Message-Testing.ipynb b/notebooks/Testing/Veilid/Large-Message-Testing.ipynb deleted file mode 100644 index 46d1980a5c4..00000000000 --- a/notebooks/Testing/Veilid/Large-Message-Testing.ipynb +++ /dev/null @@ -1,397 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Instructions\n", - "\n", - "1. Follow these instructions from `packages/grid/veilid/development.md` to build veilid docker containers:\n", - " ```bash\n", - " cd packages/grid/veilid && docker build -f veilid.dockerfile -t veilid:0.1 .\n", - " ```\n", - "2. 
From within the `packages/grid/veilid` directory run the receiver docker container on port 4000:\n", - " ```bash\n", - " docker run -it -e DEV_MODE=True -p 4000:4000 -v $(pwd)/server:/app/server veilid:0.1\n", - " ```\n", - "3. On a separate terminal tab/window, cd into `packages/grid/veilid` directory again and run the sender docker container on port 4001:\n", - " ```bash\n", - " docker run -it -e DEV_MODE=True -p 4001:4000 -v $(pwd)/server:/app/server veilid:0.1\n", - " ```\n", - "4. Follow and run the below cells to test out sending large messages through Veilid. You may also use the **`Run All`** notebook function once the above two docker containers are up and running." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1. Set up imports" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "import json\n", - "import logging\n", - "from pprint import pprint\n", - "import random\n", - "import time\n", - "\n", - "# third party\n", - "import requests\n", - "\n", - "logging.basicConfig(level=logging.INFO, format=\"%(message)s\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2. Set up receiver" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "RECEIVER_HOST = \"localhost\"\n", - "RECEIVER_PORT = 4000\n", - "RECEIVER_BASE_ADDRESS = f\"http://{RECEIVER_HOST}:{RECEIVER_PORT}\"\n", - "\n", - "requests.post(f\"{RECEIVER_BASE_ADDRESS}/generate_vld_key\")\n", - "res = requests.get(f\"{RECEIVER_BASE_ADDRESS}/retrieve_vld_key\")\n", - "receiver_vld_key = res.json()[\"message\"]\n", - "logging.info(f\"{'=' * 30}\\n{receiver_vld_key}\\n{'=' * 30}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3. Set up sender" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "SENDER_HOST = \"localhost\"\n", - "SENDER_PORT = 4001\n", - "SENDER_BASE_ADDRESS = f\"http://{SENDER_HOST}:{SENDER_PORT}\"\n", - "\n", - "requests.post(f\"{SENDER_BASE_ADDRESS}/generate_vld_key\")\n", - "res = requests.get(f\"{SENDER_BASE_ADDRESS}/retrieve_vld_key\")\n", - "sender_vld_key = res.json()[\"message\"]\n", - "logging.info(f\"{'=' * 30}\\n{sender_vld_key}\\n{'=' * 30}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4. 
Declare utility functions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def send_test_request(request_size_bytes, response_size_bytes):\n", - " \"\"\"\n", - " Send a test request of the specified size and receive a response of.\n", - "\n", - " Args:\n", - " request_size_bytes (int): Size of the request body in bytes.\n", - " response_size_bytes (int): Expected size of the response body in bytes.\n", - "\n", - " Returns:\n", - " tuple: A tuple containing the total transfer size, total time taken and success status.\n", - " \"\"\"\n", - " message = build_vld_message(request_size_bytes, response_size_bytes)\n", - " json_data = {\n", - " \"vld_key\": receiver_vld_key,\n", - " \"message\": message,\n", - " }\n", - "\n", - " logging.info(f\"Sending message of size {len(message) // 1024} KB...\")\n", - "\n", - " start = time.time()\n", - " app_call = requests.post(f\"{SENDER_BASE_ADDRESS}/app_call\", json=json_data)\n", - " end = time.time()\n", - "\n", - " response = app_call.content\n", - " response_len = len(response)\n", - " response = response.decode()\n", - " response_pretty = (\n", - " response if len(response) <= 100 else f\"{response[:50]}...{response[-50:]}\"\n", - " )\n", - "\n", - " total_xfer = request_size_bytes + response_size_bytes\n", - " total_time = round(end - start, 2)\n", - "\n", - " success = \"received_request_body_length\" in response\n", - " logging.info(f\"[{total_time}s] Response({response_len} B): {response_pretty}\")\n", - " return total_xfer, total_time, success\n", - "\n", - "\n", - "def build_vld_message(request_size_bytes, response_size_bytes):\n", - " \"\"\"\n", - " Build a message of length `request_size_bytes`. Padded with random characters.\n", - "\n", - " Args:\n", - " request_size_bytes (int): Size of the request body in bytes.\n", - " response_size_bytes (int): Expected size of the response body in bytes.\n", - "\n", - " Returns:\n", - " dict: The constructed request body.\n", - " \"\"\"\n", - " endpoint = f\"{RECEIVER_BASE_ADDRESS}/test_veilid_streamer\"\n", - " message = {\n", - " \"method\": \"POST\",\n", - " \"url\": endpoint,\n", - " \"json\": {\n", - " \"expected_response_length\": response_size_bytes,\n", - " \"random_padding\": \"\",\n", - " },\n", - " }\n", - " padding_length = request_size_bytes - len(json.dumps(message))\n", - " random_padding = generate_random_alphabets(padding_length)\n", - " message[\"json\"][\"random_padding\"] = random_padding\n", - " return json.dumps(message)\n", - "\n", - "\n", - "def generate_random_alphabets(length):\n", - " return \"\".join([random.choice(\"abcdefghijklmnopqrstuvwxyz\") for _ in range(length)])\n", - "\n", - "\n", - "def bytes_to_human_readable(size_in_bytes):\n", - " if size_in_bytes >= (2**20):\n", - " size_in_mb = size_in_bytes / (2**20)\n", - " return f\"{size_in_mb:.2f} MB\"\n", - " else:\n", - " size_in_kb = size_in_bytes / (2**10)\n", - " return f\"{size_in_kb:.2f} KB\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 5. 
Run manual tests" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "MIN_MESSAGE_SIZE = 1024\n", - "MAX_CHUNK_SIZE = 32744 # minus 24 bytes for single chunk header\n", - "\n", - "\n", - "def get_random_single_chunk_size():\n", - " return random.randint(MIN_MESSAGE_SIZE, MAX_CHUNK_SIZE)\n", - "\n", - "\n", - "def get_random_multi_chunk_size():\n", - " return random.randint(2 * MAX_CHUNK_SIZE, 3 * MAX_CHUNK_SIZE)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def test_for_single_chunk_request_and_single_chunk_response():\n", - " request_size = get_random_single_chunk_size()\n", - " response_size = get_random_single_chunk_size()\n", - " total_xfer, total_time, success = send_test_request(request_size, response_size)\n", - " result = \"Success\" if success else \"Failure\"\n", - " logging.info(\n", - " f\"[{request_size} B ⇅ {response_size} B] \"\n", - " f\"Transferred {bytes_to_human_readable(total_xfer)} \"\n", - " f\"in {total_time}s; \"\n", - " f\"Result: {result}\"\n", - " )\n", - "\n", - "\n", - "test_for_single_chunk_request_and_single_chunk_response()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def test_for_multi_chunk_request_and_single_chunk_response():\n", - " request_size = get_random_multi_chunk_size()\n", - " response_size = get_random_single_chunk_size()\n", - " total_xfer, total_time, success = send_test_request(request_size, response_size)\n", - " result = \"Success\" if success else \"Failure\"\n", - " logging.info(\n", - " f\"[{request_size} B ⇅ {response_size} B] \"\n", - " f\"Transferred {bytes_to_human_readable(total_xfer)} \"\n", - " f\"in {total_time}s; \"\n", - " f\"Result: {result}\"\n", - " )\n", - "\n", - "\n", - "test_for_multi_chunk_request_and_single_chunk_response()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def test_for_single_chunk_request_and_multi_chunk_response():\n", - " request_size = get_random_single_chunk_size()\n", - " response_size = get_random_multi_chunk_size()\n", - " total_xfer, total_time, success = send_test_request(request_size, response_size)\n", - " result = \"Success\" if success else \"Failure\"\n", - " logging.info(\n", - " f\"[{request_size} B ⇅ {response_size} B] \"\n", - " f\"Transferred {bytes_to_human_readable(total_xfer)} \"\n", - " f\"in {total_time}s; \"\n", - " f\"Result: {result}\"\n", - " )\n", - "\n", - "\n", - "test_for_single_chunk_request_and_multi_chunk_response()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def test_for_multi_chunk_request_and_multi_chunk_response():\n", - " request_size = get_random_multi_chunk_size()\n", - " response_size = get_random_multi_chunk_size()\n", - " total_xfer, total_time, success = send_test_request(request_size, response_size)\n", - " result = \"Success\" if success else \"Failure\"\n", - " logging.info(\n", - " f\"[{request_size} B ⇅ {response_size} B] \"\n", - " f\"Transferred {bytes_to_human_readable(total_xfer)} \"\n", - " f\"in {total_time}s; \"\n", - " f\"Result: {result}\"\n", - " )\n", - "\n", - "\n", - "test_for_multi_chunk_request_and_multi_chunk_response()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 6. 
Run benchmarks on requests-responses of sizes from 1 KB to 512 MB" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "benchmarks = {}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Baseline tests (Tests with single chunk messages i.e. 1 KB to 32 KB)\n", - "for powers_of_two in range(0, 6): # Test from 1 KB to 32 KB\n", - " message_size = 2**powers_of_two * 1024\n", - " total_xfer, total_time, success = send_test_request(message_size, message_size)\n", - " if success:\n", - " benchmarks[bytes_to_human_readable(total_xfer)] = total_time\n", - "pprint(benchmarks, sort_dicts=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Tests with smaller messages\n", - "for powers_of_two in range(6, 13): # Test from 64 KB to 4 MB\n", - " message_size = 2**powers_of_two * 1024\n", - " total_xfer, total_time, success = send_test_request(message_size, message_size)\n", - " if success:\n", - " benchmarks[bytes_to_human_readable(total_xfer)] = total_time\n", - "pprint(benchmarks, sort_dicts=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Tests with larger messages\n", - "for powers_of_two in range(13, 16): # Test from 8 MB to 32 MB\n", - " message_size = 2**powers_of_two * 1024\n", - " total_xfer, total_time, success = send_test_request(message_size, message_size)\n", - " if success:\n", - " benchmarks[bytes_to_human_readable(total_xfer)] = total_time\n", - "pprint(benchmarks, sort_dicts=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Tests with super large messages\n", - "for powers_of_two in range(16, 19): # Test from 64 MB to 256 MB\n", - " message_size = 2**powers_of_two * 1024\n", - " total_xfer, total_time, success = send_test_request(message_size, message_size)\n", - " if success:\n", - " benchmarks[bytes_to_human_readable(total_xfer)] = total_time\n", - "pprint(benchmarks, sort_dicts=False)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "PySyft", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.7" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/Testing/Veilid/Veilid Route-Connection-Testing.ipynb b/notebooks/Testing/Veilid/Veilid Route-Connection-Testing.ipynb deleted file mode 100644 index bd2ea78c1c6..00000000000 --- a/notebooks/Testing/Veilid/Veilid Route-Connection-Testing.ipynb +++ /dev/null @@ -1,116 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "a8d2d5a4-5512-4a24-aafd-7133d64c22fc", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5a5a1b05-336d-4523-ae85-4022783acf85", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "from syft.client.client import VeilidConnection" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "963f96e5-8d62-44b2-a975-faa23624bbd4", - "metadata": {}, - "outputs": [], - "source": [ - "veilid_conn = VeilidConnection(dht_key=\"test\")" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "id": "f2d6083b-527f-46be-a582-15f4404950b5", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "from syft.service.network.routes import connection_to_route\n", - "from syft.service.network.routes import route_to_connection" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9e8e508f-c527-43f4-98d1-7e7c6ef0dfb3", - "metadata": {}, - "outputs": [], - "source": [ - "veilid_route = connection_to_route(veilid_conn)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7aba2e02-46c7-46a2-ab11-9253e05fd2fe", - "metadata": {}, - "outputs": [], - "source": [ - "veilid_route.dht_key" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d50eec2-a7ed-49f6-b90c-082cd8c40e0a", - "metadata": {}, - "outputs": [], - "source": [ - "re_veilid_conn = route_to_connection(veilid_route)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ea5d2d73-1cbc-496a-a6b6-4136e9423394", - "metadata": {}, - "outputs": [], - "source": [ - "re_veilid_conn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a535caf0-d1e6-40b9-842b-066ce2b6b897", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/Testing/Veilid/Veilid-Connection-Test.ipynb b/notebooks/Testing/Veilid/Veilid-Connection-Test.ipynb deleted file mode 100644 index c38143c7c35..00000000000 --- a/notebooks/Testing/Veilid/Veilid-Connection-Test.ipynb +++ /dev/null @@ -1,554 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "df3d4dbb-e179-4995-9507-1f82cb417fc5", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "from syft.client.client import connect" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cc7f02fb-b4f8-4615-a39f-dca2752b58b2", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client = sy.login(email=\"info@openmined.org\", password=\"changethis\", port=8080)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4d9ce704-36e6-455b-a633-fe943848420c", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.api.services.veilid.generate_dht_key()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ef053ef6-e31a-4634-8d5e-2e8ff2e002de", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.api.services.veilid.retrieve_dht_key()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "670f2e09-3409-4545-be3a-17e1b2a97cd2", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client = sy.login_as_guest(\n", - " dht_key=\"VLD0:OBeFkuuQz6LIofeIIzC5Y-zwR96NoKqbojqGCcNKu8c\",\n", - " vld_forward_proxy=\"http://localhost:4000\",\n", - " vld_reverse_proxy=\"http://proxy\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "63a9a5f3-a004-4523-bf70-e3ebee06408e", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.api" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"b2195bbd-5ef1-4a53-8886-1b2ea6854bc3", - "metadata": {}, - "outputs": [], - "source": [ - "connect_client = connect(\n", - " dht_key=\"VLD0:OBeFkuuQz6LIofeIIzC5Y-zwR96NoKqbojqGCcNKu8c\",\n", - " vld_forward_proxy=\"http://localhost:4000\",\n", - " vld_reverse_proxy=\"http://proxy\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "71261091-1cfc-428f-9087-7f24395a2750", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client = sy.login(\n", - " dht_key=\"VLD0:OBeFkuuQz6LIofeIIzC5Y-zwR96NoKqbojqGCcNKu8c\",\n", - " vld_forward_proxy=\"http://localhost:4000\",\n", - " vld_reverse_proxy=\"http://proxy\",\n", - " email=\"info@openmined.org\",\n", - " password=\"changethis\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ec181b37-71cc-411b-8b6c-0f149e45c79c", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.api" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "293b55c9-9f9b-4702-b74f-6dfe9b5eee8d", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2f469470-6280-466f-85e3-ed655484178e", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client = sy.login_as_guest(port=8080)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "141a0871-d322-4508-b0b1-68ad1654dcda", - "metadata": {}, - "outputs": [], - "source": [ - "res = sy.serialize(domain_client.api, to_bytes=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "61192beb-a4f7-495f-adf5-f2294ec5a199", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "af269af3-f55b-4f3d-8cc1-cbe8ee10d327", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "95facdab-92ab-42cf-b976-a9b646ae2901", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8de7d433-c26b-43e9-9a45-d960cfb18645", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7c7a97e3-9585-485f-ad41-2982bf935564", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "eb073a52-1c7a-4c02-bce3-0782c6f89064", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "98f58488-e927-4e44-a885-04740f8c8b31", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5adb6185-9f49-444c-ae26-702e17bcfabf", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6ed88528-1e23-4585-89ca-0e3cfa098d37", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "985f6211-efa8-4850-b2fa-280b064032ff", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3f3abeb1-228c-45ff-acc9-fbc2314c6e31", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "83aee788-4a14-4e41-b924-53dcbebe8e14", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d2d9fa5-9098-4d79-a35e-2da46f615ef7", - "metadata": 
{}, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", - "id": "18613355-f3bd-45c3-8ac3-97165dd6e28d", - "metadata": {}, - "source": [ - "## Debugging" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f2d4a8ea-f9e5-4411-bf68-0d4ed25f3fa6", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "77f7d4b4-7ea2-4a61-8a67-a2dacbfd054f", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9bf0aa58-b6a1-463a-8d14-76f74dcc6d7c", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "raw", - "id": "1142383d-82df-49f5-ad5f-ede5fde39b20", - "metadata": {}, - "source": [ - "import lzma" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c8026971-b496-4a24-b84f-b57d898f15d9", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "import lzma" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "69542e59-2ba3-4721-8c39-192258180114", - "metadata": {}, - "outputs": [], - "source": [ - "len(res)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "61f3fb0e-50e1-4cca-94cf-490e5bde974b", - "metadata": {}, - "outputs": [], - "source": [ - "comp = lzma.compress(res)\n", - "print(len(comp))\n", - "decom = lzma.decompress(comp)\n", - "print(len(decom))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ee53df6e-e979-4011-8fe7-24141f7df001", - "metadata": {}, - "outputs": [], - "source": [ - "# third party\n", - "from pympler import asizeof" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8db1d8a9-ee4b-4efa-a69b-1d735ceaf129", - "metadata": {}, - "outputs": [], - "source": [ - "asizeof.asizeof(domain_client.api)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f3bdfb82-687e-49a7-a268-2bb0e74364cc", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "import sys\n", - "\n", - "# third party\n", - "from pympler import asizeof" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "41944d4d-7613-461e-a6e7-905514bb08da", - "metadata": {}, - "outputs": [], - "source": [ - "for attr_name, attr_value in domain_client.api.__dict__.items():\n", - " if attr_name != \"refresh_api_callback\":\n", - " res = sy.serialize(attr_value, to_bytes=True)\n", - " immediate_size = sys.getsizeof(res)\n", - " total_size = asizeof.asizeof(res)\n", - " print(\n", - " f\"{attr_name}: immediate size = {immediate_size} bytes, total size = {total_size} bytes\"\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a1d4ad18-7fb0-4ec7-966d-cf86a6b280f1", - "metadata": {}, - "outputs": [], - "source": [ - "count = 0\n", - "for i in domain_client.api.lib_endpoints.values():\n", - " count += 1\n", - " print(count, \" \", i.module_path)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "59ad85c9-6acb-4fbd-b9e7-25a0e34d8f6c", - "metadata": {}, - "outputs": [], - "source": [ - "len(res)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8ee3d56b-298e-4706-9e93-055960f41654", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "import zlib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d4509185-ba56-42d4-aaf3-84341cdeaa52", - "metadata": {}, - "outputs": [], - "source": [ - "%%time\n", - "c = zlib.compress(res)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"b9b7539e-06ce-4a92-bf8e-6a65331f3ee1", - "metadata": {}, - "outputs": [], - "source": [ - "len(c)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1eb8fc1d-1d8a-4301-bd36-618393e6ff8a", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "import lzma" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d8b9cabe-382d-4085-861d-ca55d99a938e", - "metadata": {}, - "outputs": [], - "source": [ - "%%time\n", - "lc = lzma.compress(res)\n", - "print(len(lc))\n", - "ld = lzma.decompress(lc)\n", - "print(len(ld))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4a8462ce-6de8-472b-8685-72665f36f940", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "import gzip" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f357c7d-059d-46b5-bf03-c8acb5a3e7df", - "metadata": {}, - "outputs": [], - "source": [ - "%%time\n", - "c2 = gzip.compress(res)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9b4647a5-ec95-4f22-9ac2-104f30600cf5", - "metadata": {}, - "outputs": [], - "source": [ - "len(sy.serialize(domain_client.api.endpoints, to_bytes=True))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d7b89fe-b270-40c0-bc18-066f9be62569", - "metadata": {}, - "outputs": [], - "source": [ - "# res = veilid_conn.get_node_metadata(credentials=None)\n", - "res = b\"\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4f6a98cf-ad5b-4ad0-87c7-b8cdc7d0678d", - "metadata": {}, - "outputs": [], - "source": [ - "res" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3ab82cd6-c080-46dd-b15d-da0c904e967e", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "import json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cba2d15b-826d-4f6b-82d1-bb70ba0e439d", - "metadata": {}, - "outputs": [], - "source": [ - "type(json.loads(res))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cbfda25f-5b2e-4c55-a906-1ca78497623f", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/Testing/Veilid/Veilid-Gateway-Testing.ipynb b/notebooks/Testing/Veilid/Veilid-Gateway-Testing.ipynb deleted file mode 100644 index 0e3754724cd..00000000000 --- a/notebooks/Testing/Veilid/Veilid-Gateway-Testing.ipynb +++ /dev/null @@ -1,226 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "a300f01b-8357-43ca-9c64-c489839603e8", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9aeed160-94d3-49c1-98c5-7795c6df7280", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client = sy.login(email=\"info@openmined.org\", password=\"changethis\", port=9082)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7a79ee9-68bf-4a93-935e-32f42e332f97", - "metadata": {}, - "outputs": [], - "source": [ - "gateway_client = sy.login(email=\"info@openmined.org\", password=\"changethis\", 
port=9081)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b2d66293-b573-4cdf-8721-9d91a620dd9d", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.api.services.veilid.generate_vld_key()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e3b10d70-1c30-42e2-98bd-86af6a228455", - "metadata": {}, - "outputs": [], - "source": [ - "gateway_client.api.services.veilid.generate_vld_key()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7500007e-e5f6-4c4a-bbc3-46f2357d2433", - "metadata": {}, - "outputs": [], - "source": [ - "domain_route = domain_client.api.services.veilid.get_veilid_route()\n", - "gateway_route = gateway_client.api.services.veilid.get_veilid_route()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "82bee827-ea59-4255-9c32-5b9e10e5676f", - "metadata": {}, - "outputs": [], - "source": [ - "gateway_route.vld_key" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "960f6b4c-3073-45ec-93cf-54c384262d0b", - "metadata": {}, - "outputs": [], - "source": [ - "domain_route.vld_key" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e3e916e7-2897-4d63-b8b8-a913a2baed8a", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.connect_to_gateway(gateway_client, protocol=\"veilid\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ee4b39c1-01d5-4cae-9115-a0d83667c31a", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.peers[0].node_routes[0].vld_key" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6c56a7d4-88dc-43e0-b092-4c443734e3c3", - "metadata": {}, - "outputs": [], - "source": [ - "gateway_client.api.services.network.get_all_peers()[0].node_routes[0].vld_key" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8febe455-4b82-478f-85b5-d1e2e104fb1a", - "metadata": {}, - "outputs": [], - "source": [ - "gateway_client.peers" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f6871cb-37bf-4570-94cd-b993906c11f8", - "metadata": {}, - "outputs": [], - "source": [ - "domain_peer = gateway_client.api.services.network.get_all_peers()[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "91c303f5-36af-4a65-a81a-7cb24f5c3494", - "metadata": {}, - "outputs": [], - "source": [ - "connection = gateway_client.connection.with_proxy(domain_peer.id)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5b744210-dddb-4a20-a32e-146b0a92678c", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "from syft.node.credentials import SyftSigningKey" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "66b4f4c5-780d-4259-8360-2692ade1358f", - "metadata": {}, - "outputs": [], - "source": [ - "metadata = connection.get_node_metadata(credentials=SyftSigningKey.generate())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "508e9374-37ca-412b-af34-631994f80ff7", - "metadata": {}, - "outputs": [], - "source": [ - "proxy_client = gateway_client.domains[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2081964a-12da-428d-b543-7ba1a4c82600", - "metadata": {}, - "outputs": [], - "source": [ - "admin_client = proxy_client.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "68a6e4bb-d6f6-4173-a8bb-dc70ea52c0b5", - "metadata": {}, - "outputs": [], - 
"source": [ - "admin_client" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "760f17f0-b44c-4e71-ae93-ba9f4c291fd9", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/Testing/Veilid/utils.py b/notebooks/Testing/Veilid/utils.py deleted file mode 100644 index 814ff62758a..00000000000 --- a/notebooks/Testing/Veilid/utils.py +++ /dev/null @@ -1,12 +0,0 @@ -# third party -import veilid - - -def get_typed_key(key: str) -> veilid.types.TypedKey: - return veilid.types.TypedKey.from_value( - kind=veilid.CryptoKind.CRYPTO_KIND_VLD0, value=key - ) - - -# state = await conn.get_state() -# state.config.config diff --git a/notebooks/api/0.8/00-load-data.ipynb b/notebooks/api/0.8/00-load-data.ipynb index 23faa4756a4..372746f368a 100644 --- a/notebooks/api/0.8/00-load-data.ipynb +++ b/notebooks/api/0.8/00-load-data.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Loading data into Syft Domain Server as a Data Owner\n", + "# Loading data into Syft Datasite Server as a Data Owner\n", "\n", "Welcome to Syft! This tutorial consists of 4 Jupyter notebooks that covers the basics of Syft which includes\n", "* [Uploading a private dataset as a Data Owner](./00-load-data.ipynb)\n", @@ -30,7 +30,7 @@ }, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -55,7 +55,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Launch a Syft Domain Server" + "### Launch a Syft Datasite Server" ] }, { @@ -66,8 +66,8 @@ }, "outputs": [], "source": [ - "# Launch a fresh domain server named \"test-domain-1\" in dev mode on the local machine\n", - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True, reset=True)" + "# Launch a fresh datasite server named \"test-datasite-1\" in dev mode on the local machine\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", dev_mode=True, reset=True)" ] }, { @@ -78,8 +78,17 @@ }, "outputs": [], "source": [ - "# log into the node with default root credentials\n", - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "# log into the server with default root credentials\n", + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client" ] }, { @@ -91,7 +100,7 @@ "outputs": [], "source": [ "# List the available API\n", - "domain_client.api" + "datasite_client.api" ] }, { @@ -114,7 +123,7 @@ "outputs": [], "source": [ "# Check for existing Data Subjects\n", - "data_subjects = domain_client.data_subject_registry.get_all()" + "data_subjects = datasite_client.data_subject_registry.get_all()" ] }, { @@ -206,7 +215,7 @@ "outputs": [], "source": [ "# Adds the data subject and all its members to the registry\n", - "response = domain_client.data_subject_registry.add_data_subject(country)\n", + "response = 
datasite_client.data_subject_registry.add_data_subject(country)\n", "response" ] }, @@ -230,7 +239,7 @@ "outputs": [], "source": [ "# Lets look at the data subjects added to the data\n", - "data_subjects = domain_client.data_subject_registry.get_all()\n", + "data_subjects = datasite_client.data_subject_registry.get_all()\n", "data_subjects" ] }, @@ -348,7 +357,8 @@ }, "outputs": [], "source": [ - "dataset.set_description(\"Canada Trade Data\")" + "dataset.set_description(\"Canada Trade Data Markdown Description\")\n", + "dataset.set_summary(\"Canada Trade Data Short Summary\")" ] }, { @@ -374,7 +384,7 @@ "dataset.add_contributor(\n", " name=\"Andrew Trask\",\n", " email=\"andrew@openmined.org\",\n", - " note=\"Andrew runs this domain and prepared the dataset metadata.\",\n", + " note=\"Andrew runs this datasite and prepared the dataset metadata.\",\n", ")\n", "\n", "dataset.add_contributor(\n", @@ -438,7 +448,7 @@ "ctf.add_contributor(\n", " name=\"Andrew Trask\",\n", " email=\"andrew@openmined.org\",\n", - " note=\"Andrew runs this domain and prepared the asset.\",\n", + " note=\"Andrew runs this datasite and prepared the asset.\",\n", ")" ] }, @@ -538,7 +548,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Upload Syft Dataset to Domain Server" + "### Upload Syft Dataset to Datasite Server" ] }, { @@ -549,7 +559,17 @@ }, "outputs": [], "source": [ - "domain_client.upload_dataset(dataset)" + "upload_res = datasite_client.upload_dataset(dataset)\n", + "upload_res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(upload_res, sy.SyftSuccess), upload_res" ] }, { @@ -558,8 +578,8 @@ "metadata": {}, "outputs": [], "source": [ - "# We can list all the datasets on the Domain Server by invoking the following\n", - "datasets = domain_client.datasets.get_all()\n", + "# We can list all the datasets on the Datasite Server by invoking the following\n", + "datasets = datasite_client.datasets.get_all()\n", "datasets" ] }, @@ -571,7 +591,7 @@ }, "outputs": [], "source": [ - "assert len(datasets) == 1" + "assert len(datasets) == 1, len(datasets)" ] }, { @@ -589,7 +609,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Reading the Syft Dataset from Domain Server\n", + "### Reading the Syft Dataset from Datasite Server\n", "\n", "Following the logical hierarchy of `Dataset`, `Asset`, and its variant, we can read the data as follows" ] @@ -603,7 +623,7 @@ "outputs": [], "source": [ "# Reading the mock dataset\n", - "mock = domain_client.datasets[0].assets[0].mock" + "mock = datasite_client.datasets[0].assets[0].mock" ] }, { @@ -627,7 +647,7 @@ "source": [ "# Reading the real dataset\n", "# NOTE: Private data can be accessed by the Data Owners, but NOT the Data Scientists\n", - "real = domain_client.datasets[0].assets[0].data" + "real = datasite_client.datasets[0].assets[0].data" ] }, { @@ -645,12 +665,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a new Data Scientist account on the Domain Server\n", + "### Create a new Data Scientist account on the Datasite Server\n", "\n", "Signup is disabled by default.\n", - "An Admin/DO can enable it by `domain_client.settings.allow_guest_signup(enable=True)`\n", + "An Admin/DO can enable it by `datasite_client.settings.allow_guest_signup(enable=True)`\n", "\n", - "Refer to notebook [07-domain-register-control-flow](./07-domain-register-control-flow.ipynb) for more information." 
+ "Refer to notebook [07-datasite-register-control-flow](./07-datasite-register-control-flow.ipynb) for more information." ] }, { @@ -659,7 +679,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.register(\n", + "datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -677,9 +697,9 @@ }, "outputs": [], "source": [ - "# Cleanup local domain server\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "# Cleanup local datasite server\n", + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { @@ -692,7 +712,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "syft_3.12", "language": "python", "name": "python3" }, @@ -706,7 +726,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.12.4" }, "toc": { "base_numbering": 1, diff --git a/notebooks/api/0.8/01-submit-code.ipynb b/notebooks/api/0.8/01-submit-code.ipynb index 71f40191867..5535d7996ce 100644 --- a/notebooks/api/0.8/01-submit-code.ipynb +++ b/notebooks/api/0.8/01-submit-code.ipynb @@ -22,7 +22,7 @@ }, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -36,11 +36,10 @@ "outputs": [], "source": [ "# third party\n", - "import pandas as pd\n", "\n", "# syft absolute\n", "import syft as sy\n", - "from syft.client.api import NodeIdentity\n", + "from syft.client.api import ServerIdentity\n", "from syft.service.request.request import RequestStatus\n", "\n", "sy.requires(SYFT_VERSION)" @@ -50,7 +49,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Launch a Syft Domain Server" + "### Launch a Syft Datasite Server" ] }, { @@ -61,15 +60,15 @@ }, "outputs": [], "source": [ - "# Launch and connect to test-domain-1 server we setup in the previous notebook\n", - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True)" + "# Launch and connect to test-datasite-1 server we setup in the previous notebook\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=\"auto\", dev_mode=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Every `node` exposes a \"guest\" client that allows some basic read operations on the node without creating an account." + "Every `server` exposes a \"guest\" client that allows some basic read operations on the server without creating an account." 
] }, { @@ -80,7 +79,7 @@ }, "outputs": [], "source": [ - "guest_domain_client = node.client" + "guest_datasite_client = server.client" ] }, { @@ -92,7 +91,7 @@ "outputs": [], "source": [ "# Print this to see the few commands that are available for the guest client\n", - "guest_domain_client" + "guest_datasite_client" ] }, { @@ -102,14 +101,14 @@ "outputs": [], "source": [ "# This will return the public credentials of the guest client\n", - "guest_credentials = guest_domain_client.credentials" + "guest_credentials = guest_datasite_client.credentials" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Login into the Domain with Data Scientist credentials that we created in [00-load-data.ipynb](./00-load-data.ipynb) notebook" + "Login into the Datasite with Data Scientist credentials that we created in [00-load-data.ipynb](./00-load-data.ipynb) notebook" ] }, { @@ -120,7 +119,7 @@ }, "outputs": [], "source": [ - "jane_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")\n", + "jane_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")\n", "jane_client" ] }, @@ -139,7 +138,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Explore available Syft Datasets in the Domain Node" + "### Explore available Syft Datasets in the Datasite Server" ] }, { @@ -222,18 +221,7 @@ "metadata": {}, "outputs": [], "source": [ - "# cannot access the private data\n", - "asset.data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Test\n", - "assert not isinstance(asset.data, pd.DataFrame) # returns a permission error" + "assert asset.data is None" ] }, { @@ -306,7 +294,7 @@ " dp.enable_features(\"contrib\")\n", "\n", " aggregate = 0.0\n", - " base_lap = dp.m.make_base_laplace(\n", + " base_lap = dp.m.make_laplace(\n", " dp.atom_domain(T=float),\n", " dp.absolute_distance(T=float),\n", " scale=5.0,\n", @@ -338,7 +326,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can validate your code against the mock data, before submitting it to the Domain Server" + "You can validate your code against the mock data, before submitting it to the Datasite Server" ] }, { @@ -385,11 +373,11 @@ "source": [ "# Tests\n", "assert len(sum_trade_value_mil.kwargs) == 1\n", - "node_identity = NodeIdentity.from_api(jane_client.api)\n", - "assert node_identity in sum_trade_value_mil.kwargs\n", - "assert \"trade_data\" in sum_trade_value_mil.kwargs[node_identity]\n", + "server_identity = ServerIdentity.from_api(jane_client.api)\n", + "assert server_identity in sum_trade_value_mil.kwargs\n", + "assert \"trade_data\" in sum_trade_value_mil.kwargs[server_identity]\n", "assert (\n", - " sum_trade_value_mil.input_policy_init_kwargs[node_identity][\"trade_data\"]\n", + " sum_trade_value_mil.input_policy_init_kwargs[server_identity][\"trade_data\"]\n", " == asset.action_id\n", ")" ] @@ -409,7 +397,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Submit your code to the Domain Server\n", + "### Submit your code to the Datasite Server\n", "\n", "We start by creating new Syft Project" ] @@ -461,7 +449,8 @@ "outputs": [], "source": [ "# create the same code request with the exact same function should return an error\n", - "result = new_project.create_code_request(sum_trade_value_mil, jane_client)" + "with sy.raises(sy.SyftException, show=True):\n", + " result = new_project.create_code_request(sum_trade_value_mil, jane_client)" ] }, { @@ -481,8 +470,8 @@ }, "outputs": 
[], "source": [ - "# Once we start the project, it will submit the project along with the code request to the Domain Server\n", - "project = new_project.start()\n", + "# Once we start the project, it will submit the project along with the code request to the Datasite Server\n", + "project = new_project.send()\n", "project" ] }, @@ -494,7 +483,7 @@ }, "outputs": [], "source": [ - "assert isinstance(project, sy.service.project.project.Project)" + "assert isinstance(project, sy.service.project.project.Project), project" ] }, { @@ -537,19 +526,8 @@ }, "outputs": [], "source": [ - "result = jane_client.code.sum_trade_value_mil(trade_data=asset)\n", - "result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "assert isinstance(result, sy.SyftError)" + "with sy.raises(sy.SyftException, show=True):\n", + " jane_client.code.sum_trade_value_mil(trade_data=asset)" ] }, { @@ -569,10 +547,10 @@ }, "outputs": [], "source": [ - "# Cleanup local domain server\n", + "# Cleanup local datasite server\n", "\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { @@ -584,11 +562,6 @@ } ], "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, "language_info": { "codemirror_mode": { "name": "ipython", @@ -599,7 +572,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.12.5" }, "toc": { "base_numbering": 1, diff --git a/notebooks/api/0.8/02-review-code-and-approve.ipynb b/notebooks/api/0.8/02-review-code-and-approve.ipynb index 4faedd441c0..1aa6b67683e 100644 --- a/notebooks/api/0.8/02-review-code-and-approve.ipynb +++ b/notebooks/api/0.8/02-review-code-and-approve.ipynb @@ -22,7 +22,7 @@ }, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -45,7 +45,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Login to Syft Domain Server" + "### Login to Syft Datasite Server" ] }, { @@ -56,8 +56,8 @@ }, "outputs": [], "source": [ - "# Launch and connect to test-domain-1 server we setup in the previous notebook\n", - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True)" + "# Launch and connect to test-datasite-1 server we setup in the previous notebook\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=\"auto\", dev_mode=True)" ] }, { @@ -68,17 +68,17 @@ }, "outputs": [], "source": [ - "# Log into the node with default root credentials\n", - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "# Log into the server with default root credentials\n", + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Selecting Project in the Syft Domain Server\n", + "### Selecting Project in the Syft Datasite Server\n", "\n", - "Let's see all the projects that are created by Data Scientists in this Domain Server" + "Let's see all the projects that are created by Data Scientists in this Datasite Server" ] }, { @@ -89,7 +89,7 @@ }, "outputs": [], "source": [ - "domain_client.projects" + "datasite_client.projects" ] }, { @@ -101,7 +101,7 @@ "outputs": [], "source": [ "# Select the project you want to 
work with\n", - "project = domain_client.projects[0]\n", + "project = datasite_client.projects[0]\n", "project" ] }, @@ -290,6 +290,21 @@ "print(op.policy_code)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Policies provided by Syft are available before approving the code,\n", + "# Custom policies are only safe to use once the code is approved.\n", + "\n", + "assert func.output_policy is not None\n", + "assert func.input_policy is not None\n", + "\n", + "func.output_policy" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -310,7 +325,7 @@ "outputs": [], "source": [ "# Let's grab the actual executable function that was submitted by the user\n", - "users_function = func.unsafe_function" + "users_function = func.run" ] }, { @@ -346,23 +361,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Sharing results back to the Data Scientist" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "By calling this function we attach the result of the function to the original request\n", + "### Approving a request\n", "\n", - "`request.accept_by_depositing_result(real_result)`" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's say we accidentally submit incorrect results, we can correct it by using override it using the `force=True` flag" + "By calling `request.approve()`, the data scientist can execute their function on the real data, and obtain the result" ] }, { @@ -374,31 +375,7 @@ "outputs": [], "source": [ "# Uploaded wrong result - we shared mock_result instead of the real_result\n", - "result = request.accept_by_depositing_result(mock_result)\n", - "result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "assert isinstance(result, sy.SyftSuccess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# upload correct result\n", - "result = request.accept_by_depositing_result(real_result, force=True)\n", + "result = request.approve()\n", "result" ] }, @@ -472,7 +449,7 @@ "metadata": {}, "outputs": [], "source": [ - "result = request.accept_by_depositing_result(real_result)\n", + "result = request.approve()\n", "result" ] }, @@ -494,10 +471,10 @@ }, "outputs": [], "source": [ - "# Cleanup local domain server\n", + "# Cleanup local datasite server\n", "\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { @@ -509,11 +486,6 @@ } ], "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, "language_info": { "codemirror_mode": { "name": "ipython", @@ -524,7 +496,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.2" + "version": "3.12.5" }, "toc": { "base_numbering": 1, diff --git a/notebooks/api/0.8/03-data-scientist-download-result.ipynb b/notebooks/api/0.8/03-data-scientist-download-result.ipynb index 81ce4f783fc..672131cedeb 100644 --- a/notebooks/api/0.8/03-data-scientist-download-result.ipynb +++ b/notebooks/api/0.8/03-data-scientist-download-result.ipynb @@ -22,7 +22,7 @@ }, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -43,7 +43,7 @@ "cell_type": 
"markdown", "metadata": {}, "source": [ - "### Login to Syft Domain Server" + "### Login to Syft Datasite Server" ] }, { @@ -54,7 +54,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", dev_mode=True)" + "server = sy.orchestra.launch(name=\"test-datasite-1\", dev_mode=True)" ] }, { @@ -65,7 +65,7 @@ }, "outputs": [], "source": [ - "domain_client = node.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "datasite_client = server.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -77,6 +77,15 @@ "As the Syft Function policy requires exact input match, let's get the asset for which we wanted to run our function on." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client.datasets" + ] + }, { "cell_type": "code", "execution_count": null, @@ -86,7 +95,7 @@ "outputs": [], "source": [ "# Get the canada_trade_flow asset from the Canada Trade dataset\n", - "asset = domain_client.datasets[0].assets[0]\n", + "asset = datasite_client.datasets[0].assets[0]\n", "asset" ] }, @@ -103,7 +112,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.code.sum_trade_value_mil" + "datasite_client.code.sum_trade_value_mil" ] }, { @@ -121,7 +130,7 @@ }, "outputs": [], "source": [ - "result_pointer = domain_client.code.sum_trade_value_mil(trade_data=asset)\n", + "result_pointer = datasite_client.code.sum_trade_value_mil(trade_data=asset)\n", "result_pointer" ] }, @@ -161,7 +170,7 @@ }, "outputs": [], "source": [ - "ops = domain_client.code[-1].output_policy\n", + "ops = datasite_client.code[-1].output_policy\n", "ops" ] }, @@ -171,7 +180,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.code" + "datasite_client.code" ] }, { @@ -189,8 +198,8 @@ }, "outputs": [], "source": [ - "assert isinstance(ops.is_valid, sy.SyftError)\n", - "assert ops.count > 0" + "assert not ops.is_valid()\n", + "assert ops.count() > 0" ] }, { @@ -201,10 +210,10 @@ }, "outputs": [], "source": [ - "# Cleanup local domain\n", + "# Cleanup local datasite\n", "\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { @@ -216,11 +225,6 @@ } ], "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, "language_info": { "codemirror_mode": { "name": "ipython", @@ -231,7 +235,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.12.5" }, "toc": { "base_numbering": 1, diff --git a/notebooks/api/0.8/04-jax-example.ipynb b/notebooks/api/0.8/04-jax-example.ipynb deleted file mode 100644 index 6f1e413d83b..00000000000 --- a/notebooks/api/0.8/04-jax-example.ipynb +++ /dev/null @@ -1,416 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", - "package_string = f'\"syft{SYFT_VERSION}\"'\n", - "# %pip install {package_string} -q" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# third party\n", - "import haiku as hk\n", - "import jax\n", - "from jax import random\n", - "\n", - "# syft absolute\n", - "import syft as sy\n", - "\n", - "sy.requires(SYFT_VERSION)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": { - 
"tags": [] - }, - "outputs": [], - "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "key = random.PRNGKey(42)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "train_data = random.uniform(key, shape=(4, 28, 28, 1))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "assert round(train_data.sum()) == 1602" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "train = sy.ActionObject.from_obj(train_data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "type(train.syft_action_data), train.id, train.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "train_domain_obj = domain_client.api.services.action.set(train)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "class MLP(hk.Module):\n", - " def __init__(self, out_dims, name=None):\n", - " super().__init__(name=name)\n", - " self.out_dims = out_dims\n", - "\n", - " def __call__(self, x):\n", - " x = x.reshape((x.shape[0], -1))\n", - " x = hk.Linear(128)(x)\n", - " x = jax.nn.relu(x)\n", - " x = hk.Linear(self.out_dims)(x)\n", - " return x\n", - "\n", - "\n", - "def _forward_fn_linear1(x):\n", - " module = MLP(out_dims=10)\n", - " return module(x)\n", - "\n", - "\n", - "model = hk.transform(_forward_fn_linear1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "weights = model.init(key, train.syft_action_data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "assert isinstance(weights, dict)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "w = sy.ActionObject.from_obj(weights)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "type(w.syft_action_data), w.id" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "weight_domain_obj = domain_client.api.services.action.set(w)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "16", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "@sy.syft_function(\n", - " input_policy=sy.ExactMatch(weights=weight_domain_obj.id, data=train_domain_obj.id),\n", - " output_policy=sy.SingleExecutionExactOutput(),\n", - ")\n", - "def train_mlp(weights, data):\n", - " # third party\n", - " import haiku as hk\n", - " import jax\n", - "\n", - " class 
MLP(hk.Module):\n", - " def __init__(self, out_dims, name=None):\n", - " super().__init__(name=name)\n", - " self.out_dims = out_dims\n", - "\n", - " def __call__(self, x):\n", - " x = x.reshape((x.shape[0], -1))\n", - " x = hk.Linear(128)(x)\n", - " x = jax.nn.relu(x)\n", - " x = hk.Linear(self.out_dims)(x)\n", - " return x\n", - "\n", - " def _forward_fn_linear1(x):\n", - " module = MLP(out_dims=10)\n", - " return module(x)\n", - "\n", - " model = hk.transform(_forward_fn_linear1)\n", - " rng_key = jax.random.PRNGKey(42)\n", - " output = model.apply(params=weights, x=data, rng=rng_key)\n", - " return output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "pointer = train_mlp(weights=weight_domain_obj, data=train_domain_obj)\n", - "output = pointer.get()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "assert round(output.sum(), 2) == -0.86" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "request = domain_client.code.request_code_execution(train_mlp)\n", - "request" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "request.approve()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "domain_client._api = None\n", - "_ = domain_client.api" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "result_ptr = domain_client.code.train_mlp(weights=w.id, data=train.id)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23", - "metadata": {}, - "outputs": [], - "source": [ - "result = result_ptr.get()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "assert round(float(result.sum()), 2) == -0.86" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "if node.node_type.value == \"python\":\n", - " node.land()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.4" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": true - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/api/0.8/04-pytorch-example.ipynb b/notebooks/api/0.8/04-pytorch-example.ipynb new file mode 100644 index 00000000000..a6985d9215e --- /dev/null +++ b/notebooks/api/0.8/04-pytorch-example.ipynb @@ -0,0 +1,434 @@ +{ + "cells": [ + { + "cell_type": "code", + 
"execution_count": null, + "id": "0", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'\n", + "# %pip install {package_string} -q" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# third party\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(name=\"test-datasite-1\", dev_mode=True, reset=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Set the random seed for reproducibility\n", + "torch.manual_seed(42)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Generate random data\n", + "train_data = torch.rand((4, 28, 28, 1))\n", + "train_data.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "assert torch.round(train_data.sum()) == 1557" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "train = sy.ActionObject.from_obj(train_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "type(train.syft_action_data), train.id, train.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "train_datasite_obj = train.send(datasite_client)\n", + "type(train_datasite_obj)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "train_datasite_obj" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "assert torch.round(train_datasite_obj.syft_action_data.sum()) == 1557" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "class MLP(nn.Module):\n", + " def __init__(self, out_dims):\n", + " super().__init__()\n", + " self.out_dims = out_dims\n", + " self.linear1 = nn.Linear(784, 128)\n", + " self.linear2 = nn.Linear(128, out_dims)\n", + "\n", + " def forward(self, x):\n", + " x = x.view(x.size(0), -1)\n", + " x = self.linear1(x)\n", + " x = F.relu(x)\n", + " x = self.linear2(x)\n", + " return x\n", + "\n", + "\n", + "model = MLP(out_dims=10)\n", + "model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "weights = model.state_dict()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "assert isinstance(weights, dict)" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "w = sy.ActionObject.from_obj(weights)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "type(w.syft_action_data), w.id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "weight_datasite_obj = w.send(datasite_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "@sy.syft_function(\n", + " input_policy=sy.ExactMatch(\n", + " weights=weight_datasite_obj.id, data=train_datasite_obj.id\n", + " ),\n", + " output_policy=sy.SingleExecutionExactOutput(),\n", + ")\n", + "def train_mlp(weights, data):\n", + " # third party\n", + " import torch\n", + " import torch.nn as nn\n", + " import torch.nn.functional as F\n", + "\n", + " class MLP(nn.Module):\n", + " def __init__(self, out_dims):\n", + " super().__init__()\n", + " self.out_dims = out_dims\n", + " self.linear1 = nn.Linear(784, 128)\n", + " self.linear2 = nn.Linear(128, out_dims)\n", + "\n", + " def forward(self, x):\n", + " x = x.view(x.size(0), -1)\n", + " x = self.linear1(x)\n", + " x = F.relu(x)\n", + " x = self.linear2(x)\n", + " return x\n", + "\n", + " # Initialize the model\n", + " model = MLP(out_dims=10)\n", + "\n", + " # Load weights into the model\n", + " model.load_state_dict(weights)\n", + "\n", + " # Perform a forward pass\n", + " model.eval() # Set the model to evaluation mode\n", + " with torch.no_grad(): # Disable gradient calculation\n", + " output = model(data)\n", + "\n", + " return output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "pointer = train_mlp(weights=weight_datasite_obj, data=train_datasite_obj)\n", + "output = pointer.get()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "assert torch.allclose(torch.sum(output), torch.tensor(1.3907))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "request = datasite_client.code.request_code_execution(train_mlp)\n", + "request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "request.approve()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "datasite_client._api = None\n", + "_ = datasite_client.api" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "result_ptr = datasite_client.code.train_mlp(weights=w.id, data=train.id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "result = result_ptr.get()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "assert torch.allclose(torch.sum(result), torch.tensor(1.3907))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": { + "tags": [] + }, + "outputs": [], + 
"source": [ + "if server.server_type.value == \"python\":\n", + " server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": false, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": true + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/api/0.8/05-custom-policy.ipynb b/notebooks/api/0.8/05-custom-policy.ipynb index 85a763a02f8..7b383ad0d73 100644 --- a/notebooks/api/0.8/05-custom-policy.ipynb +++ b/notebooks/api/0.8/05-custom-policy.ipynb @@ -9,7 +9,7 @@ }, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -41,7 +41,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True, reset=True)" + "server = sy.orchestra.launch(name=\"test-datasite-1\", dev_mode=True, reset=True)" ] }, { @@ -53,13 +53,35 @@ }, "outputs": [], "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { "cell_type": "code", "execution_count": null, "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client.register(\n", + " email=\"newuser@openmined.org\", name=\"John Doe\", password=\"pw\", password_verify=\"pw\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "client_low_ds = server.login(email=\"newuser@openmined.org\", password=\"pw\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", "metadata": { "tags": [] }, @@ -72,12 +94,16 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "7", "metadata": { "tags": [] }, "outputs": [], "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "\n", "class RepeatedCallPolicy(sy.CustomOutputPolicy):\n", " n_calls: int = 0\n", " downloadable_output_args: list[str] = []\n", @@ -87,31 +113,36 @@ " self.downloadable_output_args = (\n", " downloadable_output_args if downloadable_output_args is not None else []\n", " )\n", - " self.n_calls = n_calls + 1\n", + " self.n_calls = n_calls\n", " self.state = {\"counts\": 0}\n", "\n", " def public_state(self):\n", " return self.state[\"counts\"]\n", "\n", - " def apply_output(self, context, outputs):\n", + " def update_policy(self, context, outputs):\n", + " self.state[\"counts\"] += 1\n", + "\n", + " def apply_to_output(self, context, outputs, update_policy=True):\n", " if hasattr(outputs, \"syft_action_data\"):\n", " outputs = outputs.syft_action_data\n", " output_dict = {}\n", " if self.state[\"counts\"] < self.n_calls:\n", " for output_arg in self.downloadable_output_args:\n", " output_dict[output_arg] = outputs[output_arg]\n", - "\n", - " self.state[\"counts\"] += 1\n", + " if update_policy:\n", + " 
self.update_policy(context, outputs)\n", " else:\n", " return None\n", + " return output_dict\n", "\n", - " return output_dict" + " def is_valid(self, context):\n", + " return self.state[\"counts\"] < self.n_calls" ] }, { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "8", "metadata": { "tags": [] }, @@ -123,7 +154,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "9", "metadata": { "tags": [] }, @@ -135,7 +166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "10", "metadata": { "tags": [] }, @@ -147,7 +178,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "11", "metadata": { "tags": [] }, @@ -159,7 +190,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "12", "metadata": { "tags": [] }, @@ -167,14 +198,14 @@ "source": [ "print(policy.init_kwargs)\n", "a_obj = sy.ActionObject.from_obj({\"y\": [1, 2, 3]})\n", - "x = policy.apply_output(None, a_obj)\n", + "x = policy.apply_to_output(None, a_obj)\n", "x[\"y\"]" ] }, { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "13", "metadata": { "tags": [] }, @@ -186,7 +217,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "14", "metadata": { "tags": [] }, @@ -200,24 +231,152 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "15", "metadata": {}, "outputs": [], "source": [ - "domain_client.api.services.action.set(x_pointer)" + "x_pointer = x_pointer.send(datasite_client)" ] }, { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "\n", + "# syft absolute\n", + "from syft import SyftException\n", + "from syft import UID\n", + "from syft.client.api import AuthedServiceContext\n", + "from syft.client.api import ServerIdentity\n", + "\n", + "\n", + "class CustomExactMatch(sy.CustomInputPolicy):\n", + " def __init__(self, *args: Any, **kwargs: Any) -> None:\n", + " pass\n", + "\n", + " def filter_kwargs(self, kwargs, context): # stdlib\n", + " allowed_inputs = self.allowed_ids_only(\n", + " allowed_inputs=self.inputs, kwargs=kwargs, context=context\n", + " )\n", + " results = self.retrieve_from_db(\n", + " allowed_inputs=allowed_inputs,\n", + " context=context,\n", + " )\n", + " return results\n", + "\n", + " def retrieve_from_db(self, allowed_inputs, context):\n", + " # syft absolute\n", + " from syft import ServerType\n", + " from syft.service.action.action_object import TwinMode\n", + "\n", + " action_service = context.server.get_service(\"actionservice\")\n", + " code_inputs = {}\n", + "\n", + " # When we are retrieving the code from the database, we need to use the server's\n", + " # verify key as the credentials. 
This is because when we approve the code, we\n", + " # we allow the private data to be used only for this specific code.\n", + " # but we are not modifying the permissions of the private data\n", + "\n", + " root_context = AuthedServiceContext(\n", + " server=context.server, credentials=context.server.verify_key\n", + " )\n", + " if context.server.server_type != ServerType.DATASITE:\n", + " raise SyftException(\n", + " public_message=f\"Invalid server type for code submission: {context.server.server_type}\"\n", + " )\n", + "\n", + " for var_name, arg_id in allowed_inputs.items():\n", + " code_inputs[var_name] = action_service.get(\n", + " context=root_context,\n", + " uid=arg_id,\n", + " twin_mode=TwinMode.NONE,\n", + " )\n", + " return code_inputs\n", + "\n", + " def allowed_ids_only(\n", + " self,\n", + " allowed_inputs,\n", + " kwargs,\n", + " context,\n", + " ):\n", + " # syft absolute\n", + " from syft import ServerType\n", + "\n", + " if context.server.server_type != ServerType.DATASITE:\n", + " raise SyftException(\n", + " public_message=f\"Invalid server type for code submission: {context.server.server_type}\"\n", + " )\n", + "\n", + " server_identity = ServerIdentity(\n", + " server_name=context.server.name,\n", + " server_id=context.server.id,\n", + " verify_key=context.server.signing_key.verify_key,\n", + " )\n", + " allowed_inputs = allowed_inputs.get(server_identity, {})\n", + "\n", + " filtered_kwargs = {}\n", + " for key in allowed_inputs.keys():\n", + " if key in kwargs:\n", + " value = kwargs[key]\n", + " uid = value\n", + "\n", + " if not isinstance(uid, UID):\n", + " uid = getattr(value, \"id\", None)\n", + "\n", + " if uid != allowed_inputs[key]:\n", + " raise SyftException(\n", + " public_message=f\"Input with uid: {uid} for `{key}` not in allowed inputs: {allowed_inputs}\"\n", + " )\n", + "\n", + " filtered_kwargs[key] = value\n", + "\n", + " return filtered_kwargs\n", + "\n", + " def is_valid(\n", + " self,\n", + " context,\n", + " usr_input_kwargs,\n", + " ):\n", + " filtered_input_kwargs = self.filter_kwargs(\n", + " kwargs=usr_input_kwargs,\n", + " context=context,\n", + " )\n", + "\n", + " expected_input_kwargs = set()\n", + " for _inp_kwargs in self.inputs.values():\n", + " for k in _inp_kwargs.keys():\n", + " if k not in usr_input_kwargs:\n", + " raise SyftException(\n", + " public_message=f\"Function missing required keyword argument: '{k}'\"\n", + " )\n", + " expected_input_kwargs.update(_inp_kwargs.keys())\n", + "\n", + " permitted_input_kwargs = list(filtered_input_kwargs.keys())\n", + "\n", + " not_approved_kwargs = set(expected_input_kwargs) - set(permitted_input_kwargs)\n", + " if len(not_approved_kwargs) > 0:\n", + " raise SyftException(\n", + " public_message=f\"Function arguments: {not_approved_kwargs} are not approved yet.\"\n", + " )\n", + "\n", + " return True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", "metadata": { "tags": [] }, "outputs": [], "source": [ "@sy.syft_function(\n", - " input_policy=sy.ExactMatch(x=x_pointer),\n", + " input_policy=CustomExactMatch(x=x_pointer),\n", " output_policy=RepeatedCallPolicy(n_calls=10, downloadable_output_args=[\"y\"]),\n", ")\n", "def func(x):\n", @@ -227,32 +386,64 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "x = CustomExactMatch(x=x_pointer)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", "metadata": { "tags": [] }, "outputs": [], "source": [ - "request = 
domain_client.code.request_code_execution(func)\n", + "request = client_low_ds.code.request_code_execution(func)\n", "request" ] }, { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "request_id = request.id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", "metadata": { "tags": [] }, "outputs": [], "source": [ - "domain_client.code.get_all()" + "client_low_ds.code.get_all()" ] }, { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "for request in datasite_client.requests:\n", + " if request.id == request_id:\n", + " break" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", "metadata": { "tags": [] }, @@ -264,142 +455,145 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "# Custom policies need to be approved before they can be viewed and used\n", + "assert func.input_policy is None\n", + "assert func.output_policy is None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", "metadata": { "tags": [] }, "outputs": [], "source": [ - "result = func.unsafe_function(x=x_pointer)\n", + "result = func.run(x=x_pointer)\n", "result" ] }, { "cell_type": "code", "execution_count": null, - "id": "19", - "metadata": { - "tags": [] - }, + "id": "26", + "metadata": {}, "outputs": [], "source": [ - "final_result = request.accept_by_depositing_result(result)\n", - "final_result" + "request.approve()" ] }, { "cell_type": "code", "execution_count": null, - "id": "20", - "metadata": { - "tags": [] - }, + "id": "27", + "metadata": {}, "outputs": [], "source": [ - "res_ptr = domain_client.code.func(x=x_pointer)\n", - "res_ptr" + "assert func.input_policy is not None" ] }, { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "28", "metadata": {}, "outputs": [], "source": [ - "res = res_ptr.get()\n", - "res" + "assert func.output_policy is not None" ] }, { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "29", "metadata": { "tags": [] }, "outputs": [], "source": [ - "assert (res[\"y\"] == np.array([2, 3, 4])).all()" + "res_ptr = client_low_ds.code.func(x=x_pointer)\n", + "res_ptr" ] }, { "cell_type": "code", "execution_count": null, - "id": "23", - "metadata": { - "tags": [] - }, + "id": "30", + "metadata": {}, "outputs": [], "source": [ - "assert set(res.keys()) == set(\"y\")" + "res = res_ptr.get()\n", + "res" ] }, { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "31", "metadata": { "tags": [] }, "outputs": [], "source": [ - "domain_client.code.get_all()[0].output_policy" + "assert (res[\"y\"] == np.array([2, 3, 4])).all()" ] }, { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "32", "metadata": { "tags": [] }, "outputs": [], "source": [ - "domain_client.api.services.policy.get_all()" + "assert set(res.keys()) == set(\"y\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "33", "metadata": { "tags": [] }, "outputs": [], "source": [ - "output_policy = domain_client.api.services.policy.get_all()\n", - "output_policy" + "for code in datasite_client.code.get_all():\n", + " if code.service_func_name == \"func\":\n", + " break\n", + "print(code.output_policy.state)\n", + "assert code.output_policy.state == {\"counts\": 1}" ] }, { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "34", 
"metadata": { "tags": [] }, "outputs": [], "source": [ - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "35", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, "language_info": { "codemirror_mode": { "name": "ipython", @@ -410,7 +604,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.12.5" }, "toc": { "base_numbering": 1, diff --git a/notebooks/api/0.8/06-multiple-code-requests.ipynb b/notebooks/api/0.8/06-multiple-code-requests.ipynb index 868cb20b91b..a92cb9c326a 100644 --- a/notebooks/api/0.8/06-multiple-code-requests.ipynb +++ b/notebooks/api/0.8/06-multiple-code-requests.ipynb @@ -9,7 +9,7 @@ }, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -41,7 +41,9 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", reset=True, dev_mode=True)" + "server = sy.orchestra.launch(\n", + " name=\"test-datasite-1\", port=\"auto\", reset=True, dev_mode=True\n", + ")" ] }, { @@ -53,7 +55,7 @@ }, "outputs": [], "source": [ - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -93,7 +95,7 @@ "metadata": {}, "outputs": [], "source": [ - "dataset2 = sy.Dataset(name=\"My Sample Dataset - II\")\n", + "dataset2 = sy.Dataset(name=\"Age Dataset\")\n", "asset2 = sy.Asset(name=\"Sample Data - II\")\n", "asset2.set_obj(sample_data * 10)\n", "asset2.set_mock(mock_sample_data * 10, mock_is_real=False)\n", @@ -142,7 +144,7 @@ }, "outputs": [], "source": [ - "ds_client = node.login(email=\"sheldon@caltech.edu\", password=\"abc123\")" + "ds_client = server.login(email=\"sheldon@caltech.edu\", password=\"abc123\")" ] }, { @@ -174,7 +176,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert len(datasets) == 2\n", + "assert len(datasets) == 1\n", "dataset_ptr = datasets[0]\n", "dataset_ptr" ] @@ -203,7 +205,7 @@ "\n", " # compute sum\n", " res = data.sum()\n", - " base_lap = dp.m.make_base_laplace(\n", + " base_lap = dp.m.make_laplace(\n", " dp.atom_domain(T=float),\n", " dp.absolute_distance(T=float),\n", " scale=10.0,\n", @@ -250,7 +252,7 @@ }, "outputs": [], "source": [ - "project = new_project.start()\n", + "project = new_project.send()\n", "\n", "project" ] @@ -304,7 +306,7 @@ "\n", " # compute mean\n", " mean = data.mean()\n", - " base_lap = dp.m.make_base_laplace(\n", + " base_lap = dp.m.make_laplace(\n", " dp.atom_domain(T=float),\n", " dp.absolute_distance(T=float),\n", " scale=10.0,\n", @@ -358,7 +360,7 @@ "metadata": {}, "outputs": [], "source": [ - "# The Domain Owner retrieves by name or uid for approval\n", + "# The Datasite Owner retrieves by name or uid for approval\n", "root_client_project = root_client.projects.get_by_uid(project.id)\n", "assert isinstance(root_client_project, sy.service.project.project.Project)" ] @@ -477,8 +479,9 @@ "metadata": {}, "outputs": [], "source": [ - "datasets = ds_client.datasets.search(name=\"My Sample Dataset - II\")\n", - "dataset_ptr2 = datasets[0]" + "datasets = ds_client.datasets.search(name=\"Age 
Dataset\")\n", + "dataset_ptr2 = datasets[0]\n", + "dataset_ptr2" ] }, { @@ -489,23 +492,14 @@ "outputs": [], "source": [ "# Validate if input policy is violated\n", - "sum_ptr = ds_client.code.calculate_sum(data=dataset_ptr2.assets[0])" + "with sy.raises(sy.SyftException, show=True):\n", + " sum_ptr = ds_client.code.calculate_sum(data=dataset_ptr2.assets[0])" ] }, { "cell_type": "code", "execution_count": null, "id": "35", - "metadata": {}, - "outputs": [], - "source": [ - "assert isinstance(sum_ptr, sy.SyftError), sum_ptr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "36", "metadata": { "tags": [] }, @@ -517,7 +511,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "36", "metadata": { "tags": [] }, @@ -529,7 +523,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "37", "metadata": { "tags": [] }, @@ -541,31 +535,26 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "38", "metadata": { "tags": [] }, "outputs": [], "source": [ - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.deployment_type.value in [\"python\", \"single_container\"]:\n", + " server.land()" ] }, { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "39", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, "language_info": { "codemirror_mode": { "name": "ipython", @@ -576,7 +565,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.5" + "version": "3.12.5" }, "toc": { "base_numbering": 1, diff --git a/notebooks/api/0.8/07-datasite-register-control-flow.ipynb b/notebooks/api/0.8/07-datasite-register-control-flow.ipynb new file mode 100644 index 00000000000..5bcf3d187b3 --- /dev/null +++ b/notebooks/api/0.8/07-datasite-register-control-flow.ipynb @@ -0,0 +1,386 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Registering Users in Syft Datasite Server\n", + "\n", + "By default users are not allowed to create a new account on the Syft Datasite Server. This notebook is a tutorial for Data Owners to enable guest signups on their deployments." 
+ ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "### Import packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'\n", + "# %pip install {package_string} -q" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "### Launch a Syft Datasite Server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(name=\"test-datasite-1\", reset=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "# log into the server with default root credentials\n", + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "#### By default registration is disabled. Only `root_client` can register" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "# The assumed state of this test is a server with signup set to False\n", + "# however if the tox task has set it to True you need to overwrite the setting\n", + "# before running the tests\n", + "# root_client.settings.allow_guest_signup(enable=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# Register a new user using root credentials\n", + "response_1 = root_client.register(\n", + " email=\"joker@gotham.com\",\n", + " password=\"joker123\",\n", + " password_verify=\"joker123\",\n", + " name=\"Joker\",\n", + ")\n", + "response_1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException, show=True):\n", + " server.register(\n", + " email=\"batman@gotham.com\",\n", + " password=\"1rIzHAx6uQaP\",\n", + " password_verify=\"1rIzHAx6uQaP\",\n", + " name=\"Batman\",\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "# Register a new user as a GUEST\n", + "with sy.raises(sy.SyftException, show=True):\n", + " server.register(\n", + " email=\"batman@gotham.com\",\n", + " password=\"1rIzHAx6uQaP\",\n", + " password_verify=\"1rIzHAx6uQaP\",\n", + " name=\"Batman\",\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# Register a new user as a GUEST\n", + "with sy.raises(sy.SyftException, show=True):\n", + " server.register(\n", + " email=\"robin@gotham.com\",\n", + " password=\"5v1ei4OM2N4m\",\n", + " password_verify=\"5v1ei4OM2N4m\",\n", + " name=\"Robin\",\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "assert root_client.settings.get().signup_enabled is False" + ] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": [ + "#### Now, if root user enable registration, then the guest clients can 
also register" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "# Get the current settings of the server\n", + "root_client.settings.get()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# Enable guest signups\n", + "root_client.settings.allow_guest_signup(enable=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "# Refresh the root client to fetch the updated settings\n", + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "# Register a new user as a GUEST\n", + "response_2 = server.register(\n", + " email=\"batman@gotham.com\",\n", + " password=\"1rIzHAx6uQaP\",\n", + " password_verify=\"1rIzHAx6uQaP\",\n", + " name=\"Batman\",\n", + ")\n", + "response_2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "# Register a new user as a GUEST\n", + "response_3 = server.register(\n", + " email=\"robin@gotham.com\",\n", + " password=\"5v1ei4OM2N4m\",\n", + " password_verify=\"5v1ei4OM2N4m\",\n", + " name=\"Robin\",\n", + ")\n", + "response_3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "assert root_client.settings.get().signup_enabled is True\n", + "assert isinstance(response_1, sy.SyftSuccess)\n", + "assert isinstance(response_2, sy.SyftSuccess)\n", + "assert isinstance(response_3, sy.SyftSuccess)" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": {}, + "source": [ + "### Toggle signup again" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "# Refresh the root client\n", + "root_client.settings.allow_guest_signup(enable=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "# Refresh the root client to fetch the updated settings\n", + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "# Register a new user as a GUEST\n", + "with sy.raises(sy.SyftException, show=True):\n", + " server.register(\n", + " email=\"bane@gotham.com\",\n", + " password=\"SKY5cC2zQPRP\",\n", + " password_verify=\"SKY5cC2zQPRP\",\n", + " name=\"Bane\",\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "# Register a new user as a GUEST\n", + "with sy.raises(sy.SyftException, show=True):\n", + " server.register(\n", + " email=\"riddler@gotham.com\",\n", + " password=\"7eVGUuNDyH8P\",\n", + " password_verify=\"7eVGUuNDyH8P\",\n", + " name=\"Riddler\",\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": {}, + "outputs": [], + "source": [ + "assert root_client.settings.get().signup_enabled is False" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "# Cleanup local datasite server\n", + "\n", + "if 
server.server_type.value == \"python\":\n", + " server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": false, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": false + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/api/0.8/07-domain-register-control-flow.ipynb b/notebooks/api/0.8/07-domain-register-control-flow.ipynb deleted file mode 100644 index 5bd493a47c9..00000000000 --- a/notebooks/api/0.8/07-domain-register-control-flow.ipynb +++ /dev/null @@ -1,381 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Registering Users in Syft Domain Server\n", - "\n", - "By default users are not allowed to create a new account on the Syft Domain Server. This notebook is a tutorial for Data Owners to enable guest signups on their deployments." - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "### Import packages" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", - "package_string = f'\"syft{SYFT_VERSION}\"'\n", - "# %pip install {package_string} -q" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "\n", - "sy.requires(SYFT_VERSION)" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "### Launch a Syft Domain Server" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True, reset=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "# log into the node with default root credentials\n", - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "#### By default registration is disabled. 
Only `root_client` can register" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "# The assumed state of this test is a node with signup set to False\n", - "# however if the tox task has set it to True you need to overwrite the setting\n", - "# before running the tests\n", - "# root_client.settings.allow_guest_signup(enable=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "# Register a new user using root credentials\n", - "response_1 = root_client.register(\n", - " email=\"joker@gotham.com\",\n", - " password=\"joker123\",\n", - " password_verify=\"joker123\",\n", - " name=\"Joker\",\n", - ")\n", - "response_1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "# Register a new user as a GUEST\n", - "response_2 = node.register(\n", - " email=\"batman@gotham.com\",\n", - " password=\"1rIzHAx6uQaP\",\n", - " password_verify=\"1rIzHAx6uQaP\",\n", - " name=\"Batman\",\n", - ")\n", - "response_2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "# Register a new user as a GUEST\n", - "response_3 = node.register(\n", - " email=\"robin@gotham.com\",\n", - " password=\"5v1ei4OM2N4m\",\n", - " password_verify=\"5v1ei4OM2N4m\",\n", - " name=\"Robin\",\n", - ")\n", - "response_3" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "assert root_client.settings.get().signup_enabled is False\n", - "assert isinstance(response_1, sy.SyftSuccess)\n", - "assert isinstance(response_2, sy.SyftError)\n", - "assert isinstance(response_3, sy.SyftError)" - ] - }, - { - "cell_type": "markdown", - "id": "13", - "metadata": {}, - "source": [ - "#### Now, if root user enable registration, then the guest clients can also register" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": {}, - "outputs": [], - "source": [ - "# Get the current settings of the node\n", - "root_client.settings.get()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": {}, - "outputs": [], - "source": [ - "# Enable guest signups\n", - "root_client.settings.allow_guest_signup(enable=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "16", - "metadata": {}, - "outputs": [], - "source": [ - "# Refresh the root client to fetch the updated settings\n", - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17", - "metadata": {}, - "outputs": [], - "source": [ - "# Register a new user as a GUEST\n", - "response_2 = node.register(\n", - " email=\"batman@gotham.com\",\n", - " password=\"1rIzHAx6uQaP\",\n", - " password_verify=\"1rIzHAx6uQaP\",\n", - " name=\"Batman\",\n", - ")\n", - "response_2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18", - "metadata": {}, - "outputs": [], - "source": [ - "# Register a new user as a GUEST\n", - "response_3 = node.register(\n", - " email=\"robin@gotham.com\",\n", - " password=\"5v1ei4OM2N4m\",\n", - " password_verify=\"5v1ei4OM2N4m\",\n", - " name=\"Robin\",\n", - ")\n", - "response_3" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19", - "metadata": {}, - "outputs": 
[], - "source": [ - "assert root_client.settings.get().signup_enabled is True\n", - "assert isinstance(response_1, sy.SyftSuccess)\n", - "assert isinstance(response_2, sy.SyftSuccess)\n", - "assert isinstance(response_3, sy.SyftSuccess)" - ] - }, - { - "cell_type": "markdown", - "id": "20", - "metadata": {}, - "source": [ - "### Toggle signup again" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21", - "metadata": {}, - "outputs": [], - "source": [ - "# Refresh the root client\n", - "root_client.settings.allow_guest_signup(enable=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22", - "metadata": {}, - "outputs": [], - "source": [ - "# Refresh the root client to fetch the updated settings\n", - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23", - "metadata": {}, - "outputs": [], - "source": [ - "# Register a new user as a GUEST\n", - "response_2 = node.register(\n", - " email=\"bane@gotham.com\",\n", - " password=\"SKY5cC2zQPRP\",\n", - " password_verify=\"SKY5cC2zQPRP\",\n", - " name=\"Bane\",\n", - ")\n", - "response_2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": {}, - "outputs": [], - "source": [ - "# Register a new user as a GUEST\n", - "response_3 = node.register(\n", - " email=\"riddler@gotham.com\",\n", - " password=\"7eVGUuNDyH8P\",\n", - " password_verify=\"7eVGUuNDyH8P\",\n", - " name=\"Riddler\",\n", - ")\n", - "response_3" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25", - "metadata": {}, - "outputs": [], - "source": [ - "assert root_client.settings.get().signup_enabled is False\n", - "assert isinstance(response_1, sy.SyftSuccess)\n", - "assert isinstance(response_2, sy.SyftError)\n", - "assert isinstance(response_3, sy.SyftError)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26", - "metadata": {}, - "outputs": [], - "source": [ - "# Cleanup local domain server\n", - "\n", - "if node.node_type.value == \"python\":\n", - " node.land()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "27", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": false - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/api/0.8/08-code-version.ipynb b/notebooks/api/0.8/08-code-version.ipynb index 4168770ca88..c1d60295d8e 100644 --- a/notebooks/api/0.8/08-code-version.ipynb +++ b/notebooks/api/0.8/08-code-version.ipynb @@ -20,7 +20,7 @@ "metadata": {}, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "%pip install {package_string} -q" ] @@ -41,7 +41,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Launch a Syft 
Domain Server" + "### Launch a Syft Datasite Server" ] }, { @@ -50,7 +50,7 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True)" + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=\"auto\", dev_mode=True)" ] }, { @@ -59,15 +59,15 @@ "metadata": {}, "outputs": [], "source": [ - "# log into the node with default root credentials\n", - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "# log into the server with default root credentials\n", + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a new Data Scientist account on the Domain Server\n" + "### Create a new Data Scientist account on the Datasite Server\n" ] }, { @@ -76,9 +76,9 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.register(\n", - " name=\"Jane Doe\",\n", - " email=\"jane@caltech.edu\",\n", + "datasite_client.register(\n", + " name=\"Janet Doe\",\n", + " email=\"janet@caltech.edu\",\n", " password=\"abc123\",\n", " password_verify=\"abc123\",\n", " institution=\"Caltech\",\n", @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "jane_client = node.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "janet_client = server.login(email=\"janet@caltech.edu\", password=\"abc123\")" ] }, { @@ -128,7 +128,7 @@ "metadata": {}, "outputs": [], "source": [ - "jane_client.code.request_code_execution(code=test_func)" + "janet_client.code.request_code_execution(code=test_func)" ] }, { @@ -137,7 +137,7 @@ "metadata": {}, "outputs": [], "source": [ - "jane_client.code" + "janet_client.code" ] }, { @@ -153,7 +153,7 @@ "metadata": {}, "outputs": [], "source": [ - "jane_client.code_history" + "janet_client.code_history" ] }, { @@ -189,7 +189,7 @@ "metadata": {}, "outputs": [], "source": [ - "jane_client.code.request_code_execution(code=test_func)" + "janet_client.code.request_code_execution(code=test_func)" ] }, { @@ -198,7 +198,7 @@ "metadata": {}, "outputs": [], "source": [ - "jane_client.code_history.test_func" + "janet_client.code_history.test_func" ] }, { @@ -214,7 +214,7 @@ "metadata": {}, "outputs": [], "source": [ - "admin_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -232,7 +232,7 @@ "metadata": {}, "outputs": [], "source": [ - "admin_client.code_histories[\"jane@caltech.edu\"]" + "admin_client.code_histories[\"janet@caltech.edu\"]" ] }, { @@ -241,7 +241,7 @@ "metadata": {}, "outputs": [], "source": [ - "admin_client.code_histories[\"jane@caltech.edu\"].test_func[0]" + "admin_client.code_histories[\"janet@caltech.edu\"].test_func[0]" ] }, { @@ -250,7 +250,7 @@ "metadata": {}, "outputs": [], "source": [ - "test_func_history = admin_client.code_histories[\"jane@caltech.edu\"].test_func" + "test_func_history = admin_client.code_histories[\"janet@caltech.edu\"].test_func" ] }, { @@ -259,7 +259,8 @@ "metadata": {}, "outputs": [], "source": [ - "admin_client.code_histories[\"jane@caltech.edu\"].test_func[-1]" + "with sy.raises(sy.SyftException, show=True):\n", + " admin_client.code_histories[\"janet@caltech.edu\"].test_func[-1]" ] }, { @@ -268,9 +269,9 @@ "metadata": {}, "outputs": [], "source": [ - "# # Cleanup local domain server\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "# # Cleanup local datasite server\n", + "if 
server.server_type.value == \"python\":\n", + " server.land()" ] }, { @@ -282,11 +283,6 @@ } ], "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, "language_info": { "codemirror_mode": { "name": "ipython", @@ -297,7 +293,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.2" + "version": "3.12.5" }, "toc": { "base_numbering": 1, diff --git a/notebooks/api/0.8/09-blob-storage.ipynb b/notebooks/api/0.8/09-blob-storage.ipynb index 713fd53f6f4..f5fc5219790 100644 --- a/notebooks/api/0.8/09-blob-storage.ipynb +++ b/notebooks/api/0.8/09-blob-storage.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -33,10 +33,9 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"test-domain-1\",\n", + "server = sy.orchestra.launch(\n", + " name=\"test-datasite-1\",\n", " dev_mode=True,\n", - " in_memory_workers=True,\n", " reset=True,\n", " create_producer=True,\n", ")" @@ -48,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -90,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "data_ptr = domain_client.upload_files([a_file, b_file])" + "data_ptr = datasite_client.upload_files([a_file, b_file])" ] }, { @@ -128,7 +127,7 @@ "metadata": {}, "outputs": [], "source": [ - "lines_file_pptr = domain_client.upload_files(x_file)[0].syft_action_data" + "lines_file_pptr = datasite_client.upload_files(x_file)[0].syft_action_data" ] }, { @@ -170,7 +169,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.upload_dataset(ds)" + "datasite_client.upload_dataset(ds)" ] }, { @@ -220,7 +219,7 @@ "metadata": {}, "outputs": [], "source": [ - "single_data_ptr = domain_client.upload_files(a_file)\n", + "single_data_ptr = datasite_client.upload_files(a_file)\n", "single_data_ptr" ] }, @@ -238,14 +237,14 @@ "outputs": [], "source": [ "if False:\n", - " domain_client.upload_files(\"./path/to/folder\")" + " datasite_client.upload_files(\"./path/to/folder\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "#### Cleanup local domain server" + "#### Cleanup local datasite server" ] }, { @@ -254,8 +253,8 @@ "metadata": {}, "outputs": [], "source": [ - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { @@ -267,11 +266,6 @@ } ], "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, "language_info": { "codemirror_mode": { "name": "ipython", @@ -282,7 +276,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.12.5" }, "toc": { "base_numbering": 1, diff --git a/notebooks/api/0.8/10-container-images.ipynb b/notebooks/api/0.8/10-container-images.ipynb deleted file mode 100644 index 5e23dd76388..00000000000 --- a/notebooks/api/0.8/10-container-images.ipynb +++ /dev/null @@ -1,1485 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", - 
"package_string = f'\"syft{SYFT_VERSION}\"'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "import os\n", - "import time\n", - "\n", - "# third party\n", - "import docker\n", - "import numpy as np\n", - "\n", - "# syft absolute\n", - "import syft as sy\n", - "\n", - "sy.requires(SYFT_VERSION)\n", - "\n", - "# syft absolute\n", - "from syft.service.worker.image_registry import SyftImageRegistry\n", - "from syft.service.worker.worker_image import SyftWorkerImage" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# Uncomment this to run the whole docker based custom workers\n", - "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"container_stack\"\n", - "# os.environ[\"DEV_MODE\"] = \"True\"\n", - "\n", - "\n", - "# Disable inmemory worker for container stack\n", - "running_as_container = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\") in (\n", - " \"container_stack\",\n", - " \"k8s\",\n", - ")\n", - "in_memory_workers = not running_as_container" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "domain = sy.orchestra.launch(\n", - " name=\"test-domain-1\",\n", - " dev_mode=True,\n", - " create_producer=True,\n", - " in_memory_workers=in_memory_workers,\n", - " reset=True,\n", - " port=8081,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client = domain.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "We should see a default worker pool" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.worker_pools" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "syft_base_worker_tag = (\n", - " \"local-dev\"\n", - " if (bool(os.environ[\"DEV_MODE\"]) and running_as_container)\n", - " else sy.__version__\n", - ")\n", - "syft_base_worker_tag" - ] - }, - { - "cell_type": "markdown", - "id": "8", - "metadata": {}, - "source": [ - "#### Submit Dockerfile" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "custom_dockerfile_str = f\"\"\"\n", - "FROM openmined/grid-backend:{syft_base_worker_tag}\n", - "\n", - "RUN pip install pydicom\n", - "\n", - "\"\"\".strip()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "docker_config = sy.DockerWorkerConfig(dockerfile=custom_dockerfile_str)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "# test image build locally\n", - "test_build_res = docker_config.test_image_build(tag=\"openmined/custom-worker:0.7.8\")\n", - "test_build_res" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "assert isinstance(test_build_res, sy.SyftSuccess), str(test_build_res)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13", - "metadata": {}, - "outputs": [], - "source": [ - "assert docker_config.dockerfile == custom_dockerfile_str" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": {}, - "outputs": [], - "source": [ - "submit_result = domain_client.api.services.worker_image.submit_dockerfile(\n", - " docker_config=docker_config\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": {}, - "outputs": [], - "source": [ - "submit_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "16", - "metadata": {}, - "outputs": [], - "source": [ - "assert isinstance(submit_result, sy.SyftSuccess), str(submit_result)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17", - "metadata": {}, - "outputs": [], - "source": [ - "dockerfile_list = domain_client.images.get_all()\n", - "dockerfile_list" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18", - "metadata": {}, - "outputs": [], - "source": [ - "assert len(domain_client.images.get_all()) == 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19", - "metadata": {}, - "outputs": [], - "source": [ - "workerimage: SyftWorkerImage = None\n", - "for image in dockerfile_list:\n", - " if not image.is_prebuilt and image.config.dockerfile == custom_dockerfile_str:\n", - " workerimage = image\n", - " break\n", - "\n", - "assert isinstance(workerimage, SyftWorkerImage), str(workerimage)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20", - "metadata": {}, - "outputs": [], - "source": [ - "workerimage" - ] - }, - { - "cell_type": "markdown", - "id": "21", - "metadata": {}, - "source": [ - "#### Setup Local Registry" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22", - "metadata": {}, - "outputs": [], - "source": [ - "# third party\n", - "\n", - "\n", - "class LocalRegistryContainer:\n", - " def __init__(self):\n", - " self.name = \"local_registry\"\n", - " self.client = docker.from_env()\n", - "\n", - " def start(self, host_port=5678):\n", - " existing = self.get()\n", - " if existing:\n", - " return existing\n", - "\n", - " result = self.client.containers.run(\n", - " \"registry:2\",\n", - " name=self.name,\n", - " detach=True,\n", - " ports={\"5000/tcp\": host_port},\n", - " labels={\"orgs.openmined.syft\": \"local-registry\"},\n", - " )\n", - "\n", - " return result\n", - "\n", - " def teardown(self):\n", - " existing = self.get()\n", - " if existing:\n", - " existing.stop()\n", - " existing.remove()\n", - "\n", - " def get(self):\n", - " try:\n", - " result = self.client.containers.get(self.name)\n", - " if result.status == \"running\":\n", - " return result\n", - " except docker.errors.NotFound:\n", - " return None\n", - "\n", - "\n", - "local_registry_container = LocalRegistryContainer()" - ] - }, - { - "cell_type": "markdown", - "id": "23", - "metadata": {}, - "source": [ - "#### Add Local Registry in Syft" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": {}, - "outputs": [], - "source": [ - "registry_add_result = domain_client.api.services.image_registry.add(\"localhost:5678\")\n", - "registry_add_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25", - "metadata": {}, - "outputs": [], - "source": [ - "assert isinstance(registry_add_result, sy.SyftSuccess), str(registry_add_result)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26", - "metadata": {}, - "outputs": [], - "source": [ - "images = domain_client.api.services.image_registry.get_all()\n", - "assert len(images) == 
1\n", - "images" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "27", - "metadata": {}, - "outputs": [], - "source": [ - "local_registry = images[0]\n", - "local_registry" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "28", - "metadata": {}, - "outputs": [], - "source": [ - "assert isinstance(local_registry, SyftImageRegistry), str(local_registry)" - ] - }, - { - "cell_type": "markdown", - "id": "29", - "metadata": {}, - "source": [ - "#### Build Image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "30", - "metadata": {}, - "outputs": [], - "source": [ - "pull = False if syft_base_worker_tag == \"local-dev\" else True\n", - "pull" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "31", - "metadata": {}, - "outputs": [], - "source": [ - "docker_tag = \"openmined/custom-worker:0.7.8\"\n", - "\n", - "registry_uid = local_registry.id if running_as_container else local_registry.id\n", - "\n", - "docker_build_result = domain_client.api.services.worker_image.build(\n", - " image_uid=workerimage.id,\n", - " tag=docker_tag,\n", - " registry_uid=registry_uid,\n", - " pull=pull,\n", - ")\n", - "docker_build_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "32", - "metadata": {}, - "outputs": [], - "source": [ - "workerimage.config.dockerfile" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "33", - "metadata": {}, - "outputs": [], - "source": [ - "assert isinstance(docker_build_result, sy.SyftSuccess), str(docker_build_result)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "34", - "metadata": {}, - "outputs": [], - "source": [ - "image_list = domain_client.images.get_all()\n", - "image_list" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "35", - "metadata": {}, - "outputs": [], - "source": [ - "for image in image_list:\n", - " if image.id == workerimage.id:\n", - " workerimage = (\n", - " image # we can also index with string using the repo_with_tag format\n", - " )\n", - "\n", - "if running_as_container:\n", - " image_list[workerimage.built_image_tag]\n", - " assert image_list[workerimage.built_image_tag] == workerimage\n", - "\n", - "workerimage" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "36", - "metadata": {}, - "outputs": [], - "source": [ - "def get_image_hash(tag) -> str:\n", - " client = docker.from_env()\n", - " try:\n", - " image = client.images.get(tag)\n", - " return image.id\n", - " except docker.errors.ImageNotFound:\n", - " return None" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37", - "metadata": {}, - "outputs": [], - "source": [ - "if running_as_container:\n", - " assert workerimage.image_hash == get_image_hash(\n", - " workerimage.built_image_tag\n", - " ), \"Worker Image image_hash does not match with built image hash\"" - ] - }, - { - "cell_type": "markdown", - "id": "38", - "metadata": {}, - "source": [ - "#### Push Image to Local Registry" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "39", - "metadata": {}, - "outputs": [], - "source": [ - "push_result = None\n", - "if running_as_container:\n", - " # stdlib\n", - " from time import sleep\n", - "\n", - " local_registry_container.start()\n", - " sleep(5)\n", - "\n", - " push_result = domain_client.api.services.worker_image.push(workerimage.id)\n", - " assert isinstance(push_result, sy.SyftSuccess), str(push_result)" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "id": "40", - "metadata": {}, - "outputs": [], - "source": [ - "push_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "41", - "metadata": {}, - "outputs": [], - "source": [ - "if running_as_container:\n", - " # third party\n", - " import requests\n", - "\n", - " base_url = f\"http://{workerimage.image_identifier.registry_host}\"\n", - " expected_tag = workerimage.image_identifier.tag\n", - "\n", - " repos = requests.get(f\"{base_url}/v2/_catalog\").json()[\"repositories\"]\n", - " tags = requests.get(f\"{base_url}/v2/openmined/custom-worker/tags/list\").json()\n", - " tags = tags[\"tags\"]\n", - "\n", - " assert (\n", - " \"openmined/custom-worker\" in repos\n", - " ), f\"'openmined/custom-worker' not uploaded to local registry | {repos}\"\n", - " assert (\n", - " expected_tag in tags\n", - " ), f\"'openmined/custom-worker' with tag {expected_tag} not available | {tags}\"" - ] - }, - { - "cell_type": "markdown", - "id": "42", - "metadata": {}, - "source": [ - "#### Delete locally built image to force pull from local registry" - ] - }, - { - "cell_type": "markdown", - "id": "43", - "metadata": {}, - "source": [ - "This should make the subsequent `worker_pool.launch` pull from registry at 'localhost:5678`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "44", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "from time import sleep\n", - "\n", - "\n", - "def remove_local_image(tag):\n", - " client = docker.from_env()\n", - " try:\n", - " client.images.remove(tag)\n", - " except docker.errors.ImageNotFound:\n", - " pass\n", - "\n", - "\n", - "if running_as_container:\n", - " remove_local_image(workerimage.built_image_tag)\n", - " sleep(5)" - ] - }, - { - "cell_type": "markdown", - "id": "45", - "metadata": {}, - "source": [ - "#### Create Worker Pool From Image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "46", - "metadata": {}, - "outputs": [], - "source": [ - "worker_pool_name = \"my_first_worker_pool\"\n", - "worker_pool_res = domain_client.api.services.worker_pool.launch(\n", - " name=worker_pool_name,\n", - " image_uid=workerimage.id,\n", - " num_workers=3,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "47", - "metadata": {}, - "outputs": [], - "source": [ - "assert len(worker_pool_res) == 3" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "48", - "metadata": {}, - "outputs": [], - "source": [ - "for status in worker_pool_res:\n", - " assert status.error is None\n", - " if running_as_container:\n", - " assert status.worker.image.image_hash == get_image_hash(\n", - " workerimage.built_image_tag\n", - " ), \"Worker Pool Image image_hash does not match with built image hash\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "49", - "metadata": {}, - "outputs": [], - "source": [ - "worker_pool_list = domain_client.worker_pools\n", - "worker_pool_list" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "50", - "metadata": {}, - "outputs": [], - "source": [ - "assert len(domain_client.worker_pools.get_all()) == 2\n", - "worker_pool = None\n", - "for pool in worker_pool_list:\n", - " if pool.name == worker_pool_name:\n", - " worker_pool = pool\n", - " break\n", - "assert worker_pool is not None\n", - "assert len(worker_pool.workers) == 3" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "51", - "metadata": {}, - "outputs": [], - "source": [ - "# We can 
filter pools based on the image id upon which the pools were built\n", - "domain_client.api.services.worker_pool.filter_by_image_id(image_uid=workerimage.id)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "52", - "metadata": {}, - "outputs": [], - "source": [ - "# Delete the second worker\n", - "second_worker = worker_pool.workers[1]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "53", - "metadata": {}, - "outputs": [], - "source": [ - "second_worker" - ] - }, - { - "cell_type": "markdown", - "id": "54", - "metadata": {}, - "source": [ - "#### Get Worker Logs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "55", - "metadata": {}, - "outputs": [], - "source": [ - "raw_worker_logs = domain_client.api.services.worker.logs(\n", - " uid=second_worker.id,\n", - " raw=True,\n", - ")\n", - "raw_worker_logs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "56", - "metadata": {}, - "outputs": [], - "source": [ - "assert isinstance(raw_worker_logs, bytes)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "57", - "metadata": {}, - "outputs": [], - "source": [ - "worker_logs = domain_client.api.services.worker.logs(\n", - " uid=second_worker.id,\n", - ")\n", - "worker_logs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "58", - "metadata": {}, - "outputs": [], - "source": [ - "assert isinstance(worker_logs, str)" - ] - }, - { - "cell_type": "markdown", - "id": "59", - "metadata": {}, - "source": [ - "#### Delete Worker from Pool" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "60", - "metadata": {}, - "outputs": [], - "source": [ - "worker_delete_res = domain_client.api.services.worker.delete(\n", - " uid=second_worker.id,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "61", - "metadata": {}, - "outputs": [], - "source": [ - "worker_delete_res" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "62", - "metadata": {}, - "outputs": [], - "source": [ - "assert isinstance(worker_delete_res, sy.SyftSuccess), str(worker_delete_res)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "63", - "metadata": {}, - "outputs": [], - "source": [ - "# Refetch the worker pool\n", - "# Ensure that the deleted worker's id is not present\n", - "for pool in domain_client.api.services.worker_pool.get_all():\n", - " if pool.name == worker_pool_name:\n", - " worker_pool = pool\n", - "assert len(worker_pool.workers) == 2\n", - "for worker in worker_pool.workers:\n", - " assert second_worker.id != worker.id" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "64", - "metadata": {}, - "outputs": [], - "source": [ - "worker_pool" - ] - }, - { - "cell_type": "markdown", - "id": "65", - "metadata": {}, - "source": [ - "### Syft function" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "66", - "metadata": {}, - "outputs": [], - "source": [ - "data = np.array([1, 2, 3])\n", - "data_action_obj = sy.ActionObject.from_obj(data)\n", - "\n", - "data_pointer = domain_client.api.services.action.set(data_action_obj)\n", - "data_pointer" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "67", - "metadata": {}, - "outputs": [], - "source": [ - "@sy.syft_function(\n", - " input_policy=sy.ExactMatch(x=data_pointer),\n", - " output_policy=sy.SingleExecutionExactOutput(),\n", - " worker_pool_name=worker_pool_name,\n", - ")\n", - "def 
custom_worker_func(x):\n", - " return {\"y\": x + 1}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "68", - "metadata": {}, - "outputs": [], - "source": [ - "custom_worker_func" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "69", - "metadata": {}, - "outputs": [], - "source": [ - "assert custom_worker_func.worker_pool_name == worker_pool.name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "70", - "metadata": {}, - "outputs": [], - "source": [ - "request = domain_client.code.request_code_execution(custom_worker_func)\n", - "request" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "71", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.requests[-1].approve(approve_nested=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "72", - "metadata": {}, - "outputs": [], - "source": [ - "job = domain_client.code.custom_worker_func(x=data_pointer, blocking=False)\n", - "job" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "73", - "metadata": {}, - "outputs": [], - "source": [ - "worker_pool = domain_client.worker_pools[worker_pool_name]\n", - "worker_pool" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "74", - "metadata": {}, - "outputs": [], - "source": [ - "job.wait()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "75", - "metadata": {}, - "outputs": [], - "source": [ - "assert job.status.value == \"completed\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "76", - "metadata": {}, - "outputs": [], - "source": [ - "job = domain_client.jobs[-1]\n", - "job" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "77", - "metadata": {}, - "outputs": [], - "source": [ - "job.job_worker_id" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "78", - "metadata": {}, - "outputs": [], - "source": [ - "# Disabling it due to Race Condition Error\n", - "# assert job.job_worker_id is not None" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "79", - "metadata": {}, - "outputs": [], - "source": [ - "# Sleeping so that consumer state is updated\n", - "time.sleep(5)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "80", - "metadata": {}, - "outputs": [], - "source": [ - "# Once the work is done by the worker, its state is returned to idle again.\n", - "consuming_worker_is_now_idle = False\n", - "for worker in domain_client.worker_pools[worker_pool_name].workers:\n", - " if worker.id == job.job_worker_id:\n", - " consuming_worker_is_now_idle = worker.consumer_state.value.lower() == \"idle\"\n", - "\n", - "assert consuming_worker_is_now_idle is True" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "81", - "metadata": {}, - "outputs": [], - "source": [ - "# Validate the result received from the syft function\n", - "result = job.wait().get()\n", - "result_matches = result[\"y\"] == data + 1\n", - "assert result_matches.all()" - ] - }, - { - "cell_type": "markdown", - "id": "82", - "metadata": {}, - "source": [ - "#### Worker Image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "83", - "metadata": {}, - "outputs": [], - "source": [ - "# delete the remaining workers\n", - "for worker in worker_pool.workers:\n", - " res = domain_client.api.services.worker.delete(\n", - " uid=worker.id,\n", - " )\n", - " assert isinstance(res, sy.SyftSuccess), str(res)\n", - "\n", - 
"# Adding some sleep to allow containers to be fully removed,\n", - "# before removing the image\n", - "time.sleep(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "84", - "metadata": {}, - "outputs": [], - "source": [ - "delete_res = domain_client.api.services.worker_image.remove(workerimage.id)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "85", - "metadata": {}, - "outputs": [], - "source": [ - "delete_res" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "86", - "metadata": {}, - "outputs": [], - "source": [ - "# Since the containers are delete, we should be able to delete the image\n", - "assert isinstance(delete_res, sy.SyftSuccess), str(delete_res)\n", - "delete_res" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "87", - "metadata": {}, - "outputs": [], - "source": [ - "if running_as_container:\n", - " local_registry_container.teardown()" - ] - }, - { - "cell_type": "markdown", - "id": "88", - "metadata": {}, - "source": [ - "#### Worker Pool and Image Creation Request/Approval" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "89", - "metadata": {}, - "outputs": [], - "source": [ - "custom_dockerfile_str_2 = f\"\"\"\n", - "FROM openmined/grid-backend:{syft_base_worker_tag}\n", - "\n", - "RUN pip install opendp\n", - "\"\"\".strip()\n", - "\n", - "docker_config_2 = sy.DockerWorkerConfig(dockerfile=custom_dockerfile_str_2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "90", - "metadata": {}, - "outputs": [], - "source": [ - "submit_result = domain_client.api.services.worker_image.submit_dockerfile(\n", - " docker_config=docker_config_2\n", - ")\n", - "submit_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "91", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.images" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "92", - "metadata": {}, - "outputs": [], - "source": [ - "# get the image that's not built\n", - "workerimage_2 = None\n", - "for im in domain_client.images:\n", - " if im.config == docker_config_2:\n", - " workerimage_2 = im" - ] - }, - { - "cell_type": "markdown", - "id": "93", - "metadata": {}, - "source": [ - "##### Build image first then create pool" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "94", - "metadata": {}, - "outputs": [], - "source": [ - "docker_tag_2 = \"openmined/custom-worker-opendp:latest\"\n", - "\n", - "docker_build_result = domain_client.api.services.worker_image.build(\n", - " image_uid=workerimage_2.id,\n", - " tag=docker_tag_2,\n", - " pull=pull,\n", - ")\n", - "docker_build_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "95", - "metadata": {}, - "outputs": [], - "source": [ - "pool_create_request = domain_client.api.services.worker_pool.pool_creation_request(\n", - " pool_name=\"first-opendp-pool\", num_workers=3, image_uid=workerimage_2.id\n", - ")\n", - "pool_create_request" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "96", - "metadata": {}, - "outputs": [], - "source": [ - "assert len(pool_create_request.changes) == 1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "97", - "metadata": {}, - "outputs": [], - "source": [ - "# get the pending request and approve it\n", - "req_result = pool_create_request.approve()\n", - "req_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "98", - "metadata": 
{}, - "outputs": [], - "source": [ - "assert isinstance(req_result, sy.SyftSuccess), str(req_result)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "99", - "metadata": {}, - "outputs": [], - "source": [ - "assert domain_client.worker_pools[\"first-opendp-pool\"]\n", - "assert len(domain_client.worker_pools[\"first-opendp-pool\"].worker_list) == 3" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "100", - "metadata": {}, - "outputs": [], - "source": [ - "assert len(domain_client.worker_pools.get_all()) == 3" - ] - }, - { - "cell_type": "markdown", - "id": "101", - "metadata": {}, - "source": [ - "##### Request to build the image and create the pool at the same time" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "102", - "metadata": {}, - "outputs": [], - "source": [ - "custom_dockerfile_str_3 = f\"\"\"\n", - "FROM openmined/grid-backend:{syft_base_worker_tag}\n", - "\n", - "RUN pip install recordlinkage\n", - "\"\"\".strip()\n", - "\n", - "docker_config_3 = sy.DockerWorkerConfig(dockerfile=custom_dockerfile_str_3)\n", - "\n", - "docker_tag_3 = \"openmined/custom-worker-recordlinkage:latest\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "103", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "from syft.custom_worker.builder import CustomWorkerBuilder\n", - "from syft.service.response import SyftError\n", - "from syft.service.response import SyftSuccess\n", - "\n", - "\n", - "def test_image_build(config: str, tag: str, pull: bool, **kwargs):\n", - " builder = CustomWorkerBuilder()\n", - " try:\n", - " result = builder.build_image(\n", - " config=config, tag=tag, pull=pull, rm=True, forcerm=True, **kwargs\n", - " )\n", - " return SyftSuccess(message=result.logs)\n", - " except Exception as e:\n", - " return SyftError(message=f\"Failed to build image !! 
Error: {str(e)}.\")\n", - "\n", - "\n", - "test_build_res = test_image_build(config=docker_config_3, tag=docker_tag_3, pull=pull)\n", - "assert isinstance(test_build_res, sy.SyftSuccess), str(test_build_res)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "104", - "metadata": {}, - "outputs": [], - "source": [ - "pool_image_create_request = (\n", - " domain_client.api.services.worker_pool.create_image_and_pool_request(\n", - " pool_name=\"recordlinkage-pool\",\n", - " num_workers=2,\n", - " tag=docker_tag_3,\n", - " config=docker_config_3,\n", - " reason=\"I want to do some more cool data science with PySyft and OpenDP\",\n", - " pull_image=pull,\n", - " )\n", - ")\n", - "pool_image_create_request" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "105", - "metadata": {}, - "outputs": [], - "source": [ - "assert len(pool_image_create_request.changes) == 2\n", - "assert pool_image_create_request.changes[0].config == docker_config_3\n", - "assert pool_image_create_request.changes[1].num_workers == 2\n", - "assert pool_image_create_request.changes[1].pool_name == \"recordlinkage-pool\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "106", - "metadata": {}, - "outputs": [], - "source": [ - "# get the pending request and approve it\n", - "req_result = pool_image_create_request.approve()\n", - "req_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "107", - "metadata": {}, - "outputs": [], - "source": [ - "assert isinstance(req_result, sy.SyftSuccess), str(req_result)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "108", - "metadata": {}, - "outputs": [], - "source": [ - "# Get updated request object and status\n", - "for req in domain_client.requests:\n", - " if req.id == pool_image_create_request.id:\n", - " pool_image_create_request = req\n", - "\n", - "assert pool_image_create_request.status.value == 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "109", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.images" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "110", - "metadata": {}, - "outputs": [], - "source": [ - "image_exists = False\n", - "for im in domain_client.images.get_all():\n", - " if im.image_identifier and im.image_identifier.repo_with_tag == docker_tag_3:\n", - " image_exists = True\n", - "assert image_exists" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "111", - "metadata": {}, - "outputs": [], - "source": [ - "assert domain_client.worker_pools[\"recordlinkage-pool\"]\n", - "assert len(domain_client.worker_pools[\"recordlinkage-pool\"].worker_list) == 2" - ] - }, - { - "cell_type": "markdown", - "id": "112", - "metadata": {}, - "source": [ - "#### Clean up workers" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "113", - "metadata": {}, - "outputs": [], - "source": [ - "# delete the remaining workers\n", - "for worker_pool in domain_client.worker_pools:\n", - " for worker in worker_pool.workers:\n", - " res = domain_client.api.services.worker.delete(uid=worker.id, force=True)\n", - " print(res)\n", - " assert isinstance(res, sy.SyftSuccess), str(res)\n", - "\n", - "# Adding some sleep to allow containers to be fully removed,\n", - "# before removing the image\n", - "time.sleep(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "114", - "metadata": {}, - "outputs": [], - "source": [ - "for worker_pool in 
domain_client.worker_pools:\n", - " assert len(worker_pool.worker_list) == 0" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "115", - "metadata": {}, - "outputs": [], - "source": [ - "domain.land()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "116", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/api/0.8/11-container-images-k8s.ipynb b/notebooks/api/0.8/11-container-images-k8s.ipynb index c9663acd3ad..7cca83448f4 100644 --- a/notebooks/api/0.8/11-container-images-k8s.ipynb +++ b/notebooks/api/0.8/11-container-images-k8s.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'" ] }, @@ -22,6 +22,7 @@ "import os\n", "\n", "# third party\n", + "import kr8s\n", "import numpy as np\n", "import requests\n", "\n", @@ -45,23 +46,55 @@ "metadata": {}, "outputs": [], "source": [ - "os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"k8s\"\n", + "def get_kr8s_client():\n", + " return kr8s.api(namespace=\"syft\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "def get_statefulset_by_pool_name(pool_name):\n", + " kr8s_client = get_kr8s_client()\n", + " pool_list = kr8s_client.get(\n", + " \"statefulsets\", label_selector={\"app.kubernetes.io/component\": pool_name}\n", + " )\n", + " if len(pool_list) == 0:\n", + " return None\n", + " return pool_list[0]\n", + "\n", + "\n", + "def is_subset_dict(subset, superset):\n", + " return all(item in superset.items() for item in subset.items())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", "os.environ[\"DEV_MODE\"] = \"True\"\n", "\n", "# Uncomment this to add custom values\n", - "# os.environ[\"NODE_URL\"] = \"http://localhost\"\n", - "# os.environ[\"NODE_PORT\"] = \"8080\"" + "# os.environ[\"SERVER_URL\"] = \"http://localhost\"\n", + "# os.environ[\"SERVER_PORT\"] = \"8080\"" ] }, { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "5", "metadata": {}, "outputs": [], "source": [ - "domain = sy.orchestra.launch(\n", - " name=\"test-domain-1\",\n", + "datasite = sy.orchestra.launch(\n", + " name=\"test-datasite-1\",\n", " dev_mode=True,\n", ")" ] @@ -69,17 +102,17 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "6", "metadata": {}, "outputs": [], "source": [ - "domain_client = domain.login(email=\"info@openmined.org\", password=\"changethis\")\n", - "domain_client" + "datasite_client = datasite.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "datasite_client" ] }, { "cell_type": "markdown", - "id": "5", + "id": "7", "metadata": {}, "source": [ "### Scaling Default Worker Pool" @@ -87,7 +120,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "8", "metadata": {}, "source": [ "We should see a default worker pool" @@ -96,16 +129,16 @@ { "cell_type": "code", 
"execution_count": null, - "id": "7", + "id": "9", "metadata": {}, "outputs": [], "source": [ - "domain_client.worker_pools" + "datasite_client.worker_pools" ] }, { "cell_type": "markdown", - "id": "8", + "id": "10", "metadata": {}, "source": [ "Scale up to 3 workers" @@ -114,25 +147,21 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "11", "metadata": {}, "outputs": [], "source": [ - "result = domain_client.api.services.worker_pool.scale(\n", - " number=3, pool_name=\"default-pool\"\n", - ")\n", - "assert not isinstance(result, sy.SyftError), str(result)\n", - "result" + "datasite_client.api.services.worker_pool.scale(number=3, pool_name=\"default-pool\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "12", "metadata": {}, "outputs": [], "source": [ - "result = domain_client.api.services.worker_pool.get_by_name(pool_name=\"default-pool\")\n", + "result = datasite_client.api.services.worker_pool.get_by_name(pool_name=\"default-pool\")\n", "assert len(result.workers) == 3, str(result.to_dict())\n", "result" ] @@ -140,7 +169,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -153,7 +182,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "14", "metadata": {}, "source": [ "Scale down to 1 worker" @@ -162,25 +191,21 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "15", "metadata": {}, "outputs": [], "source": [ - "default_pool_scale_res = domain_client.api.services.worker_pool.scale(\n", - " number=1, pool_name=\"default-pool\"\n", - ")\n", - "assert not isinstance(default_pool_scale_res, sy.SyftError), str(default_pool_scale_res)\n", - "default_pool_scale_res" + "datasite_client.api.services.worker_pool.scale(number=1, pool_name=\"default-pool\")" ] }, { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "16", "metadata": {}, "outputs": [], "source": [ - "result = domain_client.api.services.worker_pool.get_by_name(pool_name=\"default-pool\")\n", + "result = datasite_client.api.services.worker_pool.get_by_name(pool_name=\"default-pool\")\n", "assert len(result.workers) == 1, str(result.to_dict())\n", "result" ] @@ -188,11 +213,11 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "17", "metadata": {}, "outputs": [], "source": [ - "default_worker_pool = domain_client.api.services.worker_pool.get_by_name(\n", + "default_worker_pool = datasite_client.api.services.worker_pool.get_by_name(\n", " pool_name=\"default-pool\"\n", ")\n", "default_worker_pool" @@ -200,7 +225,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "18", "metadata": {}, "source": [ "#### Submit Dockerfile" @@ -209,16 +234,18 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "19", "metadata": {}, "outputs": [], "source": [ + "# syft absolute\n", + "from syft.util.util import get_latest_tag\n", + "\n", "registry = os.getenv(\"SYFT_BASE_IMAGE_REGISTRY\", \"docker.io\")\n", - "repo = \"openmined/grid-backend\"\n", + "repo = \"openmined/syft-backend\"\n", "\n", "if \"k3d\" in registry:\n", - " res = requests.get(url=f\"http://{registry}/v2/{repo}/tags/list\")\n", - " tag = res.json()[\"tags\"][0]\n", + " tag = get_latest_tag(registry, repo)\n", "else:\n", " tag = sy.__version__" ] @@ -226,14 +253,14 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "20", "metadata": {}, "outputs": [], "source": [ "custom_dockerfile_str = f\"\"\"\n", "FROM {registry}/{repo}:{tag}\n", "\n", - "RUN pip 
install pydicom\n", + "RUN uv pip install pydicom\n", "\n", "\"\"\".strip()" ] @@ -241,7 +268,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -251,7 +278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -261,12 +288,12 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "23", "metadata": {}, "outputs": [], "source": [ - "submit_result = domain_client.api.services.worker_image.submit_dockerfile(\n", - " docker_config=docker_config\n", + "submit_result = datasite_client.api.services.worker_image.submit(\n", + " worker_config=docker_config\n", ")\n", "submit_result" ] @@ -274,7 +301,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -284,29 +311,28 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "25", "metadata": {}, "outputs": [], "source": [ - "dockerfile_list = domain_client.images.get_all()\n", + "dockerfile_list = datasite_client.images.get_all()\n", "dockerfile_list" ] }, { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "26", "metadata": {}, "outputs": [], "source": [ - "assert not isinstance(dockerfile_list, sy.SyftError), str(dockerfile_list)\n", "assert len(dockerfile_list) == 2" ] }, { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -325,7 +351,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "28", "metadata": {}, "source": [ "#### Add External Registry in Syft" @@ -334,7 +360,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -350,50 +376,38 @@ { "cell_type": "code", "execution_count": null, - "id": "28", - "metadata": {}, - "outputs": [], - "source": [ - "registry_add_result = domain_client.api.services.image_registry.add(external_registry)\n", - "registry_add_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29", + "id": "30", "metadata": {}, "outputs": [], "source": [ - "assert isinstance(registry_add_result, sy.SyftSuccess), str(registry_add_result)" + "datasite_client.api.services.image_registry.add(external_registry)" ] }, { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "31", "metadata": {}, "outputs": [], "source": [ - "image_registry_list = domain_client.api.services.image_registry.get_all()\n", + "image_registry_list = datasite_client.api.services.image_registry.get_all()\n", "image_registry_list" ] }, { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "32", "metadata": {}, "outputs": [], "source": [ - "assert not isinstance(image_registry_list, sy.SyftError), str(image_registry_list)\n", "assert len(image_registry_list) == 1" ] }, { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -404,7 +418,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "34", "metadata": {}, "outputs": [], "source": [ @@ -414,7 +428,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -423,7 +437,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "36", "metadata": {}, "source": [ "#### Build Image" @@ -432,29 +446,18 @@ { "cell_type": "code", "execution_count": null, - "id": "36", + "id": "37", "metadata": {}, 
"outputs": [], "source": [ "docker_tag = \"openmined/custom-worker:0.7.8\"\n", "\n", "\n", - "docker_build_result = domain_client.api.services.worker_image.build(\n", + "datasite_client.api.services.worker_image.build(\n", " image_uid=workerimage.id,\n", " tag=docker_tag,\n", " registry_uid=registry_uid,\n", - ")\n", - "docker_build_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37", - "metadata": {}, - "outputs": [], - "source": [ - "assert not isinstance(docker_build_result, sy.SyftError), str(docker_build_result)" + ")" ] }, { @@ -464,7 +467,7 @@ "metadata": {}, "outputs": [], "source": [ - "image_list = domain_client.images.get_all()\n", + "image_list = datasite_client.images.get_all()\n", "image_list" ] }, @@ -490,8 +493,7 @@ "assert workerimage is not None, str([image.__dict__ for image in image_list])\n", "assert workerimage.is_built is not None, str(workerimage)\n", "assert workerimage.built_at is not None, str(workerimage)\n", - "assert workerimage.image_hash is not None, str(workerimage)\n", - "assert image_list[workerimage.built_image_tag] == workerimage" + "assert workerimage.image_hash is not None, str(workerimage)" ] }, { @@ -509,13 +511,11 @@ "metadata": {}, "outputs": [], "source": [ - "push_result = None\n", - "push_result = domain_client.api.services.worker_image.push(\n", + "datasite_client.api.services.worker_image.push(\n", " workerimage.id,\n", " username=external_registry_username,\n", " password=external_registry_password,\n", - ")\n", - "push_result" + ")" ] }, { @@ -524,16 +524,6 @@ "id": "43", "metadata": {}, "outputs": [], - "source": [ - "assert isinstance(push_result, sy.SyftSuccess), str(push_result)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "44", - "metadata": {}, - "outputs": [], "source": [ "base_url = f\"http://{workerimage.image_identifier.registry_host}\"\n", "expected_tag = workerimage.image_identifier.tag\n", @@ -552,7 +542,7 @@ }, { "cell_type": "markdown", - "id": "45", + "id": "44", "metadata": {}, "source": [ "#### Create Worker Pool From Image" @@ -561,35 +551,38 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "45", "metadata": {}, "outputs": [], "source": [ "worker_pool_name = \"custom-pool\"\n", - "worker_pool_res = domain_client.api.services.worker_pool.launch(\n", - " name=worker_pool_name,\n", + "custom_pool_pod_annotations = {\"test-custom-pool\": \"Test annotation for custom pool\"}\n", + "custom_pool_pod_labels = {\"test-custom-pool\": \"test_label_for_custom_pool\"}\n", + "worker_pool_res = datasite_client.api.services.worker_pool.launch(\n", + " pool_name=worker_pool_name,\n", " image_uid=workerimage.id,\n", " num_workers=3,\n", - " reg_username=external_registry_username,\n", - " reg_password=external_registry_password,\n", + " registry_username=external_registry_username,\n", + " registry_password=external_registry_password,\n", + " pod_annotations=custom_pool_pod_annotations,\n", + " pod_labels=custom_pool_pod_labels,\n", ")" ] }, { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "46", "metadata": {}, "outputs": [], "source": [ - "assert not isinstance(worker_pool_res, sy.SyftError), str(worker_pool_res)\n", "assert len(worker_pool_res) == 3" ] }, { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -600,14 +593,41 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "48", "metadata": {}, "outputs": [], "source": [ - "worker_pool_list = 
domain_client.worker_pools.get_all()\n", + "worker_pool_list = datasite_client.worker_pools.get_all()\n", "worker_pool_list" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "49", + "metadata": {}, + "outputs": [], + "source": [ + "# check Label and Annotations for custom pool\n", + "custom_pool_statefulset = get_statefulset_by_pool_name(worker_pool_name)\n", + "assert custom_pool_statefulset is not None, \"Custom pool statefulset not found\"\n", + "custom_pool_pod_metadata = custom_pool_statefulset.spec.template.metadata\n", + "\n", + "assert (\n", + " \"annotations\" in custom_pool_pod_metadata\n", + "), \"Annotations not found in custom pool pod metadata\"\n", + "assert (\n", + " \"labels\" in custom_pool_pod_metadata\n", + "), \"Labels not found in custom pool pod metadata\"\n", + "\n", + "assert is_subset_dict(\n", + " custom_pool_pod_annotations, custom_pool_pod_metadata.annotations\n", + "), \"Annotations do not match in Custom pool pod metadata\"\n", + "assert is_subset_dict(\n", + " custom_pool_pod_labels, custom_pool_pod_metadata.labels\n", + "), \"Labels do not match in Custom pool pod metadata\"" + ] + }, { "cell_type": "code", "execution_count": null, @@ -615,7 +635,6 @@ "metadata": {}, "outputs": [], "source": [ - "assert not isinstance(worker_pool_list, sy.SyftError), str(worker_pool_res)\n", "assert len(worker_pool_list) == 2" ] }, @@ -645,7 +664,7 @@ "outputs": [], "source": [ "# We can filter pools based on the image id upon which the pools were built\n", - "filtered_result = domain_client.api.services.worker_pool.filter_by_image_id(\n", + "filtered_result = datasite_client.api.services.worker_pool.filter_by_image_id(\n", " image_uid=workerimage.id\n", ")\n", "filtered_result" @@ -657,16 +676,6 @@ "id": "53", "metadata": {}, "outputs": [], - "source": [ - "assert not isinstance(filtered_result, sy.SyftError), str(filtered_result)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "54", - "metadata": {}, - "outputs": [], "source": [ "second_worker = worker_pool.workers[1]\n", "second_worker" @@ -674,7 +683,7 @@ }, { "cell_type": "markdown", - "id": "55", + "id": "54", "metadata": {}, "source": [ "#### Get Worker Logs" @@ -683,11 +692,11 @@ { "cell_type": "code", "execution_count": null, - "id": "56", + "id": "55", "metadata": {}, "outputs": [], "source": [ - "worker_logs = domain_client.api.services.worker.logs(\n", + "worker_logs = datasite_client.api.services.worker.logs(\n", " uid=second_worker.id,\n", ")\n", "worker_logs" @@ -696,7 +705,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57", + "id": "56", "metadata": {}, "outputs": [], "source": [ @@ -706,7 +715,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58", + "id": "57", "metadata": {}, "outputs": [], "source": [ @@ -715,7 +724,7 @@ }, { "cell_type": "markdown", - "id": "59", + "id": "58", "metadata": {}, "source": [ "### Syft function" @@ -724,21 +733,21 @@ { "cell_type": "code", "execution_count": null, - "id": "60", + "id": "59", "metadata": {}, "outputs": [], "source": [ "data = np.array([1, 2, 3])\n", "data_action_obj = sy.ActionObject.from_obj(data)\n", "\n", - "data_pointer = domain_client.api.services.action.set(data_action_obj)\n", + "data_pointer = data_action_obj.send(datasite_client)\n", "data_pointer" ] }, { "cell_type": "code", "execution_count": null, - "id": "61", + "id": "60", "metadata": {}, "outputs": [], "source": [ @@ -748,13 +757,17 @@ " worker_pool_name=worker_pool_name,\n", ")\n", "def custom_worker_func(x):\n", + " # third 
party\n", + " import pydicom\n", + "\n", + " print(pydicom.__version__)\n", " return {\"y\": x + 1}" ] }, { "cell_type": "code", "execution_count": null, - "id": "62", + "id": "61", "metadata": {}, "outputs": [], "source": [ @@ -764,7 +777,7 @@ { "cell_type": "code", "execution_count": null, - "id": "63", + "id": "62", "metadata": {}, "outputs": [], "source": [ @@ -774,50 +787,50 @@ { "cell_type": "code", "execution_count": null, - "id": "64", + "id": "63", "metadata": {}, "outputs": [], "source": [ - "request = domain_client.code.request_code_execution(custom_worker_func)\n", + "request = datasite_client.code.request_code_execution(custom_worker_func)\n", "request" ] }, { "cell_type": "code", "execution_count": null, - "id": "65", + "id": "64", "metadata": {}, "outputs": [], "source": [ - "domain_client.requests[-1].approve(approve_nested=True)" + "datasite_client.requests[-1].approve(approve_nested=True)" ] }, { "cell_type": "code", "execution_count": null, - "id": "66", + "id": "65", "metadata": {}, "outputs": [], "source": [ - "job = domain_client.code.custom_worker_func(x=data_pointer, blocking=False)\n", + "job = datasite_client.code.custom_worker_func(x=data_pointer, blocking=False)\n", "job" ] }, { "cell_type": "code", "execution_count": null, - "id": "67", + "id": "66", "metadata": {}, "outputs": [], "source": [ - "worker_pool = domain_client.worker_pools[worker_pool_name]\n", + "worker_pool = datasite_client.worker_pools[worker_pool_name]\n", "worker_pool" ] }, { "cell_type": "code", "execution_count": null, - "id": "68", + "id": "67", "metadata": {}, "outputs": [], "source": [ @@ -827,7 +840,7 @@ { "cell_type": "code", "execution_count": null, - "id": "69", + "id": "68", "metadata": {}, "outputs": [], "source": [ @@ -837,27 +850,17 @@ { "cell_type": "code", "execution_count": null, - "id": "70", - "metadata": {}, - "outputs": [], - "source": [ - "job_list = domain_client.jobs.get_by_user_code_id(job.user_code_id)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "71", + "id": "69", "metadata": {}, "outputs": [], "source": [ - "assert not isinstance(job_list, sy.SyftError), job_list" + "job_list = datasite_client.jobs.get_by_user_code_id(job.user_code_id)" ] }, { "cell_type": "code", "execution_count": null, - "id": "72", + "id": "70", "metadata": {}, "outputs": [], "source": [ @@ -868,7 +871,7 @@ { "cell_type": "code", "execution_count": null, - "id": "73", + "id": "71", "metadata": {}, "outputs": [], "source": [ @@ -881,31 +884,27 @@ { "cell_type": "code", "execution_count": null, - "id": "74", + "id": "72", "metadata": {}, "outputs": [], "source": [ "# Scale Down the workers\n", - "custom_pool_scale_res = domain_client.api.services.worker_pool.scale(\n", - " number=1, pool_name=worker_pool_name\n", - ")\n", - "assert not isinstance(custom_pool_scale_res, sy.SyftError), str(custom_pool_scale_res)\n", - "custom_pool_scale_res" + "datasite_client.api.services.worker_pool.scale(number=1, pool_name=worker_pool_name)" ] }, { "cell_type": "code", "execution_count": null, - "id": "75", + "id": "73", "metadata": {}, "outputs": [], "source": [ - "assert len(domain_client.worker_pools[worker_pool_name].worker_list) == 1" + "assert len(datasite_client.worker_pools[worker_pool_name].worker_list) == 1" ] }, { "cell_type": "markdown", - "id": "76", + "id": "74", "metadata": {}, "source": [ "#### Worker Pool and Image Creation Request/Approval" @@ -914,14 +913,14 @@ { "cell_type": "code", "execution_count": null, - "id": "77", + "id": "75", "metadata": {}, "outputs": [], 
"source": [ "dockerfile_opendp = f\"\"\"\n", "FROM {registry}/{repo}:{tag}\n", "\n", - "RUN pip install opendp\n", + "RUN uv pip install opendp\n", "\"\"\".strip()\n", "\n", "docker_config_opendp = sy.DockerWorkerConfig(dockerfile=dockerfile_opendp)" @@ -930,13 +929,13 @@ { "cell_type": "code", "execution_count": null, - "id": "78", + "id": "76", "metadata": {}, "outputs": [], "source": [ "submit_result = None\n", - "submit_result = domain_client.api.services.worker_image.submit_dockerfile(\n", - " docker_config=docker_config_opendp\n", + "submit_result = datasite_client.api.services.worker_image.submit(\n", + " worker_config=docker_config_opendp\n", ")\n", "submit_result" ] @@ -944,7 +943,7 @@ { "cell_type": "code", "execution_count": null, - "id": "79", + "id": "77", "metadata": {}, "outputs": [], "source": [ @@ -954,18 +953,17 @@ { "cell_type": "code", "execution_count": null, - "id": "80", + "id": "78", "metadata": {}, "outputs": [], "source": [ - "_images = domain_client.images\n", - "assert not isinstance(_images, sy.SyftError), str(_images)" + "_images = datasite_client.images" ] }, { "cell_type": "code", "execution_count": null, - "id": "81", + "id": "79", "metadata": {}, "outputs": [], "source": [ @@ -978,7 +976,7 @@ }, { "cell_type": "markdown", - "id": "82", + "id": "80", "metadata": {}, "source": [ "##### Build image first then create pool" @@ -987,13 +985,13 @@ { "cell_type": "code", "execution_count": null, - "id": "83", + "id": "81", "metadata": {}, "outputs": [], "source": [ "docker_tag_opendp = \"openmined/custom-worker-opendp:latest\"\n", "\n", - "docker_build_result = domain_client.api.services.worker_image.build(\n", + "docker_build_result = datasite_client.api.services.worker_image.build(\n", " image_uid=workerimage_opendp.id,\n", " tag=docker_tag_opendp,\n", " registry_uid=registry_uid,\n", @@ -1005,7 +1003,7 @@ { "cell_type": "code", "execution_count": null, - "id": "84", + "id": "82", "metadata": {}, "outputs": [], "source": [ @@ -1015,18 +1013,17 @@ { "cell_type": "code", "execution_count": null, - "id": "85", + "id": "83", "metadata": {}, "outputs": [], "source": [ - "_images = domain_client.images\n", - "assert not isinstance(_images, sy.SyftError), str(_images)" + "_images = datasite_client.images" ] }, { "cell_type": "code", "execution_count": null, - "id": "86", + "id": "84", "metadata": {}, "outputs": [], "source": [ @@ -1039,40 +1036,41 @@ "assert workerimage_opendp.built_at is not None, str(workerimage_opendp.__dict__)\n", "assert workerimage_opendp.image_hash is not None, str(workerimage_opendp.__dict__)\n", "\n", - "assert _images[workerimage_opendp.built_image_tag] == workerimage_opendp, str(\n", - " workerimage_opendp\n", - ")\n", - "\n", "workerimage_opendp" ] }, { "cell_type": "code", "execution_count": null, - "id": "87", + "id": "85", "metadata": {}, "outputs": [], "source": [ "# Push OpenDP Image to registry\n", - "push_result = None\n", - "push_result = domain_client.api.services.worker_image.push(\n", + "\n", + "datasite_client.api.services.worker_image.push(\n", " workerimage_opendp.id,\n", " username=external_registry_username,\n", " password=external_registry_password,\n", - ")\n", - "assert isinstance(push_result, sy.SyftSuccess), str(push_result)" + ")" ] }, { "cell_type": "code", "execution_count": null, - "id": "88", + "id": "86", "metadata": {}, "outputs": [], "source": [ "pool_name_opendp = \"opendp-pool\"\n", - "pool_create_request = domain_client.api.services.worker_pool.pool_creation_request(\n", - " pool_name=pool_name_opendp, 
num_workers=3, image_uid=workerimage_opendp.id\n", + "opendp_pod_annotations = {\"test-opendp-pool\": \"Test annotation for opendp pool\"}\n", + "opendp_pod_labels = {\"test-opendp-pool\": \"test_label_for_opendp_pool\"}\n", + "pool_create_request = datasite_client.api.services.worker_pool.pool_creation_request(\n", + " pool_name=pool_name_opendp,\n", + " num_workers=3,\n", + " image_uid=workerimage_opendp.id,\n", + " pod_annotations=opendp_pod_annotations,\n", + " pod_labels=opendp_pod_labels,\n", ")\n", "pool_create_request" ] @@ -1080,24 +1078,24 @@ { "cell_type": "code", "execution_count": null, - "id": "89", + "id": "87", "metadata": {}, "outputs": [], "source": [ - "assert not isinstance(pool_create_request, sy.SyftError), str(pool_create_request)\n", "assert len(pool_create_request.changes) == 1" ] }, { "cell_type": "code", "execution_count": null, - "id": "90", + "id": "88", "metadata": {}, "outputs": [], "source": [ "# get the pending request and approve it\n", "req_result = pool_create_request.approve(\n", - " reg_username=external_registry_username, reg_password=external_registry_password\n", + " registry_username=external_registry_username,\n", + " registry_password=external_registry_password,\n", ")\n", "req_result" ] @@ -1105,7 +1103,7 @@ { "cell_type": "code", "execution_count": null, - "id": "91", + "id": "89", "metadata": {}, "outputs": [], "source": [ @@ -1115,11 +1113,11 @@ { "cell_type": "code", "execution_count": null, - "id": "92", + "id": "90", "metadata": {}, "outputs": [], "source": [ - "pool_opendp = domain_client.worker_pools[pool_name_opendp]\n", + "pool_opendp = datasite_client.worker_pools[pool_name_opendp]\n", "assert not isinstance(pool_opendp, sy.SyftError), str(pool_opendp)\n", "assert len(pool_opendp.worker_list) == 3" ] @@ -1127,44 +1125,67 @@ { "cell_type": "code", "execution_count": null, - "id": "93", + "id": "91", "metadata": {}, "outputs": [], "source": [ - "worker_pool_list = domain_client.worker_pools.get_all()\n", - "\n", - "assert not isinstance(worker_pool_list, sy.SyftError), str(worker_pool_list)\n", + "worker_pool_list = datasite_client.worker_pools.get_all()\n", "assert len(worker_pool_list) == 3" ] }, { "cell_type": "code", "execution_count": null, - "id": "94", + "id": "92", + "metadata": {}, + "outputs": [], + "source": [ + "# check annotations and labels for open dp pool\n", + "opendp_pool_statefulset = get_statefulset_by_pool_name(pool_name_opendp)\n", + "assert opendp_pool_statefulset is not None, \"Open DP pool statefulset not found\"\n", + "opendp_pool_pod_metadata = opendp_pool_statefulset.spec.template.metadata\n", + "\n", + "\n", + "assert (\n", + " \"annotations\" in opendp_pool_pod_metadata\n", + "), \"Annotations not found in opendp pool pod metadata\"\n", + "assert (\n", + " \"labels\" in opendp_pool_pod_metadata\n", + "), \"Labels not found in opendp pool pod metadata\"\n", + "\n", + "\n", + "assert is_subset_dict(\n", + " opendp_pod_annotations, opendp_pool_pod_metadata.annotations\n", + "), \"Annotations do not match in opendp pool pod metadata\"\n", + "assert is_subset_dict(\n", + " opendp_pod_labels, opendp_pool_pod_metadata.labels\n", + "), \"Labels do not match in opendp pool pod metadata\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "93", "metadata": {}, "outputs": [], "source": [ "# Scale Down the workers\n", - "opendp_pool_scale_res = domain_client.api.services.worker_pool.scale(\n", - " number=1, pool_name=pool_name_opendp\n", - ")\n", - "assert not isinstance(opendp_pool_scale_res, 
sy.SyftError), str(opendp_pool_scale_res)\n", - "opendp_pool_scale_res" + "datasite_client.api.services.worker_pool.scale(number=1, pool_name=pool_name_opendp)" ] }, { "cell_type": "code", "execution_count": null, - "id": "95", + "id": "94", "metadata": {}, "outputs": [], "source": [ - "assert len(domain_client.worker_pools[pool_name_opendp].worker_list) == 1" + "assert len(datasite_client.worker_pools[pool_name_opendp].worker_list) == 1" ] }, { "cell_type": "markdown", - "id": "96", + "id": "95", "metadata": {}, "source": [ "Request to build the image and create the pool at the same time" @@ -1173,14 +1194,14 @@ { "cell_type": "code", "execution_count": null, - "id": "97", + "id": "96", "metadata": {}, "outputs": [], "source": [ "dockerfile_recordlinkage = f\"\"\"\n", "FROM {registry}/{repo}:{tag}\n", "\n", - "RUN pip install recordlinkage\n", + "RUN uv pip install recordlinkage\n", "\"\"\".strip()\n", "\n", "docker_config_recordlinkage = sy.DockerWorkerConfig(dockerfile=dockerfile_recordlinkage)\n", @@ -1191,21 +1212,26 @@ { "cell_type": "code", "execution_count": null, - "id": "98", + "id": "97", "metadata": {}, "outputs": [], "source": [ "pool_name_recordlinkage = \"recordlinkage-pool\"\n", - "\n", - "pool_image_create_request = (\n", - " domain_client.api.services.worker_pool.create_image_and_pool_request(\n", - " pool_name=pool_name_recordlinkage,\n", - " num_workers=2,\n", - " tag=docker_tag_recordlinkage,\n", - " config=docker_config_recordlinkage,\n", - " registry_uid=registry_uid,\n", - " reason=\"I want to do some more cool data science with PySyft and OpenDP\",\n", - " )\n", + "recordlinkage_pod_annotations = {\n", + " \"test-recordlinkage-pool\": \"Test annotation for recordlinkage pool\"\n", + "}\n", + "recordlinkage_pod_labels = {\n", + " \"test-recordlinkage-pool\": \"test_label_for_recordlinkage_pool\"\n", + "}\n", + "pool_image_create_request = datasite_client.api.services.worker_pool.create_image_and_pool_request(\n", + " pool_name=pool_name_recordlinkage,\n", + " num_workers=2,\n", + " tag=docker_tag_recordlinkage,\n", + " config=docker_config_recordlinkage,\n", + " registry_uid=registry_uid,\n", + " reason=\"I want to do some more cool data science with PySyft and RecordLinkage!\",\n", + " pod_annotations=recordlinkage_pod_annotations,\n", + " pod_labels=recordlinkage_pod_labels,\n", ")\n", "pool_image_create_request" ] @@ -1213,19 +1239,7 @@ { "cell_type": "code", "execution_count": null, - "id": "99", - "metadata": {}, - "outputs": [], - "source": [ - "assert not isinstance(pool_image_create_request, sy.SyftError), str(\n", - " pool_image_create_request\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "100", + "id": "98", "metadata": {}, "outputs": [], "source": [ @@ -1238,12 +1252,13 @@ { "cell_type": "code", "execution_count": null, - "id": "101", + "id": "99", "metadata": {}, "outputs": [], "source": [ "req_result = pool_image_create_request.approve(\n", - " reg_username=external_registry_username, reg_password=external_registry_password\n", + " registry_username=external_registry_username,\n", + " registry_password=external_registry_password,\n", ")\n", "req_result" ] @@ -1251,7 +1266,7 @@ { "cell_type": "code", "execution_count": null, - "id": "102", + "id": "100", "metadata": {}, "outputs": [], "source": [ @@ -1261,18 +1276,17 @@ { "cell_type": "code", "execution_count": null, - "id": "103", + "id": "101", "metadata": {}, "outputs": [], "source": [ - "_requests = domain_client.requests\n", - "assert not isinstance(_requests, 
sy.SyftError), str(_requests)" + "_requests = datasite_client.requests" ] }, { "cell_type": "code", "execution_count": null, - "id": "104", + "id": "102", "metadata": {}, "outputs": [], "source": [ @@ -1287,22 +1301,49 @@ { "cell_type": "code", "execution_count": null, - "id": "105", + "id": "103", + "metadata": {}, + "outputs": [], + "source": [ + "# check annotations and labels for recordlinkage pool\n", + "recordlinkage_pool_statefulset = get_statefulset_by_pool_name(pool_name_recordlinkage)\n", + "assert (\n", + " recordlinkage_pool_statefulset is not None\n", + "), \"RecordLinkage pool statefulset not found\"\n", + "recordlinkage_pool_pod_metadata = recordlinkage_pool_statefulset.spec.template.metadata\n", + "\n", + "\n", + "assert is_subset_dict(\n", + " recordlinkage_pod_annotations, recordlinkage_pool_pod_metadata.annotations\n", + "), \"Annotations not found in recordlinkage pool pod metadata\"\n", + "assert (\n", + " \"labels\" in recordlinkage_pool_pod_metadata\n", + "), \"Labels not found in recordlinkage pool pod metadata\"\n", + "\n", + "assert is_subset_dict(\n", + " recordlinkage_pod_labels, recordlinkage_pool_pod_metadata.labels\n", + "), \"Annotations do not match in recordlinkage pool pod metadata\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "104", "metadata": {}, "outputs": [], "source": [ - "domain_client.images" + "datasite_client.images" ] }, { "cell_type": "code", "execution_count": null, - "id": "106", + "id": "105", "metadata": {}, "outputs": [], "source": [ "image_exists = False\n", - "for im in domain_client.images.get_all():\n", + "for im in datasite_client.images.get_all():\n", " if (\n", " im.image_identifier\n", " and im.image_identifier.repo_with_tag == docker_tag_recordlinkage\n", @@ -1315,45 +1356,41 @@ { "cell_type": "code", "execution_count": null, - "id": "107", + "id": "106", "metadata": {}, "outputs": [], "source": [ - "assert domain_client.worker_pools[pool_name_recordlinkage]\n", - "assert len(domain_client.worker_pools[pool_name_recordlinkage].worker_list) == 2" + "assert datasite_client.worker_pools[pool_name_recordlinkage]\n", + "assert len(datasite_client.worker_pools[pool_name_recordlinkage].worker_list) == 2" ] }, { "cell_type": "code", "execution_count": null, - "id": "108", + "id": "107", "metadata": {}, "outputs": [], "source": [ "# Scale down the workers\n", - "recordlinkage_pool_scale_res = domain_client.api.services.worker_pool.scale(\n", + "datasite_client.api.services.worker_pool.scale(\n", " number=1, pool_name=pool_name_recordlinkage\n", - ")\n", - "assert not isinstance(recordlinkage_pool_scale_res, sy.SyftError), str(\n", - " recordlinkage_pool_scale_res\n", - ")\n", - "recordlinkage_pool_scale_res" + ")" ] }, { "cell_type": "code", "execution_count": null, - "id": "109", + "id": "108", "metadata": {}, "outputs": [], "source": [ - "assert len(domain_client.worker_pools[pool_name_recordlinkage].worker_list) == 1" + "assert len(datasite_client.worker_pools[pool_name_recordlinkage].worker_list) == 1" ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "syft-3.11", "language": "python", "name": "python3" }, diff --git a/notebooks/api/0.8/12-custom-api-endpoint.ipynb b/notebooks/api/0.8/12-custom-api-endpoint.ipynb new file mode 100644 index 00000000000..c58c78c1795 --- /dev/null +++ b/notebooks/api/0.8/12-custom-api-endpoint.ipynb @@ -0,0 +1,715 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom API Notebook" + ] + }, 
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Initialize the Server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "from typing import Any\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft import SyftError\n", + "from syft import SyftSuccess\n", + "\n", + "server = sy.orchestra.launch(\n", + " name=\"test-datasite-1\",\n", + " dev_mode=True,\n", + " create_producer=True,\n", + " n_consumers=3,\n", + " reset=True,\n", + ")\n", + "\n", + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "datasite_client.register(\n", + " email=\"user@openmined.org\",\n", + " password=\"verysecurepassword\",\n", + " password_verify=\"verysecurepassword\",\n", + " name=\"New User\",\n", + ")\n", + "datasite_guest = server.login(email=\"user@openmined.org\", password=\"verysecurepassword\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create a public custom API Endpoint by using the decorator\n", + "\n", + "This allows server admin to create a new public endpoint by using only the decorator." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@sy.api_endpoint(\n", + " path=\"first.query\",\n", + " settings={\"key\": \"value\"},\n", + ")\n", + "def public_endpoint_method(\n", + " context,\n", + " query: str,\n", + ") -> \"Any\":\n", + " return context.settings[\"key\"] == \"value\"\n", + "\n", + "\n", + "# Add it to the server.\n", + "response = datasite_client.api.services.api.add(endpoint=public_endpoint_method)\n", + "response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(response, SyftSuccess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client.api.services.api.api_endpoints()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(datasite_client.api.services.api.api_endpoints()) == 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Once api refresh is done, remove this cell\n", + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "datasite_guest = server.login(email=\"user@openmined.org\", password=\"verysecurepassword\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert datasite_client.api.services.first.query(query=\"SELECT *\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = datasite_guest.api.services.first.query(query=\"SELECT *\")\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create public/private Custom API Endpoint using TwinAPIEndpoint\n", + "\n", + "This allows the admin to create a public/private endpoint interface where the users can iteract with." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@sy.api_endpoint_method(settings={\"Hello\": \"Public\"})\n", + "def public_function(\n", + " context,\n", + ") -> str:\n", + " return \"Public Function Execution\"\n", + "\n", + "\n", + "@sy.api_endpoint_method(settings={\"Hello\": \"Private\"})\n", + "def private_function(\n", + " context,\n", + ") -> str:\n", + " return \"Private Function Execution\"\n", + "\n", + "\n", + "new_endpoint = sy.TwinAPIEndpoint(\n", + " path=\"third.query\",\n", + " mock_function=public_function,\n", + " private_function=private_function,\n", + " description=\"Lore ipsulum ...\",\n", + ")\n", + "\n", + "# # Add it to the server.\n", + "response = datasite_client.api.services.api.add(endpoint=new_endpoint)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client.api.services.api.api_endpoints()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(response, SyftSuccess)\n", + "assert len(datasite_client.api.services.api.api_endpoints()) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Once api refresh is done, remove this cell\n", + "datasite_client.refresh()\n", + "datasite_guest.refresh()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client.api.services.third.query()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert datasite_client.api.services.third.query() == \"Private Function Execution\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert datasite_guest.api.services.third.query() == \"Public Function Execution\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "datasite_guest.api.services.third.query()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@sy.syft_function_single_use(\n", + " endpoint=datasite_guest.api.services.third.query,\n", + ")\n", + "def job_function(endpoint):\n", + " return endpoint()\n", + "\n", + "\n", + "# Create a new project\n", + "new_project = sy.Project(\n", + " name=\"My Cool UN Project\",\n", + " description=\"Hi, I want to calculate the trade volume in million's with my cool code.\",\n", + " members=[datasite_guest],\n", + ")\n", + "\n", + "result = new_project.create_code_request(job_function, datasite_guest)\n", + "assert isinstance(result, SyftSuccess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res = None\n", + "for r in datasite_client.requests.get_all():\n", + " if r.requesting_user_email == \"user@openmined.org\":\n", + " res = r.approve()\n", + "\n", + "assert res is not None, res\n", + "res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = datasite_guest.code.job_function(\n", + " endpoint=datasite_client.api.services.third.query\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert not isinstance(result, SyftError), result" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert result.get() == \"Private Function Execution\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException, show=True):\n", + " datasite_guest.api.services.third.query.private()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = datasite_client.api.services.api.delete(endpoint_path=\"third.query\")\n", + "assert isinstance(result, SyftSuccess)\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(datasite_client.api.services.api.api_endpoints()) == 1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Updating Endpoints" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First we'll create a new endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@sy.api_endpoint(\n", + " path=\"test.update\",\n", + " settings={\"key\": \"value\"},\n", + ")\n", + "def new_public_function(\n", + " context,\n", + " query: str,\n", + ") -> Any:\n", + " return context.settings[\"key\"] == \"value\"\n", + "\n", + "\n", + "# Add it to the server.\n", + "response = datasite_client.api.services.api.add(endpoint=new_public_function)\n", + "\n", + "assert isinstance(response, SyftSuccess), response\n", + "response" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Update the public function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@sy.api_endpoint_method(settings={\"Hello\": \"Public\"})\n", + "def updated_public_function(\n", + " context,\n", + ") -> str:\n", + " return \"Updated Public Function Execution\"\n", + "\n", + "\n", + "response = datasite_client.api.services.api.update(\n", + " endpoint_path=\"test.update\", mock_function=updated_public_function\n", + ")\n", + "assert isinstance(response, SyftSuccess), response\n", + "response" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Update the private function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@sy.api_endpoint_method(settings={\"Hello\": \"Private\"})\n", + "def updated_private_function(\n", + " context,\n", + ") -> str:\n", + " return \"Updated Private Function Execution\"\n", + "\n", + "\n", + "response = datasite_client.api.services.api.update(\n", + " endpoint_path=\"test.update\", private_function=updated_private_function\n", + ")\n", + "assert isinstance(response, SyftSuccess), response\n", + "response" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Update both functions with a pair that has a new signature" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@sy.api_endpoint_method(settings={\"Hello\": \"Public\"})\n", + "def new_sig_public_function(context, new_parameter) -> str:\n", + " return \"Updated Public Function Execution\"\n", + "\n", + "\n", + "@sy.api_endpoint_method(settings={\"Hello\": \"Private\"})\n", + "def new_sig_private_function(context, new_parameter) -> str:\n", + " return \"Updated Private Function Execution\"\n", + "\n", + "\n", + "response = 
datasite_client.api.services.api.update(\n", + " endpoint_path=\"test.update\",\n", + " mock_function=new_sig_public_function,\n", + " private_function=new_sig_private_function,\n", + ")\n", + "assert isinstance(response, SyftSuccess), response\n", + "response" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Invalid update attempts\n", + "- Both functions empty\n", + "- Signature mismatch\n", + "- Non existing endpoint" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Both functions are empty" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException, show=True):\n", + " response = datasite_client.api.services.api.update(endpoint_path=\"test.update\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Signature mismatch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@sy.api_endpoint_method(settings={\"Hello\": \"Public\"})\n", + "def bad_public_function(context, foo) -> str:\n", + " return \"Updated Public Function Execution\"\n", + "\n", + "\n", + "with sy.raises(sy.SyftException, show=True):\n", + " response = datasite_client.api.services.api.update(\n", + " endpoint_path=\"test.update\", mock_function=bad_public_function\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Non Existing endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException, show=True):\n", + " response = datasite_client.api.services.api.update(\n", + " endpoint_path=\"nonexistent\", mock_function=bad_public_function\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Syft Function/API Logs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@sy.api_endpoint_method()\n", + "def public_log_function(\n", + " context,\n", + ") -> str:\n", + " print(\"Logging Public Function Call\")\n", + " return \"Public Function Execution\"\n", + "\n", + "\n", + "@sy.api_endpoint_method()\n", + "def private_log_function(\n", + " context,\n", + ") -> str:\n", + " print(\"Logging Private Function Call\")\n", + " return \"Private Function Execution\"\n", + "\n", + "\n", + "new_endpoint = sy.TwinAPIEndpoint(\n", + " path=\"test.log\",\n", + " mock_function=public_log_function,\n", + " private_function=private_log_function,\n", + " description=\"Lore ipsulum ...\",\n", + ")\n", + "\n", + "# # Add it to the server.\n", + "response = datasite_client.api.services.api.add(endpoint=new_endpoint)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@sy.syft_function_single_use(endpoint=datasite_client.api.services.test.log)\n", + "def test_log_call(endpoint): # noqa: F811\n", + " print(\"In Syft Function Context\")\n", + " endpoint()\n", + " print(\"After API endpoint call\")\n", + " return True\n", + "\n", + "\n", + "@sy.syft_function_single_use(endpoint=datasite_client.api.services.test.log)\n", + "def test_log_call_mock(endpoint): # noqa: F811\n", + " print(\"In Syft Function Context\")\n", + " endpoint.mock()\n", + " print(\"After API endpoint call\")\n", + " return True\n", + "\n", + "\n", + "@sy.syft_function_single_use(endpoint=datasite_client.api.services.test.log)\n", + "def 
test_log_call_private(endpoint): # noqa: F811\n", + " print(\"In Syft Function Context\")\n", + " endpoint.private()\n", + " print(\"After API endpoint call\")\n", + " return True\n", + "\n", + "\n", + "# Create a project\n", + "project = sy.Project(\n", + " name=\"My Cool Project\",\n", + " description=\"\"\"Hi, I want to calculate the mean of your private data,\\\n", + " pretty please!\"\"\",\n", + " members=[datasite_client],\n", + ")\n", + "project.create_code_request(test_log_call, datasite_client)\n", + "project.create_code_request(test_log_call_mock, datasite_client)\n", + "project.create_code_request(test_log_call_private, datasite_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "log_call_job = datasite_client.code.test_log_call(\n", + " endpoint=datasite_client.api.services.test.log, blocking=False\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "log_call_mock_job = datasite_client.code.test_log_call_mock(\n", + " endpoint=datasite_client.api.services.test.log, blocking=False\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "log_call_private_job = datasite_client.code.test_log_call_private(\n", + " endpoint=datasite_client.api.services.test.log, blocking=False\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import time\n", + "\n", + "# syft absolute\n", + "from syft.service.job.job_stash import JobStatus\n", + "\n", + "# Iterate over the Jobs waiting them to finish their pipelines.\n", + "job_pool = [\n", + " (log_call_job, \"Logging Private Function Call\"),\n", + " (log_call_mock_job, \"Logging Public Function Call\"),\n", + " (log_call_private_job, \"Logging Private Function Call\"),\n", + "]\n", + "for job, expected_log in job_pool:\n", + " updated_job = datasite_client.api.services.job.get(job.id)\n", + " while updated_job.status in {JobStatus.CREATED, JobStatus.PROCESSING}:\n", + " updated_job = datasite_client.api.services.job.get(job.id)\n", + " time.sleep(1)\n", + "\n", + " assert (\n", + " updated_job.status == JobStatus.COMPLETED\n", + " ), f\"Job {updated_job.id} exited with status {updated_job.status} and result {updated_job.result}\"\n", + " if updated_job.status == JobStatus.COMPLETED:\n", + " print(f\"Job {updated_job.id} completed\")\n", + " # If they're completed. 
Then, check if the TwinAPI print appears in the job logs.\n", + " assert expected_log in datasite_client.api.services.job.get(job.id).logs(\n", + " _print=False\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/api/0.8/13-forgot-user-password.ipynb b/notebooks/api/0.8/13-forgot-user-password.ipynb new file mode 100644 index 00000000000..6df96130590 --- /dev/null +++ b/notebooks/api/0.8/13-forgot-user-password.ipynb @@ -0,0 +1,178 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Forgot User Password" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "## Initialize the server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft import SyftError\n", + "from syft import SyftSuccess\n", + "\n", + "server = sy.orchestra.launch(\n", + " name=\"test-datasite-1\",\n", + " dev_mode=True,\n", + " create_producer=True,\n", + " n_consumers=3,\n", + " reset=True,\n", + " port=8081,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "## Register a new user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "datasite_client.register(\n", + " email=\"new_syft_user@openmined.org\",\n", + " password=\"verysecurepassword\",\n", + " password_verify=\"verysecurepassword\",\n", + " name=\"New User\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "### Ask for a password reset - Notifier disabled Workflow" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "### Call for users.forgot_password" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "guest_client = server.login_as_guest()\n", + "res = guest_client.forgot_password(email=\"new_syft_user@openmined.org\")\n", + "\n", + "if not isinstance(res, SyftSuccess):\n", + " raise Exception(f\"Res isn't SyftSuccess, its {res}\")" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "### Admin generates a temp token" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "temp_token = datasite_client.users.request_password_reset(\n", + " datasite_client.notifications[-1].linked_obj.resolve.id\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "### User use this token to reset password" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "res = guest_client.reset_password(token=temp_token, 
new_password=\"Password123\")\n", + "\n", + "if not isinstance(res, SyftSuccess):\n", + " raise Exception(f\"Res isn't SyftSuccess, its {res}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "new_user_session = server.login(\n", + " email=\"new_syft_user@openmined.org\", password=\"Password123\"\n", + ")\n", + "\n", + "if isinstance(new_user_session, SyftError):\n", + " raise Exception(f\"Res isn't SyftSuccess, its {new_user_session}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/api/0.8/14-container-images.ipynb b/notebooks/api/0.8/14-container-images.ipynb new file mode 100644 index 00000000000..d7a5e661d51 --- /dev/null +++ b/notebooks/api/0.8/14-container-images.ipynb @@ -0,0 +1,1486 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "import time\n", + "\n", + "# third party\n", + "import docker\n", + "import numpy as np\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)\n", + "\n", + "# syft absolute\n", + "from syft.service.worker.image_registry import SyftImageRegistry\n", + "from syft.service.worker.worker_image import SyftWorkerImage\n", + "\n", + "# Local registry to test external registry\n", + "\n", + "\n", + "class LocalRegistryContainer:\n", + " def __init__(self):\n", + " self.name = \"local_registry\"\n", + " self.client = docker.from_env()\n", + "\n", + " def start(self, host_port=5678):\n", + " existing = self.get()\n", + " if existing:\n", + " return existing\n", + "\n", + " result = self.client.containers.run(\n", + " \"registry:2\",\n", + " name=self.name,\n", + " detach=True,\n", + " ports={\"5000/tcp\": host_port},\n", + " labels={\"orgs.openmined.syft\": \"local-registry\"},\n", + " )\n", + "\n", + " return result\n", + "\n", + " def teardown(self):\n", + " existing = self.get()\n", + " if existing:\n", + " existing.stop()\n", + " existing.remove()\n", + "\n", + " def get(self):\n", + " try:\n", + " result = self.client.containers.get(self.name)\n", + " if result.status == \"running\":\n", + " return result\n", + " except docker.errors.NotFound:\n", + " return None\n", + "\n", + "\n", + "local_registry_container = LocalRegistryContainer()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "# Uncomment this to run the whole docker based custom workers\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"container_stack\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "\n", + "\n", + "# Disable inmemory worker for container stack\n", + "running_as_container = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\") in (\n", + " \"container_stack\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "datasite = sy.orchestra.launch(\n", + " name=\"test-datasite-1\",\n", + " dev_mode=True,\n", + " create_producer=True,\n", + " reset=True,\n", + " port=8081,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client = datasite.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "We should see a default worker pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client.worker_pools" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "syft_base_worker_tag = (\n", + " \"local-dev\"\n", + " if (bool(os.environ[\"DEV_MODE\"]) and running_as_container)\n", + " else sy.__version__\n", + ")\n", + "syft_base_worker_tag = \"0.9.3-beta.4\"" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "#### Submit Dockerfile" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "opendp_dockerfile_str = f\"\"\"\n", + "FROM openmined/syft-backend:{syft_base_worker_tag}\n", + "\n", + "RUN uv pip install opendp\n", + "\n", + "\"\"\".strip()\n", + "\n", + "docker_tag = \"openmined/custom-worker-opendp:1.0.0\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "docker_config = sy.DockerWorkerConfig(dockerfile=opendp_dockerfile_str)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "# test image build locally\n", + "test_build_res = docker_config.test_image_build(tag=docker_tag)\n", + "test_build_res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(test_build_res, sy.SyftSuccess), str(test_build_res)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "assert docker_config.dockerfile == opendp_dockerfile_str" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "submit_result = datasite_client.api.services.worker_image.submit(\n", + " worker_config=docker_config\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "submit_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(submit_result, sy.SyftSuccess), str(submit_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "dockerfile_list = datasite_client.images.get_all()\n", + "dockerfile_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "assert len(datasite_client.images.get_all()) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "workerimage: SyftWorkerImage = None\n", + "for image in dockerfile_list:\n", + " if 
not image.is_prebuilt and image.config.dockerfile == opendp_dockerfile_str:\n", + " workerimage = image\n", + " break\n", + "\n", + "assert isinstance(workerimage, SyftWorkerImage), str(workerimage)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "workerimage" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": {}, + "source": [ + "#### Add Local Registry in Syft" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "registry_add_result = datasite_client.api.services.image_registry.add(\"localhost:5678\")\n", + "registry_add_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(registry_add_result, sy.SyftSuccess), str(registry_add_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "images = datasite_client.api.services.image_registry.get_all()\n", + "assert len(images) == 1\n", + "images" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "local_registry = images[0]\n", + "local_registry" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(local_registry, SyftImageRegistry), str(local_registry)" + ] + }, + { + "cell_type": "markdown", + "id": "27", + "metadata": {}, + "source": [ + "#### Build Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "pull = False if syft_base_worker_tag == \"local-dev\" else True\n", + "pull" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "registry_uid = local_registry.id if running_as_container else local_registry.id\n", + "\n", + "docker_build_result = datasite_client.api.services.worker_image.build(\n", + " image_uid=workerimage.id,\n", + " tag=docker_tag,\n", + " registry_uid=registry_uid,\n", + " pull_image=pull,\n", + ")\n", + "docker_build_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30", + "metadata": {}, + "outputs": [], + "source": [ + "workerimage.config.dockerfile" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(docker_build_result, sy.SyftSuccess), str(docker_build_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32", + "metadata": {}, + "outputs": [], + "source": [ + "image_list = datasite_client.images.get_all()\n", + "image_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33", + "metadata": {}, + "outputs": [], + "source": [ + "for image in image_list:\n", + " if image.id == workerimage.id:\n", + " workerimage = (\n", + " image # we can also index with string using the repo_with_tag format\n", + " )\n", + "\n", + "if running_as_container:\n", + " image_list[workerimage.built_image_tag]\n", + " assert image_list[workerimage.built_image_tag] == workerimage\n", + "\n", + "workerimage" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34", + "metadata": {}, + "outputs": [], + "source": [ + "def get_image_hash(tag) -> str:\n", + " client = 
docker.from_env()\n", + " try:\n", + " image = client.images.get(tag)\n", + " return image.id\n", + " except docker.errors.ImageNotFound:\n", + " return None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35", + "metadata": {}, + "outputs": [], + "source": [ + "if running_as_container:\n", + " assert workerimage.image_hash == get_image_hash(\n", + " workerimage.built_image_tag\n", + " ), \"Worker Image image_hash does not match with built image hash\"" + ] + }, + { + "cell_type": "markdown", + "id": "36", + "metadata": {}, + "source": [ + "#### Push Image to Local Registry" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "37", + "metadata": {}, + "outputs": [], + "source": [ + "push_result = None\n", + "if running_as_container:\n", + " # stdlib\n", + " from time import sleep\n", + "\n", + " local_registry_container.start()\n", + " sleep(5)\n", + "\n", + " push_result = datasite_client.api.services.worker_image.push(workerimage.id)\n", + " assert isinstance(push_result, sy.SyftSuccess), str(push_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38", + "metadata": {}, + "outputs": [], + "source": [ + "push_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39", + "metadata": {}, + "outputs": [], + "source": [ + "if running_as_container:\n", + " # third party\n", + " import requests\n", + "\n", + " base_url = f\"http://{workerimage.image_identifier.registry_host}\"\n", + " expected_tag = workerimage.image_identifier.tag\n", + " search_tag = \"openmined/custom-worker-opendp\"\n", + "\n", + " repos = requests.get(f\"{base_url}/v2/_catalog\").json()[\"repositories\"]\n", + " tags = requests.get(f\"{base_url}/v2/{search_tag}/tags/list\").json()\n", + " tags = tags[\"tags\"]\n", + "\n", + " print(tags)\n", + "\n", + " assert (\n", + " search_tag in repos\n", + " ), f\"'{search_tag}' not uploaded to local registry | {repos}\"\n", + " assert (\n", + " expected_tag in tags\n", + " ), f\"'{search_tag}' with tag {expected_tag} not available | {tags}\"" + ] + }, + { + "cell_type": "markdown", + "id": "40", + "metadata": {}, + "source": [ + "#### Delete locally built image to force pull from local registry" + ] + }, + { + "cell_type": "markdown", + "id": "41", + "metadata": {}, + "source": [ + "This should make the subsequent `worker_pool.launch` pull from registry at 'localhost:5678`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "from time import sleep\n", + "\n", + "\n", + "def remove_local_image(tag):\n", + " client = docker.from_env()\n", + " try:\n", + " client.images.remove(tag)\n", + " except docker.errors.ImageNotFound:\n", + " pass\n", + "\n", + "\n", + "if running_as_container:\n", + " remove_local_image(workerimage.built_image_tag)" + ] + }, + { + "cell_type": "markdown", + "id": "43", + "metadata": {}, + "source": [ + "#### Create Worker Pool From Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44", + "metadata": {}, + "outputs": [], + "source": [ + "worker_pool_name = \"opendp-pool\"\n", + "worker_pool_res = datasite_client.api.services.worker_pool.launch(\n", + " pool_name=worker_pool_name,\n", + " image_uid=workerimage.id,\n", + " num_workers=2,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45", + "metadata": {}, + "outputs": [], + "source": [ + "assert len(worker_pool_res) == 2" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "46", + "metadata": {}, + "outputs": [], + "source": [ + "for status in worker_pool_res:\n", + " assert status.error is None\n", + " if running_as_container:\n", + " assert status.worker.image.image_hash == get_image_hash(\n", + " workerimage.built_image_tag\n", + " ), \"Worker Pool Image image_hash does not match with built image hash\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "47", + "metadata": {}, + "outputs": [], + "source": [ + "worker_pool_list = datasite_client.worker_pools\n", + "worker_pool_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "48", + "metadata": {}, + "outputs": [], + "source": [ + "assert len(datasite_client.worker_pools.get_all()) == 2\n", + "worker_pool = None\n", + "for pool in worker_pool_list:\n", + " if pool.name == worker_pool_name:\n", + " worker_pool = pool\n", + " break\n", + "assert worker_pool is not None\n", + "assert len(worker_pool.workers) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "49", + "metadata": {}, + "outputs": [], + "source": [ + "# We can filter pools based on the image id upon which the pools were built\n", + "datasite_client.api.services.worker_pool.filter_by_image_id(image_uid=workerimage.id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50", + "metadata": {}, + "outputs": [], + "source": [ + "# Delete the second worker\n", + "second_worker = worker_pool.workers[1]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51", + "metadata": {}, + "outputs": [], + "source": [ + "second_worker" + ] + }, + { + "cell_type": "markdown", + "id": "52", + "metadata": {}, + "source": [ + "#### Get Worker Logs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "53", + "metadata": {}, + "outputs": [], + "source": [ + "raw_worker_logs = datasite_client.api.services.worker.logs(\n", + " uid=second_worker.id,\n", + " raw=True,\n", + ")\n", + "raw_worker_logs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "54", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(raw_worker_logs, bytes)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "55", + "metadata": {}, + "outputs": [], + "source": [ + "worker_logs = datasite_client.api.services.worker.logs(\n", + " uid=second_worker.id,\n", + ")\n", + "worker_logs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "56", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(worker_logs, str)" + ] + }, + { + "cell_type": "markdown", + "id": "57", + "metadata": {}, + "source": [ + "#### Delete Worker from Pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "58", + "metadata": {}, + "outputs": [], + "source": [ + "worker_delete_res = datasite_client.api.services.worker.delete(\n", + " uid=second_worker.id, force=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59", + "metadata": {}, + "outputs": [], + "source": [ + "worker_delete_res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(worker_delete_res, sy.SyftSuccess), str(worker_delete_res)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61", + "metadata": {}, + "outputs": [], + "source": [ + "# Refetch the worker pool\n", + "# Ensure that the deleted worker's id is not present\n", + "for pool in 
datasite_client.api.services.worker_pool.get_all():\n", + " if pool.name == worker_pool_name:\n", + " worker_pool = pool\n", + "assert len(worker_pool.workers) == 1\n", + "for worker in worker_pool.workers:\n", + " assert second_worker.id != worker.id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62", + "metadata": {}, + "outputs": [], + "source": [ + "worker_pool" + ] + }, + { + "cell_type": "markdown", + "id": "63", + "metadata": {}, + "source": [ + "### Syft function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "64", + "metadata": {}, + "outputs": [], + "source": [ + "data = np.array([1, 2, 3])\n", + "data_action_obj = sy.ActionObject.from_obj(data)\n", + "\n", + "data_pointer = data_action_obj.send(datasite_client)\n", + "data_pointer" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "65", + "metadata": {}, + "outputs": [], + "source": [ + "@sy.syft_function(\n", + " input_policy=sy.ExactMatch(x=data_pointer),\n", + " output_policy=sy.SingleExecutionExactOutput(),\n", + " worker_pool_name=worker_pool_name,\n", + ")\n", + "def custom_worker_func(x):\n", + " # third party\n", + "\n", + " return {\"y\": x + 1}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66", + "metadata": {}, + "outputs": [], + "source": [ + "custom_worker_func" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "67", + "metadata": {}, + "outputs": [], + "source": [ + "assert custom_worker_func.worker_pool_name == worker_pool.name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "68", + "metadata": {}, + "outputs": [], + "source": [ + "request = datasite_client.code.request_code_execution(custom_worker_func)\n", + "request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "69", + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client.requests[-1].approve(approve_nested=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "70", + "metadata": {}, + "outputs": [], + "source": [ + "job = datasite_client.code.custom_worker_func(x=data_pointer, blocking=False)\n", + "job" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "71", + "metadata": {}, + "outputs": [], + "source": [ + "worker_pool = datasite_client.worker_pools[worker_pool_name]\n", + "worker_pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "72", + "metadata": {}, + "outputs": [], + "source": [ + "job.wait()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73", + "metadata": {}, + "outputs": [], + "source": [ + "assert job.status.value == \"completed\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "74", + "metadata": {}, + "outputs": [], + "source": [ + "job = datasite_client.jobs[-1]\n", + "job" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75", + "metadata": {}, + "outputs": [], + "source": [ + "job.job_worker_id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "76", + "metadata": {}, + "outputs": [], + "source": [ + "# Disabling it due to Race Condition Error\n", + "# assert job.job_worker_id is not None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "77", + "metadata": {}, + "outputs": [], + "source": [ + "# Sleeping so that consumer state is updated\n", + "time.sleep(5)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "78", + "metadata": {}, + "outputs": [], + "source": 
[ + "# Once the work is done by the worker, its state is returned to idle again.\n", + "consuming_worker_is_now_idle = False\n", + "for worker in datasite_client.worker_pools[worker_pool_name].workers:\n", + " if worker.id == job.job_worker_id:\n", + " consuming_worker_is_now_idle = worker.consumer_state.value.lower() == \"idle\"\n", + "\n", + "assert consuming_worker_is_now_idle is True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "79", + "metadata": {}, + "outputs": [], + "source": [ + "# Validate the result received from the syft function\n", + "result = job.wait().get()\n", + "result_matches = result[\"y\"] == data + 1\n", + "assert result_matches.all()" + ] + }, + { + "cell_type": "markdown", + "id": "80", + "metadata": {}, + "source": [ + "#### Worker Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "81", + "metadata": {}, + "outputs": [], + "source": [ + "# delete the remaining workers\n", + "for worker in worker_pool.workers:\n", + " res = datasite_client.api.services.worker.delete(\n", + " uid=worker.id,\n", + " )\n", + " assert isinstance(res, sy.SyftSuccess), str(res)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "82", + "metadata": {}, + "outputs": [], + "source": [ + "delete_res = datasite_client.api.services.worker_image.remove(workerimage.id)\n", + "delete_res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83", + "metadata": {}, + "outputs": [], + "source": [ + "# Since the containers are delete, we should be able to delete the image\n", + "assert isinstance(delete_res, sy.SyftSuccess), str(delete_res)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "84", + "metadata": {}, + "outputs": [], + "source": [ + "if running_as_container:\n", + " local_registry_container.teardown()" + ] + }, + { + "cell_type": "markdown", + "id": "85", + "metadata": {}, + "source": [ + "#### Worker Pool and Image Creation Request/Approval" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "86", + "metadata": {}, + "outputs": [], + "source": [ + "custom_dockerfile_str_2 = f\"\"\"\n", + "FROM openmined/syft-backend:{syft_base_worker_tag}\n", + "\n", + "RUN uv pip install opendp\n", + "\"\"\".strip()\n", + "\n", + "docker_config_2 = sy.DockerWorkerConfig(dockerfile=custom_dockerfile_str_2)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "87", + "metadata": {}, + "outputs": [], + "source": [ + "submit_result = datasite_client.api.services.worker_image.submit(\n", + " worker_config=docker_config_2\n", + ")\n", + "submit_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "88", + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client.images" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "89", + "metadata": {}, + "outputs": [], + "source": [ + "# get the image that's not built\n", + "workerimage_2 = None\n", + "for im in datasite_client.images:\n", + " if im.config == docker_config_2:\n", + " workerimage_2 = im" + ] + }, + { + "cell_type": "markdown", + "id": "90", + "metadata": {}, + "source": [ + "##### Build image first then create pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "91", + "metadata": {}, + "outputs": [], + "source": [ + "docker_tag_2 = \"openmined/custom-worker-opendp:latest\"\n", + "\n", + "docker_build_result = datasite_client.api.services.worker_image.build(\n", + " image_uid=workerimage_2.id,\n", + " 
tag=docker_tag_2,\n", + " pull_image=pull,\n", + ")\n", + "docker_build_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "92", + "metadata": {}, + "outputs": [], + "source": [ + "opendp_pool_name = \"second-opendp-pool\"\n", + "pool_create_request = datasite_client.api.services.worker_pool.pool_creation_request(\n", + " pool_name=opendp_pool_name, num_workers=2, image_uid=workerimage_2.id\n", + ")\n", + "pool_create_request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "93", + "metadata": {}, + "outputs": [], + "source": [ + "assert len(pool_create_request.changes) == 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "94", + "metadata": {}, + "outputs": [], + "source": [ + "# get the pending request and approve it\n", + "req_result = pool_create_request.approve()\n", + "req_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "95", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(req_result, sy.SyftSuccess), str(req_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "96", + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client.worker_pools[opendp_pool_name]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "97", + "metadata": {}, + "outputs": [], + "source": [ + "assert datasite_client.worker_pools[opendp_pool_name]\n", + "assert len(datasite_client.worker_pools[opendp_pool_name].workers) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "98", + "metadata": {}, + "outputs": [], + "source": [ + "# default, opendp-pool, second-opendp-pool\n", + "assert len(datasite_client.worker_pools.get_all()) == 3" + ] + }, + { + "cell_type": "markdown", + "id": "99", + "metadata": {}, + "source": [ + "Remove all `second-opendp-pool` workers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "100", + "metadata": {}, + "outputs": [], + "source": [ + "for worker in datasite_client.worker_pools[\"second-opendp-pool\"].workers:\n", + " res = datasite_client.api.services.worker.delete(uid=worker.id, force=True)\n", + " assert isinstance(res, sy.SyftSuccess), str(res)\n", + "\n", + "assert len(datasite_client.worker_pools[\"second-opendp-pool\"].workers) == 0" + ] + }, + { + "cell_type": "markdown", + "id": "101", + "metadata": {}, + "source": [ + "Remove the `second-opendp-pool`'s worker image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "102", + "metadata": {}, + "outputs": [], + "source": [ + "delete_res = datasite_client.api.services.worker_image.remove(workerimage_2.id)\n", + "delete_res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "103", + "metadata": {}, + "outputs": [], + "source": [ + "# Since the containers are delete, we should be able to delete the image\n", + "assert isinstance(delete_res, sy.SyftSuccess), str(delete_res)" + ] + }, + { + "cell_type": "markdown", + "id": "104", + "metadata": {}, + "source": [ + "##### Request to build the image and create the pool at the same time" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "105", + "metadata": {}, + "outputs": [], + "source": [ + "custom_dockerfile_str_3 = f\"\"\"\n", + "FROM openmined/syft-backend:{syft_base_worker_tag}\n", + "\n", + "RUN uv pip install recordlinkage\n", + "\"\"\".strip()\n", + "\n", + "docker_config_3 = sy.DockerWorkerConfig(dockerfile=custom_dockerfile_str_3)\n", + "\n", + "docker_tag_3 = 
\"openmined/custom-worker-recordlinkage:latest\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "106", + "metadata": {}, + "outputs": [], + "source": [ + "recordlinkage_pool_name = \"recordlinkage-pool\"\n", + "pool_image_create_request = (\n", + " datasite_client.api.services.worker_pool.create_image_and_pool_request(\n", + " pool_name=recordlinkage_pool_name,\n", + " num_workers=2,\n", + " tag=docker_tag_3,\n", + " config=docker_config_3,\n", + " reason=\"I want to do some more cool data science with PySyft and recordlinkage\",\n", + " pull_image=pull,\n", + " )\n", + ")\n", + "pool_image_create_request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "107", + "metadata": {}, + "outputs": [], + "source": [ + "assert len(pool_image_create_request.changes) == 2\n", + "assert pool_image_create_request.changes[0].config == docker_config_3\n", + "assert pool_image_create_request.changes[1].num_workers == 2\n", + "assert pool_image_create_request.changes[1].pool_name == recordlinkage_pool_name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "108", + "metadata": {}, + "outputs": [], + "source": [ + "# get the pending request and approve it\n", + "req_result = pool_image_create_request.approve()\n", + "req_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "109", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(req_result, sy.SyftSuccess), str(req_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "110", + "metadata": {}, + "outputs": [], + "source": [ + "# Get updated request object and status\n", + "for req in datasite_client.requests:\n", + " if req.id == pool_image_create_request.id:\n", + " pool_image_create_request = req\n", + "\n", + "assert pool_image_create_request.status.value == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "111", + "metadata": {}, + "outputs": [], + "source": [ + "image_exists = False\n", + "recordlinkage_image = None\n", + "\n", + "for im in datasite_client.images.get_all():\n", + " if im.image_identifier and im.image_identifier.repo_with_tag == docker_tag_3:\n", + " image_exists = True\n", + " recordlinkage_image = im\n", + "assert image_exists\n", + "assert recordlinkage_image\n", + "recordlinkage_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "112", + "metadata": {}, + "outputs": [], + "source": [ + "recordlinkage_pool = datasite_client.worker_pools[recordlinkage_pool_name]\n", + "\n", + "assert recordlinkage_pool\n", + "assert len(recordlinkage_pool.workers) == 2" + ] + }, + { + "cell_type": "markdown", + "id": "113", + "metadata": {}, + "source": [ + "Cleanup `recordlinkage-pool` workers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "114", + "metadata": {}, + "outputs": [], + "source": [ + "for worker in recordlinkage_pool.workers:\n", + " res = datasite_client.api.services.worker.delete(uid=worker.id, force=True)\n", + " assert isinstance(res, sy.SyftSuccess), str(res)" + ] + }, + { + "cell_type": "markdown", + "id": "115", + "metadata": {}, + "source": [ + "Cleanup `recordlinkage-pool`'s image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "116", + "metadata": {}, + "outputs": [], + "source": [ + "delete_res = datasite_client.api.services.worker_image.remove(recordlinkage_image.id)\n", + "delete_res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "117", + "metadata": {}, + 
"outputs": [], + "source": [ + "datasite.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "118", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/helm/docker-helm-syft.ipynb b/notebooks/helm/docker-helm-syft.ipynb deleted file mode 100644 index 2bcb8bc5215..00000000000 --- a/notebooks/helm/docker-helm-syft.ipynb +++ /dev/null @@ -1,2929 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "3333ab14", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "from collections import defaultdict\n", - "import os\n", - "\n", - "# syft absolute\n", - "import syft as sy\n", - "from syft import ActionObject" - ] - }, - { - "cell_type": "markdown", - "id": "732a9097", - "metadata": {}, - "source": [ - "Start this using" - ] - }, - { - "cell_type": "markdown", - "id": "e0d0a11e", - "metadata": {}, - "source": [ - "```\n", - "hagrid launch domain to docker:8080 --dev --verbose\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "3fc952d5", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Logged into as \n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "client = sy.login(\n", - " url=\"http://localhost:8080\", email=\"info@openmined.org\", password=\"changethis\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e3a3c58d", - "metadata": {}, - "source": [ - "# Mount storage container with Helm azure container" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "8b93a69d", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "SyftSuccess: Mounting Azure Successful!" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.api.services.blob_storage.mount_azure(\n", - " account_name=\"helmprojectstorage\",\n", - " container_name=\"helm\",\n", - " account_key=os.environ[\"HELM_STORAGE_ACCOUNT_KEY\"],\n", - " bucket_name=\"helmazurebucket\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "fd89b14e", - "metadata": {}, - "outputs": [], - "source": [ - "blob_files = client.api.services.blob_storage.get_files_from_bucket(\n", - " bucket_name=\"helmazurebucket\"\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "93f1f918", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - "
\n", - "
\n", - "
\n", - "

BlobFile List

\n", - "
\n", - "\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - "\n", - "

0

\n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - " \n" - ], - "text/plain": [ - "[syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile,\n", - " syft.types.blob_storage.BlobFile]" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "blob_files" - ] - }, - { - "cell_type": "markdown", - "id": "e12255c2", - "metadata": {}, - "source": [ - "# Start workers" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "d84a897e", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "SyftSuccess: 3 workers added" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.worker.start_workers(n=3)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "4cea5229", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - "
\n", - "
\n", - "
\n", - "

SyftWorker List

\n", - "
\n", - "\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - "\n", - "

0

\n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - " \n" - ], - "text/plain": [ - "[syft.service.worker.worker_pool.SyftWorker,\n", - " syft.service.worker.worker_pool.SyftWorker,\n", - " syft.service.worker.worker_pool.SyftWorker]" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.worker.list()" - ] - }, - { - "cell_type": "markdown", - "id": "2703f5a0", - "metadata": {}, - "source": [ - "# Create Dataset" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "c7d90857", - "metadata": {}, - "outputs": [], - "source": [ - "train_file = (\n", - " sy.ActionObject.from_path(\"short_input.jsonl\").send(client).syft_action_data\n", - ")\n", - "scenario_file = scenario_obj = (\n", - " sy.ActionObject.from_path(path=\"scenario_data.jsonl\").send(client).syft_action_data\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "740b3cf1", - "metadata": {}, - "outputs": [], - "source": [ - "# train_file = [f for f in blob_files if \"train-00\" in f.file_name][0]\n", - "# scenario_file = [f for f in blob_files if \"scenario_data\" in f.file_name][0]" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "f0da9c8a", - "metadata": {}, - "outputs": [], - "source": [ - "helm_dataset = sy.Dataset(\n", - " name=\"Helm Dataset\",\n", - " asset_list=[\n", - " sy.Asset(\n", - " name=\"helm train data\",\n", - " data=ActionObject.from_obj([train_file]),\n", - " mock=sy.ActionObject.empty(),\n", - " ),\n", - " sy.Asset(\n", - " name=\"helm test data\",\n", - " data=ActionObject.from_obj([scenario_file]),\n", - " mock=sy.ActionObject.empty(),\n", - " ),\n", - " ],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "4400f06f", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "SyftWarning: You're approving a request on high side domain which may host datasets with private information." - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Would you like to proceed? [y/n]: y\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\r\n", - " 0%| | 0/2 [00:00SyftSuccess: Dataset uploaded to 'wizardly_vapnik'. To see the datasets uploaded by a client on this node, use command `[your_client].datasets`
" - ], - "text/plain": [ - "SyftSuccess: Dataset uploaded to 'wizardly_vapnik'. To see the datasets uploaded by a client on this node, use command `[your_client].datasets`" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.upload_dataset(helm_dataset)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "842988d1", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "SyftWarning: You're performing an operation on high side domain, which could host datasets with private information." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "helm_ds = client.datasets[\"Helm Dataset\"]\n", - "helm_train_files = helm_ds.assets[\"helm train data\"]\n", - "helm_test_files = helm_ds.assets[\"helm test data\"]" - ] - }, - { - "cell_type": "markdown", - "id": "bd60b056", - "metadata": {}, - "source": [ - "# Syft functions" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "aa3a5c31", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "SyftSuccess: Syft function 'compute_document_data_overlap' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@sy.syft_function()\n", - "def compute_document_data_overlap(domain, scenario_file, input_files, n):\n", - " print(\"starting overlap computation\")\n", - "\n", - " # stdlib\n", - " from collections import defaultdict\n", - " import json\n", - " import re\n", - " from string import punctuation\n", - " import time\n", - "\n", - " # third party\n", - " from nltk import ngrams\n", - "\n", - " r = re.compile(rf\"[\\s{re.escape(punctuation)}]+\")\n", - "\n", - " def create_ngram_index(light_scenarios, n_values, stats_key_counts):\n", - " ngram_index = {n: {} for n in n_values}\n", - " for i, scenario in enumerate(light_scenarios):\n", - " if i % 20 == 0:\n", - " print(f\"n_gram indexing progress: {(i/len(light_scenarios))*100:.2f}%\")\n", - " for n in n_values:\n", - " stats_key = scenario[\"scenario_key\"] + \"_\" + str(n)\n", - " stats_key_counts[stats_key] = len(scenario[\"instances\"])\n", - " for instance in scenario[\"instances\"]:\n", - " id = instance[\"id\"]\n", - " input_tokens = r.split(instance[\"input\"].lower())\n", - " for input_ngram in ngrams(input_tokens, n):\n", - " if input_ngram not in ngram_index[n]:\n", - " ngram_index[n][input_ngram] = set()\n", - " ngram_index[n][input_ngram].add(\n", - " stats_key + \"+\" + id + \"+\" + \"input\"\n", - " )\n", - "\n", - " # compute reference ngrams\n", - " for reference in instance[\"references\"]:\n", - " reference_unigrams = r.split(reference.lower())\n", - " for reference_ngram in ngrams(reference_unigrams, n):\n", - " if reference_ngram not in ngram_index[n]:\n", - " ngram_index[n][reference_ngram] = set()\n", - " ngram_index[n][reference_ngram].add(\n", - " stats_key + \"+\" + id + \"+\" + \"references\"\n", - " )\n", - " return ngram_index\n", - "\n", - " # SETUP\n", - " print(\"preparing scenarios and creating indexes\")\n", - " start = time.time()\n", - " light_scenarios = []\n", - " for i, (bytes_read, light_scenario_json) in enumerate(\n", - " scenario_file.iter_lines(progress=True)\n", - " ):\n", - " if i % 20 == 0:\n", - " print(\n", - " f\"scenario creation progress: {(bytes_read/scenario_file.file_size)*100:.2f}%\"\n", - " )\n", - "\n", - " light_scenario_dict: dict = json.loads(light_scenario_json)\n", - "\n", - " light_scenario_key_dict: dict = light_scenario_dict[\"scenario_key\"]\n", - " scenario_spec = str(light_scenario_key_dict[\"scenario_spec\"])\n", - "\n", - " light_scenario_key = scenario_spec + \"_\" + light_scenario_key_dict[\"split\"]\n", - " light_instances = [\n", - " {\n", - " \"input\": instance_dict[\"input\"],\n", - " \"references\": instance_dict[\"references\"],\n", - " \"id\": instance_dict[\"id\"],\n", - " }\n", - " for instance_dict in light_scenario_dict[\"instances\"]\n", - " ]\n", - " light_scenarios.append(\n", - " {\"scenario_key\": light_scenario_key, \"instances\": light_instances}\n", - " )\n", - " print(f\"Finished creating scenarios ({time.time()-start}s)\")\n", - "\n", - " print(\"Creating indexes\")\n", - "\n", - " start = time.time()\n", - " stats_key_counts = defaultdict(int)\n", - " ngram_index = create_ngram_index(\n", - " light_scenarios=light_scenarios, n_values=[n], stats_key_counts=stats_key_counts\n", - " )\n", - " print(f\"Finished creating indexes 
({time.time()-start}s)\")\n", - "\n", - " r = re.compile(rf\"[\\s{re.escape(punctuation)}]+\")\n", - " stats_key_to_input_ids = defaultdict(set)\n", - " stats_key_to_reference_ids = defaultdict(set)\n", - " print(\"computing overlap\")\n", - " start = time.time()\n", - "\n", - " domain.init_progress(input_files[0].file_size)\n", - "\n", - " for input_file in input_files:\n", - " for i, (bytes_read, line) in enumerate(input_file.iter_lines(progress=True)):\n", - " if i % 1000 == 0:\n", - " print(\n", - " f\"computing overlap progress: {(bytes_read / input_file.file_size) * 100:.2f}%\"\n", - " )\n", - " domain.set_progress(bytes_read)\n", - " if i == 10000:\n", - " break\n", - " document = json.loads(line)[\"text\"]\n", - " document_tokens = r.split(document.lower())\n", - " for n in ngram_index.keys():\n", - " for document_ngram in ngrams(document_tokens, n):\n", - " if document_ngram in ngram_index[n]:\n", - " for entry_overlap_key in ngram_index[n][document_ngram]:\n", - " stats_key, id, part = entry_overlap_key.split(\"+\")\n", - " if part == \"input\":\n", - " stats_key_to_input_ids[stats_key].add(id)\n", - " elif part == \"references\":\n", - " stats_key_to_reference_ids[stats_key].add(id)\n", - " print(f\"Finished computing overlap ({time.time()-start}s)\")\n", - " print(\"done\")\n", - "\n", - " return stats_key_to_input_ids, stats_key_to_reference_ids, stats_key_counts" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "2f23c7ae", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "SyftSuccess: User Code Submitted" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.code.submit(compute_document_data_overlap)" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "27be4dc4", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "SyftSuccess: Syft function 'main_function' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@sy.syft_function_single_use(\n", - " input_files=helm_train_files, scenario_files=helm_test_files\n", - ")\n", - "def main_function(domain, input_files, scenario_files):\n", - " N = [5, 9, 13]\n", - " jobs = []\n", - " for n in N[:1]:\n", - " for scenario_file in scenario_files:\n", - " batch_job = domain.launch_job(\n", - " compute_document_data_overlap,\n", - " scenario_file=scenario_file,\n", - " input_files=input_files,\n", - " n=n,\n", - " )\n", - " jobs.append(batch_job)\n", - "\n", - " return None" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "82d92df1", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - "
\n", - "

Request

\n", - "

Id: 61a8a248fb5946da8682bb1764d0a40f

\n", - "

Request time: 2023-12-19 15:35:59

\n", - " \n", - " \n", - "

Status: RequestStatus.PENDING

\n", - "

Requested on: Wizardly_vapnik of type Domain

\n", - "

Requested by: Jane Doe (info@openmined.org)

\n", - "

Changes: Request to change main_function to permission RequestStatus.APPROVED. Nested Requests not resolved.

\n", - "
\n", - "\n", - " " - ], - "text/markdown": [ - "```python\n", - "class Request:\n", - " id: str = 61a8a248fb5946da8682bb1764d0a40f\n", - " request_time: str = 2023-12-19 15:35:59\n", - " updated_at: str = None\n", - " status: str = RequestStatus.PENDING\n", - " changes: str = ['Request to change main_function to permission RequestStatus.APPROVED. Nested Requests not resolved']\n", - " requesting_user_verify_key: str = 5959fe0c5c656120a574beafaa304dad187bb3cad3d40dc1f1bea34a1ead7c94\n", - "\n", - "```" - ], - "text/plain": [ - "syft.service.request.request.Request" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.code.request_code_execution(main_function)" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "29ee2790", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "SyftWarning: You're approving a request on high side domain which may host datasets with private information." - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Would you like to proceed? [y/n]: y\n", - "Request approved for domain wizardly_vapnik\n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "SyftSuccess: Request 61a8a248fb5946da8682bb1764d0a40f changes applied" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.requests[-1].approve(approve_nested=True)" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "78b084c0", - "metadata": {}, - "outputs": [], - "source": [ - "job = client.code.main_function(\n", - " input_files=helm_train_files, scenario_files=helm_test_files, blocking=False\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "1df60a45", - "metadata": {}, - "source": [ - "# Inspect Jobs and get results" - ] - }, - { - "cell_type": "code", - "execution_count": 71, - "id": "55c3bee6", - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "```python\n", - "class Job:\n", - " id: UID = a388f67b3c3d407db9c2733aff9447f0\n", - " status: errored\n", - " has_parent: False\n", - " result: Err('UserCodeStatus.DENIED: Function has no output policy')\n", - " logs:\n", - "\n", - "0 \n", - " \n", - "```" - ], - "text/plain": [ - "syft.service.job.job_stash.Job" - ] - }, - "execution_count": 71, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "4d567f04", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - "
\n", - "
\n", - "
\n", - "

Job List

\n", - "
\n", - "\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - "\n", - "

0

\n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - " \n" - ], - "text/plain": [ - "[syft.service.job.job_stash.Job, syft.service.job.job_stash.Job]" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job.subjobs" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "852360ec", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "starting overlap computation\n", - "preparing scenarios and creating indexes\n", - "scenario creation progress: 0.90%\n", - "Finished creating scenarios (0.37901782989501953s)\n", - "Creating indexes\n", - "n_gram indexing progress: 0.00%\n", - "Finished creating indexes (0.10171127319335938s)\n", - "computing overlap\n", - "computing overlap progress: 3.83%\n", - "Finished computing overlap (0.08044147491455078s)\n", - "done\n", - "\n", - "\n" - ] - } - ], - "source": [ - "job.subjobs[0].logs()" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "c5de7233", - "metadata": {}, - "outputs": [], - "source": [ - "results = [j.wait().get() for j in job.subjobs]" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "4a079df7", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "[(defaultdict(<class 'set'>, {}), defaultdict(<class 'set'>, {}), defaultdict(<class 'int'>, {"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_train_9": 5, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_valid_9": 34, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_test_9": 311, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_train_9": 5, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_valid_9": 14, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_test_9": 135})), (defaultdict(<class 'set'>, {"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_test_5": {'id328'}, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_valid_5": {'id12'}}), defaultdict(<class 'set'>, {}), defaultdict(<class 'int'>, {"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_train_5": 5, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_valid_5": 34, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_test_5": 311, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_train_5": 5, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_valid_5": 14, "{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_test_5": 135}))]" - ], - "text/plain": [ - "[(defaultdict(set, {}),\n", - " defaultdict(set, {}),\n", - " defaultdict(int,\n", - " {\"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_train_9\": 5,\n", - " \"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_valid_9\": 34,\n", - " \"{'class_name': 
'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_test_9\": 311,\n", - " \"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_train_9\": 5,\n", - " \"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_valid_9\": 14,\n", - " \"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_test_9\": 135})),\n", - " (defaultdict(set,\n", - " {\"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_test_5\": {'id328'},\n", - " \"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_valid_5\": {'id12'}}),\n", - " defaultdict(set, {}),\n", - " defaultdict(int,\n", - " {\"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_train_5\": 5,\n", - " \"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_valid_5\": 34,\n", - " \"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'philosophy'}}_test_5\": 311,\n", - " \"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_train_5\": 5,\n", - " \"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_valid_5\": 14,\n", - " \"{'class_name': 'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', 'args': {'subject': 'anatomy'}}_test_5\": 135}))]" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# stats_key_to_input_ids, stats_key_to_reference_ids, stats_key_counts\n", - "results" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "0dcd8d03", - "metadata": {}, - "outputs": [], - "source": [ - "# results[0]" - ] - }, - { - "cell_type": "markdown", - "id": "6fe4daea", - "metadata": {}, - "source": [ - "# Aggregate" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "d5053b78", - "metadata": {}, - "outputs": [], - "source": [ - "stats_key_to_input_ids, stats_key_to_reference_ids, stats_key_counts = zip(*results)\n", - "\n", - "total_input_ids = defaultdict(set)\n", - "total_reference_ids = defaultdict(set)\n", - "total_stats_key_counts = defaultdict(int)\n", - "\n", - "for d in stats_key_counts:\n", - " for key, val in d.items():\n", - " total_stats_key_counts[key] += val\n", - "\n", - "\n", - "for d in stats_key_to_input_ids:\n", - " for key in d:\n", - " new_set = set()\n", - " if key in total_input_ids:\n", - " new_set = total_input_ids[key]\n", - " new_set = new_set.union(d[key])\n", - " total_input_ids[key] = new_set\n", - "\n", - "for d in stats_key_to_reference_ids:\n", - " for key in d:\n", - " new_set = set()\n", - " if key in total_reference_ids:\n", - " new_set = total_reference_ids[key]\n", - " new_set = total_reference_ids[key].union(d[key])\n", - " total_reference_ids[key] = new_set\n", - "\n", - "all_data_overlap_stats = []\n", - "for stats_key, count in total_stats_key_counts.items():\n", - " data_overlap_stats = {\n", - " \"data_overlap_stats_key\": None,\n", - " \"num_instances\": count,\n", - " \"instance_ids_with_overlapping_input\": sorted(total_input_ids[stats_key]),\n", - " \"instance_ids_with_overlapping_reference\": sorted(\n", - " total_reference_ids[stats_key]\n", - " ),\n", - " }\n", - " subject, split, n_str = 
stats_key.rsplit(\"_\", 2)\n", - " data_overlap_stats[\"data_overlap_stats_key\"] = {\n", - " \"light_scenario_key\": {\"scenario_spec\": subject, \"split\": split},\n", - " \"overlap_protocol_spec\": {\"n\": int(n_str)},\n", - " }\n", - " all_data_overlap_stats.append(data_overlap_stats)" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "9c53c3aa", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[{'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'philosophy'}}\",\n", - " 'split': 'train'},\n", - " 'overlap_protocol_spec': {'n': 9}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 5},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'philosophy'}}\",\n", - " 'split': 'valid'},\n", - " 'overlap_protocol_spec': {'n': 9}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 34},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'philosophy'}}\",\n", - " 'split': 'test'},\n", - " 'overlap_protocol_spec': {'n': 9}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 311},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'anatomy'}}\",\n", - " 'split': 'train'},\n", - " 'overlap_protocol_spec': {'n': 9}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 5},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'anatomy'}}\",\n", - " 'split': 'valid'},\n", - " 'overlap_protocol_spec': {'n': 9}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 14},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'anatomy'}}\",\n", - " 'split': 'test'},\n", - " 'overlap_protocol_spec': {'n': 9}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 135},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'philosophy'}}\",\n", - " 'split': 'train'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 
'num_instances': 5},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'philosophy'}}\",\n", - " 'split': 'valid'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': ['id12'],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 34},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'philosophy'}}\",\n", - " 'split': 'test'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': ['id328'],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 311},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'anatomy'}}\",\n", - " 'split': 'train'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 5},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'anatomy'}}\",\n", - " 'split': 'valid'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 14},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': \"{'class_name': \"\n", - " \"'helm.benchmark.scenarios.mmlu_scenario.MMLUScenario', \"\n", - " \"'args': \"\n", - " \"{'subject': \"\n", - " \"'anatomy'}}\",\n", - " 'split': 'test'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 135}]\n" - ] - } - ], - "source": [ - "# stdlib\n", - "from pprint import pprint\n", - "\n", - "pprint(all_data_overlap_stats)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "300abb87", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": true - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/helm/dynamic-docker-workers.ipynb b/notebooks/helm/dynamic-docker-workers.ipynb deleted file mode 100644 index 3be957d7b2d..00000000000 --- a/notebooks/helm/dynamic-docker-workers.ipynb +++ /dev/null @@ -1,768 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - 
"id": "b27d69a2", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "kj/filesystem-disk-unix.c++:1703: warning: PWD environment variable doesn't match current directory; pwd = /Users/koen/workspace/pysyft/notebooks\n" - ] - } - ], - "source": [ - "# syft absolute\n", - "import syft as sy" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "dcad6636", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Logged into as \n" - ] - }, - { - "data": { - "text/html": [ - "
SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`.

" - ], - "text/plain": [ - "SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "node = sy.orchestra.launch(\n", - " name=\"test-domain-helm2\",\n", - " dev_mode=True,\n", - " reset=True,\n", - " n_consumers=0,\n", - " create_producer=True,\n", - ")\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a9ea17d8", - "metadata": {}, - "outputs": [], - "source": [ - "client.worker.start_workers(n=3)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "c0c8331c", - "metadata": {}, - "outputs": [], - "source": [ - "workers = client.worker.list()" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "f8fc2e1b", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - "
\n", - "
\n", - "
\n", - "

DockerWorker List

\n", - "
\n", - "\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - "\n", - "

0

\n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - " \n" - ], - "text/plain": [ - "[syft.service.worker.worker_service.DockerWorker,\n", - " syft.service.worker.worker_service.DockerWorker,\n", - " syft.service.worker.worker_service.DockerWorker]" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "workers" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "28c5e351", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: 2 workers stopped

" - ], - "text/plain": [ - "SyftSuccess: 2 workers stopped" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.worker.stop(workers)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": true - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/helm/helm-audit-without-syft.ipynb b/notebooks/helm/helm-audit-without-syft.ipynb deleted file mode 100644 index 0b078996e48..00000000000 --- a/notebooks/helm/helm-audit-without-syft.ipynb +++ /dev/null @@ -1,403 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "from collections import defaultdict\n", - "import glob\n", - "import json\n", - "import os\n", - "import re\n", - "from string import punctuation\n", - "\n", - "# third party\n", - "from nltk import ngrams\n", - "import tqdm" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "# # stdlib\n", - "# import subprocess\n", - "\n", - "# helm_process = subprocess.run(\n", - "# [\n", - "# \"python\",\n", - "# \"/home/teo/helm/scripts/data_overlap/compute_data_overlap_metrics.py\",\n", - "# \"--scenario-data\",\n", - "# \"/home/teo/helm/scripts/data_overlap/scenario_data.jsonl\",\n", - "# \"--input-data\",\n", - "# \"short_input.jsonl\",\n", - "# \"--output-stats\",\n", - "# \"/home/teo/helm/scripts/data_overlap/output_stats.jsonl\",\n", - "# \"--input-format\",\n", - "# \"the_pile\",\n", - "# ]\n", - "# )" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "PART_INPUT: str = \"input\"\n", - "PART_REF: str = \"references\"\n", - "\n", - "r = re.compile(rf\"[\\s{re.escape(punctuation)}]+\")\n", - "\n", - "\n", - "def create_ngram_index(light_scenarios, n_values, stats_key_counts):\n", - " ngram_index = {n: {} for n in n_values}\n", - " for scenario in tqdm.tqdm(light_scenarios):\n", - " # print(f\"Building ngram indexes for {scenario['scenario_key']}\")\n", - " for n in n_values:\n", - " stats_key = scenario[\"scenario_key\"] + \"_\" + str(n)\n", - " stats_key_counts[stats_key] = len(scenario[\"instances\"])\n", - " for instance in scenario[\"instances\"]:\n", - " id = instance[\"id\"]\n", - " assert id\n", - "\n", - " input_tokens = r.split(instance[\"input\"].lower())\n", - " for input_ngram in ngrams(input_tokens, n):\n", - " if input_ngram not in ngram_index[n]:\n", - " ngram_index[n][input_ngram] = set()\n", - " ngram_index[n][input_ngram].add(\n", - " stats_key + \"+\" + id + \"+\" + PART_INPUT\n", - " )\n", - "\n", - " # compute reference ngrams\n", - " for reference in instance[\"references\"]:\n", - " reference_unigrams = r.split(reference.lower())\n", - " for reference_ngram in ngrams(reference_unigrams, n):\n", - " if reference_ngram not in ngram_index[n]:\n", - " 
ngram_index[n][reference_ngram] = set()\n", - " ngram_index[n][reference_ngram].add(\n", - " stats_key + \"+\" + id + \"+\" + PART_REF\n", - " )\n", - " return ngram_index" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "def compute_document_data_overlap(document, ngram_index):\n", - " stats_key_to_input_ids = defaultdict(set)\n", - " stats_key_to_reference_ids = defaultdict(set)\n", - " document_tokens = r.split(document.lower())\n", - " for n in ngram_index.keys():\n", - " for document_ngram in ngrams(document_tokens, n):\n", - " if document_ngram in ngram_index[n]:\n", - " for entry_overlap_key in ngram_index[n][document_ngram]:\n", - " stats_key, id, part = entry_overlap_key.split(\"+\")\n", - " if part == PART_INPUT:\n", - " stats_key_to_input_ids[stats_key].add(id)\n", - " elif part == PART_REF:\n", - " stats_key_to_reference_ids[stats_key].add(id)\n", - " return stats_key_to_input_ids, stats_key_to_reference_ids" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "scenario_data_path = \"/Users/koen/Downloads/filtered_scenario_data_new.jsonl\"" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib\n", - "import sys" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "all_lines = open(scenario_data_path).read()" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "167.00667" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "sys.getsizeof(all_lines) / 1000000" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "CPU times: user 663 ms, sys: 149 ms, total: 812 ms\n", - "Wall time: 812 ms\n" - ] - } - ], - "source": [ - "%%time\n", - "light_scenarios = []\n", - "light_scenario_jsons = open(scenario_data_path).readlines()\n", - "for light_scenario_json in light_scenario_jsons:\n", - " light_scenario_dict: dict = json.loads(light_scenario_json)\n", - "\n", - " light_scenario_key_dict: dict = light_scenario_dict[\"scenario_key\"]\n", - " # if the light_scenarios are exported from helm, they will have a scenario_spec field\n", - " # subject_spec = light_scenario_key_dict[\"scenario_spec\"]['args']['subject']\n", - " scenario_spec = str(light_scenario_key_dict[\"scenario_spec\"])\n", - " light_scenario_key = scenario_spec + \"_\" + light_scenario_key_dict[\"split\"]\n", - " light_instances = [\n", - " {\n", - " \"input\": instance_dict[PART_INPUT],\n", - " \"references\": instance_dict[PART_REF],\n", - " \"id\": instance_dict[\"id\"],\n", - " }\n", - " for instance_dict in light_scenario_dict[\"instances\"]\n", - " ]\n", - " light_scenarios.append(\n", - " {\"scenario_key\": light_scenario_key, \"instances\": light_instances}\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The input data will be loaded from ['short_input.jsonl']\n", - "Loading scenario data from /Users/koen/Downloads/filtered_scenario_data_new.jsonl\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|█████████████████████████████████████████████████| 241/241 [27:11<00:00, 6.77s/it]" - 
] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "CPU times: user 4min 48s, sys: 12min 52s, total: 17min 41s\n", - "Wall time: 27min 11s\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "source": [ - "%%time\n", - "input_data_path = \"short_input.jsonl\"\n", - "# scenario_data_path = \"/home/teo/helm/scripts/data_overlap/scenario_data.jsonl\"\n", - "# scenario_data_path = \"/home/teo/helm/scripts/data_overlap/scenario_data.jsonl\"\n", - "output_path = \"output2.jsonl\"\n", - "normalization = \"default\"\n", - "N = [5, 9, 13]\n", - "\n", - "\n", - "print(f\"Loading scenario data from {scenario_data_path}\")\n", - "\n", - "\n", - "stats_key_counts = defaultdict(int)\n", - "ngram_index = create_ngram_index(\n", - " light_scenarios=light_scenarios, n_values=N, stats_key_counts=stats_key_counts\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# SETUP\n", - "if os.path.isdir(input_data_path):\n", - " input_file_paths = []\n", - " for file_path in glob.iglob(os.path.join(input_data_path, \"**/*\"), recursive=True):\n", - " if os.path.isfile(file_path):\n", - " input_file_paths.append(file_path)\n", - "else:\n", - " input_file_paths = [input_data_path]\n", - "print(f\"The input data will be loaded from {input_file_paths}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Written 723 results to output2.jsonl\n" - ] - } - ], - "source": [ - "stats_key_to_input_ids = []\n", - "stats_key_to_reference_ids = []\n", - "\n", - "# BATCH PROCESSING\n", - "for input_file_index in tqdm.tqdm(\n", - " range(len(input_file_paths)),\n", - " desc=\"Computing overlap stats for input files\",\n", - " disable=None,\n", - "):\n", - " input_file_path: str = input_file_paths[input_file_index]\n", - " with open(input_file_path) as f:\n", - " for line in f:\n", - " document = json.loads(line)[\"text\"]\n", - " doc_input_ids, doc_ref_ids = compute_document_data_overlap(\n", - " document=document,\n", - " ngram_index=ngram_index,\n", - " )\n", - " stats_key_to_input_ids.append(doc_input_ids)\n", - " stats_key_to_reference_ids.append(doc_ref_ids)\n", - "\n", - "# AGGREGATION\n", - "total_input_ids = defaultdict(set)\n", - "total_reference_ids = defaultdict(set)\n", - "\n", - "for d in stats_key_to_input_ids:\n", - " for key in d:\n", - " new_set = set()\n", - " if key in total_input_ids:\n", - " new_set = total_input_ids[key]\n", - " new_set = new_set.union(d[key])\n", - " total_input_ids[key] = new_set\n", - "\n", - "for d in stats_key_to_reference_ids:\n", - " for key in d:\n", - " new_set = set()\n", - " if key in total_reference_ids:\n", - " new_set = total_reference_ids[key]\n", - " new_set = total_reference_ids[key].union(d[key])\n", - " total_reference_ids[key] = new_set\n", - "\n", - "all_data_overlap_stats = []\n", - "for stats_key, count in stats_key_counts.items():\n", - " data_overlap_stats = {\n", - " \"data_overlap_stats_key\": None,\n", - " \"num_instances\": count,\n", - " \"instance_ids_with_overlapping_input\": sorted(total_input_ids[stats_key]),\n", - " \"instance_ids_with_overlapping_reference\": sorted(\n", - " total_reference_ids[stats_key]\n", - " ),\n", - " }\n", - " # print(stats_key)\n", - " subject, split, n_str = stats_key.rsplit(\"_\", 2)\n", - " data_overlap_stats[\"data_overlap_stats_key\"] = {\n", - " \"light_scenario_key\": 
{\"scenario_spec\": subject, \"split\": split},\n", - " \"overlap_protocol_spec\": {\"n\": int(n_str)},\n", - " }\n", - " all_data_overlap_stats.append(data_overlap_stats)\n", - "\n", - "with open(output_path, \"w\") as f:\n", - " f.writelines(\n", - " f\"{json.dumps(data_overlap_stats)}\\n\"\n", - " for data_overlap_stats in all_data_overlap_stats\n", - " )\n", - "print(f\"Written {len(all_data_overlap_stats)} results to {output_path}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "kj/filesystem-disk-unix.c++:1703: warning: PWD environment variable doesn't match current directory; pwd = /home/teo/OpenMined/PySyft\n" - ] - } - ], - "source": [ - "# syft absolute" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": true - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/helm/helm-syft.ipynb b/notebooks/helm/helm-syft.ipynb deleted file mode 100644 index ea88e25b7c2..00000000000 --- a/notebooks/helm/helm-syft.ipynb +++ /dev/null @@ -1,1387 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "kj/filesystem-disk-unix.c++:1703: warning: PWD environment variable doesn't match current directory; pwd = /Users/koen/workspace/PySyft\n" - ] - } - ], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "from syft import ActionObject\n", - "from syft.types.blob_storage import BlobFile" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Staging Protocol Changes...\n", - "Data Migrated to latest version !!!\n", - "Logged into as \n" - ] - }, - { - "data": { - "text/html": [ - "
SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`.

" - ], - "text/plain": [ - "SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "node = sy.orchestra.launch(\n", - " name=\"test-domain-helm2\",\n", - " dev_mode=True,\n", - " reset=True,\n", - " n_consumers=4,\n", - " create_producer=True,\n", - ")\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: User 'A' successfully registered! To see users, run `[your_client].users`

" - ], - "text/plain": [ - "SyftSuccess: User 'A' successfully registered! To see users, run `[your_client].users`" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.register(name=\"A\", email=\"a@b.org\", password=\"b\", password_verify=\"b\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also run this with seaweed, but then you need to run the seaweed container manually and connect to it:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```bash\n", - "docker run --entrypoint /bin/sh -p 8333:8333 -p 8888:8888 chrislusf/seaweedfs -c \"echo 's3.configure -access_key admin -secret_key admin -user iam -actions Read,Write,List,Tagging,Admin -apply' | weed shell > /dev/null 2>&1 & weed server -s3 -s3.port=8333 -master.volumeSizeLimitMB=2048\"\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "# blob_config = BlobStorageConfig(\n", - "# client_type=SeaweedFSClient,\n", - "# client_config=SeaweedFSClientConfig(\n", - "# host=\"http://0.0.0.0\",\n", - "# port=\"8333\",\n", - "# access_key=\"admin\",\n", - "# secret_key=\"admin\",\n", - "# bucket_name=\"test_bucket\",\n", - "# region=\"us-east-1\",\n", - "# ),\n", - "# )" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "# node.python_node.init_blob_storage(blob_config)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Inputs" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "# TODO: fix way we send list of files\n", - "scenario_objs = ActionObject.from_obj(\n", - " [BlobFile.upload_from_path(\"scenario_data.jsonl\", client)]\n", - ")\n", - "\n", - "scenario_files_ptr = scenario_objs.send(client)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "input_files = ActionObject.from_obj(\n", - " [BlobFile.upload_from_path(\"short_input.jsonl\", client)]\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "input_files_ptr = input_files.send(client)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "input_files_dataset = sy.Dataset(\n", - " name=\"Helm dataset\",\n", - " asset_list=[\n", - " sy.Asset(\n", - " name=\"helm train data\",\n", - " data=input_files_ptr,\n", - " mock=sy.ActionObject.empty(),\n", - " ),\n", - " sy.Asset(\n", - " name=\"helm test data\",\n", - " data=scenario_files_ptr,\n", - " mock=sy.ActionObject.empty(),\n", - " ),\n", - " ],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - " 50%|████████████████████████████████████████████████████▌ | 1/2 [00:00<00:00, 5.61it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Uploading: helm train data\n", - "Uploading: helm test data\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 5.84it/s]\n" - ] - }, - { - "data": { - "text/html": [ - "
SyftSuccess: Dataset uploaded to 'test-domain-helm2'. To see the datasets uploaded by a client on this node, use command `[your_client].datasets`

" - ], - "text/plain": [ - "SyftSuccess: Dataset uploaded to 'test-domain-helm2'. To see the datasets uploaded by a client on this node, use command `[your_client].datasets`" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.upload_dataset(input_files_dataset)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "input_files_asset = client.datasets[\"Helm dataset\"].assets[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "scenario_files_asset = client.datasets[\"Helm dataset\"].assets[1]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Syft functions" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: Syft function 'compute_document_data_overlap' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`.

" - ], - "text/plain": [ - "SyftSuccess: Syft function 'compute_document_data_overlap' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@sy.syft_function()\n", - "def compute_document_data_overlap(domain, scenario_file, input_files, n):\n", - " # stdlib\n", - " from collections import defaultdict\n", - " import json\n", - " import re\n", - " from string import punctuation\n", - "\n", - " # third party\n", - " from nltk import ngrams\n", - "\n", - " print(\"starting overlap computation\")\n", - " r = re.compile(rf\"[\\s{re.escape(punctuation)}]+\")\n", - "\n", - " def create_ngram_index(light_scenarios, n_values, stats_key_counts):\n", - " ngram_index = {n: {} for n in n_values}\n", - " for scenario in light_scenarios:\n", - " for n in n_values:\n", - " stats_key = scenario[\"scenario_key\"] + \"_\" + str(n)\n", - " stats_key_counts[stats_key] = len(scenario[\"instances\"])\n", - " for instance in scenario[\"instances\"]:\n", - " id = instance[\"id\"]\n", - " input_tokens = r.split(instance[\"input\"].lower())\n", - " for input_ngram in ngrams(input_tokens, n):\n", - " if input_ngram not in ngram_index[n]:\n", - " ngram_index[n][input_ngram] = set()\n", - " ngram_index[n][input_ngram].add(\n", - " stats_key + \"+\" + id + \"+\" + \"input\"\n", - " )\n", - "\n", - " # compute reference ngrams\n", - " for reference in instance[\"references\"]:\n", - " reference_unigrams = r.split(reference.lower())\n", - " for reference_ngram in ngrams(reference_unigrams, n):\n", - " if reference_ngram not in ngram_index[n]:\n", - " ngram_index[n][reference_ngram] = set()\n", - " ngram_index[n][reference_ngram].add(\n", - " stats_key + \"+\" + id + \"+\" + \"references\"\n", - " )\n", - " return ngram_index\n", - "\n", - " # # SETUP\n", - " print(\"preparing scenarios and creating indexes\")\n", - " light_scenarios = []\n", - " for light_scenario_json in scenario_file.iter_lines():\n", - " light_scenario_dict: dict = json.loads(light_scenario_json)\n", - "\n", - " light_scenario_key_dict: dict = light_scenario_dict[\"scenario_key\"]\n", - " subject_spec = light_scenario_key_dict[\"scenario_spec\"][\"args\"][\"subject\"]\n", - " light_scenario_key = subject_spec + \"_\" + light_scenario_key_dict[\"split\"]\n", - " light_instances = [\n", - " {\n", - " \"input\": instance_dict[\"input\"],\n", - " \"references\": instance_dict[\"references\"],\n", - " \"id\": instance_dict[\"id\"],\n", - " }\n", - " for instance_dict in light_scenario_dict[\"instances\"]\n", - " ]\n", - " light_scenarios.append(\n", - " {\"scenario_key\": light_scenario_key, \"instances\": light_instances}\n", - " )\n", - "\n", - " stats_key_counts = defaultdict(int)\n", - "\n", - " ngram_index = create_ngram_index(\n", - " light_scenarios=light_scenarios, n_values=[n], stats_key_counts=stats_key_counts\n", - " )\n", - "\n", - " r = re.compile(rf\"[\\s{re.escape(punctuation)}]+\")\n", - " stats_key_to_input_ids = defaultdict(set)\n", - " stats_key_to_reference_ids = defaultdict(set)\n", - " print(\"computing overlap\")\n", - " # stdlib\n", - " from time import sleep\n", - "\n", - " sleep(1)\n", - "\n", - " domain.init_progress(input_files[0].file_size)\n", - "\n", - " for input_file in input_files:\n", - " for bytes_read, line in input_file.iter_lines(progress=True):\n", - " sleep(1)\n", - " document = json.loads(line)[\"text\"]\n", - " document_tokens = 
r.split(document.lower())\n", - " for n in ngram_index.keys():\n", - " for document_ngram in ngrams(document_tokens, n):\n", - " if document_ngram in ngram_index[n]:\n", - " for entry_overlap_key in ngram_index[n][document_ngram]:\n", - " stats_key, id, part = entry_overlap_key.split(\"+\")\n", - " if part == \"input\":\n", - " stats_key_to_input_ids[stats_key].add(id)\n", - " elif part == \"references\":\n", - " stats_key_to_reference_ids[stats_key].add(id)\n", - " domain.set_progress(bytes_read)\n", - " print(\"Finished overlap computation\")\n", - "\n", - " return stats_key_to_input_ids, stats_key_to_reference_ids, stats_key_counts" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: Syft function 'aggregate' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`.

" - ], - "text/plain": [ - "SyftSuccess: Syft function 'aggregate' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@sy.syft_function()\n", - "def aggregate(batch_results):\n", - " # stdlib\n", - " from collections import defaultdict\n", - "\n", - " print(\"Starting aggregation\")\n", - " stats_key_to_input_ids, stats_key_to_reference_ids, stats_key_counts = zip(\n", - " *batch_results\n", - " )\n", - "\n", - " total_input_ids = defaultdict(set)\n", - " total_reference_ids = defaultdict(set)\n", - " total_stats_key_counts = defaultdict(int)\n", - "\n", - " for d in stats_key_counts:\n", - " for key, val in d.items():\n", - " total_stats_key_counts[key] += val\n", - "\n", - " for d in stats_key_to_input_ids:\n", - " for key in d:\n", - " new_set = set()\n", - " if key in total_input_ids:\n", - " new_set = total_input_ids[key]\n", - " new_set = new_set.union(d[key])\n", - " total_input_ids[key] = new_set\n", - "\n", - " for d in stats_key_to_reference_ids:\n", - " for key in d:\n", - " new_set = set()\n", - " if key in total_reference_ids:\n", - " new_set = total_reference_ids[key]\n", - " new_set = total_reference_ids[key].union(d[key])\n", - " total_reference_ids[key] = new_set\n", - "\n", - " all_data_overlap_stats = []\n", - " for stats_key, count in total_stats_key_counts.items():\n", - " data_overlap_stats = {\n", - " \"data_overlap_stats_key\": None,\n", - " \"num_instances\": count,\n", - " \"instance_ids_with_overlapping_input\": sorted(total_input_ids[stats_key]),\n", - " \"instance_ids_with_overlapping_reference\": sorted(\n", - " total_reference_ids[stats_key]\n", - " ),\n", - " }\n", - " subject, split, n_str = stats_key.rsplit(\"_\", 2)\n", - " data_overlap_stats[\"data_overlap_stats_key\"] = {\n", - " \"light_scenario_key\": {\"scenario_spec\": subject, \"split\": split},\n", - " \"overlap_protocol_spec\": {\"n\": int(n_str)},\n", - " }\n", - " all_data_overlap_stats.append(data_overlap_stats)\n", - " print(\"Finished aggregation\")\n", - " return all_data_overlap_stats" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: User Code Submitted

" - ], - "text/plain": [ - "SyftSuccess: User Code Submitted" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.code.submit(compute_document_data_overlap)" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: User Code Submitted

" - ], - "text/plain": [ - "SyftSuccess: User Code Submitted" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.code.submit(aggregate)" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: Syft function 'main_function' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`.

" - ], - "text/plain": [ - "SyftSuccess: Syft function 'main_function' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@sy.syft_function_single_use(\n", - " input_files=input_files_asset, scenario_files=scenario_files_asset\n", - ")\n", - "def main_function(domain, input_files, scenario_files):\n", - " N = [5, 9, 13]\n", - " batch_results = []\n", - " for n in N[:1]:\n", - " for scenario_file in scenario_files:\n", - " batch_job = domain.launch_job(\n", - " compute_document_data_overlap,\n", - " scenario_file=scenario_file,\n", - " input_files=input_files,\n", - " n=n,\n", - " )\n", - " batch_results.append(batch_job.result)\n", - "\n", - " aggregate_job = domain.launch_job(aggregate, batch_results=batch_results)\n", - " print(\"Finished main function\")\n", - " return aggregate_job.result" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Request approved for domain test-domain-helm2\n" - ] - }, - { - "data": { - "text/html": [ - "
SyftSuccess: Request 471b4b92c419465980a9ecceef323bc1 changes applied

" - ], - "text/plain": [ - "SyftSuccess: Request 471b4b92c419465980a9ecceef323bc1 changes applied" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.code.request_code_execution(main_function)\n", - "client.requests[-1].approve(approve_nested=True)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [], - "source": [ - "job = client.code.main_function(\n", - " input_files=input_files_asset,\n", - " scenario_files=scenario_files_asset,\n", - " blocking=False,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Get results" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "20/12/23 13:41:09 FUNCTION LOG (d95fbe1f54f947ceae2fdb090d2449c8): Finished main function\n" - ] - }, - { - "data": { - "text/markdown": [ - "```python\n", - "class Job:\n", - " id: UID = d95fbe1f54f947ceae2fdb090d2449c8\n", - " status: processing\n", - " has_parent: False\n", - " result: syft.service.action.action_data_empty.ObjectNotReady\n", - " logs:\n", - "\n", - "0 Finished main function\n", - " \n", - "```" - ], - "text/plain": [ - "syft.service.job.job_stash.Job" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - "
\n", - "
\n", - "
\n", - "

Job List

\n", - "
\n", - "\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - "\n", - "

0

\n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - " \n" - ], - "text/plain": [ - "[syft.service.job.job_stash.Job, syft.service.job.job_stash.Job]" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "20/12/23 13:41:11 FUNCTION LOG (47e67b4fdbe84f54a31684c8cf0917bc): starting overlap computation\n", - "20/12/23 13:41:12 FUNCTION LOG (47e67b4fdbe84f54a31684c8cf0917bc): preparing scenarios and creating indexes\n", - "20/12/23 13:41:12 FUNCTION LOG (47e67b4fdbe84f54a31684c8cf0917bc): computing overlap\n" - ] - } - ], - "source": [ - "job.subjobs" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "20/12/23 13:41:24 FUNCTION LOG (47e67b4fdbe84f54a31684c8cf0917bc): Finished overlap computation\n", - "20/12/23 13:41:27 FUNCTION LOG (9aed743396c84e6fa3ded244132be58b): Starting aggregation\n", - "20/12/23 13:41:27 FUNCTION LOG (9aed743396c84e6fa3ded244132be58b): Finished aggregation\n" - ] - } - ], - "source": [ - "res = job.result.wait().get()" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[{'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': 'philosophy',\n", - " 'split': 'train'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 5},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': 'philosophy',\n", - " 'split': 'valid'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': ['id12'],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 34},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': 'philosophy',\n", - " 'split': 'test'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': ['id328'],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 311},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': 'anatomy',\n", - " 'split': 'train'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 5},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': 'anatomy',\n", - " 'split': 'valid'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 14},\n", - " {'data_overlap_stats_key': {'light_scenario_key': {'scenario_spec': 'anatomy',\n", - " 'split': 'test'},\n", - " 'overlap_protocol_spec': {'n': 5}},\n", - " 'instance_ids_with_overlapping_input': [],\n", - " 'instance_ids_with_overlapping_reference': [],\n", - " 'num_instances': 135}]\n" - ] - } - ], - "source": [ - "# stdlib\n", - "from pprint import pprint\n", - "\n", - "pprint(res)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": 
"text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": { - "height": "calc(100% - 180px)", - "left": "10px", - "top": "150px", - "width": "263.219px" - }, - "toc_section_display": true, - "toc_window_display": true - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/helm/nested-requests.ipynb b/notebooks/helm/nested-requests.ipynb deleted file mode 100644 index 3e9d4bc416a..00000000000 --- a/notebooks/helm/nested-requests.ipynb +++ /dev/null @@ -1,1420 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "kj/filesystem-disk-unix.c++:1703: warning: PWD environment variable doesn't match current directory; pwd = /Users/koen/workspace/pysyft\n" - ] - } - ], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "from syft import ActionObject\n", - "from syft import syft_function\n", - "from syft import syft_function_single_use" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Staging Protocol Changes...\n", - "Data Migrated to latest version !!!\n", - "Logged into as \n" - ] - }, - { - "data": { - "text/html": [ - "
SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`.

" - ], - "text/plain": [ - "SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "node = sy.orchestra.launch(\n", - " name=\"test-domain-helm2\",\n", - " dev_mode=True,\n", - " reset=True,\n", - " n_consumers=3,\n", - " create_producer=True,\n", - " queue_port=3322,\n", - ")\n", - "\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "x = ActionObject.from_obj([1, 2])\n", - "x_ptr = x.send(client)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: Syft function 'process_batch' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`.

" - ], - "text/plain": [ - "SyftSuccess: Syft function 'process_batch' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@syft_function()\n", - "def process_batch(batch):\n", - " # stdlib\n", - " from time import sleep\n", - "\n", - " # takes 30 hours normally\n", - " print(f\"starting batch {batch}\")\n", - "\n", - " sleep(1)\n", - " print(\"done\")\n", - " return batch + 1" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: User Code Submitted

" - ], - "text/plain": [ - "SyftSuccess: User Code Submitted" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.code.submit(process_batch)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: Syft function 'middle_middle_job' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`.

" - ], - "text/plain": [ - "SyftSuccess: Syft function 'middle_middle_job' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@syft_function()\n", - "def middle_middle_job(domain, batch):\n", - " # stdlib\n", - " from time import sleep\n", - "\n", - " # takes 30 hours normally\n", - " print(f\"starting batch {batch}\")\n", - "\n", - " sleep(1)\n", - " batch_job = domain.launch_job(process_batch, batch=batch)\n", - " print(\"start leaf job\", batch_job)\n", - " return 2" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: User Code Submitted

" - ], - "text/plain": [ - "SyftSuccess: User Code Submitted" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.code.submit(middle_middle_job)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: Syft function 'middle_job' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`.

" - ], - "text/plain": [ - "SyftSuccess: Syft function 'middle_job' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@syft_function()\n", - "def middle_job(domain, batch):\n", - " # stdlib\n", - " from time import sleep\n", - "\n", - " # takes 30 hours normally\n", - " print(f\"starting batch {batch}\")\n", - "\n", - " sleep(1)\n", - " batch_job = domain.launch_job(middle_middle_job, batch=batch)\n", - " print(\"start leaf job\", batch_job)\n", - " return 2" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: User Code Submitted

" - ], - "text/plain": [ - "SyftSuccess: User Code Submitted" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.code.submit(middle_job)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: Syft function 'process_all' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`.

" - ], - "text/plain": [ - "SyftSuccess: Syft function 'process_all' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@syft_function_single_use(x=x_ptr)\n", - "def process_all(domain, x):\n", - " jobs = []\n", - " print(\"Launching jobs\")\n", - " for elem in x:\n", - " batch_job = domain.launch_job(middle_job, batch=elem)\n", - " jobs += [batch_job]\n", - " print(\"starting aggregation\")\n", - " print(\"Done\")\n", - " return 1" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - "
\n", - "

Request

\n", - "

Id: 56f76f67cf544edb8894139a92677a83

\n", - "

Request time: 2023-11-24 15:37:14

\n", - " \n", - " \n", - "

Status: RequestStatus.PENDING

\n", - "

Requested on: Test-domain-helm2 of type Domain

\n", - "

Requested by: Jane Doe (info@openmined.org)

\n", - "

Changes: Request to change process_all to permission RequestStatus.APPROVED. Nested Requests not resolved.

\n", - "
\n", - "\n", - " " - ], - "text/markdown": [ - "```python\n", - "class Request:\n", - " id: str = 56f76f67cf544edb8894139a92677a83\n", - " request_time: str = 2023-11-24 15:37:14\n", - " updated_at: str = None\n", - " status: str = RequestStatus.PENDING\n", - " changes: str = ['Request to change process_all to permission RequestStatus.APPROVED. Nested Requests not resolved']\n", - " requesting_user_verify_key: str = 47f8c8f3db3a30695a28e4a51e44916669ac3d111924cb614181c64b2c3b8323\n", - "\n", - "```" - ], - "text/plain": [ - "syft.service.request.request.Request" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "req = client.code.request_code_execution(process_all)\n", - "req" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - "
\n", - "

Request

\n", - "

Id: 56f76f67cf544edb8894139a92677a83

\n", - "

Request time: 2023-11-24 15:37:14

\n", - " \n", - " \n", - "

Status: RequestStatus.PENDING

\n", - "

Requested on: Test-domain-helm2 of type Domain

\n", - "

Requested by: Jane Doe (info@openmined.org)

\n", - "

Changes: Request to change process_all to permission RequestStatus.APPROVED.

This change requests the following nested functions calls:
├──middle_job
├────middle_middle_job
├──────process_batch
.

\n", - "
\n", - "\n", - " " - ], - "text/markdown": [ - "```python\n", - "class Request:\n", - " id: str = 56f76f67cf544edb8894139a92677a83\n", - " request_time: str = 2023-11-24 15:37:14\n", - " updated_at: str = None\n", - " status: str = RequestStatus.PENDING\n", - " changes: str = ['Request to change process_all to permission RequestStatus.APPROVED.

This change requests the following nested functions calls:
├──middle_job
├────middle_middle_job
├──────process_batch
']\n", - " requesting_user_verify_key: str = 47f8c8f3db3a30695a28e4a51e44916669ac3d111924cb614181c64b2c3b8323\n", - "\n", - "```" - ], - "text/plain": [ - "syft.service.request.request.Request" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.requests[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "```python\n", - "class UserCode\n", - " id: UID = 67823a91dbcc47c4a131fa1c307f9314\n", - " service_func_name: str = process_all\n", - " shareholders: list = ['test-domain-helm2']\n", - " status: list = ['Node: test-domain-helm2, Status: pending']\n", - " \n", - " code:\n", - "\n", - "@syft_function_single_use(x=x_ptr)\n", - "def process_all(domain, x):\n", - " jobs = []\n", - " print(\"Launching jobs\")\n", - " for elem in x:\n", - " batch_job = domain.launch_job(middle_job, batch=elem)\n", - " jobs += [batch_job]\n", - " print(\"starting aggregation\")\n", - " print(\"Done\")\n", - " return 1\n", - "\n", - "\n", - "\n", - " Nested Requests:\n", - " class UserCode\n", - " id: UID = d9c59810bea0486095944b54db12b609\n", - " service_func_name: str = middle_job\n", - " shareholders: list = []\n", - " status: list = ['Node: test-domain-helm2, Status: pending']\n", - " \n", - " code:\n", - " \n", - " @syft_function()\n", - " def middle_job(domain, batch):\n", - " # takes 30 hours normally\n", - " print(f\"starting batch {batch}\")\n", - " from time import sleep\n", - " sleep(1)\n", - " batch_job = domain.launch_job(middle_middle_job, batch=batch)\n", - " print(\"start leaf job\")\n", - " return 2\n", - " \n", - " \n", - " \n", - " Nested Requests:\n", - " class UserCode\n", - " id: UID = 0b70a83dd73c4a2ea839dd735ad7dfab\n", - " service_func_name: str = middle_middle_job\n", - " shareholders: list = []\n", - " status: list = ['Node: test-domain-helm2, Status: pending']\n", - " \n", - " code:\n", - " \n", - " @syft_function()\n", - " def middle_middle_job(domain, batch):\n", - " # takes 30 hours normally\n", - " print(f\"starting batch {batch}\")\n", - " from time import sleep\n", - " sleep(1)\n", - " batch_job = domain.launch_job(process_batch, batch=batch)\n", - " print(\"start leaf job\")\n", - " return 2\n", - " \n", - " \n", - " \n", - " Nested Requests:\n", - " class UserCode\n", - " id: UID = f4929e1833dd413fb6b5e95d93e77e46\n", - " service_func_name: str = process_batch\n", - " shareholders: list = []\n", - " status: list = ['Node: test-domain-helm2, Status: pending']\n", - " \n", - " code:\n", - " \n", - " @syft_function()\n", - " def process_batch(batch):\n", - " # takes 30 hours normally\n", - " print(f\"starting batch {batch}\")\n", - " from time import sleep\n", - " sleep(1)\n", - " print(\"done\")\n", - " return batch+1\n", - " \n", - "```" - ], - "text/plain": [ - "syft.service.code.user_code.UserCode" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.requests[0].code" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - "
\n", - "
\n", - "
\n", - "

UserCode List

\n", - "
\n", - "\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - "\n", - "

0

\n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - " \n" - ], - "text/plain": [ - "[syft.service.code.user_code.UserCode,\n", - " syft.service.code.user_code.UserCode,\n", - " syft.service.code.user_code.UserCode,\n", - " syft.service.code.user_code.UserCode]" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.requests[0].codes" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "### Deciding if to approve the requests...." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "```python\n", - "class UserCode\n", - " id: UID = 67823a91dbcc47c4a131fa1c307f9314\n", - " service_func_name: str = process_all\n", - " shareholders: list = ['test-domain-helm2']\n", - " status: list = ['Node: test-domain-helm2, Status: pending']\n", - " \n", - " code:\n", - "\n", - "@syft_function_single_use(x=x_ptr)\n", - "def process_all(domain, x):\n", - " jobs = []\n", - " print(\"Launching jobs\")\n", - " for elem in x:\n", - " batch_job = domain.launch_job(middle_job, batch=elem)\n", - " jobs += [batch_job]\n", - " print(\"starting aggregation\")\n", - " print(\"Done\")\n", - " return 1\n", - "\n", - "\n", - "\n", - " Nested Requests:\n", - " class UserCode\n", - " id: UID = d9c59810bea0486095944b54db12b609\n", - " service_func_name: str = middle_job\n", - " shareholders: list = []\n", - " status: list = ['Node: test-domain-helm2, Status: pending']\n", - " \n", - " code:\n", - " \n", - " @syft_function()\n", - " def middle_job(domain, batch):\n", - " # takes 30 hours normally\n", - " print(f\"starting batch {batch}\")\n", - " from time import sleep\n", - " sleep(1)\n", - " batch_job = domain.launch_job(middle_middle_job, batch=batch)\n", - " print(\"start leaf job\")\n", - " return 2\n", - " \n", - " \n", - " \n", - " Nested Requests:\n", - " class UserCode\n", - " id: UID = 0b70a83dd73c4a2ea839dd735ad7dfab\n", - " service_func_name: str = middle_middle_job\n", - " shareholders: list = []\n", - " status: list = ['Node: test-domain-helm2, Status: pending']\n", - " \n", - " code:\n", - " \n", - " @syft_function()\n", - " def middle_middle_job(domain, batch):\n", - " # takes 30 hours normally\n", - " print(f\"starting batch {batch}\")\n", - " from time import sleep\n", - " sleep(1)\n", - " batch_job = domain.launch_job(process_batch, batch=batch)\n", - " print(\"start leaf job\")\n", - " return 2\n", - " \n", - " \n", - " \n", - " Nested Requests:\n", - " class UserCode\n", - " id: UID = f4929e1833dd413fb6b5e95d93e77e46\n", - " service_func_name: str = process_batch\n", - " shareholders: list = []\n", - " status: list = ['Node: test-domain-helm2, Status: pending']\n", - " \n", - " code:\n", - " \n", - " @syft_function()\n", - " def process_batch(batch):\n", - " # takes 30 hours normally\n", - " print(f\"starting batch {batch}\")\n", - " from time import sleep\n", - " sleep(1)\n", - " print(\"done\")\n", - " return batch+1\n", - " \n", - "```" - ], - "text/plain": [ - "syft.service.code.user_code.UserCode" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.requests[-1].code" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Request approved for domain test-domain-helm2\n" - ] - }, - { - "data": { - "text/html": [ - "
SyftSuccess: Request 56f76f67cf544edb8894139a92677a83 changes applied

" - ], - "text/plain": [ - "SyftSuccess: Request 56f76f67cf544edb8894139a92677a83 changes applied" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "client.requests[-1].approve(approve_nested=True)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [], - "source": [ - "job = client.code.process_all(x=x_ptr, blocking=False)" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "```python\n", - "class Job:\n", - " id: UID = 69b2ab73f729484083e568d37a337a48\n", - " status: created\n", - " has_parent: False\n", - " result: ActionDataEmpty \n", - " logs:\n", - "\n", - "0 \n", - " \n", - "```" - ], - "text/plain": [ - "syft.service.job.job_stash.Job" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "```python\n", - "class Job:\n", - " id: UID = 5bdd231de10046b69acad09e131ac6e8\n", - " status: completed\n", - " has_parent: True\n", - " result: ActionDataEmpty \n", - " logs:\n", - "\n", - "0 starting batch 2\n", - "1 start leaf job\n", - "JOB COMPLETED\n", - " \n", - "```" - ], - "text/plain": [ - "syft.service.job.job_stash.Job" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "24/11/23 17:37:19 FUNCTION LOG (f1aba6d42c3347bda56d0e38ccd45764): starting batch 1\n", - "24/11/23 17:37:20 FUNCTION LOG (29d2a2db42954664bc3e65071997f0c4): starting batch 2\n", - "24/11/23 17:37:21 FUNCTION LOG (f1aba6d42c3347bda56d0e38ccd45764): start leaf job\n", - "24/11/23 17:37:21 FUNCTION LOG (29d2a2db42954664bc3e65071997f0c4): start leaf job\n", - "24/11/23 17:37:22 FUNCTION LOG (ff78ff43033642eea84ddf083638e4b6): starting batch 1\n", - "24/11/23 17:37:23 FUNCTION LOG (33bd71ab5d9248e59d8ad192b6cbc619): starting batch 2\n", - "24/11/23 17:37:23 FUNCTION LOG (ff78ff43033642eea84ddf083638e4b6): done\n", - "24/11/23 17:37:24 FUNCTION LOG (33bd71ab5d9248e59d8ad192b6cbc619): done\n" - ] - } - ], - "source": [ - "job.subjobs[0]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Wait 30s here" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "[[[3]], [[2]]]" - ], - "text/plain": [ - "[[[3]], [[2]]]" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "[\n", - " [[subjob.result.get() for subjob in job.subjobs] for job in job.subjobs]\n", - " for job in job.subjobs\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - 
"toc_position": {}, - "toc_section_display": true, - "toc_window_display": true - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/helm/nested-syft-functions.ipynb b/notebooks/helm/nested-syft-functions.ipynb deleted file mode 100644 index 95dd9d10819..00000000000 --- a/notebooks/helm/nested-syft-functions.ipynb +++ /dev/null @@ -1,1062 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "a196017f", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "kj/filesystem-disk-unix.c++:1703: warning: PWD environment variable doesn't match current directory; pwd = /Users/koen/workspace/pysyft\n" - ] - } - ], - "source": [ - "# stdlib\n", - "from time import sleep\n", - "\n", - "# syft absolute\n", - "import syft as sy\n", - "from syft import ActionObject\n", - "from syft import syft_function\n", - "from syft import syft_function_single_use" - ] - }, - { - "cell_type": "markdown", - "id": "cb2d07de", - "metadata": {}, - "source": [ - "with server" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "9b31c627", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Staging Protocol Changes...\n", - "Object in Action Store that needs migration: []\n", - "Data Migrated to latest version !!!\n", - "Logged into as \n" - ] - }, - { - "data": { - "text/html": [ - "
SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`.

" - ], - "text/plain": [ - "SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "node = sy.orchestra.launch(\n", - " name=\"test-domain-helm2\",\n", - " dev_mode=True,\n", - " reset=True,\n", - " n_consumers=3,\n", - " create_producer=True,\n", - " queue_port=3322,\n", - ")\n", - "\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "507740d2", - "metadata": {}, - "outputs": [], - "source": [ - "res = client.register(name=\"a\", email=\"aa@b.org\", password=\"c\", password_verify=\"c\")" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "0c33d096", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Logged into as \n" - ] - } - ], - "source": [ - "ds_client = node.login(email=\"aa@b.org\", password=\"c\")" - ] - }, - { - "cell_type": "markdown", - "id": "176addfb", - "metadata": {}, - "source": [ - "setup: compute train-test overlap between a very large train set and a smaller test set. Small test is still to big for memory, so we split it into 54 parts. We keep 1 of those parts in memory. We dont keep the train set in memory, but read and compare with 1/54 parts line by line. Each part takes ~30 hours, but we can run 54 processes in parallel." - ] - }, - { - "cell_type": "markdown", - "id": "a0cea81b", - "metadata": {}, - "source": [ - "# Setup syft functions" - ] - }, - { - "cell_type": "markdown", - "id": "da2b114a", - "metadata": {}, - "source": [ - "## Dataset" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "83307a2f", - "metadata": {}, - "outputs": [], - "source": [ - "x = ActionObject.from_obj([1, 2])\n", - "x_ptr = x.send(ds_client)" - ] - }, - { - "cell_type": "markdown", - "id": "31bbb3ff", - "metadata": {}, - "source": [ - "## Batch function" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "5d2fd248", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: Syft function 'process_batch' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`.

" - ], - "text/plain": [ - "SyftSuccess: Syft function 'process_batch' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@syft_function()\n", - "def process_batch(batch):\n", - " # stdlib\n", - " from time import sleep\n", - "\n", - " # takes 30 hours normally\n", - " print(f\"starting batch {batch}\")\n", - "\n", - " sleep(1)\n", - " print(\"done\")\n", - " return batch + 1" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "9ba22655", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: User Code Submitted

" - ], - "text/plain": [ - "SyftSuccess: User Code Submitted" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "ds_client.code.submit(process_batch)" - ] - }, - { - "cell_type": "markdown", - "id": "01319f1f", - "metadata": {}, - "source": [ - "## Main function" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "ca1b95ee", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
SyftSuccess: Syft function 'process_all' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`.

" - ], - "text/plain": [ - "SyftSuccess: Syft function 'process_all' successfully created. To add a code request, please create a project using `project = syft.Project(...)`, then use command `project.create_code_request`." - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "@syft_function_single_use(x=x_ptr)\n", - "def process_all(domain, x):\n", - " jobs = []\n", - " print(\"Launching jobs\")\n", - " for elem in x:\n", - " # We inject a domain object in the scope\n", - " batch_job = domain.launch_job(process_batch, batch=elem)\n", - " jobs += [batch_job]\n", - " print(\"starting aggregation\")\n", - " print(\"Done\")\n", - " return None" - ] - }, - { - "cell_type": "markdown", - "id": "1e77c5db", - "metadata": {}, - "source": [ - "# Approve & run" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "0ab572f9", - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Request approved for domain test-domain-helm2\n" - ] - }, - { - "data": { - "text/html": [ - "
SyftSuccess: Request 49e1d1db7a08471287ace4aa89d879bb changes applied

" - ], - "text/plain": [ - "SyftSuccess: Request 49e1d1db7a08471287ace4aa89d879bb changes applied" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "r = ds_client.code.request_code_execution(process_all)\n", - "client.requests[-1].approve()" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "375ed965", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "23/11/23 12:19:52 FUNCTION LOG (fc9e65a57be843faba46743fa2fe6fd8): Launching jobs\n", - "23/11/23 12:19:53 FUNCTION LOG (fc9e65a57be843faba46743fa2fe6fd8): starting aggregation\n", - "23/11/23 12:19:53 FUNCTION LOG (fc9e65a57be843faba46743fa2fe6fd8): Done\n", - "23/11/23 12:19:53 FUNCTION LOG (21f09099b7ac43ebbf6006e12eab8d81): starting batch 1\n", - "23/11/23 12:19:53 FUNCTION LOG (aab2c84e168d4c8fbb560c0b72ce8a27): starting batch 2\n", - "23/11/23 12:19:54 FUNCTION LOG (21f09099b7ac43ebbf6006e12eab8d81): done\n", - "23/11/23 12:19:54 FUNCTION LOG (aab2c84e168d4c8fbb560c0b72ce8a27): done\n" - ] - } - ], - "source": [ - "job = ds_client.code.process_all(x=x_ptr, blocking=False)\n", - "sleep(5)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "2db04ddd", - "metadata": {}, - "outputs": [], - "source": [ - "# job.subjobs[0].logs()" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "c3d71844", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - "
\n", - "
\n", - "
\n", - "

Job List

\n", - "
\n", - "\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - " \n", - "
\n", - "\n", - "

0

\n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - "
\n", - "
\n", - " \n", - " \n" - ], - "text/plain": [ - "[syft.service.job.job_stash.Job, syft.service.job.job_stash.Job]" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job.subjobs" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "7d8a2f95", - "metadata": {}, - "outputs": [], - "source": [ - "# client.jobs[0].subjobs[0].logs()" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "cc0db669", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Pointer:\n", - "None" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "job.wait()" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "5bf0974f", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "5" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "sum([j.wait().get() for j in job.subjobs])" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "948d9162", - "metadata": {}, - "outputs": [], - "source": [ - "node.land()" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "e61760f5", - "metadata": {}, - "outputs": [], - "source": [ - "# stdlib" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": true - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/helm/short_input.jsonl b/notebooks/helm/short_input.jsonl deleted file mode 100644 index 6d1a280b70e..00000000000 --- a/notebooks/helm/short_input.jsonl +++ /dev/null @@ -1,10 +0,0 @@ -{"text":"\nChina Deserves Donald Trump - rm2889\nhttps:\/\/www.nytimes.com\/2019\/05\/21\/opinion\/china-trump-trade.html\n======\nNotPaidToPost\n> so he\u2019d be wise to curb his nationalistic \u201cno-one-tells-China-what-to-do\u201d\n> bluster\n\nThis comment highlights both ignorance of Chinese history and continuing\nAmerican arrogance.\n\nChina has been painfully dictated what to do during the last 200 years. This\nhas had a profound effect on the country and has led to the collapse of\nimperial rule and the drive to 'rejuvenate' the country (to use the official\nterm in China).\n\nThis is also arrogant because it suggests that China should be told what to do\ncoming from THE country (the USA) that really is the archetype of \"no-one\ntells us what to do\".\n\nI would quip that one of the US's issues with China is that China is not told\nwhat to do and is too big to be easily coerced. A bit of a rude awakening for\nthe US...\n\n> Huawei then uses ... its rising global market dominance to set the next\n> generation of global 5G telecom standards around its own technologies, not\n> those of Qualcomm or Sweden\u2019s Ericsson.\n\nWhich is exactly what Qualcomm did for 3G. 
Don't hate the player, hate the\ngame.\n\n~~~\nFjolsvith\n>> so he\u2019d be wise to curb his nationalistic \u201cno-one-tells-China-what-to-do\u201d\nbluster >This comment highlights both ignorance of Chinese history and\ncontinuing American arrogance.\n\n>China has been painfully dictated what to do during the last 200 years. This\nhas had a profound effect on the country and has led to the collapse of\nimperial rule and the drive to 'rejuvenate' the country (to use the official\nterm in China).\n\nI disagree. China has been given some unfair advantages in order to help it\nbuild its economy over the last 40 years. Instead of growing up and becoming\nan adult, they've become the playground bully with their IP theft and closed\nmarket.\n\n>This is also arrogant because it suggests that China should be told what to\ndo coming from THE country (the USA) that really is the archetype of \"no-one\ntells us what to do\".\n\nIf China doesn't figure out the game real fast, they're going to lose it. And\nto do that, they really need to do what people are telling them they should\ndo.\n\n~~~\nNotPaidToPost\n> I disagree\n\nI should point that the part of my comment you quoted expressed the historical\nreality, not an opinion.\n\n~~~\nFjolsvith\nI still disagree.\n\n~~~\nNotPaidToPost\nThe good thing with disagreeing with reality is that reality does not care.\n\n------\ncfarm\nThis article makes a good point about \"cheating\". I personally don't like that\nword here, but by blocking other companies like Amazon, Google, FB, etc from\nentering in China, then copying those companies and selling the products to\nthe rest of the world, this presents a problem for trade fairness.\n\n","meta":"{'id': '19979654'}"} -{"text":"\nHow to Be an Effective CEO - terpua\nhttp:\/\/www.readwriteweb.com\/readwritestart\/2009\/07\/how-to-be-an-effective-ceo.php\n======\npclark\nloved this line: \"Core is what you have to do really well and do in-house.\nEverything else you can and should outsource\"\n\n","meta":"{'id': '685596'}"} -{"text":"\nA Survey of Deep Learning for Scientific Discovery - alokrai\nhttps:\/\/arxiv.org\/abs\/2003.11755\n======\nantipaul\nIn a survey on \"scientific discovery\", I would have expected more examples\nthan face and image recognition and natural language processing, which are so\nstale at this point.\n\nHealthcare? Physics? Chemistry? Biology? Sociology?\n\n~~~\nanthony_doan\nThe rest you listed require inference and causality.\n\nDeep learning does not do this.\n\nData with less noises are what most deep learning and non statistical models\ndoes well. Meaning that image, nlp, etc.. deep learning does well. But data\nwith lots noises\/uncertainty\/variance or even data that isn't large enough,\nsuch as time series, currently statistical models are still king\n([https:\/\/en.wikipedia.org\/wiki\/Makridakis_Competitions](https:\/\/en.wikipedia.org\/wiki\/Makridakis_Competitions)).\n\nEven with healthcare you're answering a question\/ hypothesis. This is where\nstatistical models strength lies because all statistical models are hypothesis\ntests and vice versa. There are very little opportunity in healthcare where\nyou would use deep learning compare to statistic. I've seen NLP can be of use\nbut the majority of work in healthcare are inference\/casuality base (this is\nwhy they use propensity model so much). I'm in this space public healthcare.\n\n~~~\np1esk\nInteresting you mentioned Makridakis competitions. 
There's one going on right\nnow on Kaggle, and the current leader believes a NN will be the winning model:\n[https:\/\/www.kaggle.com\/c\/m5-forecasting-\naccuracy\/discussion\/...](https:\/\/www.kaggle.com\/c\/m5-forecasting-\naccuracy\/discussion\/138881)\n\nMore generally, it seems that time series forecasting so far has mostly\nattracted statisticians with little DL experience [1]. Now that there is $50k\nprize, this will be a good test of whether statistical methods are \"still\nking\". If I were to enter this field, I'd probably look into latest\ntransformer based models, especially the ones used to model raw audio data,\ne.g. [2].\n\nThere's also a real possibility that whenever any strong forecasting method is\ndeveloped (DL based or otherwise) it's not published as the developers simply\nuse it to make money (betting, stock market, etc).\n\n[1]\n[https:\/\/journals.plos.org\/plosone\/article?id=10.1371\/journal...](https:\/\/journals.plos.org\/plosone\/article?id=10.1371\/journal.pone.0194889)\n\n[2] [https:\/\/arxiv.org\/abs\/1904.10509](https:\/\/arxiv.org\/abs\/1904.10509)\n\n~~~\nanthony_doan\nI'll wait to see the result at the end of the competition.\n\nThis is just one of the two competitions for m5. The other one is uncertainty.\n\n------\njefft255\nEric Schmidt, as in Google's ex-CEO, is the second author of this paper! I\ndidn't know he did any scientific research.\n\n~~~\nhervature\nHe has a PhD, unlike Brin and Page.\n\n~~~\njefft255\nRight, but they both were Ph.D. students and Brin I think published quite a\nbit of scientific papers before dropping out.\n\n------\nwswin\nfor the moment I thought it was from 2003\n\n------\nthrowqwerty\nLooks like a good summary. Will read. But at the rate the discipline moves I\nfeel like we need one of these every couple of months for everyone (not just\n\"lay\" scientists). Anyone know a good journal or something that produces a\nsimilar sort of survey frequently? Like once a quarter?\n\n~~~\nssivark\n\u201cRate at which the discipline moves\u201d is mostly churn, not progress. Important\ninsights come at a slower rate \u2014 at the speed of human understanding, not at\nthe speed of conference papers. Good papers from even decades ago are likely\nto still be useful \u2014 in fact, they will have the key ideas presented simply\nand clearly, without much jargon or hype. Yes, deep learning practice moves\nquite fast these days, but that\u2019s just the veneer on top of those deeper\nideas, trying out tweaks and variations. That\u2019s not completely an indictment\nof deep learning, rather, any nascent field has a lot of confusing bustle.\n\n------\nbiomodel\nAlways wonder who these kinds of reviews \/ surveys are for? Nobody is going to\nlearn machine learning by reading a 50 page pdf. Meanwhile, people that have\nexperience will have a hard time finding the info they don't already know.\n\nOpinionated & narrow >> Shallow & comprehensive\n\n~~~\nmistrial9\nI will read it, to defend my non-DeepLearning choices for supervised ML .. so\nmany on the bandwagon for unsupervised CNN with their GPUs\n\n~~~\nmistrial9\nI am misunderstood here.. it means, for the purposes that are appropriate, use\na disciplined, supervised model.. and know the strengths and weakness' of the\nCNN models.. 
yes, some reaction to the hype of CNN..\n\n","meta":"{'id': '22705028'}"} -{"text":"\n\nSimple distributed computation with Clojure and nREPL - dj-wonk\nhttps:\/\/github.com\/bluemont\/kuzu\n\n======\ndj-wonk\nSome comments, which may be obvious:\n\n* I have not tested Kuzu in production yet.\n\n* It has a tiny fraction of the power of real projects like Hadoop, Storm, Spark, and so on.\n\n* I created it because I wanted a simple way to run maps, reduces, and filters over many machines without making big changes to my original Clojure code. For this simple use case, many distributed computation systems seem quite complicated, so I thought it would be fun to write something extremely simple.\n\n* I was inspired by how PigPen [https:\/\/github.com\/Netflix\/PigPen](https:\/\/github.com\/Netflix\/PigPen) offers a very natural interface to Clojure. I wondered if that could work on top of something simpler than Hadoop + Pig.\n\n* This isn't the first time something like this has been done. See net-eval for example [http:\/\/nakkaya.com\/2010\/02\/16\/net-eval-dead-simple-distribut...](http:\/\/nakkaya.com\/2010\/02\/16\/net-eval-dead-simple-distributed-computing-for-clojure\/).\n\n* Kuzu may or may not be useful to you, but I'd be interested in any and all commentary.\n\n","meta":"{'id': '7829473'}"} -{"text":"\n\nColor CEO explains how\/why they raised $41M - jasonmcalacanis\nhttp:\/\/www.youtube.com\/watch?v=_WGdwY6h5JI&list=SL#\n\n======\narepb\nAfter being prepared to hate this company, it's actually hard not to love this\nguy. The idea is really interesting, too. \"The implied social network will\nlead to better behavior\" -- interesting thought.\n\n","meta":"{'id': '2384827'}"} -{"text":"\n\nAnyone know of a site where the crowd votes to make someone famous every day? - amichail\n\nOr some variation on that theme?\n======\nanamax\n\"Make someone famous\" is more than \"a person becomes known by lots of other\npeople\". There's also some persistence.\n\nYes, a site that has lots of visitors can make them aware of a given person,\nperhaps even once a day, but what do said visitors get out of it and why will\nthey remember last week's \"new star\"? Note that \"make aware\" happens before\nmass voting. Mass voting can only choose between exposed folks.\n\nA small group can vote to expose someone to a larger group, but that assumes a\nsolution to a fairly hard problem - why does the larger group delegate their\nattention selection to said small group?\n\n------\nkqr2\nWould it work like a lottery?\n\n","meta":"{'id': '453484'}"} -{"text":"\nAsk HN: What would work well in a country built on the Unix Philosophy? - Numberwang\nIt seems to me counties end up with rules and institutions with increasingly less chance of improvement yet a accumulated complexity.

Would the Unix Philosophy when applied to country building help with this problem?\n======\nsova\nIf all legislation followed the model presented by git (versioning,\nincrements, branches, merges, and total transparency) I think that it would\nreflect positively on a true democracy.\n\nThe next step, however, would be to educate the populous so that all voters\nwere informed, and that voters would be presented (in an elegant fashion) with\nwhat is relevant to their districts on the three tiers of national, state, and\nlocal policy. I don't know if Unix has a good metaphor or reflection of this,\nbut unix is meant to be a) modular and b) minimalist, so if we can sponsor the\nidea of true modularity in voting, I think we could see some full-\nparticipation schemes that are not overwhelming. I don't have to vote on every\nissue, but could vote on collections of issues that reflect my general\nideology or current understanding of what best suits the republic.\n\nAnother issue though, is ownership. In the Feudalistic Republic of the United\nStates (as of 2016) it's hard to describe a system that could be adopted\nreasonably that promotes the idea that all the nation belongs to everyone in\nit. We have some things like \"the right to life, liberty, and property [often\nmisquoted as 'happiness' at the end here]\" and how does one reconcile this\nidea of property with a truly harmonious community? Good question.\n\nSo in short, the basis of the Unix philosophy would help (especially with law\nversioning, that is just what needs to happen and is so brilliant and clear I\nam surprised there is not greater traction for it). All Laws need time limits\n(and easy renew options if they are good)... And the entire populous needs\nhigher quality information that [forces?] causes people to consider the\ncommunity at large.\n\n\/rattle like a snake\n\n~~~\noftenwrong\nI have been quietly advocating for version controlled legislation for a long\ntime. Here in Massachusetts, bills describe what they would change in the text\nof a statute directly - a bit like an ed script. Here's an actual example:\n\n>SECTION 2. Said section 35 of said chapter 123, as so appearing, is hereby\nfurther amended by striking out the words \u201cis an alcoholic or substance\nabuser\u201d, in lines 17 and 18, and inserting in place thereof the following\nwords:- has an alcohol or substance use disorder.\n\n>SECTION 3. Said section 35 of said chapter 123, as so appearing, is hereby\nfurther amended by inserting after the word \u201ca\u201d, in line 36, the third time it\nappears, the following word:- qualified.\n\n>SECTION 4. Said section 35 of said chapter 123, as so appearing, is hereby\nfurther amended by striking out the fourth and fifth paragraphs and inserting\nin place thereof the following 3 paragraphs:-\n\nAs someone who attempts to keep informed about changes to the law, this style\nis a huge obstacle. It often necessitates a lot of manual piecing-together in\norder to form a complete view of the final change. A simple diff view would\nmake it much easier to understand.\n\nI have considered tracking changes to the law in git, including representing\nbills as branches, as a side project idea, but I determined it would require\nfar more effort than I am willing to put in.\n\n~~~\nsova\nWow sir. That is really a great list of examples! It actually seems [very\nvery] feasible to make a simple system that could automate this based on the\nlanguage used. 
It would be a worthwhile endeavor, but like you say, would take\na lot of time\/effort investment.\n\nPerhaps an open-source effort that does this (tracks and updates current laws\nand shows diffs) could be a worthwhile beginning?\n\nI think every senator and representative that has ever had to amend\nlegislation would delight at the thought.\n\n~~~\noftenwrong\nI was only considering doing it manually. I don't know how feasible it would\nbe to automate the conversion process. The formatting and language used in\nthese \"edit script\"-style bills varies considerably, as they are written for\nhumans by humans with no standardisation.\n\n~~~\nsova\nHonestly, this seems like one of the more realistic problems NLP could\nactually solve. Yes there may be many variants, say 100 or even 1000 different\nstructures and vocabularies for updating versions, but a differential neural\nnetwork where you have inputs (like the pre-amended law) and outputs (like the\nlaws after the \"amendments\" or version bumps) would actually be perfect for\nlearning what means what and when to do it.\n\nIt would be the perfect grad project for someone interesting in bridging the\ngap between computation\/machine learning and legislation.\n\nOf course, it would be a little tedious setting up the learning (thousands of\nsets of input cases and output cases) but in the end the findings could be\nused across the board.\n\n------\narkitaip\nHow do you define The Unix Philosophy as applied to a country? Software\ndoesn't come close to the complexity of an entire country so your analogy\ncould possibly be fundamentally mismatched...\n\n~~~\nNumberwang\nWell I believe that fundamentally the complexity of a country is to a large\nextent historical artifacts.\n\nHow do you think the relations between institutions would be different, how do\nyou think they would perform their functions differently? What would their\nstructures be?\n\nOr focus on some specific example -How would voting be different? -How would\nregistering for a licence be different? -How would taxation be different?\n\n~~~\nsova\nI think with taxation we could also do very cool things: Say your nation taxes\nat 30%, what if every voter had a subset of that value (say, 12.2%) that they\ncould choose which district or set-of-needs to fund?\n\nLike maybe I want my 12.2 to go to education for kids 6mnths-12years, or maybe\nI want to fund state medicines, and my neighbor and I both pay the base rate\nthat covers necessities like roads and stuff, but he may fund shelters instead\nof medicine specifically with his 12point2. It could be really wonderful.\n\nIn effect, people may become more participatory in their own governing\nsystems, and could actually direct funds instead of relying on bill-makers to\nfigure out where to spend monies\/resources.\n\n------\nangersock\nThe plumbing, presumably.\n\n~~~\nbbcbasic\nThe sewerage and garbage can be piped into \/dev\/null\n\n------\noftenwrong\nNational PKI. Every citizen would have a key pair. I believe I have read that\nEstonia has implemented this.\n\n~~~\nNumberwang\nWhat could it be used for?\n\n~~~\nsova\nVoting! And easily verifying a) your vote was\/is counted and b) is accurate\nfor what issues\/candidates you voted for. In fact, we could eliminate most\ncandidates because they are only there to \"represent\" the wills\/intentions of\ntheir constituents. 
Gloabl PKI pairings for voting would eliminate the need\nfor a lot of \"representatives\" and we could do more direct forms of democracy\ninstead!\n\n","meta":"{'id': '12585834'}"} -{"text":"\nNumber of Users on Each Plan - danw\nhttp:\/\/www.barenakedapp.com\/dropsend\/number-of-users-on-each-plan\n======\njwecker\nnice post. In my experience also the lowest paying accounts are the most\ndifficult to maintain- out of proportion certainly to the revenue they bring\nin. However, one thing it didn't mention here is to remember not to discount\nmarket share. In lots of apps the higher subscription plans will only be\nupsales- no one will jump straight into the business account, for example. And\nin some cases your low paying accounts are doing a lot of evangelizing for\nyour product, or not using a competitors product, etc. Keep it balanced, for\nsure, but get lots of users.\n\n","meta":"{'id': '3714'}"} -{"text":"Ask HN: What are well designed SaaS websites? - piu\n======\nksec\nStripe.com\n\nSimple, Consistent, fast , effective.\n\nThere are many other listed here as well. They mostly follows the same layout\nand pattern. What separate them are wording and graphics. Using simple words,\nand shortest sentence possible to describe your SaaS, and choice of graphics,\nwhich really is a matter of personal taste.\n\nI think Stripe manage to do this very well.\n\nOff Topic: Did Stripe ever talk about their Ruby Stack?\n\n~~~\nsimlevesque\nOn Stripe I really like that you can see the logs of every API call you've\never made with the request headers and body and response body... It makes\nworking with it much easier than Braintree.\n\n~~~\nMandatum\nDoes Stripe write about how they handle storing those requests\/responses?\nSeems like this could get very expensive, very quickly.\n\n------\nkenning\nI think turbotax has a pretty phenomenal interface if you're in the bracket of\npeople with really simple taxes. Two and three years ago, my taxes took me\nabout an hour.\n\nDepending on what you're looking for, you may also be interested in aping\ntheir freemium model, where the first time you use the service is free and\nsets you up quite well to reuse the service next year and pay $40 for one of\ntheir obnoxious services. As a customer it was quite frustrating but it\nsucceeded in getting me to pay $40 the second year, and had I not gone far out\nof my way to remove the \"plus\" and \"premium\" features I would have ended up\npaying ~$100 the first year and $140 total the second.\n\nThe third year I switched to a competitor and got to use their service for\nfree. In a way, using turbotax felt like a great UX mixed with a battle to\nread everything extremely carefully and retread my steps to avoid paying\nanything; to me, this is not all that morally reprehensible because it\nadversely affects people who don't value their money as much as their time.\nHowever it also seemed predatory in that a non-tech-savvy user such as my\nparents would likely be tricked into paying higher costs for essentially no\nadded value.\n\n~~~\ntootie\nThey have a really solid approach and keeping each step really straightforward\nand discrete to avoid overwhelming you with too much to think about at once.\nIt still fails really hard when you get to anything outside their flow. I had\nto spend time googling the awkward set of steps needed to deduct mortgage\ninterest. Ultimately, it wasn't hard, but it wasn't at all obvious how to do\nit.\n\n~~~\nbeamatronic\nUm... 
maybe you had a strange situation but usually for mortgage interest on\nyour home , your lender sends you a form with essentially 1 number on it and\nyou just enter this form into TurboTax when it asks you.\n\n------\nbjterry\nIt seems the question is ambiguous. Everyone is responding with the marketing\nwebsites of SaaS compnaies, but I interpreted it as asking for well-designed\ninternal interfaces of SaaS websites. Would love to see examples of that which\npeople think are particularly great. Personally I've always found Gusto and\nBasecamp to have very good interfaces. Stripe's internal interface (which\nothers have mentioned for their public site) gets the job done but I would\nhardly call it great.\n\n------\nphilip1209\nSome of my favorites:\n\n[https:\/\/mailchimp.com](https:\/\/mailchimp.com)\n\n[https:\/\/transitapp.com\/](https:\/\/transitapp.com\/)\n\n[https:\/\/www.intercom.com\/](https:\/\/www.intercom.com\/)\n\n[https:\/\/lattice.com\/](https:\/\/lattice.com\/)\n\nI'm fond of what we have built:\n[https:\/\/www.moonlightwork.com](https:\/\/www.moonlightwork.com)\n\n~~~\nwhitepoplar\nHey, curious about your experience with Mailchimp. I've noticed that people\nseem to either love it or hate it. What do you think they do well? Where do\nthey fall short? (if at all)\n\n~~~\njonathan-kosgei\nI hate mailchimp and prefer to use tinyletter.com. I can't talk enough about\nhow much I love tinyletter!\n\n~~~\nflaviocopes\nTinyLetter is amazing. Simple, easy to use, just does what you need without\ntemplates, campaigns and other stuff that gets in the way between you and\nsubscribers receiving an update from you.\n\n------\nanacleto\nNeedless to say Stripe.com \\-\n[https:\/\/www.plainflow.com\/](https:\/\/www.plainflow.com\/) \\-\n[https:\/\/sentry.io](https:\/\/sentry.io) \\-\n[https:\/\/slack.com](https:\/\/slack.com) \\-\n[https:\/\/figma.com](https:\/\/figma.com) \\-\n[https:\/\/basecamp.com](https:\/\/basecamp.com)\n\n~~~\nphilfrasty\nI found Slack rather poor in explaining what they are doing. This text is\nbasically their entire landing page.\n\n\"When your team needs to kick off a project, hire a new employee, deploy some\ncode, review a sales contract, finalize next year's budget, measure an A\/B\ntest, plan your next office opening, and more, Slack has you covered.\"\n\nDo they offer A\/B testing? HR tools? Code deployment? Who would have guessed\nit is chat.\n\nTheir \/features page does a better job: \"It simplifies communication. Slack\nbrings all your team's communication together, giving everyone a shared\nworkspace where conversations are organized and accessible.\"\n\n~~~\nanacleto\nTrue.\n\nBut that's actually a common trend. When the company's brand gets bigger and\nstronger in people's mind, company position slowly switches from\n\n1 Product attributes\n\n2\\. Product benefits\n\n3\\. Emotional benefits\n\n4\\. Something bigger\n\nThis applies well to every type of product. SaaS included.\n\nThis is a great essay on the topic:\n[https:\/\/medium.com\/speroventures\/branding-for-\nbuilders-19e10...](https:\/\/medium.com\/speroventures\/branding-for-\nbuilders-19e103ef3f1d)\n\n------\nlwansbrough\nI was wondering the same thing the other day: looking for inspiration but also\nexperienced recommendations and UI patterns. 
Found this with a quick Google (I\nhave no affiliation): [https:\/\/blog.chartmogul.com\/saas-landing-\npages](https:\/\/blog.chartmogul.com\/saas-landing-pages)\n\nAlso I found Pinterest to be a good resource for finding designs (more so than\nDribbble, Behance, etc. surprisingly.)\n\n------\nspking\n[https:\/\/baremetrics.com](https:\/\/baremetrics.com)\n\n[https:\/\/sendgrid.com](https:\/\/sendgrid.com)\n\n[https:\/\/www.drift.com](https:\/\/www.drift.com)\n\n[https:\/\/lookback.io](https:\/\/lookback.io)\n\n[https:\/\/reply.io](https:\/\/reply.io)\n\n~~~\nbriandear\nBaremetrics for sure. Really effective \u2014 the dashboard gives you all the\nimportant data quickly and then you can easily drill down. I use their product\nseveral times a day and it\u2019s the best interface of all of the many services I\nuse.\n\n------\njonathanbull\n[https:\/\/lookatthatsaas.com](https:\/\/lookatthatsaas.com) is a good resource\nfor inspiration.\n\n------\nqstearns\n[https:\/\/segment.com\/](https:\/\/segment.com\/) seems to take design really\nseriously. They also have a pretty nice React toolkit here:\n[https:\/\/segmentio.github.io\/evergreen\/?selectedKind=alert&se...](https:\/\/segmentio.github.io\/evergreen\/?selectedKind=alert&selectedStory=Alert&full=0&down=0&left=1&panelRight=0&downPanel=storybook%2Factions%2Factions-\npanel)\n\n------\ntschellenbach\nI frequently compare [https:\/\/getstream.io\/](https:\/\/getstream.io\/) with\n[http:\/\/stripe.com\/](http:\/\/stripe.com\/),\n[https:\/\/www.mapbox.com\/](https:\/\/www.mapbox.com\/), sendbird.com, algolia.com,\npusher.com and [https:\/\/layer.com\/](https:\/\/layer.com\/)\n\n------\nruairidhwm\n[https:\/\/stripe.com](https:\/\/stripe.com) \\- It's beautiful but conveys all the\ninformation that you need quickly. It also has excellent copy.\n\n[https:\/\/canny.io](https:\/\/canny.io) \\- Very crisp design and it conveys the\nuse case really well.\n\n[https:\/\/baremetrics.com](https:\/\/baremetrics.com) \\- This has come such a\nlong way and has stunning design.\n\n------\nigorv\nNot really a SaaS website, but I really dig this\n[https:\/\/district0x.io\/](https:\/\/district0x.io\/)\n\n~~~\n2bitencryption\nI'm not too fond of that \"stay up to date\" modal, which tries to mimic system\nnative UI.\n\n------\nwhitepoplar\n[https:\/\/dnsimple.com](https:\/\/dnsimple.com)\n\n[https:\/\/basecamp.com](https:\/\/basecamp.com)\n\n[https:\/\/sentry.io](https:\/\/sentry.io)\n\n[https:\/\/semaphoreci.com](https:\/\/semaphoreci.com)\n\n[https:\/\/instapaper.com](https:\/\/instapaper.com)\n\nOld Heroku :-(\n\n~~~\njohnhenry\nWhat's changed about Heroku that's made you unhappy? (Not an employee, just\ncurious).\n\n~~~\nwhitepoplar\nTake this copywriting, for example:\n\n2011: \"Forget Servers - Get up and running in minutes, and deploy instantly\nwith git. Focus 100% on your code, and never think about servers, instances,\nor VMs again.\"\n\n2018: \"Deploy and run apps on today's most innovative Platform as a Service -\nHeroku is a cloud platform based on a managed container system, with\nintegrated data services and a powerful ecosystem, for deploying and running\nmodern apps. 
The Heroku developer experience is an app-centric approach for\nsoftware delivery, integrated with today\u2019s most popular developer tools and\nworkflows.\"\n\nWhich is better?\n\n~~~\nHeyLaughingBoy\n2018: it describes what they do in a much clearer way.\n\n------\nsimantel\nFor lots of examples, check out\n[https:\/\/www.pages.xyz\/](https:\/\/www.pages.xyz\/)\n\n------\nCommanderData\nI recently came across toggl for time tracking and reporting.\n\nToggl - Time tracking -\n[https:\/\/toggl.com\/pricing\/](https:\/\/toggl.com\/pricing\/)\n\nTheir pricing page is one of a nicest I've seen, really easy to grasp but also\nfunctional eye candy.\n\nI even hoped it was a WP template so I could customize one myself.\n\n------\nleonroy\nClubhouse is an excellent example of site and web app and their approach and\nintegration with both has clearly had a LOT of thought put into it:\n[https:\/\/clubhouse.io](https:\/\/clubhouse.io)\n\nProbably use it more than any other SaaS and am glad it\u2019s so good.\n\n------\ndeadcoder0904\nFind some great Inspiration at [https:\/\/hyperpixel.io](https:\/\/hyperpixel.io)\n\n------\noferzelig\n[https:\/\/omnystudio.com\/](https:\/\/omnystudio.com\/)\n\n------\ncyberferret\nCan I chime in here, not with a link to any specific site, but just as a call\nout to patterns that I am seeing recently.\n\nA lot of sites now have lists and content that updates automatically as things\nhappen on the back end. One good example is Intercom. I have their screen open\n24x7 on the first tab of my browser so I can monitor users on our site. I love\nhow it updates the 'time last seen' dynamically, and I usually have my\ncustomer list sorted by the 'time last seen' field.\n\nBut sometimes, while the content of the list fields are updated in real time,\nthe sorting of the list is not, and the list goes out of order (i.e. customers\nwho re-login recently are still shown lower down in the list that customers\nwho logged in an hour ago even though the 'last login time' is more recent.\n\nI wish there was a way in these instances to just refresh the list within the\npage, without doing an entire browser page refresh, which could take up to 10\nseconds in the old Intercom UX. Also, while talking about Intercom, jumping\nbetween the Customers page and the Conversations page could also take anything\nfrom 5 to 10 seconds on my browser, and there was NO indication that anything\nwas happening in the meantime, which increased confusion and frustration. I\nthink we need to bring back the hourglass or some other 'waiting' indicator\nfor transitions that take a while.\n\n(NB: The new Intercom UX has improved on the waiting delay significantly, but\nnot the sort ordering of the customer list).\n\nSomeone also mentioned the Stripe design (of their back end, not their\nmarketing site). I tend to like the new design of their admin panel, however\ntheir menu hierarchy was a little confusing, making it hard to find things a\nlot of the time. Also, the redesign tends to break the 'back button' behaviour\na lot. I tend to spend a lot of my time on the Stripe admin panel looking at\nwebhook logs etc., and every time I bring up the log listing, then drill down\nto an entry I can't seem to go 'back' to the list easily without the system\nrebuilding the entire list each time. 
Makes it frustratingly slow to try and\nfind the exact log entry I want when I have to spend so much time waiting for\npage refreshes.\n\nIn summary, I think we need to go back to these 'old fashioned' design\nconstructs which aren't considered \"trendy\" any more:\n\n* Give the user some sort of 'waiting' indicator if a page redraw is going to take time.\n\n* If a list on your page refreshes in the background, and your user can sort the list, make sure you update the sort order as well as the content\n\n* Don't break the back button behaviour if you can help it.\n\n------\njacobwg\n[https:\/\/webflow.com\/](https:\/\/webflow.com\/) \\- complex product, but the\nmarketing site makes it clear and understandable\n\n------\nhartator\nShameless plug, I kind of like the work we did on our own SaaS website:\n[https:\/\/serpapi.com](https:\/\/serpapi.com)\n\n------\nvelp\npitchbook.com\n\nInteresting mix of content and product information. I like how it's laid out\nas well\n\n------\nMojah\nMy favorites:\n\n\\- stripe.com\n\n\\- ohdearapp.com\n\nSimple, to the point & clean layouts.\n\n------\njohnhenry\nDid you mean that the services themselves are well designed or are you\nreferring to pages that describe them?\n\n------\njiveturkey\ndoes ecommerce count? mcmaster.com\n\n~~~\nmanuaero\nagreed ... mcmaster is one of the best designed sites.\n\n------\nfairpx\nChiming in after seeing us get mentioned here (context: lead designer @\n[http:\/\/Fairpixels.pro](http:\/\/Fairpixels.pro))\n\nWorking with engineers of b2b saas companies every day, for more than a year\nand having analysed all the best SaaS companies who have 10+ internal\ndesigners, I found a couple of principles that anyone can apply to make their\nwebsite look decent:\n\n* Consistency - One practical example: If you use a 4px border-radius, use a 4px radius everywhere. It may sound small, but having a consistent experience across your application makes the product feel so much more polished to your users. Don't use multiple fonts, different navigation menus etc. Keep it consistent.\n\n* Reduction - If anything, design isn't about adding more things to make it 'look nice'. Try to remove as many things as you can. If something doesn't serve a specific function, then remove it. Every pixel should make sense. The more you put in front of your users, the more brain power it'll require to process.\n\n* Divide - This is mostly UX, but one thing I see so many get wrong. A lot of SaaS apps overwhelm their users. They present them with all the features upfront. Whether it's a long onboarding form, or a dashboard with 50 actions one could take. By splitting up things in different ways, you can guide the user through the experience. Your signup process for example (that might be a big block in conversion) might be made so much better if you ask for certain types of information later on in the process.\n\n~~~\nvincentmarle\nI very much like your fixed fee \/ month business model. Exactly what I need, I\nwill likely become a customer soon.\n\nIs there a similar service out there that has fixed pricing for web\/app\ndevelopment?\n\n~~~\nredmaple\nsaw this few weeks ago:\n[https:\/\/greenpine.co\/#pricing](https:\/\/greenpine.co\/#pricing)\n\n------\niampaul\nMost SaaS businesses are run by engineers and unfortunately many of them\/us\nlack the eye for style. 
That said, here are two of my favorites:\n\n[http:\/\/fairpixels.pro](http:\/\/fairpixels.pro) \\- I found these guys here on\nHN and their work seems spot on.\n\n[https:\/\/www.spotify.com\/](https:\/\/www.spotify.com\/) \\- their simple design\nand IPO should be an example for fellow engineers who\u2019re building saas.\n\n~~~\nrahimnathwani\nFairpixels doesn't appear to be a SaaS service, but a service company that\ndoes design (not only for SaaS).\n\nI'm curious:\n\n\\- is there a particular SaaS designed by fairpixels that you consider an\nexample of good SaaS design?\n\n\\- do you have any relationship with Fairpixels? Your HN account has posted 2\ncomments since being created, and both those comments recommend fairpixels.\n\n~~~\niampaul\nIve been following their progress for over a year and am a customer. They\u2019ve\nstructured their website and business like a Saas. I don\u2019t know about all of\ntheir customers but I love the work they did for Uphex.com for example.\n\n------\nsoulchild37\nWould recommend [https:\/\/stunning.co\/](https:\/\/stunning.co\/) , I like the\nfloating tube animation and the increasing recovered amount is really\nappealling.\n\n~~~\nRjevski\nMeh, I disagree, the design looks dated.\n\n~~~\nsamstave\nI agree - it feels poorly-designed-2012\n\nBut thats not a knock on their offering - if their customers are happy and\nthey are doing a good job, then more power to their servers.\n\n","meta":"{'id': '16837683'}"} -{"text":"\nYou think you know what teachers do. Right? Wrong. - mathattack\nhttp:\/\/www.washingtonpost.com\/blogs\/answer-sheet\/wp\/2014\/02\/22\/you-think-you-know-what-teachers-do-right-wrong\/\n======\nsramsay\nThis is true of being a professor as well (I certainly didn't understand what\nteaching really was about until I starting doing it).\n\nI've always thought that our graduate students should be made to take acting\nlessons, because there's an element of second-order persuasion you have to do\nin a classroom that's hard to learn and difficult to describe but that shares\nsome similarity to acting -- or maybe just rhetoric in the very ancient sense.\n\nYou can't just purvey information and mumble something about its importance.\nUltimately, you're modeling what it means to be an intellectual -- trying to\ngive your students certain habits of mind by showing them how those habits\nplay out in practice.\n\nWe also spend an enormous amount of time trying to devise strategies for\ndealing with students who just don't get it (and you quickly learn -- or\nbetter learn -- that this might be the most important part of the job).\n\nI could say more, of course. It's a very subtle set of skills -- more art than\nscience, as they say. It's hard to do it at the college level, and I think\nit's far, far harder to do it at the elementary level, where the stakes are\nmuch higher.\n\n~~~\nbarry-cotter\nWhat kind of third level institution do you work at? One is under the impress\nthat going from passable to outstanding in teaching has much, much less effect\non one's chances of getting tenure than going from mediocre to good in your\nresearch.\n\n~~~\numanwizard\nThey never said it was particularly important for career advancement. How did\nyou read that into their post?\n\nAlso, what's with the condescending sneery tone?\n\n~~~\nadestefan\nBecause every time a post on education comes on HN everyone thinks they know\nall the answers. 
The comments end up turning into a \"Well this is the real\nreason...\" or \"Everyone needs to be just like...\"\n\nThe discussions end up being so worthless that I now flag every education\nrelated post on HN because it's just not worth the time here.\n\n~~~\njedmeyers\nWhy do you think it's called master of Arts in teaching, and not master of\nScience?\n\n------\nSniperfish\nMy wife is a teacher. I am consistently shocked how much work she does in\nevenings, weekends. I earn more than twice as much as her and more than her\nmaximum salary cap (we are both early in our careers). She blows me away in\nher dedication and effort, it's a great inspiration for me to continually\nstudy and work harder.\n\nI mention it to people and always hear 'well maybe she is different but I've\nseen lots of teachers and they just do it for the holiday'. As if everyone is\nequally dedicated in any profession. As if the guy that sits at his computer\n'working' for hours a day is a more efficient or effective worker just because\nhe does more hours. As if outside observers of any industry can really spot\nwho is producing vs who is not.\n\n~~~\nmontecarl\nI can echo your story exactly. My wife teaches and is involved in an after\nschool program. It isn't football but has a similar time commitment. During\ncertain parts of the year she works 6 days a week often leaving the house at 8\nam and returning at 9 or 10 pm. It is insane. Two other teachers in her\ndepartment work similar hours. The pay per hour isn't very good once you\nfactor all of that in.\n\n~~~\nGotAnyMegadeth\nAt the other end of the spectrum, one of the teachers at my old school used to\nturn up at 8:30 and leave at 15:30. She used to put a video on, and then hand\nout worksheets to fill in whilst she marked the worksheets from the class\nbefore. Terrible teacher, luckily I only had her for a few weeks.\n\n~~~\nnumo16\nI have a few friends that are teachers and most of their schools wouldn't\nallow for this sort of thing to happen. Teachers aren't allowed to sit at\ntheir desk while a class is in session, they must be instructing or walking\naround (during a test, video, etc...). They get a planning period, where they\nmight have a chance to do some grading or lesson planning, if they don't need\nto meet with a parent or something. This means they need to either stick\naround school several hours after it lets out to grade work and do lesson\nplans, or bring it home and work on it that night.\n\n------\nsaosebastiao\n>The problem with teaching as a profession is that every single adult citizen\nof this country thinks that they know what teachers do. And they don't. So\nthey prescribe solutions, and they develop public policy, and they\neditorialize, and they politicize. And they don't listen to those who do know.\nThose who could teach. The teachers.\n\nSorry, I cant take this seriously. The teachers unions are one of the most\npolitically powerful entities in the US. They can make a candidate, and they\ncan break a candidate. They can pass and tank ballot measures...even ones\ncompletely unrelated to their jobs. They can protect drunkards and criminals\nfrom getting prosecuted, let alone fired. 
They are fine forcing their agenda\ndown our throats, but they cant take a little pushback?\n\n~~~\npbhjpbhj\n> _They are fine forcing their agenda down our throats_ \/\/\n\nThe agenda of ensuring children have access to life-enhancing educational\nopportunities?\n\n> _They can make a candidate, and they can break a candidate._ \/\/\n\nYou mean a political candidate? You really think that the combined voice of a\ngroup of teachers can do that against the weight of media conglomerates, other\nunions, rich lobbyists and other political groups? Any examples?\n\nPresumably under your assertion the education system in the USA is the one\nthat the teaching unions have won by political action and the politicos and\nbusiness people are looking on powerless to influence it?\n\n~~~\nPaulHoule\nWell, I can say that in two weeks of homeschooling I got my son to write more\nthan they did in five years.\n\nHe was having trouble with bullies and the school did nothing about it. They\npretty much gave up on teaching spelling completely. We found out that our\nschool is a \"magnet school\" for behaviorally disturbed \"special\" kids from\nother districts so kids in the rich school and kids in the poor school where\ncommunities complain a lot get to enjoy a safer environment because the rural\nschool gets all the psychotic kids.\n\nI gave up on them when the superintendent gave a \"town hall\" where he told the\nmother of a \"special\" kid that he was a partner in his education and he told\nme I should just butt out because he was the expert and there's a new paradigm\nand homework is obsolete and because I don't have a phone number to call to\nget Albany breathing down his neck.\n\nF the teacher's unions.\n\n~~~\nking_jester\nThe problems you experienced go beyond teachers unions. Dumping \"problem\" kids\ninto one school is a recipe for disaster and communities are not served by\nthat kind of thing at all (except those that dumped off students, although I\nwould argue those communities aren't fixing their underlying problems).\nAdministrator heavy, top down approaches that override community and teacher\nautonomy are a bad thing in general, and the obsession with testing over\nstandard lessons and homework is a huge problem with the way the public\neducation system is run.\n\nUltimately teachers as a professional class deserve a union. We see in other\nplaces and countries that the unions do not serve as an impediment to a\nquality public education, so we have to ask ourselves what is really going on\nwith current systems and unions that make the situation so shitty (esp. in New\nYork state).\n\n~~~\nPaulHoule\nI'm not saying that teachers shouldn't have a union, but from my perspective\nit is part of the problem rather than the solution more often than not.\n\nFor instance, they opened a charter school in our district which seems to be\nan honest effort to provide a safe (bullying free) environment for the high\nschool and there have been two people associated with the union who have just\nbeen consistently hateful trying to shut it down.\n\n~~~\nking_jester\nThe charter school movement is one of those things that draws strong opinions.\nInitiatives to provide safe school environments are good, but privatized\ncharter schools have a lot of downside in terms of how a community, parents,\nand teachers can retain control over how education happens. 
In New York state\nin particular, there has been a strong effort to close public schools and open\nprivate charters, which in my opinion is the wrong way to fix problems with\npublic education. The disagreement over charters isn't just a union thing,\nalthough public educators would be upset to see the system they work for\ndismantled instead of repaired.\n\n------\nShivetya\npuff piece, if not pure propaganda bordering on hyperbole.\n\nPeople and students respect teachers as a whole, what they do not respect and\nI bet many in the profession do as well is the inability to remove those who\nare not good teachers.\n\nIt is not a position one walks into without many upon many stories about what\nyour really getting into. My Aunt retired from the trade, her aggravations in\norder that I remember are, Administrative people(usually political\nappointees), other teachers, and parents. There were a few others but mostly\nthe tripe coming down from non teachers within the system seemed to be what we\nheard of.\n\nThat and the personal money she spent to have supplies because it was more\nimportant to blow money on marble floors than supplied, or having someone's\nwife\/kid\/friend in some advisement position that did nothing but occupy space.\n\nGuess what, I can say the same of some other service professions, having a\nneighbor who does night shifts as a nurse and hearing the horror stories of\nwhat she puts up with is enough to let me know some jobs come with extra\nmental if not physical stress.\n\nI think in the end we are all more than willing to heap accolades on good\nteachers. Its a system where the kids aren't first that irritates\n\n------\nsteveplace\nTeacher worship can only go so far.\n\nBecause this post makes the claim that _all_ teachers should be looked up to.\n\nMy entire family consists of teachers. They know who the bad teachers are.\nYou've got Paulina Pensioner who just shows old VHS tapes as a history\ncirriculum. Or Carl the Coach that knows, just _knows_ there's only one way to\nsolve this pre-algebra problem.\n\nAnd some teachers work hard. They bust their ass and bring grading home and\nlesson plan on the weekends.\n\nBut they aren't the problem. There's a bad system that keeps bad teachers in\nat the expense of the good.\n\nSo they design tests and standards as a way to \"firewall\" these bad teachers\nin, to turn their poor performance into mediocre performance. And there's a\ncost, because it removes the creativity and initiative from the good teachers.\n\nI understand that the goal of the author is to criticize common core, but\nwhile the conclusion is sound (Core is garbage) the reasoning is not.\n\nAnd the new standards being developed? One of the main proponents is the\nCouncil of Chief State School officers. Many (probably most) came from the\nteaching profession. Who know what it's like to be a teacher.\n\nThe author gives us some feel-good patronization about how teachers have it so\nhard and we have no right to impose standards upon them. But these standards\nexist because we can't fire bad teachers.\n\n~~~\nrmrfrmrf\nI don't think Core is garbage at all. 
I think there's a deeply ingrained\nculture of anti-intellectualism in US culture that needs to be nuked out of\nthe school system, and I honestly couldn't care less what the collateral\ndamage is.\n\n~~~\nsteveplace\nHere's the thing.\n\nYou like it when there's wide, sweeping cirriculum on the Federal level...\nwhen you agree with it.\n\nBut what happens if there's enough political pressure (it is a midterm\nelection cycle) to add ID into the cirriculum? Or maybe they look at feel-good\nmath that is just teaching to the test [1]?\n\nAnd that's the issue. Centralized power is great when you agree with it, but\nterrible in the wrong hands.\n\n[1] [http:\/\/www.momdot.com\/common-core-is-making-me-\nstupider\/](http:\/\/www.momdot.com\/common-core-is-making-me-stupider\/)\n\n~~~\nrmrfrmrf\nI agree with your point. I suppose I'm fortunate enough to also agree with the\ngoals of Common Core as they are today.\n\nOnto that article, however:\n\n1\\. I never use an academic degree as an indicator of intellectual capacity. I\nfind that some people are so objective-driven that they zoom right past the\npoint and straight to rageville when they don't understand something.\n\n2\\. A simple Google search on front-end estimation would have helped this mom\nrealize that the example given on the sheet is incorrect. I will concede that\nan effective teacher would have realized that the example given is incorrect\nand would have corrected it.\n\n(In front end estimation, you round the leftmost digit, so the example should\nactually be 400 + 300, not 300 + 200). IMHO 700 is actually a decent estimate\nfor 645, so I don't think there's a problem with the math itself. It's not\nreally feel-good math, but I think some people take for granted that\nestimation is not an innate ability.\n\nNow, it becomes another discussion altogether when the teacher is so horrible\nthat they refuse to accept that the example is wrong. But, I don't think I've\nseen evidence of that, so I won't accuse anyone of anything.\n\nEDIT: I just read some of the comments in that article, and it looks like some\ndistricts teach front-end estimation with truncation rather than rounding, in\nwhich case 300 + 200 = 500 would be correct.\n\nHere are a few more things to note: the parents here _assume_ that estimation\nand rounding are the same thing. That in itself isn't true.\n\nMore importantly, though, look at the _goal_ of the estimation -- to see if\nthe _actual_ answer, 645, is reasonable. That's _not_ the same thing as asking\nif 500 is a reasonable estimate of 645. I think the point of this exercise is\nfor kids to say \"ok, if I add these two numbers together, I _expect_ to get a\n3-digit number somewhere in the ballpark of 500.\" That is to say: if I add 354\nand 291, I shouldn't expect to get 20000 or 7 or 81 or 9750. It's just a\nsimple way of checking your work using a quick, easy method that you can do in\nyour head. Again, I find the value in this -- adding \"common sense\" to the\ncurriculum is definitely something I can get behind, but I understand that\nparents who aren't used to \"common sense on paper\" will struggle.\n\n------\nmildtrepidation\nI hate writing like this. 
Even if most people don't know the thing you're\nreferring to, basically telling the entire browsing population of the internet\n\"we're all stupid and here's why\" immediately leaves a bad taste, particularly\nfor people -- you know, like _teachers_ \\-- who _do_ know what teachers do, or\npeople who didn't make the assumption being assumed in the first place (which\nsays a lot more about the author than anything else).\n\nPedantic? Maybe. But to me this is a really childish way to make a point that\ncould be better stated in a way that doesn't instantly, baselessly denigrate\nthe reader, particularly when you're writing for a publication that banks on\nits credibility and reputation.\n\n------\npatmcc\nI have tons of respect and sympathy for teachers, but the argument I often\nhear for raising their pay (\"they work really hard, they're super important,\nit's a difficult job\") misses the central point.\n\nIt seems like we have enough teachers at the wages we currently pay. Teachers\nare willing to go into the profession despite the low wages, probably because\nthey want a satisfying job with good benefits. If we didn't have enough\nteachers...we'd have to raise wages. Supply and demand.\n\n~~~\nNursie\nAnd like many other situations which can be summed up as supply and demand, a\nrace to the bottom is an obvious outcome.\n\nMaybe we'd get better teachers if we paid more?\n\n~~~\npatmcc\nWe'd get better teachers if we paid more to good teachers. The problem is no\none can seem to agree on how to measure what makes a good teacher - one side\nis busy arguing seniority should be the primary measure, the other side argues\ntest scores, and neither one seems to want to spend any time or money figuring\nout an actually successful way to measure teacher skill.\n\n~~~\nmindslight\nYou could _ask the kids_. They certainly know which classes are engaging, and\nwhich are time-biding garbage. And ultimately, assuming a teacher isn't\nrunning a movie theatre, student interest _is_ the most important metric.\nYou'd of course have to keep the actual weighting process a bit fluid to avoid\nthe inmates gaining control of the asylum, but it should be quite\nstraightforward to pick out the extreme bad and extreme good teachers.\n\nIt would also be a good introduction to the rationale behind secret ballots,\nand when it is actually appropriate to lie.\n\n~~~\nameister14\nLook at ratemyprofessor and see how well an incredibly difficult professor\nthat is also engaging and interesting does; now imagine that in a situation\nwhere the people in his\/her class are forced to be there.\n\n~~~\nmindslight\nI took a quick look through that site, paging through my alma mater of a\ndecade ago. I do see pathologies in the ratings\/comments that remind me of\ncomplaints I would hear about professors from fellow students that were\nstressed, not getting the material, or used to a more structured environment.\nAnd if these ratings held weight with the university, I can definitely see\nprofessors dumbing down their lessons to avoid bad reviews. So I do see what\nyou're getting at with it going terribly wrong.\n\nStill, I think there's several key differences:\n\n1\\. Every school student would be rating their teachers, rather than just\nthose that loved a professor, had an axe to grind, or were encouraged to by an\nentertaining personality.\n\n2\\. 
The context would be \"closed\", with each teacher relative to their school,\nrather than open cross-institution competition with a front page of featured\n\"rockstar\" professors that make the rest seem inadequate.\n\n3\\. The high schools officially sanctioning ratings with real results would\ngive kids the feeling that they really do have a stake in the process, rather\nthan simply being its victims.\n\n4\\. High school is a more structured environment where the process details\nmatter a lot more. So a teacher eg giving out an incomplete homework problem\nis actually a valid indictment rather than the stressed out nitpicking of a\nculture shocked freshman.\n\n5\\. In college, there's a certain level of appreciation for the material that\neveryone should have but doesn't necessarily, causing them to get frustrated\nat a professor with a dry personality. Whereas with high school, the idea is\nthat everybody should be learning a cursory understanding of all subjects.\n\n6\\. In college, there's a huge variation in the level of courses. One specific\nprofessor I had for a seminar where it was basically his PhD research group\nand me, an undergrad who'd just started on a simultaneous master's. I learned\n_a lot_ in that class, and really appreciated him. I then ended up in a grad-\nlevel \"intro\" course with him (which I knew was an utter waste going in, but\nit was the only thing that fit my schedule). Most of the students were rote-\nmemorization paying-for-credential types, but his style certainly did them no\nfavors either, and I can definitely see my recollection echoed in a few of his\ncurrent reviews. I'd say that he's still a teaching asset, but not for intro\nlectures where most students aren't already committed to the subject.\n\nReally, there just needs to be _some_ extrinsic motivation\/reward for teachers\nthat are truly making a difference versus simply clock-punching, and that's\nnot more top-down testing edicts that further shackle them. And sure, the\nimmediate reaction shouldn't be to fire the lowest-reviewed, but neither\nshould we pretend that they deserve similar compensation to the exceptional\nones.\n\n------\ncarsongross\nYou think you know what field workers do. Right? Wrong.\n\nYou think you know what factory workers do. Right? Wrong.\n\nYou think you know what farmers do. Right? Wrong.\n\nYou think you know what oil rig operators do. Right? Wrong.\n\nYou think you know what coffee shop owners do. Right? Wrong.\n\nYou think you know what lawn care specialists do. Right? Wrong.\n\n~~~\nmandalar12\nI agree with your point: the title is sensationalist. The difference between\nteaching and owning a coffee shop (and the others examples) is that few people\nwill try to tell you how to handle your coffee shop while a lot think they\nknow better than you how their children should be taught.\n\n~~~\ncarsongross\nI've seen a close friend work 20 hours a day, barely make payroll, deal with\nemployee drug habits and try to minimize the legal damage a sociopathic\nemployee did.\n\nYou don't know what it's like owning a coffee shop.\n\n------\njerf\nIn other words, teachers are human and have real lives. This may be news to an\n18-year-old, but I'd really be surprised if it's really news to that many\npeople above 30. I may not be a teacher but I could fill a very stylistically-\nsimilar paragraph or two with the woes that have befallen me, too. Most people\ncan.\n\nThis strikes me as a variant on the _You don 't know what's like!_ meme... 
as\na rule of thumb, you should _never_ say that to anybody. You have no idea what\nthey've been through. Everyone you pass on the street has a story, and no\nmatter how bad you think yours is, you've got no guarantee that they don't\nhave one worse than you.\n\nWhat this essay describes is not specially \"teaching\", it's _life_.\n\n~~~\nSniperfish\nYou as an individual and your profession as a whole are different.\n\nThere is a very pertinent and legitimate point made in the article that\n-teaching- is not a respected industry.\n\nIt's not exactly a new comment!\n\n~~~\nhumanrebar\nCome to think of it, I don't think I hear teaching described as an industry\nvery often.\n\nWhat would be different if teaching were considered an industry? Would it be\nbetter?\n\n------\nVengefulCynic\nTeaching falls into the same category as stage magic, stand-up comedy and\nwriting - it looks easy and effortless when done by an expert because that's\npart of the expertise. Capturing attention, exciting young minds and engaging\nthem is something that, when done effectively, is transparent because that's\nhow it works best. The whole host of knock-on problems that are spawned by\nthis apparent ease are well-documented in TFA.\n\n------\nrjzzleep\ni see a lot of comments saying that we're watching from the sidelines\ncriticizing, and therefore have no clue what's going on.\n\nHow is that even remotely true? We are the victims of the system. We\nexperience firsthand what they do or believe they do.\n\nThis is like saying you think you know what the TSA is doing. Right? Wrong. Of\ncourse we do, we're the ones being screened.\n\nwhat we don't know is the logic and culture behind the decisions we see, but\nthat doesn't take any right away to criticize it.\n\nhaving been an overachiever in school, and early university, it's been a\nconstant struggle. \"oh but school is not actually made for people like you\"\nyou say. yeah, i know. how is that not a problem?\n\nedit: don't get me wrong, i've had a few really good teachers. but they've\nbeen rather few. and no, i'm not just counting the teachers i liked as good.\n\n~~~\nlewispollard\nDoes someone who's used a computer all their life know the ins and outs of\nbeing a programmer? Would you listen to their recommendations on how to\nimprove your code? The answer is likely yes, feedback from customers is\nimportant - but you're not gonna get any useful advice re: the architecture or\nthe design patterns used.\n\n------\ndanso\nAwhile ago, I had a teacher for a roommmate, and one who was young and very\npassionate, and I hope, good at it, because we were best friends and I'd hate\nto think I'd be a poor judge :). But I rarely heard her talk about the pure\njoy of teaching, at least compared to the difficulties of dealing with the\nmanagement (the principal) and other logistics issues...such as having to pay\nfor her own classroom supplies, including books that she wanted if they\nweren't on the state-wide curriculum, and pencil and paper for her poorer\nstudents.\n\nHer complaints about office politics were what really surprised me. Even\nthough I know every bureaucracy is universally crushing (well, maybe I grok\nbetter now after watching The Wire), it just seems that being a great,\npassionate teacher, supersedes any kind of office bullshit...such as the way\nprincipal communicates with you. 
But then again, if you can't get along with\nthe person who runs the place, and you're put in a shitty classroom and have\nto share a teacher's officespace with 3 other novices...how could that _not_\naffect your teaching performance and job satisfaction?\n\nOne memory I still have from high school was one afternoon when I had to stay\nafter school to give a presentation to the teachers on their regular Thursday-\nschool-wide meeting. The meeting was in the cafeteria...and you know how lunch\ntables reflect a sort of social-hierarchy among kids? It was no different for\nthe teachers...and even more surprising, the social lines seemed to fall along\nwith how I, as a student, expected them to (attractive young teachers sat with\nthe other young teachers; cool popular teachers could sit anywhere they want;\nthe weird chemistry teacher sat in the corner). I mean, it's one thing to have\nperceptions as a kid, but I _knew_ I was a petty kid...so it was a surprise to\nsee that things were not much different in the adult world.\n\n~~~\narbitrage\nGrok means to understand in fullness ... from the Heinlein novel, the\netymology of the word comes from to drink or to consume.\n\nYou cannot grok something just by watching it.\n\n~~~\ndanso\nYeah, but we're talking about _The Wire_ here :). But also I was an education\nreporter, worked as an aide, and have been part of other bureaucracies\nmyself...\n\n------\nnawitus\n>Most of all, we need to stop thinking that we know anything about teaching\nmerely by virtue of having once been students.\n\nI know something about teaching by reading peer-reviewed studies which give\nevidence for better teaching methods, but are almost never adopted because the\nteaching systems and\/or teachers are extremely conservative apparently all\naround the world.\n\nIn fact, I'd trust studies over teachers any day.\n\n------\nlarrik\nI feel like you could write this about basically any profession, besides the\nusual \"teachers are underpaid\" rant.\n\n------\nhumanrebar\n\n > All of you former students: you did not design\n > curricula, plan lessons, attend faculty meetings,\n > assess papers, design rubrics, create exams, prepare \n > report cards, and monitor attendance. You did\n > not tutor students, review rough drafts, and create\n > study questions. You did not assign homework. You\n > did not write daily lesson objectives on the\n > white board. You did not write poems of the week\n > on the white board. You did not write homework on\n > the white board. You did not learn to write\n > legibly on the white board while simultaneously\n > making sure that none of your students threw a\n > chair out a window.\n \n\nI'm not a teacher, so I could be wrong, but it seems to me that much of this\nlist falls into two categories:\n\n1\\. Routine things that could be orders of magnitude more efficient (or even\nfully automated) given enough resources. In most cases, the resources needed\nwould be fairly modest compared to the aggregate amount of effort teachers\neverywhere spend on them. Writing and grading elementary-level math tests, for\nexample, shouldn't take any time at all given the right software.\n\n2\\. Routine things that couldn't be automated well but could easily be done by\nsome sort of entry-level assistant. 
Babysitting and discipline tasks don't\nrequire college degrees.\n\nIt strikes me that the economics of education are structured in a way that\nthere is marginal impetus to improve efficiencies in the day-to-day work of\nteachers.\n\n~~~\nhackluck\nYou are not a teacher. And from your comments, you have not looked too much in\nthe research about how to teach students.\n\nYes, certain things COULD be automated... at considerable expense to student\nachievement. One big thing they have found - remove the personal feedback and\nconnection to students --> lose the motivation of students. If a teacher (the\nsame teacher) isn't interacting with a student consistently at nearly every\nstep of the learning process, the feedback doesn't stick and the student loses\nmotivation.\n\nIt would be interesting to looking to the basic research behind the feedback-\nachievement connection and stereotype threat to start.\n\nHope that helps you address some of the problems with the automate\/delegate\nsolutions so often thrown at teachers.\n\n~~~\nhumanrebar\n> Yes, certain things COULD be automated... at considerable expense to student\n> achievement.\n\nI seriously doubt that letting teachers automatically grade arithmetic tests\nwill hurt student achievement. The fact is that many teachers do that sort of\nthing at home in what should be considered overtime hours. I would like to\nhear how automatic grading causes student achievement to suffer.\n\nLikewise, I'm skeptical that it should be solely educators' responsibility to\nmake sure chairs are not being thrown out windows. Letting teachers focus on\neducating and not babysitting seems like a good thing.\n\n------\ncarlmcqueen\nI did two years of a special ed major in college before switching over to\ncomputer science and I can say that the ed program I was in covered in depth\nhow to teach and handle a class room, it focused on how to teach math to\npeople who don't understand any concepts, and the department had additional\noffered classes if you wanted to do teach for america or inner city schooling.\n\nSpeaking with friends who have become professors they are often jealous of\nthis because they were never given any kind of 'teaching' classes. Their under\ngrad wasn't in education and their teaching experience was trial by fire\nteaching assistant jobs of handling undergrad college courses.\n\nAll that said, I grow tired of the arguments and articles of 'don't speak\nunless you've walked a thousand miles' which I felt as I read this article.\nNot all knowledge and understanding must derive from doing something to have a\nvalid opinion. We need to treat teachers better and find better pay structures\nbut I've found no harsher critics of teachers and our schools than the\nteachers I went to college with as they filter into the systems and find tired\nand broken systems in which they get no voice until they have 'tenure'.\n\n~~~\nhackluck\n> All that said, I grow tired of the arguments and articles > of 'don't speak\n> unless you've walked a thousand miles' > which I felt as I read this\n> article. Not all knowledge and > understanding must derive from doing\n> something to have a > valid opinion. 
We need to treat teachers better and\n> find > better pay structures but I've found no harsher critics of > teachers\n> and our schools than the teachers I went to > college with as they filter\n> into the systems and find > tired and broken systems in which they get no\n> voice until > they have 'tenure'.\n\nI don't know that this article is so much about \"not speaking bad about\nteachers\", but about having compassion for teachers and talking about\neducation with a little more humility for the institution that helped produce\nyou. I would say this article is more of the \"teachers don't write articles\nabout how to __________ better, so don't let _______________ers tell teachers\nhow to teach better\" variety.\n\nAnd I totally agree that most young teachers are completely overwhelmed by the\nridiculous systems in which they are forced to teach. My only hope is that\nsome of these young, inspiring teachers remain in the profession long enough\nto change the broken system (which might take a LONG time!). The unions are\nbroken; for the most part, teachers are not.\n\n------\nhueving\nI'm not sure what the implication at the end is about public policy? Even if\nwe supposedly do not understand teaching, that does not mean we can't form\nopinions about the current system and develop policies for it. That's\nprecisely how politics work in every other field.\n\nHow many people who want to ban fracking actually understand fracking or\nprecisely what the real risks are? How many people want to ban nuclear energy\nand don't understand any of the actual risks of modern nuclear power plants?\n\nPolitics suck for anyone that isn't a politician. Each industry must learn how\nto deal with that aspect. Writing an appeal to emotion on the Washington Post\nis not going to sway anyone. It just resonates with people already on their\nside and sounds like whining to people that aren't.\n\n------\nnilkn\nSince the author says she started out making 5 times as much as a lawyer than\nas a teacher, I can only assume she landed one of the associate jobs at a\nmajor law firm straight out of law school making $160k+.\n\nShe makes it sound like anybody can hit up law school and come out making\nalmost $200k. The vast majority of law graduates do not land jobs like that.\nThe vast majority also have nearly crippling debt. The vast majority of the\nfirms paying $160k+ are also in hyper expensive metro areas, whereas teachers\ncan live comfortable lives in very rural towns (if they want to).\n\n~~~\nnumo16\nWhile what she is stating might not be the case for all law school grads, it\nisn't as far fetched to come out of school easily making 2-3 times what a\nstarting teacher is making, depepnding on the degree you choose. As a software\nengineer in michigan, you can come out of uni with a BS in computer science\nand easily find a job paying $55k+ and make 2-3 times as much as a starting\nteacher ($35k if you're lucky and find a good school that has funding) in the\nsame state after a year or two of experience.\n\n------\nfuse117\nThis story strikes home with me. Like the author, I too picked up an MAT,\ntaught for a couple years, and then left the field to pursue other\nopportunities. In the 3-4 years since I left, I have worked a lot less, made a\nlot more, and feel much more respected in what I do.\n\n------\nbiesnecker\n\"You think you know what teachers do. Right? 
Wrong.\"\n\nSo I'm wrong that I think that I know what teachers do?\n\nDo teachers teach you how to write intelligible headlines?\n\n------\nsmoyer\nMy wife teaches classes at a local high school as well as the university here\nshe's successful because she works hard at it, has a natural aptitude to teach\nand she cares about her subjects and shows it. I think I had 3-4 outstanding\nteachers during my 13 years in public school and they all had these same\ncharacteristics. I had plenty of bad teaches too.\n\n------\nPakG1\nMy cousin is a high school teacher and posted this article on Facebook with\nthe comment that it's like saying just because you had parents, you think you\nknow everything there is to know about parenting.\n\n------\nthe_watcher\nThis is just a painful argument to authority.\n\n------\ntokenadult\nEducation policy is the issue that drew me to participate on Hacker News,[1]\nso I'll jump in here too. I get the impression that mathattack, whose comments\nI enjoy reading, may have posted this article for disagreement. The Answer\nSheet blog from which this guest post comes is basically a propaganda organ,\nand some of the guest posts from the same blog that were submitted to Hacker\nNews in the past were exposed as hack jobs after discussion here.[2]\n\nThe obligatory disclosure here is to note that I am a classroom teacher by\noccupation. Over the years, I have been a teacher of Chinese to native\nspeakers of English, a teacher of English to native speakers of Chinese (and\nother languages), and most recently a teacher of advanced elementary\nmathematics (\"prealgebra\" mathematics for third-, fourth-, and fifth-graders)\nfor a nonprofit organization in my town. My HN user profile describes a bit\nmore of my background.\n\nYep, classroom teaching is hard, no doubt about it. It has emotional rewards\nthat some people value highly enough that it is a sought-after occupation, not\na labor-shortage occupation, and that has the most to do with teacher\ncompensation. Classroom teaching by teachers in private practice (like me) can\nalso be poorly compensated (relative to the difficulty of doing the job well)\nbecause most clients have already paid for \"free\" lessons at the local public\nschools through their taxes, and will only pay out of pocket for a private\nlesson if it is truly superior in some way. \"In modern times [as contrasted\nwith ancient times] the diligence of public teachers is more or less corrupted\nby the circumstances which render them more or less independent of their\nsuccess and reputation in their particular professions. Their salaries, too,\nput the private teacher, who would pretend to come into competition with them,\nin the same state with a merchant who attempts to trade without a bounty in\ncompetition with those who trade with a considerable one. . . . The privileges\nof graduation, besides, are in many countries . . . obtained only by attending\nthe lectures of the public teachers. . . . The endowment of schools and\ncolleges have, in this manner, not only corrupted the diligence of public\nteachers, but have rendered it almost impossible to have any good private\nones.\" \\-- Adam Smith, The Wealth of Nations, Book V, Part 3, Article II\n(1776)\n\nA couple of the comments posted here before I arrived in the thread mention\nthe particular skills that a teacher needs to have to teach a class\neffectively. 
There is much interesting research on this coming from the\ncharter school movement, with some of the best how-to research coming from the\nTeach Like a Champion[3] project. I love learning about new ways to be a more\neffective teacher. Besides actual teacher skills, another grave problem in\nUnited States school is extremely poor teaching materials[4] and I devote\nhundreds of hours to curriculum planning and seeking out the best available\ntextbooks[5] for the subjects I teach.\n\nA good teacher is worth a lot.[6] We would not go far wrong by saying that a\ngood teacher is literally worth his or her weight in gold. But the tricky\nissue in school administration is distinguishing effective from ineffective\nteachers. To ensure that school leaders have incentives to find and reward the\nbest teachers, we need to make sure that learners (or the adult guardians of\nminor learners) have the power to shop, the power to refuse the services of an\nineffective teacher and to seek out the services of an effective teacher.\nTeachers will gain both more pay and more respect if learners gain power to\nshop.\n\n[1]\n[http:\/\/news.ycombinator.com\/item?id=4728123](http:\/\/news.ycombinator.com\/item?id=4728123)\n\n[2]\n[https:\/\/news.ycombinator.com\/item?id=3327847](https:\/\/news.ycombinator.com\/item?id=3327847)\n\n[3] [http:\/\/teachlikeachampion.com\/](http:\/\/teachlikeachampion.com\/)\n\n[4]\n[http:\/\/open.salon.com\/blog\/annie_keeghan\/2012\/02\/17\/afraid_o...](http:\/\/open.salon.com\/blog\/annie_keeghan\/2012\/02\/17\/afraid_of_your_childs_math_textbook_you_should_be)\n\n[5]\n[http:\/\/www.artofproblemsolving.com\/Store\/viewitem.php?item=p...](http:\/\/www.artofproblemsolving.com\/Store\/viewitem.php?item=prealgebra)\n\n[6] [http:\/\/hanushek.stanford.edu\/publications\/valuing-\nteachers-h...](http:\/\/hanushek.stanford.edu\/publications\/valuing-teachers-how-\nmuch-good-teacher-worth)\n\n","meta":"{'id': '7318922'}"} diff --git a/notebooks/quickstart/00-quickstart.ipynb b/notebooks/quickstart/00-quickstart.ipynb deleted file mode 100644 index d5b14e2d8e0..00000000000 --- a/notebooks/quickstart/00-quickstart.ipynb +++ /dev/null @@ -1,115 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "9c0d3bd0-cd25-4794-81de-c413f260de3c", - "metadata": {}, - "source": [ - "# HAGrid Quickstart BETA" - ] - }, - { - "cell_type": "markdown", - "id": "b4813d9f-daec-4954-96aa-90c01159d396", - "metadata": {}, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " 📚 quickstart\n", - " \n", - " 🧙‍♂️ Install Wizard\n", - "
" - ] - }, - { - "cell_type": "markdown", - "id": "a50d74d3-66f0-4181-8c2f-b07fdf6b0979", - "metadata": {}, - "source": [ - " " - ] - }, - { - "cell_type": "markdown", - "id": "df038714-df01-4c56-a84c-b66a42d6cd81", - "metadata": {}, - "source": [ - "
Step 1. Run quickstart
" - ] - }, - { - "cell_type": "markdown", - "id": "3e0dd95e-38f8-46d4-b75a-d1bc9c6ac103", - "metadata": {}, - "source": [ - "Simply `import` and run `quickstart` by clicking in the grey cell below 👇🏽 and pressing `Shift` + `Return` on your keyboard, or use the `Run` menu at the top of the window." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c71be622-bb83-4d32-aeaa-00f2aca6ee80", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# third party\n", - "from hagrid import quickstart\n", - "\n", - "quickstart" - ] - }, - { - "cell_type": "markdown", - "id": "ec809a11-95c7-4783-a397-58c93eb19dcf", - "metadata": {}, - "source": [ - "
Step 2. Download a Tutorial
" - ] - }, - { - "cell_type": "markdown", - "id": "27a1c075-3082-408e-8486-ab9df41a8442", - "metadata": {}, - "source": [ - "Above you will see a list of available tutorials, simply add the name in `quotes` into the `quickstart.download` function and run the cell below just like before." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f5507b20-5149-4b6d-b7e0-0883a2358ceb", - "metadata": {}, - "outputs": [], - "source": [ - "# paste and run any commands here" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/quickstart/01-install-wizard.ipynb b/notebooks/quickstart/01-install-wizard.ipynb deleted file mode 100644 index 7050c8b5b8f..00000000000 --- a/notebooks/quickstart/01-install-wizard.ipynb +++ /dev/null @@ -1,500 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "5198b587-cf2f-4354-bdbb-08f9ecb46abf", - "metadata": {}, - "source": [ - "# HAGrid Install 🧙🏽‍♂️ Wizard BETA" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "a156040f-39cc-4660-a32a-5d2c2ff586b7", - "metadata": {}, - "source": [ - "\n", - " \n", - " \n", - " \n", - "
\n", - " 📚 quickstart / 01-install-wizard.ipynb\n", - "
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "cf271e52-b3aa-48f6-b85f-353b328dd463", - "metadata": {}, - "source": [ - "Welcome to the HAGrid Quickstart Install Wizard. \n", - "There are several different components required to use `Syft`. To make setup easy this wizard will automatically detect your current system configuration and make recommendations on what you need to complete setup.\n", - "\n", - "Run each step by clicking in the grey cell below 👇🏽 and pressing `Shift` + `Return` on your keyboard, or use the `Run` menu at the top of the window.\n", - "\n", - "**How the Install Wizard Works** \n", - "At each step the 🧙🏽‍♂️ Wizard will try to find various software and packages on your machine. \n", - "If you see an item marked with a ❌ red cross and the message `🚨 Some issues were found` it should include a description of the issue, a solution and optionally a way to resolve the solution directly by running a command. These commands can br Copy + Pasted into a cell and ran here, or if you know how to use the terminal on your computer simply remove the `!` at the start of the command and paste it there instead. After you have resolved the issue you can run the step again to verify it is fixed with a ✅ green tick." - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "d657c28d-5388-45cc-ba37-f5e6401c0c88", - "metadata": {}, - "source": [ - "
Step 1. Import Install 🧙🏽‍♂️ Wizard BETA
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ad558ade-ee9b-4c61-8cbd-68eadd2f07fa", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# third party\n", - "import hagrid\n", - "from hagrid import wizard" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5568b6bd-0015-458a-bb33-be9a927d0ffa", - "metadata": {}, - "source": [ - "
Step 2. HAGrid Updates
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "f5ab8626-0da1-4a8e-be94-8871724da34c", - "metadata": {}, - "source": [ - "It's a good idea to keep `HAGrid` updated as we push out fixes and features very frequently. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a546b80e-ec38-4662-9f6b-0caa5b581bdc", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "wizard.check_hagrid" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8060ab70-f5e1-4a09-bcb5-0646c84c478c", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# paste and run any commands here" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "871d2ad4-77ff-4381-a26c-036717d948b8", - "metadata": {}, - "source": [ - "
Step 3. Installing PySyft
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "1998c1a5-2fb4-4730-98e1-a4e2c360c5b7", - "metadata": {}, - "source": [ - "`PySyft` is a python library which requires Python 3.9+." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b954a90e-ad48-417f-bbe7-cd2d75cecc5e", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "wizard.check_syft" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "554dfac3-9022-44f2-8c32-30af1b3a8dd8", - "metadata": {}, - "outputs": [], - "source": [ - "# paste and run any commands here" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "3d82f627-6fb6-4f03-a0c4-2ef68c6bf7f9", - "metadata": { - "tags": [] - }, - "source": [ - "
Step 4. Python Server
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "7278fdb2-f1ba-4f12-a10b-50a43270c59f", - "metadata": { - "tags": [] - }, - "source": [ - "To do the `quickstart` tutorials, you can just run a basic `PyGrid` domain directly in python.\n", - "\n", - "You can do this either from `jupyter` / `python` or from the command line." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8b205fc3-c84e-4638-82cf-0bfa25b206b5", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "\n", - "sy.requires(\">=0.8.2.b0\")\n", - "node = sy.orchestra.launch(name=\"test-domain-1\", port=8080, dev_mode=True, reset=True)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "aa136f0a-debc-4219-858b-1814ed3cad46", - "metadata": { - "tags": [] - }, - "source": [ - "We can now log into the node with the default credentials." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4dd63cc9-5fe1-42af-9863-65a74eb2fc28", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "domain = sy.login(email=\"info@openmined.org\", password=\"changethis\", port=8080)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5259b7f2-b480-4346-9cc7-86305fef76b5", - "metadata": {}, - "source": [ - "Let's see whats available on the API." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "312d1e50-99c4-4120-9dfc-caa56cee62fd", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "domain.api" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cda9bf88-4273-48aa-b656-005a8f456e6c", - "metadata": {}, - "outputs": [], - "source": [ - "# paste and run any commands here" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "956202c6-1d24-4889-aeda-6a7efe4a4055", - "metadata": {}, - "source": [ - "Okay, now let's shutdown the server." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb2a0096-6c15-4ac9-9446-6532c1524381", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "node.land()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "7b2f1e6c-e469-4be5-b771-3724f92d2305", - "metadata": {}, - "source": [ - "👈🏿 Click here to go back to Quickstart Home" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "efc6ebc7-43e6-4612-bf44-7ce14e488d01", - "metadata": {}, - "source": [ - "
\n", - "
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "e05d1893-3347-41a3-8ee3-c04fd8f12d0f", - "metadata": {}, - "source": [ - "
Step 5. Docker Setup (Optional)
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "2fa85963-4635-4ce3-8be6-36b308aa26d5", - "metadata": {}, - "source": [ - "`PyGrid` can also run as a set of containerized services on a container host. Let's ask `hagrid` to check if we have all the right dependencies installed. If we don't it will make some recommendations on what to install." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "83f58a56-7c5e-4dfb-94c6-0fd1ce950a3f", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# third party\n", - "from hagrid import wizard\n", - "\n", - "wizard.check_docker" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "98b47e48-582f-4261-838b-1b7b2844f945", - "metadata": {}, - "outputs": [], - "source": [ - "# paste and run any commands here" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "ee41f9c5-8014-41e6-b9a1-542694cf2d31", - "metadata": {}, - "source": [ - "
Step 6. Start a Test Domain
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "068440ad-f0a0-4a71-bcca-52e598bf1968", - "metadata": {}, - "source": [ - "You are now ready to start a `domain` on your local machine with 🐳 Docker. Simply run the next cell and wait until it is completed." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2f06b0bf-c8c1-4b3c-87c8-b0f0809fcbfc", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "\n", - "sy.requires(\">=0.8.2.b0\")\n", - "node = sy.orchestra.launch(\n", - " name=\"test-domain-1\", node_type=\"domain\", port=8081, tag=\"beta\", verbose=True\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "7d5c9892-1b70-4a68-b41d-8c9c06c19df0", - "metadata": {}, - "source": [ - "
Step 7. Check Domain Health
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "b7846431-69b1-46bb-a611-e2ce47eb277f", - "metadata": {}, - "source": [ - "To ensure our domain has finished starting we can ask `hagrid` to check its health for us. Run the below cell to check your `domain` on localhost. You can also visit the links to see the `UI` and `api` endpoints in your browser." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "71c1fd7d-fb7b-43dc-9608-053a6313b1b4", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "hagrid.check(\"localhost:8081\", timeout=120)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "91148fc9-a26f-4964-bb17-efd5a26f465d", - "metadata": {}, - "source": [ - "
Step 8. Domain Login
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "07093a4b-d463-4fe5-9861-09e7d32b63e0", - "metadata": {}, - "source": [ - "We now log into the Domain Node using the default admin username and password." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "01126584-3449-4c4b-9fe4-1c727a7a0ee3", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "\n", - "sy.requires(\">=0.8.2.b0\")\n", - "domain = sy.login(email=\"info@openmined.org\", password=\"changethis\", port=8081)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "d4d7ccbb-0b48-41cf-bf7c-9b41f77924d0", - "metadata": {}, - "source": [ - "
Step 9. Shutdown Domain
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "8e3fc2ee-5247-4fa1-9e31-9d9464765e0d", - "metadata": {}, - "source": [ - "If your domain started correctly you are now done with the Install Wizard and ready to do some tutorials. We can shutdown this domain by running the `hagrid` land command in the below cell. If you are done now you can go ahead and shutdown your domain, or if you would prefer to keep it running skip this step." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f0d12f71-26eb-4317-a8ea-7e45579b7b59", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "node.land()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "c288eae3-6109-4b26-ac68-7ab772518919", - "metadata": {}, - "source": [ - "
✅ Install 🧙🏽‍♂️ Wizard Complete
" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "6663f1cc-648c-43e0-aca8-4d55039e8a6d", - "metadata": {}, - "source": [ - "👈🏿 Click here to go back to Quickstart Home" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.1" - }, - "vscode": { - "interpreter": { - "hash": "1e7e90b573593ba97b24c163dae9a6c9173808a1bc968e87367841cbed28165e" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/quickstart/img/edit.png b/notebooks/quickstart/img/edit.png deleted file mode 100644 index 3ceaaf9438d..00000000000 Binary files a/notebooks/quickstart/img/edit.png and /dev/null differ diff --git a/notebooks/quickstart/img/head.png b/notebooks/quickstart/img/head.png deleted file mode 100644 index 9d220749f31..00000000000 Binary files a/notebooks/quickstart/img/head.png and /dev/null differ diff --git a/notebooks/quickstart/img/run.png b/notebooks/quickstart/img/run.png deleted file mode 100644 index c3a678a9fd1..00000000000 Binary files a/notebooks/quickstart/img/run.png and /dev/null differ diff --git a/notebooks/scenarios/bigquery/000-start-and-configure-server-and-admins.ipynb b/notebooks/scenarios/bigquery/000-start-and-configure-server-and-admins.ipynb new file mode 100644 index 00000000000..61211d1506e --- /dev/null +++ b/notebooks/scenarios/bigquery/000-start-and-configure-server-and-admins.ipynb @@ -0,0 +1,289 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# # stdlib\n", + "# import os\n", + "\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "from os import environ as env\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.server.credentials import SyftVerifyKey\n", + "from syft.util.test_helpers.checkpoint import create_checkpoint\n", + "from syft.util.test_helpers.email_helpers import get_email_server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# when in k8s these are the default values\n", + "ROOT_EMAIL = \"admin@bigquery.org\"\n", + "ROOT_PASSWORD = \"bqpw\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# in case we are not in k8s we set them here for orchestra to use\n", + "env[\"DEFAULT_ROOT_EMAIL\"] = ROOT_EMAIL\n", + "env[\"DEFAULT_ROOT_PASSWORD\"] = ROOT_PASSWORD" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " reset=True,\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = 
get_email_server(reset=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "root_client = server.login(email=ROOT_EMAIL, password=ROOT_PASSWORD)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "root_client.users" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Verify we cannot update the root role or verify key" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "root_admin_id = root_client.users.search(email=ROOT_EMAIL)[0].id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException):\n", + " root_client.users.update(uid=root_admin_id, role=\"guest\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException):\n", + " root_client.users.update(\n", + " uid=root_admin_id, verify_key=SyftVerifyKey.from_string(\"0\" * 64)\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create new admin client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create\n", + "root_client.register(\n", + " name=\"second admin\", email=ADMIN_EMAIL, password=ADMIN_PW, password_verify=ADMIN_PW\n", + ")\n", + "# update role\n", + "new_user_id = root_client.users.search(email=ADMIN_EMAIL)[0].id\n", + "root_client.users.update(uid=new_user_id, role=\"admin\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# We cannot delete the root client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "root_admin_id = root_client.users.search(email=ROOT_EMAIL)[0].id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException):\n", + " high_client.users.delete(root_admin_id)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create ephemeral admin and delete it" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# register 2nd new admin (to delete)\n", + "user_email, user_pw = \"admin3@bigquery.org\", \"bqpw3\"\n", + "## create\n", + "root_client.register(\n", + " name=\"x\", email=user_email, password=user_pw, password_verify=user_pw\n", + ")\n", + "## update role\n", + "new_user_id2 = root_client.users.search(email=user_email)[0].id\n", + "root_client.users.update(uid=new_user_id, role=\"admin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "root_client.users.delete(new_user_id2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + 
"create_checkpoint(name=\"000-start-and-config\", client=root_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/001-scale-delete-worker-pools.ipynb b/notebooks/scenarios/bigquery/001-scale-delete-worker-pools.ipynb new file mode 100644 index 00000000000..65ad4ae6dde --- /dev/null +++ b/notebooks/scenarios/bigquery/001-scale-delete-worker-pools.ipynb @@ -0,0 +1,383 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "from os import environ as env\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.test_helpers.checkpoint import load_from_checkpoint\n", + "from syft.util.test_helpers.email_helpers import Timeout\n", + "from syft.util.test_helpers.email_helpers import get_email_server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "\n", + "num_workers = int(os.environ.get(\"NUM_TEST_WORKERS\", 1))\n", + "\n", + "ROOT_EMAIL = \"admin@bigquery.org\"\n", + "ROOT_PASSWORD = \"bqpw\"\n", + "environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# in case we are not in k8s we set them here for orchestra to use\n", + "env[\"DEFAULT_ROOT_EMAIL\"] = ROOT_EMAIL\n", + "env[\"DEFAULT_ROOT_PASSWORD\"] = ROOT_PASSWORD" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "### Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=num_workers, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + " log_level=10,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "load_from_checkpoint(\n", + " name=\"000-start-and-config\",\n", + " client=server.client,\n", + " root_email=ROOT_EMAIL,\n", + " root_password=ROOT_PASSWORD,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server(reset=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": 
{}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ROOT_EMAIL, password=ROOT_PASSWORD\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "high_client.worker_pools" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "default_worker_pool = high_client.worker_pools.get_by_name(\"default-pool\")\n", + "default_worker_pool" + ] + }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, + "source": [ + "### Scale Worker pool" + ] + }, + { + "cell_type": "markdown", + "id": "12", + "metadata": {}, + "source": [ + "##### Scale up" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "# Scale to 1\n", + "if environment == \"remote\":\n", + " high_client.api.worker_pool.scale(\n", + " number=num_workers, pool_name=default_worker_pool.name\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.worker_pool[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "# Scale up workers\n", + "if environment == \"remote\":\n", + " scale_up_result = high_client.api.worker_pool.scale(\n", + " number=5, pool_name=default_worker_pool.name\n", + " )\n", + " if environment == \"remote\":\n", + " assert scale_up_result, scale_up_result\n", + "\n", + " assert (\n", + " high_client.api.services.worker_pool[default_worker_pool.name].max_count\n", + " == 5\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, + "source": [ + "##### Scale down" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "# Scale down workers, this gracefully shutdowns the consumers\n", + "if environment == \"remote\":\n", + " scale_down_result = high_client.api.worker_pool.scale(\n", + " number=num_workers, pool_name=default_worker_pool.name\n", + " )\n", + " assert scale_down_result, scale_down_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + "\n", + " def has_worker_scaled_down():\n", + " return (\n", + " high_client.api.worker_pool[default_worker_pool.name].max_count\n", + " == num_workers\n", + " )\n", + "\n", + " worker_scale_timeout = Timeout(timeout_duration=20)\n", + " worker_scale_timeout.run_with_timeout(has_worker_scaled_down)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + " assert (\n", + " high_client.api.services.worker_pool[default_worker_pool.name].max_count\n", + " == num_workers\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "20", + "metadata": {}, + "source": [ + "#### Delete Worker Pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "pool_delete_result = high_client.api.services.worker_pool.delete(\n", + " pool_name=default_worker_pool.name\n", + ")\n", + "pool_delete_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "with 
sy.raises(KeyError):\n", + " _ = high_client.api.services.worker_pool[default_worker_pool.name]" + ] + }, + { + "cell_type": "markdown", + "id": "23", + "metadata": {}, + "source": [ + "#### Re-launch the default worker pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "default_worker_image = default_worker_pool.image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "launch_result = high_client.api.services.worker_pool.launch(\n", + " pool_name=default_worker_pool.name,\n", + " image_uid=default_worker_image.id,\n", + " num_workers=num_workers,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": {}, + "outputs": [], + "source": [ + "assert high_client.api.services.worker_pool[default_worker_pool.name]\n", + "assert (\n", + " high_client.api.services.worker_pool[default_worker_pool.name].max_count\n", + " == num_workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/bigquery/010-setup-bigquery-pool.ipynb b/notebooks/scenarios/bigquery/010-setup-bigquery-pool.ipynb new file mode 100644 index 00000000000..b478a79f27f --- /dev/null +++ b/notebooks/scenarios/bigquery/010-setup-bigquery-pool.ipynb @@ -0,0 +1,544 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft import test_settings\n", + "from syft.util.test_helpers.email_helpers import get_email_server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "environment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "_, smtp_server = get_email_server(reset=True)" + ] + 
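In the scaling notebook, each scale call is followed by a polling check built on the `Timeout` helper. A compact sketch of that scale-and-wait pattern, assuming a logged-in `high_client` and a remote deployment (the function name `scale_and_wait` is mine; the underlying calls are the ones used above):

```python
# Sketch only: combines worker_pool.scale with the Timeout polling helper
# used above. Assumes the pool already exists on a remote deployment.
from syft.util.test_helpers.email_helpers import Timeout


def scale_and_wait(high_client, pool_name: str, target: int, timeout: int = 20):
    # request the new worker count (scale-down shuts consumers down gracefully)
    result = high_client.api.worker_pool.scale(number=target, pool_name=pool_name)
    assert result, result

    # poll until the pool reports the requested max_count, or the timeout fires
    def reached_target():
        return high_client.api.worker_pool[pool_name].max_count == target

    Timeout(timeout_duration=timeout).run_with_timeout(reached_target)


# e.g. mirroring the notebook's scale up to 5 and back down again:
# scale_and_wait(high_client, "default-pool", 5)
# scale_and_wait(high_client, "default-pool", num_workers)
```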
}, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"\n", + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Submit images and build pools" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Add registry" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.worker_pools" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.worker_pools.get_all()) == 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "external_registry = test_settings.get(\"external_registry\", default=\"docker.io\")\n", + "external_registry" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = high_client.api.services.image_registry.add(external_registry)\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "image_registry_list = high_client.api.services.image_registry.get_all()\n", + "image_registry_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "local_registry = image_registry_list[0]\n", + "local_registry" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Upload Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dockerfile_list = high_client.images.get_all()\n", + "dockerfile_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "base_worker_image = next(\n", + " (\n", + " image\n", + " for image in dockerfile_list\n", + " if image.is_prebuilt and \"syft-backend\" in str(image.config)\n", + " ),\n", + " None,\n", + ")\n", + "base_worker_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "base_worker_image.image_identifier" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "worker_dockerfile = f\"\"\"\n", + "FROM {str(base_worker_image.image_identifier)}\n", + "\n", + "RUN uv pip install db-dtypes google-cloud-bigquery\n", + "\n", + "\"\"\".strip()\n", + "worker_dockerfile" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "docker_config = sy.DockerWorkerConfig(dockerfile=worker_dockerfile)\n", + "assert docker_config.dockerfile == worker_dockerfile" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "submit_result = high_client.api.services.worker_image.submit(\n", + " worker_config=docker_config\n", + ")\n", + "submit_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dockerfile_list = high_client.images.get_all()\n", + "dockerfile_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "worker_image = next(\n", + " (\n", + " image\n", + " for image in 
dockerfile_list\n", + " if not image.is_prebuilt and image.config.dockerfile == worker_dockerfile\n", + " ),\n", + " None,\n", + ")\n", + "worker_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(worker_image.config.dockerfile)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# worker_docker_tag = f\"openmined/bigquery:{sy.__version__}\"\n", + "# worker_docker_tag" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Build image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "docker_tag = str(base_worker_image.image_identifier).replace(\n", + " \"backend\", \"worker-bigquery\"\n", + ")\n", + "docker_tag" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + " docker_build_result = high_client.api.services.worker_image.build(\n", + " image_uid=worker_image.id,\n", + " tag=docker_tag,\n", + " registry_uid=local_registry.id,\n", + " )\n", + " print(docker_build_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + " push_result = high_client.api.services.worker_image.push(worker_image.id)\n", + " print(push_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dockerfile_list = high_client.images.get_all()\n", + "dockerfile_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "docker_config = sy.PrebuiltWorkerConfig(tag=docker_tag)\n", + "docker_config" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# overwrite it for now Postgres ignore\n", + "result = high_client.api.services.worker_image.submit(worker_config=docker_config)\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dockerfile_list = high_client.images.get_all()\n", + "dockerfile_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: fix\n", + "# something is wrong here, sometimes it has the non prebuilt one\n", + "# other times it only has the one we built; in python there are multiple\n", + "# for now lets just use which ever one has worker-bigquery in its\n", + "# identifier so we can create a k8s worker pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "worker_image = next(\n", + " (\n", + " image\n", + " for image in dockerfile_list\n", + " if image.is_prebuilt and \"worker-bigquery\" in str(image.image_identifier)\n", + " ),\n", + " None,\n", + ")\n", + "worker_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert worker_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "worker_pool_name = \"bigquery-pool\"\n", + "custom_pool_pod_annotations = {\"bigquery-custom-pool\": \"Pod annotation for bigquery\"}\n", + "custom_pool_pod_labels = {\"bigquery-custom-pool\": \"Pod_label_for_bigquery\"}\n", + "\n", + "num_workers = 
int(os.environ.get(\"NUM_TEST_WORKERS\", 1))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Launch pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = high_client.api.services.worker_pool.launch(\n", + " pool_name=worker_pool_name,\n", + " image_uid=worker_image.id,\n", + " num_workers=1,\n", + " pod_annotations=custom_pool_pod_annotations,\n", + " pod_labels=custom_pool_pod_labels,\n", + ")\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Scale pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + " result = high_client.worker_pools.scale(\n", + " number=num_workers, pool_name=worker_pool_name\n", + " )\n", + " print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.worker_pools.get_all()) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.settings.allow_guest_signup(enable=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.api.services.user.get_all()) == 2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/011-users-emails-passwords.ipynb b/notebooks/scenarios/bigquery/011-users-emails-passwords.ipynb new file mode 100644 index 00000000000..9a8bfdcdf9c --- /dev/null +++ b/notebooks/scenarios/bigquery/011-users-emails-passwords.ipynb @@ -0,0 +1,650 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"\n", + "# # !pip install aiosmtpd\n", + "# # !uv pip install aiosmtpd" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.test_helpers.email_helpers import SENDER\n", + "from syft.util.test_helpers.email_helpers import create_user\n", + "from syft.util.test_helpers.email_helpers import get_email_server\n", + "from syft.util.test_helpers.email_helpers import make_user\n", + "from syft.util.test_helpers.email_helpers import save_users" + 
] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"\n", + "\n", + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "environment" + ] + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "# Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "high_client.users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: this should show SyftSuccess?\n", + "high_client.api.services.settings.enable_notifications(\n", + " email_sender=SENDER,\n", + " email_server=\"localhost\",\n", + " email_port=\"9025\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# # TODO: this should show SyftSuccess?\n", + "# high_client.api.services.settings.disable_notifications()" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "# Register users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "num_users = int(os.environ.get(\"NUM_TEST_USERS\", 5))\n", + "print(f\"registering {num_users} users\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "users = []\n", + "email_disable_index = 0\n", + "reset_password_index = 1\n", + "for i in range(num_users):\n", + " user = make_user()\n", + " user._email_server = email_server\n", + " create_user(high_client, user)\n", + " user.client = high_client\n", + " if email_disable_index == i:\n", + " user.email_disabled = True\n", + " if reset_password_index == i:\n", + " user.reset_password = True\n", + " users.append(user)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "save_users(users)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import asyncio\n", + "\n", + "await asyncio.sleep(5)" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "## Verify Emails are sent" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# everyone gets a welcome email\n", + "server_name = high_client.name\n", + "for user in users:\n", + " emails = user.emails\n", + " assert 
len(emails) == 1\n", + " welcome_email = user.emails[0]\n", + " assert welcome_email.email_from == SENDER\n", + " assert len(welcome_email.email_to) == 1\n", + " assert welcome_email.email_to[0] == user.email\n", + " assert f\"Welcome to {server_name}\" in welcome_email.email_content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "# one user disables notifications\n", + "# one user requests a password reset\n", + "no_email_user = None\n", + "reset_password_user = None\n", + "for user in users:\n", + " user.client = high_client # get user client\n", + " if user.email_disabled:\n", + " no_email_user = user\n", + " # disable for this user only\n", + " user.client.api.notifications.deactivate()\n", + "\n", + " if user.reset_password:\n", + " # ask admin for forgot password flow\n", + " user.client.guest().forgot_password(email=user.email)\n", + " assert \"Password Reset Requested\" in user.emails[1].email_content\n", + " reset_password_user = user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "ds0 = users[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "ds0_user = ds0.client.account\n", + "ds0_user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "# with sy.raises(\n", + "# sy.SyftException(public_message=\"*tried to update user*\"\n", + "# ), show=True): this is different on k8s no idea why\n", + "with sy.raises(sy.SyftException, show=True):\n", + " ds0.client.users.update(uid=ds0_user.id, role=\"admin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "# with sy.raises(sy.SyftException(public_message=\"*tried to update user*\"), show=True):\n", + "with sy.raises(sy.SyftException, show=True):\n", + " ds0_user.update(role=\"admin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: test disabling and re-enabling all notifications" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "# high_client.api.services.settings.disable_notifications()\n", + "# high_client.api.services.settings.enable_notifications()" + ] + }, + { + "cell_type": "markdown", + "id": "24", + "metadata": {}, + "source": [ + "## Test reset password" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "# This is necessary as it sets the new token value in user.reset_token\n", + "token = reset_password_user.get_token()\n", + "token" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": {}, + "outputs": [], + "source": [ + "# This is necessary as it sets the new password value in user.new_password\n", + "passw = reset_password_user.make_new_password()\n", + "passw" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "assert token\n", + "assert passw" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "output = reset_password_user.client.guest().reset_password(\n", + " 
token=reset_password_user.reset_token, new_password=reset_password_user.new_password\n", + ")\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(output, sy.SyftSuccess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30", + "metadata": {}, + "outputs": [], + "source": [ + "# relogin\n", + "reset_password_user.relogin()\n", + "# reset_password_user.client = reset_password_user.client" + ] + }, + { + "cell_type": "markdown", + "id": "31", + "metadata": {}, + "source": [ + "## Reset password second time" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user.client.guest().forgot_password(email=reset_password_user.email)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33", + "metadata": {}, + "outputs": [], + "source": [ + "output = reset_password_user.client.guest().reset_password(\n", + " token=reset_password_user.get_token(),\n", + " new_password=reset_password_user.make_new_password(),\n", + ")\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(output, sy.SyftSuccess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35", + "metadata": {}, + "outputs": [], + "source": [ + "# print(f\"token:\\t\\t {reset_password_user.reset_token}\\n\\\n", + "# password:\\t {reset_password_user.password}\\n\\\n", + "# new password:\\t {reset_password_user.new_password}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user.update_password()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "37", + "metadata": {}, + "outputs": [], + "source": [ + "# print(f\"token:\\t\\t {reset_password_user.reset_token}\\n\\\n", + "# password:\\t {reset_password_user.password}\\n\\\n", + "# new password:\\t {reset_password_user.new_password}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38", + "metadata": {}, + "outputs": [], + "source": [ + "# relogin\n", + "reset_password_user.relogin()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39", + "metadata": {}, + "outputs": [], + "source": [ + "save_users(users)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "40", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user" + ] + }, + { + "cell_type": "markdown", + "id": "41", + "metadata": {}, + "source": [ + "## Reduce token expiration and try resetting" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42", + "metadata": {}, + "outputs": [], + "source": [ + "# Variable is poorly named, token expiration time is in seconds and not minutes\n", + "high_client.api.services.settings.update(pwd_token_config={\"token_exp_min\": 3})\n", + "high_client.refresh()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user.client.guest().forgot_password(email=reset_password_user.email)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44", + "metadata": {}, + "outputs": [], + "source": [ + "# Wait 3 seconds to ensure token expires\n", + "await asyncio.sleep(3)" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "45", + "metadata": {}, + "outputs": [], + "source": [ + "# This should throw a SyftError because we waited too long\n", + "output = reset_password_user.client.guest().reset_password(\n", + " token=reset_password_user.get_token(),\n", + " new_password=reset_password_user.make_new_password(),\n", + ")\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "46", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(output, sy.SyftError)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "47", + "metadata": {}, + "outputs": [], + "source": [ + "# relogin\n", + "with sy.raises(sy.SyftException, show=True):\n", + " reset_password_user.relogin()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "48", + "metadata": {}, + "outputs": [], + "source": [ + "# Set things back to the the default settings\n", + "high_client.api.services.settings.update(pwd_token_config={\"token_exp_min\": 1800})\n", + "high_client.refresh()" + ] + }, + { + "cell_type": "markdown", + "id": "49", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50", + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51", + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "52", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "syft", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/bigquery/020-configure-api.ipynb b/notebooks/scenarios/bigquery/020-configure-api.ipynb new file mode 100644 index 00000000000..7e5ddcde2e6 --- /dev/null +++ b/notebooks/scenarios/bigquery/020-configure-api.ipynb @@ -0,0 +1,632 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# set to use the live APIs\n", + "# os.environ[\"TEST_BIGQUERY_APIS_LIVE\"] = \"True\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft import test_settings\n", + "from syft.util.test_helpers.apis import make_schema\n", + "from syft.util.test_helpers.apis import make_submit_query\n", + "from syft.util.test_helpers.apis import make_test_query\n", + "\n", + "# run email server\n", + "from syft.util.test_helpers.email_helpers import get_email_server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch 
server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "this_worker_pool_name = \"bigquery-pool\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.worker_pools.get_all()) == 2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create `test_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# !pip list | grep bigquery" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# !uv pip install db-dtypes google-cloud-bigquery" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Look up the worker pools and identify the name of the one that has the required packages\n", + "# After, bind the endpoint to that workerpool\n", + "high_client.worker_pools" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mock_func = make_test_query(\n", + " settings={\n", + " \"rate_limiter_enabled\": True,\n", + " \"calls_per_min\": 10,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "private_func = make_test_query(\n", + " settings={\n", + " \"rate_limiter_enabled\": False,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_endpoint = sy.TwinAPIEndpoint(\n", + " path=\"bigquery.test_query\",\n", + " description=\"This endpoint allows to query Bigquery storage via SQL queries.\",\n", + " private_function=private_func,\n", + " mock_function=mock_func,\n", + " worker_pool_name=this_worker_pool_name,\n", + ")\n", + "\n", + "high_client.custom_api.add(endpoint=new_endpoint)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Update `test_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Here, we update the endpoint to timeout after 100s (rather the default of 60s)\n", + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.test_query\", endpoint_timeout=120\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.test_query\", hide_mock_definition=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Test `test_query` endpoint" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dataset_1 = test_settings.get(\"dataset_1\", default=\"dataset_1\")\n", + "dataset_2 = test_settings.get(\"dataset_2\", default=\"dataset_2\")\n", + "table_1 = test_settings.get(\"table_1\", default=\"table_1\")\n", + "table_2 = test_settings.get(\"table_2\", default=\"table_2\")\n", + "table_2_col_id = test_settings.get(\"table_2_col_id\", default=\"table_id\")\n", + "table_2_col_score = test_settings.get(\"table_2_col_score\", default=\"colname\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test mock version\n", + "result = high_client.api.services.bigquery.test_query.mock(\n", + " sql_query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 10\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 10" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test private version\n", + "result = high_client.api.services.bigquery.test_query.private(\n", + " sql_query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 10\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 10" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test mock version for wrong queries\n", + "with sy.raises(\n", + " sy.SyftException(public_message=\"*must be qualified with a dataset*\"), show=True\n", + "):\n", + " _ = high_client.api.services.bigquery.test_query.mock(\n", + " sql_query=\"SELECT * FROM invalid_table LIMIT 1\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test private version\n", + "result = high_client.api.services.bigquery.test_query.private(\n", + " sql_query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 1\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Inspect endpoint state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Inspect the context state on an endpoint\n", + "state = high_client.api.services.bigquery.test_query.mock.context.state\n", + "state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "len(state[ADMIN_EMAIL])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(state[ADMIN_EMAIL]) >= 2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create `schema` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "schema_function = make_schema(\n", + " settings={\n", + " \"calls_per_min\": 5,\n", + " },\n", + " worker_pool_name=this_worker_pool_name,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.add(endpoint=schema_function)\n", + "high_client.refresh()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + 
"source": [ + "## Test `schema` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Testing schema\n", + "result = high_client.api.services.bigquery.schema()\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 23" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create `submit_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "submit_query_function = make_submit_query(\n", + " settings={}, worker_pool_name=this_worker_pool_name\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.add(endpoint=submit_query_function)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.submit_query\", hide_mock_definition=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.api_endpoints()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.custom_api.api_endpoints()) == 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.bigquery.test_query" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.bigquery.submit_query" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Test `submit_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Testing submit query\n", + "result = high_client.api.services.bigquery.submit_query(\n", + " func_name=\"my_func\",\n", + " query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 1\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test emails" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert (\n", + " \"Job Failed\"\n", + " in email_server.get_emails_for_user(user_email=ADMIN_EMAIL)[0].email_content\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server.get_emails_for_user(user_email=\"admin@bigquery.org\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: change this to be all admins or configure which ones etc" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(email_server.get_emails_for_user(user_email=\"admin@bigquery.org\")) > 0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert \"Query submitted\" in result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + 
"outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/021-create-jobs.ipynb b/notebooks/scenarios/bigquery/021-create-jobs.ipynb new file mode 100644 index 00000000000..392103a751c --- /dev/null +++ b/notebooks/scenarios/bigquery/021-create-jobs.ipynb @@ -0,0 +1,471 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "# import os\n", + "\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# use_live_bigquery = False\n", + "# os.environ[\"TEST_BIGQUERY_APIS_LIVE\"] = str(use_live_bigquery)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "from collections import Counter\n", + "import os\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.service.job.job_stash import JobStatus\n", + "from syft.util.test_helpers.email_helpers import get_email_server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "SERVER_PORT = \"8080\"\n", + "SERVER_URL = f\"http://localhost:{SERVER_PORT}\"\n", + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "# Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=SERVER_PORT,\n", + " n_consumers=4, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server(reset=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from 
syft.util.test_helpers.email_helpers import load_users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(high_client)" + ] + }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, + "source": [ + "# Create jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.util.test_helpers.job_helpers import TestJob\n", + "from syft.util.test_helpers.job_helpers import create_jobs\n", + "from syft.util.test_helpers.job_helpers import extract_code_path" + ] + }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + "source": [ + "# Inspect job data (requests for these jobs to be created)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "num_jobs = int(os.environ.get(\"NUM_TEST_JOBS\", 10))\n", + "\n", + "jobs_data = create_jobs(users, total_jobs=num_jobs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "counts = Counter([j.job_type for j in jobs_data])\n", + "for k, v in counts.most_common():\n", + " print(f\"{k}: #{v}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.util.test_helpers.job_helpers import save_jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "print(f\"{len(jobs_data)=}\")\n", + "\n", + "for job in jobs_data:\n", + " print(f\"{job.job_type=}, {job.should_succeed=}, {job.should_submit=}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "assert len(jobs_data) == num_jobs\n", + "assert all(isinstance(j, TestJob) for j in jobs_data)\n", + "assert all(job.client is not None for job in jobs_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs_data)" + ] + }, + { + "cell_type": "markdown", + "id": "20", + "metadata": {}, + "source": [ + "# Submit jobs\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "admin_emails_before = len(email_server.get_emails_for_user(\"admin@bigquery.org\"))\n", + "print(f\"{admin_emails_before=}\")" + ] + }, + { + "cell_type": "markdown", + "id": "22", + "metadata": {}, + "source": [ + "## Test Successful jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "jobs_submit_should_succeed = [j for j in jobs_data if j.should_submit]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "for job in jobs_submit_should_succeed:\n", + " client = job.client\n", + " response = client.api.services.bigquery.submit_query(\n", + " func_name=job.func_name, query=job.query\n", + " )\n", + " job.code_path = extract_code_path(response)" + ] + }, + { + "cell_type": "markdown", + "id": "25", + "metadata": {}, + "source": [ + "## Test failures" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": {}, + "outputs": [], + "source": [ + "jobs_submit_should_fail = 
[j for j in jobs_data if not j.should_submit]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "for job in jobs_submit_should_fail:\n", + " client = job.client\n", + "\n", + " with sy.raises(sy.SyftException):\n", + " client.api.services.bigquery.submit_query(\n", + " func_name=job.func_name, query=job.query\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "for job in jobs_data:\n", + " print(f\"Job {job.func_name:.20} {job.should_submit=}, {job.is_submitted=}\")\n", + "\n", + "assert all(job.is_submitted == job.should_submit for job in jobs_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs_data)" + ] + }, + { + "cell_type": "markdown", + "id": "30", + "metadata": {}, + "source": [ + "## Test: cannot execute submitted jobs yet" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31", + "metadata": {}, + "outputs": [], + "source": [ + "submitted_jobs = [job for job in jobs_data if job.should_submit]\n", + "job_execution_fns = [getattr(job.client.code, job.code_path) for job in submitted_jobs]\n", + "assert len(submitted_jobs) # failsafe for next tests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32", + "metadata": {}, + "outputs": [], + "source": [ + "for fn in job_execution_fns:\n", + " # blocking\n", + " with sy.raises(\n", + " sy.SyftException(public_message=\"*Your code is waiting for approval*\")\n", + " ):\n", + " result = fn()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33", + "metadata": {}, + "outputs": [], + "source": [ + "for fn in job_execution_fns:\n", + " # nonblocking\n", + " result_job = fn(blocking=False)\n", + " result_job.wait()\n", + " assert isinstance(result_job.result, sy.SyftError)\n", + " assert result_job.status == JobStatus.ERRORED" + ] + }, + { + "cell_type": "markdown", + "id": "34", + "metadata": {}, + "source": [ + "# Verify that admin has emails for submitted requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35", + "metadata": {}, + "outputs": [], + "source": [ + "num_should_submit = sum(j.should_submit for j in jobs_data)\n", + "admin_emails_after = len(email_server.get_emails_for_user(\"admin@bigquery.org\"))\n", + "print(\"admin emails after\", admin_emails_after)\n", + "assert admin_emails_after >= admin_emails_before + num_should_submit\n", + "# assert len(users_emails) > after_number_of_emails\n", + "# assert len(users_emails) == after_number_of_emails + 1" + ] + }, + { + "cell_type": "markdown", + "id": "36", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "37", + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38", + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + 
"nbformat_minor": 5 +} diff --git a/notebooks/scenarios/bigquery/040-do-review-requests.ipynb b/notebooks/scenarios/bigquery/040-do-review-requests.ipynb new file mode 100644 index 00000000000..08eb24f2fda --- /dev/null +++ b/notebooks/scenarios/bigquery/040-do-review-requests.ipynb @@ -0,0 +1,354 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "from collections import Counter\n", + "import random\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.service.job.job_stash import Job\n", + "from syft.util.test_helpers.email_helpers import get_email_server\n", + "from syft.util.test_helpers.job_helpers import approve_by_running\n", + "from syft.util.test_helpers.job_helpers import get_job_emails\n", + "from syft.util.test_helpers.job_helpers import get_request_for_job_info" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Start server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Review requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.util.test_helpers.email_helpers import load_users\n", + "from syft.util.test_helpers.job_helpers import load_jobs\n", + "from syft.util.test_helpers.job_helpers import save_jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.requests.get_all_pending()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(high_client)\n", + "jobs_data = load_jobs(users, high_client)\n", + "all_requests = high_client.requests\n", + "submitted_jobs_data = [job for job in jobs_data if job.is_submitted]\n", + "n_emails_per_job_user = {\n", + " k: len(v)\n", + " for k, v in get_job_emails(submitted_jobs_data, high_client, email_server).items()\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO we should record whether it was approved or deposited\n", + "# and test doing both in either order as there might be a bug when\n", + 
"# force overwriting\n", + "# also changing deny to approve and back again" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Run or deny" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "submitted_jobs_data_should_succeed = [\n", + " j for j in submitted_jobs_data if j.should_succeed\n", + "]\n", + "submitted_jobs_data_should_fail = [\n", + " j for j in submitted_jobs_data if not j.should_succeed\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in submitted_jobs_data_should_succeed:\n", + " request = get_request_for_job_info(all_requests, job)\n", + " if random.randrange(2):\n", + " choice = \"approved with deposit_result\"\n", + " response = approve_by_running(request)\n", + " assert isinstance(response, Job)\n", + " else:\n", + " choice = \"approved\"\n", + " response = request.approve()\n", + " assert isinstance(response, sy.SyftSuccess)\n", + " print(f\"Job {job.func_name} should succeed: {job.should_succeed} and was {choice}\")\n", + " job.admin_reviewed = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in submitted_jobs_data_should_fail:\n", + " request = get_request_for_job_info(all_requests, job)\n", + " response = request.deny(\n", + " reason=f\"Your request {job.func_name} looks wrong, try again.\"\n", + " )\n", + " assert isinstance(response, sy.SyftSuccess)\n", + " assert not job.should_succeed\n", + " job.admin_reviewed = True" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Verify that users have new emails" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_n_emails_per_job_user = {\n", + " k: len(v)\n", + " for k, v in get_job_emails(submitted_jobs_data, high_client, email_server).items()\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job_emails = get_job_emails(submitted_jobs_data, high_client, email_server)\n", + "rejected_email_counts = {\n", + " k: sum(\"rejected\" in email[\"email_content\"].lower() for email in v)\n", + " for k, v in job_emails.items()\n", + "}\n", + "approved_email_counts = {\n", + " k: sum(\"approved\" in email[\"email_content\"].lower() for email in v)\n", + " for k, v in job_emails.items()\n", + "}\n", + "expected_rejected_email_counts = Counter(\n", + " job.user_email for job in submitted_jobs_data_should_fail\n", + ")\n", + "\n", + "expected_approved_email_counts = Counter(\n", + " job.user_email for job in submitted_jobs_data_should_succeed\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# if user's email notifications are enabled,\n", + "# should have received either approved or rejected email\n", + "for user_email, new_count in new_n_emails_per_job_user.items():\n", + " user = [u for u in users if u.email == user_email][0]\n", + " old_count = n_emails_per_job_user[user_email]\n", + " if not user.email_disabled:\n", + " # greater than or equal to since duplicates can happen\n", + " assert new_count > old_count\n", + " assert rejected_email_counts.get(\n", + " user_email, 0\n", + " ) >= expected_rejected_email_counts.get(user_email, 0)\n", + " assert approved_email_counts.get(\n", + " user_email, 0\n", + " ) >= 
expected_approved_email_counts.get(user_email, 0)\n", + " else:\n", + " assert new_count == old_count" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Save state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.requests.get_all_approved()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.requests.get_all_rejected()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "syft", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/050-ds-get-results.ipynb b/notebooks/scenarios/bigquery/050-ds-get-results.ipynb new file mode 100644 index 00000000000..35791771b2f --- /dev/null +++ b/notebooks/scenarios/bigquery/050-ds-get-results.ipynb @@ -0,0 +1,232 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.test_helpers.email_helpers import get_email_server\n", + "from syft.util.test_helpers.email_helpers import load_users\n", + "from syft.util.test_helpers.job_helpers import load_jobs\n", + "from syft.util.test_helpers.job_helpers import save_jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, 
smtp_server = get_email_server()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Download results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(high_client)\n", + "jobs = load_jobs(users, high_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# submitted_jobs = [job for job in jobs if job.is_submitted]\n", + "reviewed_jobs = [job for job in jobs if job.admin_reviewed]\n", + "reviewed_jobs_should_succeed = [j for j in reviewed_jobs if j.should_succeed]\n", + "reviewed_jobs_should_fail = [j for j in reviewed_jobs if not j.should_succeed]\n", + "\n", + "print(\n", + " f\"{len(reviewed_jobs)=}, {len(reviewed_jobs_should_succeed)=}, {len(reviewed_jobs_should_fail)=}\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: test jobs that were never approved\n", + "# they seem to give weird errors like\n", + "# \"You uploaded an ActionObject that is not yet in the blob storage\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_succeed:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + "\n", + " if isinstance(res, sy.SyftError):\n", + " raise sy.SyftException(public_message=\"Expected success, got error\")\n", + "\n", + " result = res.get()\n", + " job.result_as_expected = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_fail:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + "\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + " if isinstance(res, sy.SyftError):\n", + " job.result_as_expected = True\n", + " else:\n", + " raise sy.SyftException(public_message=f\"failed, job didnt raise {type(j)}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO fix\n", + "expected_jobs = [job for job in jobs if job.result_as_expected]\n", + "print(f\"got expected_jobs: {len(expected_jobs)} == reviewed_jobs: {len(reviewed_jobs)}\")\n", + "assert len(reviewed_jobs) == len(expected_jobs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git 
a/packages/grid/backend/grid/logger/__init__.py b/notebooks/scenarios/bigquery/README.md similarity index 100% rename from packages/grid/backend/grid/logger/__init__.py rename to notebooks/scenarios/bigquery/README.md diff --git a/packages/hagrid/tests/__init__.py b/notebooks/scenarios/bigquery/__init__.py similarity index 100% rename from packages/hagrid/tests/__init__.py rename to notebooks/scenarios/bigquery/__init__.py diff --git a/notebooks/scenarios/bigquery/sync/000-start-and-configure-server-and-admins.ipynb b/notebooks/scenarios/bigquery/sync/000-start-and-configure-server-and-admins.ipynb new file mode 100644 index 00000000000..4b2448d4372 --- /dev/null +++ b/notebooks/scenarios/bigquery/sync/000-start-and-configure-server-and-admins.ipynb @@ -0,0 +1,253 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "from os import environ as env\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.test_helpers.email_helpers import get_email_server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "environment = env.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "high_port = env.get(\"CLUSTER_HTTP_PORT_HIGH\", \"9081\")\n", + "low_port = env.get(\"CLUSTER_HTTP_PORT_LOW\", \"9083\")\n", + "print(environment, high_port, low_port)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# when in k8s these are the default values\n", + "ROOT_EMAIL = \"admin@bigquery.org\"\n", + "ROOT_PASSWORD = \"bqpw\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# in case we are not in k8s we set them here for orchestra to use\n", + "env[\"DEFAULT_ROOT_EMAIL\"] = ROOT_EMAIL\n", + "env[\"DEFAULT_ROOT_PASSWORD\"] = ROOT_PASSWORD" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " reset=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=low_port,\n", + ")\n", + "\n", + "server_high = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " server_side_type=\"high\",\n", + " dev_mode=True,\n", + " reset=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=high_port,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server(reset=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "root_client_low = sy.login(\n", + " url=f\"http://localhost:{low_port}\", email=ROOT_EMAIL, password=ROOT_PASSWORD\n", + ")\n", + "root_client_high = sy.login(\n", + " url=f\"http://localhost:{high_port}\", email=ROOT_EMAIL, password=ROOT_PASSWORD\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create new admin client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create admin account on low side\n", + 
"root_client_low.register(\n", + " name=\"second admin\", email=ADMIN_EMAIL, password=ADMIN_PW, password_verify=ADMIN_PW\n", + ")\n", + "# update role\n", + "new_user_id = root_client_low.users.search(email=ADMIN_EMAIL)[0].id\n", + "root_client_low.users.update(uid=new_user_id, role=\"admin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create admin account on high side\n", + "root_client_high.register(\n", + " name=\"second admin\", email=ADMIN_EMAIL, password=ADMIN_PW, password_verify=ADMIN_PW\n", + ")\n", + "# update role\n", + "new_user_id = root_client_high.users.search(email=ADMIN_EMAIL)[0].id\n", + "root_client_high.users.update(uid=new_user_id, role=\"admin\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# We cannot delete the root client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "low_client = sy.login(\n", + " url=f\"http://localhost:{low_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "root_admin_id = low_client.users.search(email=ROOT_EMAIL)[0].id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException):\n", + " low_client.users.delete(root_admin_id)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create ephemeral admin and delete it" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# register 2nd new admin (to delete)\n", + "user_email, user_pw = \"admin3@bigquery.org\", \"bqpw3\"\n", + "## create\n", + "root_client_low.register(\n", + " name=\"x\", email=user_email, password=user_pw, password_verify=user_pw\n", + ")\n", + "## update role\n", + "new_user_id2 = root_client_low.users.search(email=user_email)[0].id\n", + "root_client_low.users.update(uid=new_user_id2, role=\"admin\")\n", + "\n", + "# delete\n", + "root_client_low.users.delete(new_user_id2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment != \"remote\":\n", + " server_high.land()\n", + " server_low.land()\n", + "smtp_server.stop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/sync/001-scale-delete-worker-pools.ipynb b/notebooks/scenarios/bigquery/sync/001-scale-delete-worker-pools.ipynb new file mode 100644 index 00000000000..74e6612cb39 --- /dev/null +++ b/notebooks/scenarios/bigquery/sync/001-scale-delete-worker-pools.ipynb @@ -0,0 +1,337 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.test_helpers.email_helpers import Timeout" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "high_port = os.environ.get(\"CLUSTER_HTTP_PORT_HIGH\", \"9081\")\n", + "low_port = os.environ.get(\"CLUSTER_HTTP_PORT_LOW\", \"9083\")\n", + "print(environment, high_port, low_port)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "num_workers = int(os.environ.get(\"NUM_TEST_WORKERS\", 1))\n", + "\n", + "ROOT_EMAIL = \"admin@bigquery.org\"\n", + "ROOT_PASSWORD = \"bqpw\"" + ] + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "### Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=low_port,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "low_client = sy.login(\n", + " url=f\"http://localhost:{low_port}\", email=ROOT_EMAIL, password=ROOT_PASSWORD\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "assert len(low_client.worker_pools.get_all()) == 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "default_worker_pool = low_client.worker_pools.get_by_name(\"default-pool\")\n", + "default_worker_pool" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "### Scale Worker pool" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "##### Scale up" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "# Scale to 1\n", + "if environment == \"remote\":\n", + " low_client.api.worker_pool.scale(\n", + " number=num_workers, pool_name=default_worker_pool.name\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "low_client.api.services.worker_pool[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# Scale up workers\n", + "if environment == \"remote\":\n", + " scale_up_result = low_client.api.worker_pool.scale(\n", + " number=5, pool_name=default_worker_pool.name\n", + " )\n", + " if environment == \"remote\":\n", + " assert scale_up_result, scale_up_result\n", + "\n", + " assert (\n", + " low_client.api.services.worker_pool[default_worker_pool.name].max_count == 5\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + "source": [ + "##### Scale down" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# Scale down workers, this gracefully shutdowns the consumers\n", + "if environment == \"remote\":\n", + " scale_down_result = low_client.api.worker_pool.scale(\n", + " number=num_workers, pool_name=default_worker_pool.name\n", + " )\n", + " assert scale_down_result, scale_down_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + 
"metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + "\n", + " def has_worker_scaled_down():\n", + " return (\n", + " low_client.api.worker_pool[default_worker_pool.name].max_count\n", + " == num_workers\n", + " )\n", + "\n", + " worker_scale_timeout = Timeout(timeout_duration=20)\n", + " worker_scale_timeout.run_with_timeout(has_worker_scaled_down)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + " assert (\n", + " low_client.api.services.worker_pool[default_worker_pool.name].max_count\n", + " == num_workers\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "#### Delete Worker Pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "pool_delete_result = low_client.api.services.worker_pool.delete(\n", + " pool_name=default_worker_pool.name\n", + ")\n", + "pool_delete_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(KeyError):\n", + " _ = low_client.api.services.worker_pool[default_worker_pool.name]" + ] + }, + { + "cell_type": "markdown", + "id": "20", + "metadata": {}, + "source": [ + "#### Re-launch the default worker pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "default_worker_image = default_worker_pool.image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "launch_result = low_client.api.services.worker_pool.launch(\n", + " pool_name=default_worker_pool.name,\n", + " image_uid=default_worker_image.id,\n", + " num_workers=num_workers,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "assert low_client.api.services.worker_pool[default_worker_pool.name]\n", + "assert (\n", + " low_client.api.services.worker_pool[default_worker_pool.name].max_count\n", + " == num_workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "if environment != \"remote\":\n", + " server_low.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/bigquery/sync/010-setup-bigquery-pool.ipynb b/notebooks/scenarios/bigquery/sync/010-setup-bigquery-pool.ipynb new file mode 100644 index 00000000000..5cf5457e5bb --- /dev/null +++ b/notebooks/scenarios/bigquery/sync/010-setup-bigquery-pool.ipynb @@ -0,0 +1,296 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft import test_settings\n", + "from 
syft.util.test_helpers.worker_helpers import (\n", + " build_and_launch_worker_pool_from_docker_str,\n", + ")\n", + "from syft.util.test_helpers.worker_helpers import (\n", + " launch_worker_pool_from_docker_tag_and_registry,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "high_port = os.environ.get(\"CLUSTER_HTTP_PORT_HIGH\", \"9081\")\n", + "low_port = os.environ.get(\"CLUSTER_HTTP_PORT_LOW\", \"9083\")\n", + "print(environment, high_port, low_port)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=low_port,\n", + ")\n", + "\n", + "server_high = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " server_side_type=\"high\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=high_port,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"\n", + "low_client = sy.login(\n", + " url=f\"http://localhost:{low_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")\n", + "high_client = sy.login(\n", + " url=f\"http://localhost:{high_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup High First\n", + "\n", + "- If using an external registery, we want to get this from the test_settings.\n", + "- We build the docker image over the base docker image in Syft\n", + "- We give a tag called worker-bigquery to our custom pool image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "external_registry = test_settings.get(\"external_registry\", default=\"docker.io\")\n", + "\n", + "base_worker_image = high_client.images.get_all()[0]\n", + "\n", + "worker_dockerfile = f\"\"\"\n", + "FROM {str(base_worker_image.image_identifier)}\n", + "\n", + "RUN uv pip install db-dtypes google-cloud-bigquery \n", + "\n", + "\"\"\".strip()\n", + "\n", + "docker_tag = str(base_worker_image.image_identifier).replace(\n", + " \"backend\", \"worker-bigquery\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "worker_pool_name = \"bigquery-pool\"\n", + "custom_pool_pod_annotations = {\"bigquery-custom-pool\": \"Pod annotation for bigquery\"}\n", + "custom_pool_pod_labels = {\"bigquery-custom-pool\": \"Pod_label_for_bigquery\"}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "build_and_launch_worker_pool_from_docker_str(\n", + " environment=environment,\n", + " client=high_client,\n", + " worker_pool_name=worker_pool_name,\n", + " custom_pool_pod_annotations=custom_pool_pod_annotations,\n", + " custom_pool_pod_labels=custom_pool_pod_labels,\n", + " worker_dockerfile=worker_dockerfile,\n", + " external_registry=external_registry,\n", + " docker_tag=docker_tag,\n", + " scale_to=2,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
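The custom pool image is simply the Syft base worker image plus the BigQuery client libraries. A generalized sketch of how the Dockerfile string and tag are derived; the `extra_packages` parameter is illustrative and not part of the helper's API:

```python
# Illustrative: derive a custom worker Dockerfile and tag from the base image,
# parameterized over the packages to install. Mirrors the cells above.
def bigquery_worker_spec(base_worker_image, extra_packages: list[str]) -> tuple[str, str]:
    base = str(base_worker_image.image_identifier)
    dockerfile = f"""
FROM {base}

RUN uv pip install {' '.join(extra_packages)}
""".strip()
    tag = base.replace("backend", "worker-bigquery")
    return dockerfile, tag


# dockerfile, tag = bigquery_worker_spec(
#     base_worker_image, ["db-dtypes", "google-cloud-bigquery"]
# )
```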
"metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.worker_pools.get_all()) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.settings.allow_guest_signup(enable=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup Low" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "launch_result = launch_worker_pool_from_docker_tag_and_registry(\n", + " environment=environment,\n", + " client=low_client,\n", + " worker_pool_name=worker_pool_name,\n", + " custom_pool_pod_annotations=custom_pool_pod_annotations,\n", + " custom_pool_pod_labels=custom_pool_pod_labels,\n", + " docker_tag=docker_tag,\n", + " external_registry=external_registry,\n", + " scale_to=1,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(low_client.worker_pools.get_all()) == 2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Register a DS only on the low side" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "low_client.register(\n", + " email=\"data_scientist@openmined.org\",\n", + " password=\"verysecurepassword\",\n", + " password_verify=\"verysecurepassword\",\n", + " name=\"John Doe\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "low_client.settings.allow_guest_signup(enable=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert (\n", + " len(low_client.api.services.user.get_all()) == 3\n", + "), \"Only DS and 2 Admin should be at low side\"\n", + "assert (\n", + " len(high_client.api.services.user.get_all()) == 2\n", + "), \"Only 2 Admin should be at high side\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Close" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment != \"remote\":\n", + " server_high.land()\n", + " server_low.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/sync/011-users-emails-passwords.ipynb b/notebooks/scenarios/bigquery/sync/011-users-emails-passwords.ipynb new file mode 100644 index 00000000000..4dbc1effc0d --- /dev/null +++ b/notebooks/scenarios/bigquery/sync/011-users-emails-passwords.ipynb @@ -0,0 +1,605 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.test_helpers.email_helpers import SENDER\n", + "from 
syft.util.test_helpers.email_helpers import create_user\n", + "from syft.util.test_helpers.email_helpers import get_email_server\n", + "from syft.util.test_helpers.email_helpers import make_user\n", + "from syft.util.test_helpers.email_helpers import save_users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"\n", + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "high_port = os.environ.get(\"CLUSTER_HTTP_PORT_HIGH\", \"9081\")\n", + "low_port = os.environ.get(\"CLUSTER_HTTP_PORT_LOW\", \"9083\")\n", + "print(environment, high_port, low_port)" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "# Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=low_port,\n", + ")\n", + "\n", + "server_high = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " server_side_type=\"high\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=high_port,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "### Email Server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "low_client = sy.login(\n", + " url=f\"http://localhost:{low_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")\n", + "high_client = sy.login(\n", + " url=f\"http://localhost:{high_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "low_client.api.services.settings.enable_notifications(\n", + " email_sender=SENDER,\n", + " email_server=\"localhost\",\n", + " email_port=\"9025\",\n", + ")\n", + "\n", + "high_client.api.services.settings.enable_notifications(\n", + " email_sender=SENDER,\n", + " email_server=\"localhost\",\n", + " email_port=\"9025\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "# Register users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "num_users = int(os.environ.get(\"NUM_TEST_USERS\", 5))\n", + "print(f\"registering {num_users} users\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "users = []\n", + "email_disable_index = 0\n", + "reset_password_index = 1\n", + "for i in range(num_users):\n", + " user = make_user()\n", + " user._email_server = email_server\n", + " create_user(low_client, user)\n", + " user.client = low_client\n", + " if email_disable_index == i:\n", + " user.email_disabled = True\n", + " if reset_password_index == i:\n", + " user.reset_password = True\n", + " users.append(user)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "save_users(users)" + ] + }, + { 
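The registration loop above tags one user to have email notifications disabled and one to go through the password-reset flow. A compact sketch of the same factory pattern, reusing `make_user` and `create_user` from the email helpers (the index choices are the same as above and otherwise arbitrary):

```python
# Sketch of the user-factory loop above: create `n` users against the low-side
# client and flag one for disabled emails and one for a password reset.
from syft.util.test_helpers.email_helpers import create_user
from syft.util.test_helpers.email_helpers import make_user


def make_test_users(client, email_server, n: int):
    users = []
    for i in range(n):
        user = make_user()
        user._email_server = email_server
        create_user(client, user)
        user.client = client
        user.email_disabled = i == 0
        user.reset_password = i == 1
        users.append(user)
    return users
```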
+ "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import asyncio\n", + "\n", + "await asyncio.sleep(5)" + ] + }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + "source": [ + "## Verify Emails are sent" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# everyone gets a welcome email\n", + "server_name = low_client.name\n", + "for user in users:\n", + " emails = user.emails\n", + " assert len(emails) == 1\n", + " welcome_email = user.emails[0]\n", + " assert welcome_email.email_from == SENDER\n", + " assert len(welcome_email.email_to) == 1\n", + " assert welcome_email.email_to[0] == user.email\n", + " assert f\"Welcome to {server_name}\" in welcome_email.email_content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "# one user disables notifications\n", + "# one user requests a password reset\n", + "no_email_user = None\n", + "reset_password_user = None\n", + "for user in users:\n", + " user.client = low_client # get user client\n", + " if user.email_disabled:\n", + " no_email_user = user\n", + " # disable for this user only\n", + " user.client.api.notifications.deactivate()\n", + "\n", + " if user.reset_password:\n", + " # ask admin for forgot password flow\n", + " user.client.guest().forgot_password(email=user.email)\n", + " assert \"Password Reset Requested\" in user.emails[1].email_content\n", + " reset_password_user = user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "ds0 = users[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "ds0_user = ds0.client.account\n", + "ds0_user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "# with sy.raises(\n", + "# sy.SyftException(public_message=\"*tried to update user*\"\n", + "# ), show=True): this is different on k8s no idea why\n", + "with sy.raises(sy.SyftException, show=True):\n", + " ds0.client.users.update(uid=ds0_user.id, role=\"admin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "# with sy.raises(sy.SyftException(public_message=\"*tried to update user*\"), show=True):\n", + "with sy.raises(sy.SyftException, show=True):\n", + " ds0_user.update(role=\"admin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: test disabling and re-enabling all notifications" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": {}, + "source": [ + "## Test reset password" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "# This is necessary as it sets the new token value in user.reset_token\n", + "token = reset_password_user.get_token()\n", + "token" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "# This is necessary as it sets the new password value in user.new_password\n", + "passw = reset_password_user.make_new_password()\n", + "passw" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + 
"metadata": {}, + "outputs": [], + "source": [ + "assert token\n", + "assert passw" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "output = reset_password_user.client.guest().reset_password(\n", + " token=reset_password_user.reset_token, new_password=reset_password_user.new_password\n", + ")\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(output, sy.SyftSuccess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "# relogin\n", + "reset_password_user.relogin()" + ] + }, + { + "cell_type": "markdown", + "id": "28", + "metadata": {}, + "source": [ + "## Reset password second time" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user.client.guest().forgot_password(email=reset_password_user.email)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30", + "metadata": {}, + "outputs": [], + "source": [ + "output = reset_password_user.client.guest().reset_password(\n", + " token=reset_password_user.get_token(),\n", + " new_password=reset_password_user.make_new_password(),\n", + ")\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(output, sy.SyftSuccess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user.update_password()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33", + "metadata": {}, + "outputs": [], + "source": [ + "# relogin\n", + "reset_password_user.relogin()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34", + "metadata": {}, + "outputs": [], + "source": [ + "save_users(users)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user" + ] + }, + { + "cell_type": "markdown", + "id": "36", + "metadata": {}, + "source": [ + "## Reduce token expiration and try resetting" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "37", + "metadata": {}, + "outputs": [], + "source": [ + "# Variable is poorly named, token expiration time is in seconds and not minutes\n", + "low_client.api.services.settings.update(pwd_token_config={\"token_exp_min\": 3})\n", + "low_client.refresh()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user.client.guest().forgot_password(email=reset_password_user.email)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39", + "metadata": {}, + "outputs": [], + "source": [ + "# Wait 3 seconds to ensure token expires\n", + "await asyncio.sleep(3)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "40", + "metadata": {}, + "outputs": [], + "source": [ + "# This should throw a SyftError because we waited too long\n", + "output = reset_password_user.client.guest().reset_password(\n", + " token=reset_password_user.get_token(),\n", + " new_password=reset_password_user.make_new_password(),\n", + ")\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "41", + 
"metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(output, sy.SyftError)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42", + "metadata": {}, + "outputs": [], + "source": [ + "# relogin\n", + "with sy.raises(sy.SyftException, show=True):\n", + " reset_password_user.relogin()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43", + "metadata": {}, + "outputs": [], + "source": [ + "# Set things back to the the default settings\n", + "low_client.api.services.settings.update(pwd_token_config={\"token_exp_min\": 1800})\n", + "low_client.refresh()" + ] + }, + { + "cell_type": "markdown", + "id": "44", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45", + "metadata": {}, + "outputs": [], + "source": [ + "if environment != \"remote\":\n", + " server_high.land()\n", + " server_low.land()\n", + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "46", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "47", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/bigquery/sync/020-configure-api-and-sync.ipynb b/notebooks/scenarios/bigquery/sync/020-configure-api-and-sync.ipynb new file mode 100644 index 00000000000..5cf59d85224 --- /dev/null +++ b/notebooks/scenarios/bigquery/sync/020-configure-api-and-sync.ipynb @@ -0,0 +1,675 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "\n", + "# third party\n", + "# set to use the live APIs\n", + "# os.environ[\"TEST_BIGQUERY_APIS_LIVE\"] = \"True\"\n", + "import pandas as pd\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft import test_settings\n", + "from syft.client.syncing import compare_clients\n", + "from syft.util.test_helpers.apis import make_schema\n", + "from syft.util.test_helpers.apis import make_submit_query\n", + "from syft.util.test_helpers.apis import make_test_query\n", + "from syft.util.test_helpers.email_helpers import get_email_server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"\n", + "ROOT_EMAIL, ROOT_PW = \"admin@bigquery.org\", \"bqpw\"\n", + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "high_port = os.environ.get(\"CLUSTER_HTTP_PORT_HIGH\", \"9081\")\n", + "low_port = os.environ.get(\"CLUSTER_HTTP_PORT_LOW\", \"9083\")\n", + "print(environment, high_port, low_port)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch server and login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " 
create_producer=True,\n", + " port=low_port,\n", + ")\n", + "\n", + "server_high = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " server_side_type=\"high\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=high_port,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Email Server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.controller" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "low_client = sy.login(\n", + " url=f\"http://localhost:{low_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")\n", + "high_client = sy.login(\n", + " url=f\"http://localhost:{high_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.worker_pools.get_all()) == 2\n", + "assert len(low_client.worker_pools.get_all()) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "this_worker_pool_name = \"bigquery-pool\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Load database information from test_settings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dataset_1 = test_settings.get(\"dataset_1\", default=\"dataset_1\")\n", + "dataset_2 = test_settings.get(\"dataset_2\", default=\"dataset_2\")\n", + "table_1 = test_settings.get(\"table_1\", default=\"table_1\")\n", + "table_2 = test_settings.get(\"table_2\", default=\"table_2\")\n", + "table_2_col_id = test_settings.get(\"table_2_col_id\", default=\"table_id\")\n", + "table_2_col_score = test_settings.get(\"table_2_col_score\", default=\"colname\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create and test different endpoints" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "----" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create `biquery.schema` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "schema_function = make_schema(\n", + " settings={\n", + " \"calls_per_min\": 5,\n", + " },\n", + " worker_pool_name=this_worker_pool_name,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.add(endpoint=schema_function)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = high_client.api.services.bigquery.schema()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 23" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "TODO: Note that when we do not create a job, the type of result is `syft.service.action.pandas.PandasDataFrameObject` and not pandas but the `.get()` method will get you the expected answer" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft 
absolute\n", + "from syft.service.action.pandas import PandasDataFrameObject\n", + "\n", + "# assert isinstance(result, pd.DataFrame)\n", + "assert isinstance(result, PandasDataFrameObject)\n", + "assert isinstance(result.get(), pd.DataFrame)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "____" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create `biquery.test_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mock_func = make_test_query(\n", + " settings={\n", + " \"rate_limiter_enabled\": True,\n", + " \"calls_per_min\": 10,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "private_func = make_test_query(\n", + " settings={\n", + " \"rate_limiter_enabled\": False,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_endpoint = sy.TwinAPIEndpoint(\n", + " path=\"bigquery.test_query\",\n", + " description=\"This endpoint allows to query Bigquery storage via SQL queries.\",\n", + " private_function=private_func,\n", + " mock_function=mock_func,\n", + " worker_pool_name=this_worker_pool_name,\n", + ")\n", + "\n", + "high_client.custom_api.add(endpoint=new_endpoint)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Some features for updating endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Here, we update the endpoint to timeout after 100s (rather the default of 60s)\n", + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.test_query\", endpoint_timeout=120\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.test_query\", hide_mock_definition=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Test the `bigquery.test_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test mock version\n", + "result = high_client.api.services.bigquery.test_query.mock(\n", + " sql_query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 10\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 10" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test mock version for wrong queries\n", + "with sy.raises(\n", + " sy.SyftException(public_message=\"*must be qualified with a dataset*\"), show=True\n", + "):\n", + " high_client.api.services.bigquery.test_query.mock(\n", + " sql_query=\"SELECT * FROM invalid_table LIMIT 1\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test private version\n", + "result = high_client.api.services.bigquery.test_query.private(\n", + " sql_query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 12\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 12" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "____" + ] + 
}, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create `submit_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "submit_query_function = make_submit_query(\n", + " settings={}, worker_pool_name=this_worker_pool_name\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.add(endpoint=submit_query_function)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.submit_query\", hide_mock_definition=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Testing submit query\n", + "result = high_client.api.services.bigquery.submit_query(\n", + " func_name=\"my_func\",\n", + " query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 2\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert \"Query submitted\" in result\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job = high_client.code.my_func(blocking=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res = job.wait().get()\n", + "assert len(res) == 2\n", + "assert isinstance(res, pd.DataFrame)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test endpoints" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.api_endpoints()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.custom_api.api_endpoints()) == 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert (\n", + " high_client.api.services.bigquery.test_query\n", + " and high_client.api.services.bigquery.submit_query\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Syncing" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "diff = compare_clients(\n", + " from_client=high_client, to_client=low_client, hide_usercode=False\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "widget = diff.resolve()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "widget._share_all()\n", + "widget._sync_all()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(low_client.jobs.get_all()) == 0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(low_client.custom_api.api_endpoints()) == 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.custom_api.api_endpoints()) == 3" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test emails" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + 
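Pushing the approved endpoints from the high side to the low side is a three-step pattern: diff the two datasites, resolve the batches, then share and sync everything. A condensed sketch of those syncing cells:

```python
# Sketch of the syncing cells above: diff the two datasites, resolve the
# batches, then share and sync everything in one go.
from syft.client.syncing import compare_clients


def push_high_to_low(high_client, low_client) -> None:
    diff = compare_clients(
        from_client=high_client, to_client=low_client, hide_usercode=False
    )
    widget = diff.resolve()
    widget._share_all()
    widget._sync_all()
```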
"outputs": [], + "source": [ + "# email_server_low.get_emails_for_user(user_email=\"info@openmined.org\")\n", + "assert len(email_server.get_emails_for_user(user_email=ADMIN_EMAIL)) == 1\n", + "assert len(email_server.get_emails_for_user(user_email=ROOT_EMAIL)) == 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert (\n", + " \"Job Failed\"\n", + " in email_server.get_emails_for_user(user_email=ADMIN_EMAIL)[0].email_content\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert (\n", + " \"A new request has been submitted and requires your attention\"\n", + " in email_server.get_emails_for_user(user_email=ROOT_EMAIL)[0].email_content\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Clean up" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment != \"remote\":\n", + " server_high.land()\n", + " server_low.land()\n", + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/sync/021-create-jobs.ipynb b/notebooks/scenarios/bigquery/sync/021-create-jobs.ipynb new file mode 100644 index 00000000000..06fbaa6bc83 --- /dev/null +++ b/notebooks/scenarios/bigquery/sync/021-create-jobs.ipynb @@ -0,0 +1,440 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "from collections import Counter\n", + "import os\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.test_helpers.email_helpers import get_email_server\n", + "from syft.util.test_helpers.email_helpers import load_users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"\n", + "ROOT_EMAIL, ROOT_PASSWORD = \"admin@bigquery.org\", \"bqpw\"\n", + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "low_port = os.environ.get(\"CLUSTER_HTTP_PORT_LOW\", \"9083\")\n", + "print(environment, low_port)" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "# Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=4,\n", + " create_producer=True,\n", + " port=low_port,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "low_client = sy.login(\n", + " url=f\"http://localhost:{low_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(low_client)" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "# Create jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.util.test_helpers.job_helpers import TestJob\n", + "from syft.util.test_helpers.job_helpers import create_jobs\n", + "from syft.util.test_helpers.job_helpers import extract_code_path" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "# Inspect job data (requests for these jobs to be created)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "num_jobs = int(os.environ.get(\"NUM_TEST_JOBS\", 10))\n", + "\n", + "jobs_data = create_jobs(users, total_jobs=num_jobs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "counts = Counter([j.job_type for j in jobs_data])\n", + "for k, v in counts.most_common():\n", + " print(f\"{k}: #{v}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.util.test_helpers.job_helpers import save_jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "print(f\"{len(jobs_data)=}\")\n", + "\n", + "for job in jobs_data:\n", + " print(f\"{job.job_type=}, {job.should_succeed=}, {job.should_submit=}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "assert len(jobs_data) == num_jobs\n", + "assert all(isinstance(j, TestJob) for j in jobs_data)\n", + "assert all(job.client is not None for job in jobs_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs_data)" + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, + "source": [ + "# Submit jobs\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "admin_emails_before = len(email_server.get_emails_for_user(\"admin@bigquery.org\"))\n", + "print(f\"{admin_emails_before=}\")" + ] + }, + { + "cell_type": "markdown", + "id": "18", + "metadata": {}, + "source": [ + "## Test Succesful jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "jobs_submit_should_succeed = [j for j in jobs_data if j.should_submit]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "for job in jobs_submit_should_succeed:\n", + " client = job.client\n", + " response = client.api.services.bigquery.submit_query(\n", + " func_name=job.func_name, query=job.query\n", + " )\n", + " job.code_path = extract_code_path(response)" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": {}, + "source": [ + "## Test failures" + ] + }, + 
{ + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "jobs_submit_should_fail = [j for j in jobs_data if not j.should_submit]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "for job in jobs_submit_should_fail:\n", + " client = job.client\n", + "\n", + " with sy.raises(sy.SyftException):\n", + " client.api.services.bigquery.submit_query(\n", + " func_name=job.func_name, query=job.query\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "for job in jobs_data:\n", + " print(f\"Job {job.func_name:.20} {job.should_submit=}, {job.is_submitted=}\")\n", + "\n", + "assert all(job.is_submitted == job.should_submit for job in jobs_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs_data)" + ] + }, + { + "cell_type": "markdown", + "id": "26", + "metadata": {}, + "source": [ + "## Test: cannot execute submitted jobs yet" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "submitted_jobs = [job for job in jobs_data if job.should_submit]\n", + "job_execution_fns = [getattr(job.client.code, job.code_path) for job in submitted_jobs]\n", + "assert len(submitted_jobs) # failsafe for next tests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "for fn in job_execution_fns:\n", + " # blocking\n", + " with sy.raises(\n", + " sy.SyftException(public_message=\"*Your code is waiting for approval*\")\n", + " ):\n", + " result = fn()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "# currently errors out with\n", + "# syft.types.errors.SyftException: Please wait for the admin to allow the execution of this code\n", + "\n", + "for fn in job_execution_fns:\n", + " # nonblocking\n", + " with sy.raises(\n", + " sy.SyftException(\n", + " public_message=\"*Please wait for the admin to allow the execution of this code*\"\n", + " )\n", + " ):\n", + " result_job = fn(blocking=False)\n", + " result_job.wait()" + ] + }, + { + "cell_type": "markdown", + "id": "30", + "metadata": {}, + "source": [ + "# Verify that admin has emails for submitted requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31", + "metadata": {}, + "outputs": [], + "source": [ + "num_should_submit = sum(j.should_submit for j in jobs_data)\n", + "admin_emails_after = len(email_server.get_emails_for_user(ROOT_EMAIL))\n", + "print(\"admin emails after\", admin_emails_after)\n", + "assert admin_emails_after >= admin_emails_before + num_should_submit" + ] + }, + { + "cell_type": "markdown", + "id": "32", + "metadata": {}, + "source": [ + "# Submit a broken query" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33", + "metadata": {}, + "outputs": [], + "source": [ + "ds_client = users[0].client\n", + "submission = ds_client.api.services.bigquery.submit_query(\n", + " func_name=\"broken_query\", query=\"BROKEN QUERY\"\n", + ")\n", + "submission" + ] + }, + { + "cell_type": "markdown", + "id": "34", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35", + "metadata": {}, + "outputs": 
[], + "source": [ + "if environment != \"remote\":\n", + " server_low.land()\n", + "smtp_server.stop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "syft", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/bigquery/sync/040-do-review-requests.ipynb b/notebooks/scenarios/bigquery/sync/040-do-review-requests.ipynb new file mode 100644 index 00000000000..a0db11acf1d --- /dev/null +++ b/notebooks/scenarios/bigquery/sync/040-do-review-requests.ipynb @@ -0,0 +1,423 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "from collections import Counter\n", + "import os\n", + "\n", + "# third party\n", + "import pandas as pd\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.client.syncing import compare_clients\n", + "from syft.util.test_helpers.email_helpers import get_email_server\n", + "from syft.util.test_helpers.email_helpers import load_users\n", + "from syft.util.test_helpers.job_helpers import get_job_emails\n", + "from syft.util.test_helpers.job_helpers import get_request_for_job_info\n", + "from syft.util.test_helpers.job_helpers import load_jobs\n", + "from syft.util.test_helpers.job_helpers import save_jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"\n", + "ROOT_EMAIL, ROOT_PASSWORD = \"admin@bigquery.org\", \"bqpw\"\n", + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "high_port = os.environ.get(\"CLUSTER_HTTP_PORT_HIGH\", \"9081\")\n", + "low_port = os.environ.get(\"CLUSTER_HTTP_PORT_LOW\", \"9083\")\n", + "num_jobs = int(os.environ.get(\"NUM_TEST_JOBS\", 10))\n", + "print(environment, low_port)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch server and login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=low_port,\n", + ")\n", + "\n", + "server_high = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " server_side_type=\"high\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=high_port,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "low_client = sy.login(\n", + " url=f\"http://localhost:{low_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")\n", + "high_client = sy.login(\n", + " url=f\"http://localhost:{high_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Sync UserCode and Requests to High Side" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], 
+ "source": [ + "widget = sy.sync(low_client, high_client)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Ignore batches we dont want to sync" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "idxs_to_ignore = []\n", + "\n", + "for idx in range(len(widget)):\n", + " batch = widget[idx].obj_diff_batch\n", + " request = batch.root.low_obj\n", + " if request is not None and \"broken\" in request.code.service_func_name:\n", + " idxs_to_ignore.append(idx)\n", + "\n", + "for idx in idxs_to_ignore:\n", + " widget[idx].deny_and_ignore(\"query is broken\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "diffs = compare_clients(low_client, high_client)\n", + "# # check that only requests and usercode are in the diff\n", + "assert {diff.root_diff.obj_type.__qualname__ for diff in diffs.batches} == {\n", + " \"Request\",\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# widget._share_all()\n", + "widget._sync_all()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.service.request.request import RequestStatus" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert any(x.status == RequestStatus.REJECTED for x in low_client.requests)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Check that request synced over to high side" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "len(high_client.code.get_all())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.code.get_all()) == num_jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "requests = high_client.requests.get_all_pending()\n", + "requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(low_client)\n", + "jobs_data = load_jobs(users, low_client)\n", + "all_requests = high_client.requests\n", + "submitted_jobs_data = [job for job in jobs_data if job.is_submitted]\n", + "n_emails_per_job_user = {\n", + " k: len(v)\n", + " for k, v in get_job_emails(submitted_jobs_data, high_client, email_server).items()\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Run or Deny" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "submitted_jobs_data_should_succeed = [\n", + " j for j in submitted_jobs_data if j.should_succeed\n", + "]\n", + "submitted_jobs_data_should_fail = [\n", + " j for j in submitted_jobs_data if not j.should_succeed\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in submitted_jobs_data_should_succeed:\n", + " request = get_request_for_job_info(all_requests, job)\n", + " j = request.code(blocking=False)\n", + " result = j.wait().get()\n", + " assert isinstance(result, pd.DataFrame)\n", + " job.admin_reviewed = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + 
"for job in submitted_jobs_data_should_fail:\n", + " request = get_request_for_job_info(all_requests, job)\n", + " response = request.deny(\n", + " reason=f\"Your request {job.func_name} looks wrong, try again.\"\n", + " )\n", + " assert isinstance(response, sy.SyftSuccess)\n", + " assert not job.should_succeed\n", + " job.admin_reviewed = True" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Sync job result to low side" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "widget = sy.sync(from_client=high_client, to_client=low_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "diffs = sy.compare_clients(high_client, low_client)\n", + "batch_root_strs = [x.root_diff.obj_type.__qualname__ for x in diffs.batches]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "diffs = sy.compare_clients(high_client, low_client)\n", + "batch_root_strs = [x.root_diff.obj_type.__qualname__ for x in diffs.batches]\n", + "root_str_counts = Counter(batch_root_strs)\n", + "# for successful jobs, root diff should be job. Otherwise request\n", + "assert root_str_counts[\"Job\"] == len(submitted_jobs_data_should_succeed)\n", + "assert root_str_counts[\"Request\"] == len(submitted_jobs_data_should_fail)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "widget._share_all()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "widget._sync_all()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Check requests status on the high side" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in submitted_jobs_data_should_succeed:\n", + " request = get_request_for_job_info(all_requests, job)\n", + " assert request.status == RequestStatus.APPROVED" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Save state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Shutdown" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment != \"remote\":\n", + " server_high.land()\n", + " server_low.land()\n", + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "syft", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/sync/050-ds-get-results.ipynb b/notebooks/scenarios/bigquery/sync/050-ds-get-results.ipynb new file mode 100644 index 00000000000..c7be9a2e680 --- /dev/null +++ b/notebooks/scenarios/bigquery/sync/050-ds-get-results.ipynb @@ -0,0 +1,199 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + 
"source": [ + "# stdlib\n", + "import os\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.test_helpers.email_helpers import load_users\n", + "from syft.util.test_helpers.job_helpers import load_jobs\n", + "from syft.util.test_helpers.job_helpers import save_jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"\n", + "ROOT_EMAIL, ROOT_PW = \"admin@bigquery.org\", \"bqpw\"\n", + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "low_port = os.environ.get(\"CLUSTER_HTTP_PORT_LOW\", \"9083\")\n", + "print(environment, low_port)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch server and login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=low_port,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Download Results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "low_client = sy.login(\n", + " url=f\"http://localhost:{low_port}\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(low_client)\n", + "jobs = load_jobs(users, low_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reviewed_jobs = [job for job in jobs if job.admin_reviewed]\n", + "reviewed_jobs_should_succeed = [j for j in reviewed_jobs if j.should_succeed]\n", + "reviewed_jobs_should_fail = [j for j in reviewed_jobs if not j.should_succeed]\n", + "\n", + "print(\n", + " f\"{len(reviewed_jobs)=}, {len(reviewed_jobs_should_succeed)=}, {len(reviewed_jobs_should_fail)=}\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_succeed:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + "\n", + " if isinstance(res, sy.SyftError):\n", + " raise sy.SyftException(public_message=\"Expected success, got error\")\n", + "\n", + " result = res.get()\n", + " job.result_as_expected = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_fail:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + "\n", + " with sy.raises(sy.SyftException):\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + " job.result_as_expected = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expected_jobs = [job for job in jobs if job.result_as_expected]\n", + "print(f\"got expected_jobs: {len(expected_jobs)} == reviewed_jobs: {len(reviewed_jobs)}\")\n", + "assert 
len(reviewed_jobs) == len(expected_jobs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment != \"remote\":\n", + " server_low.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/sync/README.md b/notebooks/scenarios/bigquery/sync/README.md new file mode 100644 index 00000000000..ca51f9ae71d --- /dev/null +++ b/notebooks/scenarios/bigquery/sync/README.md @@ -0,0 +1,16 @@ +# Testing works over 4 possibilities + +1. (python/in-memory workers and using tox commands) +2. (python/in-memory workers and manually running notebooks) +3. (using k8s and using tox commands) +4. (using k8s and manually running notebooks) + +Add the lines below to notebook cells if in the 4th possibility + +```python +os.environ["ORCHESTRA_DEPLOYMENT_TYPE"] = "remote" +os.environ["DEV_MODE"] = "True" +os.environ["TEST_EXTERNAL_REGISTRY"] = "k3d-registry.localhost:5800" +os.environ["CLUSTER_HTTP_PORT_HIGH"] = "9081" +os.environ["CLUSTER_HTTP_PORT_LOW"] = "9083" +``` diff --git a/packages/hagrid/tests/hagrid/__init__.py b/notebooks/scenarios/bigquery/sync/__init__.py similarity index 100% rename from packages/hagrid/tests/hagrid/__init__.py rename to notebooks/scenarios/bigquery/sync/__init__.py diff --git a/packages/syft/src/syft/exceptions/__init__.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/__init__.py similarity index 100% rename from packages/syft/src/syft/exceptions/__init__.py rename to notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/__init__.py diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/__init__.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/__init__.py new file mode 100644 index 00000000000..7231b580696 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/__init__.py @@ -0,0 +1,23 @@ +# stdlib +import os + +# syft absolute +from syft.util.util import str_to_bool + +# relative +from .submit_query import make_submit_query + +env_var = "TEST_BIGQUERY_APIS_LIVE" +use_live = str_to_bool(str(os.environ.get(env_var, "False"))) +env_name = "Live" if use_live else "Mock" +print(f"Using {env_name} API Code, this will query BigQuery. 
${env_var}=={use_live}") + + +if use_live: + # relative + from .live.schema import make_schema + from .live.test_query import make_test_query +else: + # relative + from .mock.schema import make_schema + from .mock.test_query import make_test_query diff --git a/packages/syft/src/syft/node/__init__.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/live/__init__.py similarity index 100% rename from packages/syft/src/syft/node/__init__.py rename to notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/live/__init__.py diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/live/schema.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/live/schema.py new file mode 100644 index 00000000000..5b39d9d9066 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/live/schema.py @@ -0,0 +1,108 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy +from syft import test_settings + +# relative +from ..rate_limiter import is_within_rate_limit + + +def make_schema(settings: dict, worker_pool: str) -> Callable: + updated_settings = { + "calls_per_min": 5, + "rate_limiter_enabled": True, + "credentials": test_settings.gce_service_account.to_dict(), + "region": test_settings.gce_region, + "project_id": test_settings.gce_project_id, + "dataset_1": test_settings.dataset_1, + "table_1": test_settings.table_1, + "table_2": test_settings.table_2, + } | settings + + @sy.api_endpoint( + path="bigquery.schema", + description="This endpoint allows for visualising the metadata of tables available in BigQuery.", + settings=updated_settings, + helper_functions=[ + is_within_rate_limit + ], # Adds ratelimit as this is also a method available to data scientists + worker_pool=worker_pool, + ) + def live_schema( + context, + ) -> str: + # stdlib + import datetime + + # third party + from google.cloud import bigquery # noqa: F811 + from google.oauth2 import service_account + import pandas as pd + + # syft absolute + from syft import SyftException + + # Auth for Bigquer based on the workload identity + credentials = service_account.Credentials.from_service_account_info( + context.settings["credentials"] + ) + scoped_credentials = credentials.with_scopes( + ["https://www.googleapis.com/auth/cloud-platform"] + ) + + client = bigquery.Client( + credentials=scoped_credentials, + location=context.settings["region"], + ) + + # Store a dict with the calltimes for each user, via the email. + if context.settings["rate_limiter_enabled"]: + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." 
+ ) + context.state[context.user.email].append(datetime.datetime.now()) + + try: + # Formats the data schema in a data frame format + # Warning: the only supported format types are primitives, np.ndarrays and pd.DataFrames + + data_schema = [] + for table_id in [ + f"{context.settings['dataset_1']}.{context.settings['table_1']}", + f"{context.settings['dataset_1']}.{context.settings['table_2']}", + ]: + table = client.get_table(table_id) + for schema in table.schema: + data_schema.append( + { + "project": str(table.project), + "dataset_id": str(table.dataset_id), + "table_id": str(table.table_id), + "schema_name": str(schema.name), + "schema_field": str(schema.field_type), + "description": str(table.description), + "num_rows": str(table.num_rows), + } + ) + return pd.DataFrame(data_schema) + + except Exception as e: + # not a bigquery exception + if not hasattr(e, "_errors"): + output = f"got exception e: {type(e)} {str(e)}" + raise SyftException( + public_message=f"An error occured executing the API call {output}" + ) + + # Should add appropriate error handling for what should be exposed to the data scientists. + raise SyftException( + public_message="An error occured executing the API call, please contact the domain owner." + ) + + return live_schema diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/live/test_query.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/live/test_query.py new file mode 100644 index 00000000000..344879dcb62 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/live/test_query.py @@ -0,0 +1,113 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy +from syft import test_settings + +# relative +from ..rate_limiter import is_within_rate_limit + + +def make_test_query(settings) -> Callable: + updated_settings = { + "calls_per_min": 10, + "rate_limiter_enabled": True, + "credentials": test_settings.gce_service_account.to_dict(), + "region": test_settings.gce_region, + "project_id": test_settings.gce_project_id, + } | settings + + # these are the same if you allow the rate limiter to be turned on and off + @sy.api_endpoint_method( + settings=updated_settings, + helper_functions=[is_within_rate_limit], + ) + def live_test_query( + context, + sql_query: str, + ) -> str: + # stdlib + import datetime + + # third party + from google.cloud import bigquery # noqa: F811 + from google.oauth2 import service_account + + # syft absolute + from syft import SyftException + + # Auth for Bigquer based on the workload identity + credentials = service_account.Credentials.from_service_account_info( + context.settings["credentials"] + ) + scoped_credentials = credentials.with_scopes( + ["https://www.googleapis.com/auth/cloud-platform"] + ) + + client = bigquery.Client( + credentials=scoped_credentials, + location=context.settings["region"], + ) + + # Store a dict with the calltimes for each user, via the email. + if context.settings["rate_limiter_enabled"]: + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." 
+ ) + context.state[context.user.email].append(datetime.datetime.now()) + + try: + rows = client.query_and_wait( + sql_query, + project=context.settings["project_id"], + ) + + if rows.total_rows > 1_000_000: + raise SyftException( + public_message="Please only write queries that gather aggregate statistics" + ) + + return rows.to_dataframe() + + except Exception as e: + # not a bigquery exception + if not hasattr(e, "_errors"): + output = f"got exception e: {type(e)} {str(e)}" + raise SyftException( + public_message=f"An error occured executing the API call {output}" + ) + + # Treat all errors that we would like to be forwarded to the data scientists + # By default, any exception is only visible to the data owner. + + if e._errors[0]["reason"] in [ + "badRequest", + "blocked", + "duplicate", + "invalidQuery", + "invalid", + "jobBackendError", + "jobInternalError", + "notFound", + "notImplemented", + "rateLimitExceeded", + "resourceInUse", + "resourcesExceeded", + "tableUnavailable", + "timeout", + ]: + raise SyftException( + public_message="Error occured during the call: " + + e._errors[0]["message"] + ) + else: + raise SyftException( + public_message="An error occured executing the API call, please contact the domain owner." + ) + + return live_test_query diff --git a/packages/syft/tests/syft/action_graph/__init__.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/__init__.py similarity index 100% rename from packages/syft/tests/syft/action_graph/__init__.py rename to notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/__init__.py diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/data.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/data.py new file mode 100644 index 00000000000..82262bf7a01 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/data.py @@ -0,0 +1,268 @@ +# stdlib +from math import nan + +schema_dict = { + "project": { + 0: "example-project", + 1: "example-project", + 2: "example-project", + 3: "example-project", + 4: "example-project", + 5: "example-project", + 6: "example-project", + 7: "example-project", + 8: "example-project", + 9: "example-project", + 10: "example-project", + 11: "example-project", + 12: "example-project", + 13: "example-project", + 14: "example-project", + 15: "example-project", + 16: "example-project", + 17: "example-project", + 18: "example-project", + 19: "example-project", + 20: "example-project", + 21: "example-project", + 22: "example-project", + }, + "dataset_id": { + 0: "test_1gb", + 1: "test_1gb", + 2: "test_1gb", + 3: "test_1gb", + 4: "test_1gb", + 5: "test_1gb", + 6: "test_1gb", + 7: "test_1gb", + 8: "test_1gb", + 9: "test_1gb", + 10: "test_1gb", + 11: "test_1gb", + 12: "test_1gb", + 13: "test_1gb", + 14: "test_1gb", + 15: "test_1gb", + 16: "test_1gb", + 17: "test_1gb", + 18: "test_1gb", + 19: "test_1gb", + 20: "test_1gb", + 21: "test_1gb", + 22: "test_1gb", + }, + "table_id": { + 0: "posts", + 1: "posts", + 2: "posts", + 3: "posts", + 4: "posts", + 5: "posts", + 6: "posts", + 7: "comments", + 8: "comments", + 9: "comments", + 10: "comments", + 11: "comments", + 12: "comments", + 13: "comments", + 14: "comments", + 15: "comments", + 16: "comments", + 17: "comments", + 18: "comments", + 19: "comments", + 20: "comments", + 21: "comments", + 22: "comments", + }, + "schema_name": { + 0: "int64_field_0", + 1: "id", + 2: "name", + 3: "subscribers_count", + 4: "permalink", + 5: "nsfw", + 6: "spam", + 7: "int64_field_0", + 8: 
"id", + 9: "body", + 10: "parent_id", + 11: "created_at", + 12: "last_modified_at", + 13: "gilded", + 14: "permalink", + 15: "score", + 16: "comment_id", + 17: "post_id", + 18: "author_id", + 19: "spam", + 20: "deleted", + 21: "upvote_raio", + 22: "collapsed_in_crowd_control", + }, + "schema_field": { + 0: "INTEGER", + 1: "STRING", + 2: "STRING", + 3: "INTEGER", + 4: "STRING", + 5: "FLOAT", + 6: "BOOLEAN", + 7: "INTEGER", + 8: "STRING", + 9: "STRING", + 10: "STRING", + 11: "INTEGER", + 12: "INTEGER", + 13: "BOOLEAN", + 14: "STRING", + 15: "INTEGER", + 16: "STRING", + 17: "STRING", + 18: "STRING", + 19: "BOOLEAN", + 20: "BOOLEAN", + 21: "FLOAT", + 22: "BOOLEAN", + }, + "description": { + 0: "None", + 1: "None", + 2: "None", + 3: "None", + 4: "None", + 5: "None", + 6: "None", + 7: "None", + 8: "None", + 9: "None", + 10: "None", + 11: "None", + 12: "None", + 13: "None", + 14: "None", + 15: "None", + 16: "None", + 17: "None", + 18: "None", + 19: "None", + 20: "None", + 21: "None", + 22: "None", + }, + "num_rows": { + 0: "2000000", + 1: "2000000", + 2: "2000000", + 3: "2000000", + 4: "2000000", + 5: "2000000", + 6: "2000000", + 7: "2000000", + 8: "2000000", + 9: "2000000", + 10: "2000000", + 11: "2000000", + 12: "2000000", + 13: "2000000", + 14: "2000000", + 15: "2000000", + 16: "2000000", + 17: "2000000", + 18: "2000000", + 19: "2000000", + 20: "2000000", + 21: "2000000", + 22: "2000000", + }, +} + + +query_dict = { + "int64_field_0": { + 0: 4, + 1: 5, + 2: 10, + 3: 16, + 4: 17, + 5: 23, + 6: 24, + 7: 25, + 8: 27, + 9: 40, + }, + "id": { + 0: "t5_via1x", + 1: "t5_cv9gn", + 2: "t5_8p2tq", + 3: "t5_8fcro", + 4: "t5_td5of", + 5: "t5_z01fv", + 6: "t5_hmqjk", + 7: "t5_1flyj", + 8: "t5_5rwej", + 9: "t5_uurcv", + }, + "name": { + 0: "/channel/mylittlepony", + 1: "/channel/polyamory", + 2: "/channel/Catholicism", + 3: "/channel/cordcutters", + 4: "/channel/stevenuniverse", + 5: "/channel/entitledbitch", + 6: "/channel/engineering", + 7: "/channel/nottheonion", + 8: "/channel/FoodPorn", + 9: "/channel/puppysmiles", + }, + "subscribers_count": { + 0: 4323081, + 1: 2425929, + 2: 4062607, + 3: 7543226, + 4: 2692168, + 5: 2709080, + 6: 8766144, + 7: 2580984, + 8: 7784809, + 9: 3715991, + }, + "permalink": { + 0: "/channel//channel/mylittlepony", + 1: "/channel//channel/polyamory", + 2: "/channel//channel/Catholicism", + 3: "/channel//channel/cordcutters", + 4: "/channel//channel/stevenuniverse", + 5: "/channel//channel/entitledbitch", + 6: "/channel//channel/engineering", + 7: "/channel//channel/nottheonion", + 8: "/channel//channel/FoodPorn", + 9: "/channel//channel/puppysmiles", + }, + "nsfw": { + 0: nan, + 1: nan, + 2: nan, + 3: nan, + 4: nan, + 5: nan, + 6: nan, + 7: nan, + 8: nan, + 9: nan, + }, + "spam": { + 0: False, + 1: False, + 2: False, + 3: False, + 4: False, + 5: False, + 6: False, + 7: False, + 8: False, + 9: False, + }, +} diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/schema.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/schema.py new file mode 100644 index 00000000000..a95e04f2f1d --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/schema.py @@ -0,0 +1,52 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy + +# relative +from ..rate_limiter import is_within_rate_limit +from .data import schema_dict + + +def make_schema(settings, worker_pool) -> Callable: + updated_settings = { + "calls_per_min": 5, + "rate_limiter_enabled": True, + "schema_dict": schema_dict, + 
} | settings + + @sy.api_endpoint( + path="bigquery.schema", + description="This endpoint allows for visualising the metadata of tables available in BigQuery.", + settings=updated_settings, + helper_functions=[is_within_rate_limit], + worker_pool=worker_pool, + ) + def mock_schema( + context, + ) -> str: + # syft absolute + from syft import SyftException + + # Store a dict with the calltimes for each user, via the email. + if context.settings["rate_limiter_enabled"]: + # stdlib + import datetime + + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." + ) + context.state[context.user.email].append(datetime.datetime.now()) + + # third party + import pandas as pd + + df = pd.DataFrame(context.settings["schema_dict"]) + return df + + return mock_schema diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/test_query.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/test_query.py new file mode 100644 index 00000000000..ae028a8cf36 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/mock/test_query.py @@ -0,0 +1,138 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy + +# relative +from ..rate_limiter import is_within_rate_limit +from .data import query_dict + + +def extract_limit_value(sql_query: str) -> int: + # stdlib + import re + + limit_pattern = re.compile(r"\bLIMIT\s+(\d+)\b", re.IGNORECASE) + match = limit_pattern.search(sql_query) + if match: + return int(match.group(1)) + return None + + +def is_valid_sql(query: str) -> bool: + # stdlib + import sqlite3 + + # Prepare an in-memory SQLite database + conn = sqlite3.connect(":memory:") + cursor = conn.cursor() + + try: + # Use the EXPLAIN QUERY PLAN command to get the query plan + cursor.execute(f"EXPLAIN QUERY PLAN {query}") + except sqlite3.Error as e: + if "no such table" in str(e).lower(): + return True + return False + finally: + conn.close() + + +def adjust_dataframe_rows(df, target_rows: int): + # third party + import pandas as pd + + current_rows = len(df) + + if target_rows > current_rows: + # Repeat rows to match target_rows + repeat_times = (target_rows + current_rows - 1) // current_rows + df_expanded = pd.concat([df] * repeat_times, ignore_index=True).head( + target_rows + ) + else: + # Truncate rows to match target_rows + df_expanded = df.head(target_rows) + + return df_expanded + + +def make_test_query(settings: dict) -> Callable: + updated_settings = { + "calls_per_min": 10, + "rate_limiter_enabled": True, + "query_dict": query_dict, + } | settings + + # these are the same if you allow the rate limiter to be turned on and off + @sy.api_endpoint_method( + settings=updated_settings, + helper_functions=[ + is_within_rate_limit, + extract_limit_value, + is_valid_sql, + adjust_dataframe_rows, + ], + ) + def mock_test_query( + context, + sql_query: str, + ) -> str: + # stdlib + import datetime + + # third party + from google.api_core.exceptions import BadRequest + + # syft absolute + from syft import SyftException + + # Store a dict with the calltimes for each user, via the email. 
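+        # The endpoint state maps each user's email to a list of call timestamps;
+        # the is_within_rate_limit helper (rate_limiter.py) counts the timestamps
+        # from the last 60 seconds against settings["calls_per_min"].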
+ if context.settings["rate_limiter_enabled"]: + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." + ) + context.state[context.user.email].append(datetime.datetime.now()) + + bad_table = "invalid_table" + bad_post = ( + "BadRequest: 400 POST " + "https://bigquery.googleapis.com/bigquery/v2/projects/project-id/" + "queries?prettyPrint=false: " + ) + if bad_table in sql_query: + try: + raise BadRequest( + f'{bad_post} Table "{bad_table}" must be qualified ' + "with a dataset (e.g. dataset.table)." + ) + except Exception as e: + raise SyftException( + public_message=f"*must be qualified with a dataset*. {e}" + ) + + if not context.code.is_valid_sql(sql_query): + raise BadRequest( + f'{bad_post} Syntax error: Unexpected identifier "{sql_query}" at [1:1]' + ) + + # third party + import pandas as pd + + limit = context.code.extract_limit_value(sql_query) + if limit > 1_000_000: + raise SyftException( + public_message="Please only write queries that gather aggregate statistics" + ) + + base_df = pd.DataFrame(context.settings["query_dict"]) + + df = context.code.adjust_dataframe_rows(base_df, limit) + return df + + return mock_test_query diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/rate_limiter.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/rate_limiter.py new file mode 100644 index 00000000000..8ce319b61f4 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/rate_limiter.py @@ -0,0 +1,16 @@ +def is_within_rate_limit(context) -> bool: + """Rate limiter for custom API calls made by users.""" + # stdlib + import datetime + + state = context.state + settings = context.settings + email = context.user.email + + current_time = datetime.datetime.now() + calls_last_min = [ + 1 if (current_time - call_time).seconds < 60 else 0 + for call_time in state[email] + ] + + return sum(calls_last_min) < settings.get("calls_per_min", 5) diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/submit_query.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/submit_query.py new file mode 100644 index 00000000000..a0125ee009b --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/apis/submit_query.py @@ -0,0 +1,42 @@ +# syft absolute +import syft as sy + + +def make_submit_query(settings, worker_pool): + updated_settings = {"user_code_worker": worker_pool} | settings + + @sy.api_endpoint( + path="bigquery.submit_query", + description="API endpoint that allows you to submit SQL queries to run on the private data.", + worker_pool=worker_pool, + settings=updated_settings, + ) + def submit_query( + context, + func_name: str, + query: str, + ) -> str: + # syft absolute + import syft as sy + + @sy.syft_function( + name=func_name, + input_policy=sy.MixedInputPolicy( + endpoint=sy.Constant( + val=context.admin_client.api.services.bigquery.test_query + ), + query=sy.Constant(val=query), + client=context.admin_client, + ), + worker_pool_name=context.settings["user_code_worker"], + ) + def execute_query(query: str, endpoint): + res = endpoint(sql_query=query) + return res + + request = context.user_client.code.request_code_execution(execute_query) + context.admin_client.requests.set_tags(request, ["autosync"]) + + return f"Query submitted {request}. 
Use `client.code.{func_name}()` to run your query" + + return submit_query diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/email_helpers.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/email_helpers.py new file mode 100644 index 00000000000..dda08c21866 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/email_helpers.py @@ -0,0 +1,338 @@ +# stdlib +import asyncio +from dataclasses import dataclass +from dataclasses import field +import json +import re +import time +from typing import Any + +# third party +from aiosmtpd.controller import Controller +from faker import Faker + +# syft absolute +from syft.service.user.user_roles import ServiceRole + +fake = Faker() + + +@dataclass +class Email: + email_from: str + email_to: str + email_content: str + + def to_dict(self) -> dict: + output = {} + for k, v in self.__dict__.items(): + output[k] = v + return output + + def __iter__(self): + yield from self.to_dict().items() + + def __getitem__(self, key): + return self.to_dict()[key] + + def __repr__(self) -> str: + return f"{self.email_to}\n{self.email_from}\n\n{self.email_content}" + + +class EmailServer: + def __init__(self, filepath="./emails.json"): + self.filepath = filepath + self._emails: dict[str, list[Email]] = self.load_emails() + + def load_emails(self) -> dict[str, list[Email]]: + try: + with open(self.filepath) as f: + data = json.load(f) + return {k: [Email(**email) for email in v] for k, v in data.items()} + except Exception as e: + print("Issues reading email file", e) + return {} + + def save_emails(self) -> None: + with open(self.filepath, "w") as f: + data = { + k: [email.to_dict() for email in v] for k, v in self._emails.items() + } + f.write(json.dumps(data)) + + def add_email_for_user(self, user_email: str, email: Email) -> None: + if user_email not in self._emails: + self._emails[user_email] = [] + self._emails[user_email].append(email) + self.save_emails() + + def get_emails_for_user(self, user_email: str) -> list[Email]: + self._emails: dict[str, list[Email]] = self.load_emails() + return self._emails.get(user_email, []) + + def reset_emails(self) -> None: + self._emails = {} + self.save_emails() + + +SENDER = "noreply@openmined.org" + + +def get_token(email) -> str: + # stdlib + import re + + pattern = r"syft_client\.reset_password\(token='(.*?)', new_password=.*?\)" + try: + token = re.search(pattern, email.email_content).group(1) + except Exception: + raise Exception(f"No token found in email: {email.email_content}") + return token + + +@dataclass +class TestUser: + name: str + email: str + password: str + role: ServiceRole + new_password: str | None = None + email_disabled: bool = False + reset_password: bool = False + reset_token: str | None = None + _client_cache: Any | None = field(default=None, repr=False, init=False) + _email_server: EmailServer | None = None + + @property + def latest_password(self) -> str: + if self.new_password: + return self.new_password + return self.password + + def make_new_password(self) -> str: + self.new_password = fake.password() + return self.new_password + + @property + def client(self): + return self._client_cache + + def relogin(self) -> None: + self.client = self.client + + @client.setter + def client(self, client): + client = client.login(email=self.email, password=self.latest_password) + self._client_cache = client + + def to_dict(self) -> dict: + output = {} + for k, v in self.__dict__.items(): + if k.startswith("_"): + continue + if k == "role": + v = str(v) + 
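+            # role enums were stringified above so json.dumps in save_users works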
output[k] = v + return output + + def __iter__(self): + for key, val in self.to_dict().items(): + if not key.startswith("_"): + yield key, val + + def __getitem__(self, key): + if key.startswith("_"): + return None + return self.to_dict()[key] + + def update_password(self): + self.password = self.new_password + self.new_password = None + + @property + def emails(self) -> list[Email]: + if not self._email_server: + print("Not connected to email server object") + return [] + return self._email_server.get_emails_for_user(self.email) + + def get_token(self) -> str: + for email in reversed(self.emails): + token = None + try: + token = get_token(email) + break + except Exception: # nosec + pass + self.reset_token = token + return token + + +def save_users(users): + user_dicts = [] + for user in users: + user_dicts.append(user.to_dict()) + print(user_dicts) + with open("./users.json", "w") as f: + f.write(json.dumps(user_dicts)) + + +def load_users(high_client: None, path="./users.json"): + users = [] + with open(path) as f: + data = f.read() + user_dicts = json.loads(data) + for user in user_dicts: + test_user = TestUser(**user) + if high_client: + test_user.client = high_client + users.append(test_user) + return users + + +def make_user( + name: str | None = None, + email: str | None = None, + password: str | None = None, + role: ServiceRole = ServiceRole.DATA_SCIENTIST, +): + fake = Faker() + if name is None: + name = fake.name() + if email is None: + ascii_string = re.sub(r"[^a-zA-Z\s]", "", name).lower() + dashed_string = ascii_string.replace(" ", "-") + email = f"{dashed_string}-fake@openmined.org" + if password is None: + password = fake.password() + + return TestUser(name=name, email=email, password=password, role=role) + + +def user_exists(root_client, email: str) -> bool: + users = root_client.api.services.user + for user in users: + if user.email == email: + return True + return False + + +class SMTPTestServer: + def __init__(self, email_server): + self.port = 9025 + self.hostname = "0.0.0.0" # nosec: B104 + self._stop_event = asyncio.Event() + + # Simple email handler class + class SimpleHandler: + async def handle_DATA(self, server, session, envelope): + try: + print(f"> SMTPTestServer got an email for {envelope.rcpt_tos}") + email = Email( + email_from=envelope.mail_from, + email_to=envelope.rcpt_tos, + email_content=envelope.content.decode( + "utf-8", errors="replace" + ), + ) + email_server.add_email_for_user(envelope.rcpt_tos[0], email) + email_server.save_emails() + return "250 Message accepted for delivery" + except Exception as e: + print(f"> Error handling email: {e}") + return "550 Internal Server Error" + + try: + self.handler = SimpleHandler() + self.controller = Controller( + self.handler, hostname=self.hostname, port=self.port + ) + except Exception as e: + print(f"> Error initializing SMTPTestServer Controller: {e}") + + def start(self): + print(f"> Starting SMTPTestServer on: {self.hostname}:{self.port}") + asyncio.create_task(self.async_loop()) + + async def async_loop(self): + try: + print(f"> Starting SMTPTestServer on: {self.hostname}:{self.port}") + self.controller.start() + await ( + self._stop_event.wait() + ) # Wait until the event is set to stop the server + except Exception as e: + print(f"> Error with SMTPTestServer: {e}") + + def stop(self): + try: + print("> Stopping SMTPTestServer") + loop = asyncio.get_running_loop() + if loop.is_running(): + loop.create_task(self.async_stop()) + else: + asyncio.run(self.async_stop()) + except Exception as e: + print(f"> 
Error stopping SMTPTestServer: {e}") + + async def async_stop(self): + self.controller.stop() + self._stop_event.set() # Stop the server by setting the event + + +class TimeoutError(Exception): + pass + + +class Timeout: + def __init__(self, timeout_duration): + if timeout_duration > 60: + raise ValueError("Timeout duration cannot exceed 60 seconds.") + self.timeout_duration = timeout_duration + + def run_with_timeout(self, condition_func, *args, **kwargs): + start_time = time.time() + result = None + + while True: + elapsed_time = time.time() - start_time + if elapsed_time > self.timeout_duration: + raise TimeoutError( + f"Function execution exceeded {self.timeout_duration} seconds." + ) + + # Check if the condition is met + try: + if condition_func(): + print("Condition met, exiting early.") + break + except Exception as e: + print(f"Exception in target function: {e}") + break # Exit the loop if an exception occurs in the function + time.sleep(1) + + return result + + +def get_email_server(reset=False): + email_server = EmailServer() + if reset: + email_server.reset_emails() + smtp_server = SMTPTestServer(email_server) + smtp_server.start() + return email_server, smtp_server + + +def create_user(root_client, test_user): + if not user_exists(root_client, test_user.email): + fake = Faker() + root_client.register( + name=test_user.name, + email=test_user.email, + password=test_user.password, + password_verify=test_user.password, + institution=fake.company(), + website=fake.url(), + ) + else: + print("User already exists", test_user) diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/job_helpers.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/job_helpers.py new file mode 100644 index 00000000000..e34a2eb10c7 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/job_helpers.py @@ -0,0 +1,400 @@ +# stdlib +from collections import defaultdict +from collections.abc import Callable +from dataclasses import dataclass +from dataclasses import field +import json +import random +import re +import secrets +import textwrap +from typing import Any + +# third party +from email_helpers import TestUser + +# syft absolute +from syft import test_settings + +from syft.client.client import SyftClient # noqa + +dataset_1 = test_settings.get("dataset_1", default="dataset_1") +dataset_2 = test_settings.get("dataset_2", default="dataset_2") +table_1 = test_settings.get("table_1", default="table_1") +table_2 = test_settings.get("table_2", default="table_2") +table_1_col_id = test_settings.get("table_1_col_id", default="table_id") +table_1_col_score = test_settings.get("table_1_col_score", default="colname") +table_2_col_id = test_settings.get("table_2_col_id", default="table_id") +table_2_col_score = test_settings.get("table_2_col_score", default="colname") + + +@dataclass +class TestJob: + user_email: str + func_name: str + query: str + job_type: str + settings: dict # make a type so we can rely on attributes + should_succeed: bool + should_submit: bool = True + code_path: str | None = field(default=None) + admin_reviewed: bool = False + result_as_expected: bool | None = None + + _client_cache: SyftClient | None = field(default=None, repr=False, init=False) + + @property + def is_submitted(self) -> bool: + return self.code_path is not None + + @property + def client(self): + return self._client_cache + + @client.setter + def client(self, client): + self._client_cache = client + + def to_dict(self) -> dict: + output = {} + for k, v in self.__dict__.items(): + 
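+            # drop private fields such as the cached client, which are not JSON-serializable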
if k.startswith("_"): + continue + output[k] = v + return output + + def __iter__(self): + for key, val in self.to_dict().items(): + if key.startswith("_"): + yield key, val + + def __getitem__(self, key): + if key.startswith("_"): + return None + return self.to_dict()[key] + + @property + def code_method(self) -> None | Callable: + try: + return getattr(self.client.code, self.func_name, None) + except Exception as e: + print(f"Cant find code method. {e}") + return None + + +def make_query(settings: dict) -> str: + query = f""" + SELECT {settings['groupby_col']}, AVG({settings['score_col']}) AS average_score + FROM {settings['dataset']}.{settings['table']} + GROUP BY {settings['groupby_col']} + LIMIT {settings['limit']}""".strip() # nosec: B608 + + return textwrap.dedent(query) + + +def create_simple_query_job(user: TestUser) -> TestJob: + job_type = "simple_query" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + dataset = random.choice([dataset_1, dataset_2]) # nosec: B311 + table, groupby_col, score_col = random.choice( # nosec: B311 + [ + (table_1, table_1_col_id, table_1_col_score), + (table_2, table_2_col_id, table_2_col_score), + ] + ) + limit = random.randint(1, 1_000_000) # nosec: B311 + + settings = { + "dataset": dataset, + "table": table, + "groupby_col": groupby_col, + "score_col": score_col, + "limit": limit, + } + query = make_query(settings) + + result = TestJob( + user_email=user.email, + func_name=func_name, + query=query, + job_type=job_type, + settings=settings, + should_succeed=True, + ) + + result.client = user.client + return result + + +def create_wrong_asset_query(user: TestUser) -> TestJob: + job_type = "wrong_asset_query" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + valid_job = create_simple_query_job(user) + settings = valid_job.settings + corrupted_asset = random.choice(["dataset", "table"]) # nosec: B311 + settings[corrupted_asset] = "wrong_asset" + query = make_query(settings) + + result = TestJob( + user_email=user.email, + func_name=func_name, + query=query, + job_type=job_type, + settings=settings, + should_succeed=False, + ) + + result.client = user.client + return result + + +def create_wrong_syntax_query(user: TestUser) -> TestJob: + job_type = "wrong_syntax_query" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + query = "SELECT * FROM table INCORRECT SYNTAX" + + result = TestJob( + user_email=user.email, + func_name=func_name, + query=query, + job_type=job_type, + settings={}, + should_succeed=False, + ) + + result.client = user.client + return result + + +def create_long_query_job(user: TestUser) -> TestJob: + job_type = "job_too_much_text" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + query = "a" * 1_000 + + result = TestJob( + user_email=user.email, + func_name=func_name, + query=query, + job_type=job_type, + settings={}, + should_succeed=False, + ) + + result.client = user.client + return result + + +def create_query_long_name(user: TestUser) -> TestJob: + job_type = "job_long_name" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + job = create_simple_query_job(user) + + job.job_type = job_type + job.func_name = func_name + "a" * 1_000 + + return job + + +def create_job_funcname_xss(user: TestUser) -> TestJob: + job_type = "job_funcname_xss" + func_name = f"{job_type}_{secrets.token_hex(3)}" + func_name += "" + + job = create_simple_query_job(user) + job.job_type = job_type + job.func_name = func_name + job.should_submit = False + return job + + +def get_request_for_job_info(requests, job): + job_requests = [r for 
r in requests if r.code.service_func_name == job.func_name] + if len(job_requests) != 1: + raise Exception(f"Too many or too few requests: {job} in requests: {requests}") + return job_requests[0] + + +def create_job_query_xss(user: TestUser) -> TestJob: + job_type = "job_query_xss" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + job = create_simple_query_job(user) + job.job_type = job_type + job.func_name = func_name + job.query += "" + job.should_succeed = False + + return job + + +def create_job_many_columns(user: TestUser) -> TestJob: + job_type = "job_many_columns" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + job = create_simple_query_job(user) + job.job_type = job_type + job.func_name = func_name + settings = job.settings + job.settings["num_extra_cols"] = random.randint(100, 1000) # nosec: B311 + + new_columns_string = ", ".join( + f"{settings['score_col']} as col_{i}" for i in range(settings["num_extra_cols"]) + ) + + job.query = f""" + SELECT {settings['groupby_col']}, AVG({settings['score_col']}) AS average_score, {new_columns_string} + FROM {settings['dataset']}.{settings['table']} + GROUP BY {settings['groupby_col']} + LIMIT {settings['limit']}""".strip() # nosec: B608 + + return job + + +def create_random_job(user: TestUser) -> TestJob: + job_func = random.choice(create_job_functions) # nosec: B311 + return job_func(user) + + +def create_jobs(users: list[TestUser], total_jobs: int = 10) -> list[TestJob]: + jobs = [] + num_users = len(users) + user_index = 0 + each_count = 0 + # keep making jobs until we have enough + while len(jobs) < total_jobs: + # if we havent used each job type yet keep getting the next one + if each_count < len(create_job_functions): + job_func = create_job_functions[each_count] + each_count += 1 + else: + # otherwise lets get a random one + job_func = create_random_job + # use the current index of user + jobs.append(job_func(users[user_index])) + + # only go as high as the last user index + if user_index < num_users - 1: + user_index += 1 + else: + # reset back to the first user + user_index = 0 + + # in case we stuffed up + if len(jobs) > total_jobs: + jobs = jobs[:total_jobs] + return jobs + + +def submit_job(job: TestJob) -> tuple[Any, str]: + client = job.client + response = client.api.services.bigquery.submit_query( + func_name=job.func_name, query=job.query + ) + job.code_path = extract_code_path(response) + return response + + +def extract_code_path(response) -> str | None: + pattern = r"client\.code\.(\w+)\(\)" + match = re.search(pattern, str(response)) + if match: + extracted_code = match.group(1) + return extracted_code + return None + + +def approve_by_running(request): + job = request.code(blocking=False) + result = job.wait() + print("got result of type", type(result), "bool", bool(result)) + # got result of type bool False + # assert result won't work unless we know what type is coming back + job_info = job.info(result=True) + # need force when running multiple times + # todo check and dont run if its already done + response = request.deposit_result(job_info, approve=True, force=True) + return response + + +def get_job_emails(jobs, client, email_server): + all_requests = client.requests + res = {} + for job in jobs: + request = get_request_for_job_info(all_requests, job) + emails = email_server.get_emails_for_user(request.requesting_user_email) + res[request.requesting_user_email] = emails + return res + + +def resolve_request(request): + service_func_name = request.code.service_func_name + if 
service_func_name.startswith("simple_query"): + request.approve() # approve because it is good + if service_func_name.startswith("wrong_asset_query"): + request.approve() # approve because it is bad + if service_func_name.startswith("wrong_syntax_query"): + request.approve() # approve because it is bad + if service_func_name.startswith("job_too_much_text"): + request.deny(reason="too long, boring!") # deny because it is bad + if service_func_name.startswith("job_long_name"): + request.approve() + if service_func_name.startswith("job_funcname_xss"): + request.deny(reason="too long, boring!") # never reach doesnt matter + if service_func_name.startswith("job_query_xss"): + request.approve() # approve because it is bad + if service_func_name.startswith("job_many_columns"): + request.approve() # approve because it is bad + + return (request.id, request.status) + + +create_job_functions = [ + create_simple_query_job, # quick way to increase the odds + create_simple_query_job, + create_simple_query_job, + create_simple_query_job, + create_simple_query_job, + create_simple_query_job, + create_wrong_syntax_query, + create_long_query_job, + create_query_long_name, + create_job_funcname_xss, + create_job_query_xss, + create_job_many_columns, +] + + +def save_jobs(jobs, filepath="./jobs.json"): + user_jobs = defaultdict(list) + for job in jobs: + user_jobs[job.user_email].append(job.to_dict()) + with open(filepath, "w") as f: + f.write(json.dumps(user_jobs)) + + +def load_jobs(users, high_client, filepath="./jobs.json"): + data = {} + try: + with open(filepath) as f: + data = json.loads(f.read()) + except Exception as e: + print(f"cant read file: {filepath}: {e}") + data = {} + jobs_list = [] + for user in users: + if user.email not in data: + print(f"{user.email} missing from jobs") + continue + user_jobs = data[user.email] + for user_job in user_jobs: + test_job = TestJob(**user_job) + if user._client_cache is None: + user.client = high_client + test_job.client = user.client + jobs_list.append(test_job) + return jobs_list diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/sync_helpers.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/sync_helpers.py new file mode 100644 index 00000000000..e1d558016ba --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/sync_helpers.py @@ -0,0 +1,190 @@ +# third party +from tqdm import tqdm + +# syft absolute +import syft as sy +from syft.client.datasite_client import DatasiteClient +from syft.client.syncing import compare_clients +from syft.service.code.user_code import UserCode +from syft.service.job.job_stash import Job +from syft.service.job.job_stash import JobStatus +from syft.service.request.request import Request +from syft.service.request.request import RequestStatus +from syft.service.sync.diff_state import ObjectDiffBatch +from syft.types.result import Err + + +def deny_requests_without_autosync_tag(client_low: DatasiteClient): + # Deny all requests that are not autosync + requests = client_low.requests.get_all() + if isinstance(requests, sy.SyftError): + print(requests) + return + + denied_requests = [] + for request in tqdm(requests): + if request.status != RequestStatus.PENDING: + continue + if "autosync" not in request.tags: + request.deny( + reason="This request has been denied automatically. " + "Please use the designated API to submit your request." 
+ ) + denied_requests.append(request.id) + print(f"Denied {len(denied_requests)} requests without autosync tag") + + +def is_request_to_sync(batch: ObjectDiffBatch) -> bool: + # True if this is a new low-side request + # TODO add condition for sql requests/usercodes + low_request = batch.root.low_obj + return ( + isinstance(low_request, Request) + and batch.status == "NEW" + and "autosync" in low_request.tags + ) + + +def is_job_to_sync(batch: ObjectDiffBatch): + # True if this is a new high-side job that is either COMPLETED or ERRORED + if batch.status != "NEW": + return False + if not isinstance(batch.root.high_obj, Job): + return False + job = batch.root.high_obj + return job.status in (JobStatus.ERRORED, JobStatus.COMPLETED) + + +def execute_requests( + client_high: DatasiteClient, request_ids: list[sy.UID] +) -> dict[sy.UID, Job]: + jobs_by_request_id = {} + for request_id in request_ids: + request = client_high.requests.get_by_uid(request_id) + if not isinstance(request, Request): + continue + + code = request.code + if not isinstance(code, UserCode): + continue + + func_name = request.code.service_func_name + api_func = getattr(client_high.code, func_name, None) + if api_func is None: + continue + + job = api_func(blocking=False) + jobs_by_request_id[request_id] = job + + return jobs_by_request_id + + +def deny_failed_jobs( + client_low: DatasiteClient, + jobs: list[Job], +) -> None: + # NOTE no syncing is needed, requests are denied on the low side + denied_requests = [] + + for job in jobs: + if job.status != JobStatus.ERRORED: + continue + + error_result = job.result + if isinstance(error_result, Err): + error_msg = error_result.err_value + else: + error_msg = "An unknown error occurred, please check the Job logs for more information." + + code_id = job.user_code_id + if code_id is None: + continue + requests = client_low.requests.get_by_usercode_id(code_id) + if isinstance(requests, list) and len(requests) > 0: + request = requests[0] + request.deny(reason=f"Execution failed: {error_msg}") + denied_requests.append(request.id) + else: + print(f"Failed to deny request for job {job.id}") + + print(f"Denied {len(denied_requests)} failed requests") + + +def sync_finished_jobs( + client_low: DatasiteClient, + client_high: DatasiteClient, +) -> dict[sy.UID, sy.SyftError | sy.SyftSuccess] | sy.SyftError: + sync_job_results = {} + synced_jobs = [] + diff = compare_clients( + from_client=client_high, to_client=client_low, include_types=["job"] + ) + if isinstance(diff, sy.SyftError): + print(diff) + return diff + + for batch in diff.batches: + if is_job_to_sync(batch): + job = batch.root.high_obj + + w = batch.resolve(build_state=False) + share_result = w.click_share_all_private_data() + if isinstance(share_result, sy.SyftError): + sync_job_results[job.id] = share_result + continue + sync_result = w.click_sync() + + synced_jobs.append(job) + sync_job_results[job.id] = sync_result + + print(f"Sharing {len(sync_job_results)} new results") + deny_failed_jobs(client_low, synced_jobs) + return sync_job_results + + +def sync_new_requests( + client_low: DatasiteClient, + client_high: DatasiteClient, +) -> dict[sy.UID, sy.SyftSuccess | sy.SyftError] | sy.SyftError: + sync_request_results = {} + diff = compare_clients( + from_client=client_low, to_client=client_high, include_types=["request"] + ) + if isinstance(diff, sy.SyftError): + print(diff) + return sync_request_results + print(f"{len(diff.batches)} request batches found") + for batch in tqdm(diff.batches): + if is_request_to_sync(batch): + 
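+            # a NEW low-side request tagged "autosync": remember its id and sync the batch to the high side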
request_id = batch.root.low_obj.id + w = batch.resolve(build_state=False) + result = w.click_sync() + sync_request_results[request_id] = result + return sync_request_results + + +def sync_and_execute_new_requests( + client_low: DatasiteClient, client_high: DatasiteClient +) -> None: + sync_results = sync_new_requests(client_low, client_high) + if isinstance(sync_results, sy.SyftError): + print(sync_results) + return + + request_ids = [ + uid for uid, res in sync_results.items() if isinstance(res, sy.SyftSuccess) + ] + print(f"Synced {len(request_ids)} new requests") + + jobs_by_request = execute_requests(client_high, request_ids) + print(f"Started {len(jobs_by_request)} new jobs") + + +def auto_sync(client_low: DatasiteClient, client_high: DatasiteClient) -> None: + print("Starting auto sync") + print("Denying non tagged jobs") + deny_requests_without_autosync_tag(client_low) + print("Syncing and executing") + sync_and_execute_new_requests(client_low, client_high) + sync_finished_jobs(client_low, client_high) + print("Finished auto sync") diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/worker_helpers.py b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/worker_helpers.py new file mode 100644 index 00000000000..3c2667fecc8 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_helpers/worker_helpers.py @@ -0,0 +1,86 @@ +# syft absolute +import syft as sy + + +def build_and_launch_worker_pool_from_docker_str( + environment: str, + client: sy.DatasiteClient, + worker_pool_name: str, + custom_pool_pod_annotations: dict, + custom_pool_pod_labels: dict, + worker_dockerfile: str, + external_registry: str, + docker_tag: str, + scale_to: int, +): + result = client.api.services.image_registry.add(external_registry) + assert "success" in result.message # nosec: B101 + + # For some reason, when using k9s, result.value is empty so can't use the below line + # local_registry = result.value + local_registry = client.api.services.image_registry[0] + + docker_config = sy.DockerWorkerConfig(dockerfile=worker_dockerfile) + assert docker_config.dockerfile == worker_dockerfile # nosec: B101 + submit_result = client.api.services.worker_image.submit(worker_config=docker_config) + print(submit_result.message) + assert "success" in submit_result.message # nosec: B101 + + worker_image = submit_result.value + + if environment == "remote": + docker_build_result = client.api.services.worker_image.build( + image_uid=worker_image.id, + tag=docker_tag, + registry_uid=local_registry.id, + ) + print(docker_build_result) + + if environment == "remote": + push_result = client.api.services.worker_image.push(worker_image.id) + print(push_result) + + result = client.api.services.worker_pool.launch( + pool_name=worker_pool_name, + image_uid=worker_image.id, + num_workers=1, + pod_annotations=custom_pool_pod_annotations, + pod_labels=custom_pool_pod_labels, + ) + print(result) + # assert 'success' in str(result.message) + + if environment == "remote": + result = client.worker_pools.scale(number=scale_to, pool_name=worker_pool_name) + print(result) + + +def launch_worker_pool_from_docker_tag_and_registry( + environment: str, + client: sy.DatasiteClient, + worker_pool_name: str, + custom_pool_pod_annotations: dict, + custom_pool_pod_labels: dict, + docker_tag: str, + external_registry: str, + scale_to: int = 1, +): + res = client.api.services.image_registry.add(external_registry) + assert "success" in res.message # nosec: B101 + docker_config = sy.PrebuiltWorkerConfig(tag=docker_tag) + 
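+    # a prebuilt image only needs to be registered with the server; unlike the
+    # Dockerfile-based pool above, there is no build or push step before launching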
image_result = client.api.services.worker_image.submit(worker_config=docker_config) + assert "success" in res.message # nosec: B101 + worker_image = image_result.value + + launch_result = client.api.services.worker_pool.launch( + pool_name=worker_pool_name, + image_uid=worker_image.id, + num_workers=1, + pod_annotations=custom_pool_pod_annotations, + pod_labels=custom_pool_pod_labels, + ) + if environment == "remote" and scale_to > 1: + result = client.worker_pools.scale(number=scale_to, pool_name=worker_pool_name) + print(result) + + return launch_result diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/000-start-and-configure-server-and-admins.ipynb b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/000-start-and-configure-server-and-admins.ipynb new file mode 100644 index 00000000000..f1e17f0f7cb --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/000-start-and-configure-server-and-admins.ipynb @@ -0,0 +1,285 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# # stdlib\n", + "# import os\n", + "\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# isort: off\n", + "# stdlib\n", + "from os import environ as env\n", + "import os\n", + "import sys\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "\n", + "# from syft import get_helpers # noqa: F401\n", + "from syft.util.util import (\n", + " find_base_dir_with_tox_ini,\n", + " get_caller_file_path,\n", + " is_interpreter_jupyter,\n", + ")\n", + "\n", + "\n", + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(\n", + " base_dir, \"notebooks/scenarios/bigquery/upgradability/0.9.1_helpers\"\n", + " )\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()\n", + "\n", + "# third party\n", + "from email_helpers import get_email_server\n", + "# isort: on" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# when in k8s these are the default values\n", + "ROOT_EMAIL = \"admin@bigquery.org\"\n", + "ROOT_PASSWORD = \"bqpw\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# in case we are not in k8s we set them here for orchestra to use\n", + "env[\"DEFAULT_ROOT_EMAIL\"] = ROOT_EMAIL\n", + "env[\"DEFAULT_ROOT_PASSWORD\"] = ROOT_PASSWORD" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " reset=True,\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be 
spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server(reset=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "root_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ROOT_EMAIL, password=ROOT_PASSWORD\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create new admin client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create\n", + "root_client.register(\n", + " name=\"second admin\", email=ADMIN_EMAIL, password=ADMIN_PW, password_verify=ADMIN_PW\n", + ")\n", + "# update role\n", + "new_user_id = root_client.users.search(email=ADMIN_EMAIL)[0].id\n", + "root_client.users.update(uid=new_user_id, role=\"admin\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# We cannot delete the root client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "root_admin_id = root_client.users.search(email=ROOT_EMAIL)[0].id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException):\n", + " high_client.users.delete(root_admin_id)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create ephemeral admin and delete it" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# register 2nd new admin (to delete)\n", + "user_email, user_pw = \"admin3@bigquery.org\", \"bqpw3\"\n", + "## create\n", + "root_client.register(\n", + " name=\"x\", email=user_email, password=user_pw, password_verify=user_pw\n", + ")\n", + "## update role\n", + "new_user_id2 = root_client.users.search(email=user_email)[0].id\n", + "root_client.users.update(uid=new_user_id, role=\"admin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "root_client.users.delete(new_user_id2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "syft_3.12", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} 
diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/001-scale-delete-worker-pools.ipynb b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/001-scale-delete-worker-pools.ipynb new file mode 100644 index 00000000000..0caab490376 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/001-scale-delete-worker-pools.ipynb @@ -0,0 +1,399 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# isort: off\n", + "# stdlib\n", + "import os\n", + "import sys\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.util import (\n", + " find_base_dir_with_tox_ini,\n", + " get_caller_file_path,\n", + " is_interpreter_jupyter,\n", + ")\n", + "\n", + "\n", + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(\n", + " base_dir, \"notebooks/scenarios/bigquery/upgradability/0.9.1_helpers\"\n", + " )\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()\n", + "\n", + "# third party\n", + "from email_helpers import Timeout\n", + "from email_helpers import get_email_server\n", + "# isort: on" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "\n", + "num_workers = int(os.environ.get(\"NUM_TEST_WORKERS\", 1))\n", + "\n", + "ROOT_EMAIL = \"admin@bigquery.org\"\n", + "ROOT_PASSWORD = \"bqpw\"\n", + "environment" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "### Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=num_workers, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server(reset=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ROOT_EMAIL, password=ROOT_PASSWORD\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + 
"high_client.worker_pools" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "default_worker_pool = high_client.worker_pools.get_by_name(\"default-pool\")\n", + "default_worker_pool" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "### Scale Worker pool" + ] + }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, + "source": [ + "##### Scale up" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# Scale to 1\n", + "if environment == \"remote\":\n", + " high_client.api.worker_pool.scale(\n", + " number=num_workers, pool_name=default_worker_pool.name\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.worker_pool[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# Scale up workers\n", + "if environment == \"remote\":\n", + " scale_up_result = high_client.api.worker_pool.scale(\n", + " number=5, pool_name=default_worker_pool.name\n", + " )\n", + " if environment == \"remote\":\n", + " assert scale_up_result, scale_up_result\n", + "\n", + " assert (\n", + " high_client.api.services.worker_pool[default_worker_pool.name].max_count\n", + " == 5\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "##### Scale down" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# Scale down workers, this gracefully shutdowns the consumers\n", + "if environment == \"remote\":\n", + " scale_down_result = high_client.api.worker_pool.scale(\n", + " number=num_workers, pool_name=default_worker_pool.name\n", + " )\n", + " assert scale_down_result, scale_down_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + "\n", + " def has_worker_scaled_down():\n", + " return (\n", + " high_client.api.worker_pool[default_worker_pool.name].max_count\n", + " == num_workers\n", + " )\n", + "\n", + " worker_scale_timeout = Timeout(timeout_duration=20)\n", + " worker_scale_timeout.run_with_timeout(has_worker_scaled_down)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + " assert (\n", + " high_client.api.services.worker_pool[default_worker_pool.name].max_count\n", + " == num_workers\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "#### Delete Worker Pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "pool_delete_result = high_client.api.services.worker_pool.delete(\n", + " pool_name=default_worker_pool.name\n", + ")\n", + "pool_delete_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(KeyError):\n", + " _ = high_client.api.services.worker_pool[default_worker_pool.name]" + ] + }, + { + "cell_type": "markdown", + "id": "22", + "metadata": {}, + "source": [ + "#### Re-launch the default worker pool" + ] + }, + { + "cell_type": "code", + "execution_count": 
null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "default_worker_image = default_worker_pool.image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "launch_result = high_client.api.services.worker_pool.launch(\n", + " pool_name=default_worker_pool.name,\n", + " image_uid=default_worker_image.id,\n", + " num_workers=num_workers,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "assert high_client.api.services.worker_pool[default_worker_pool.name]\n", + "assert (\n", + " high_client.api.services.worker_pool[default_worker_pool.name].max_count\n", + " == num_workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "syft", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/010-setup-bigquery-pool.ipynb b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/010-setup-bigquery-pool.ipynb new file mode 100644 index 00000000000..d40acbcb36d --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/010-setup-bigquery-pool.ipynb @@ -0,0 +1,569 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# isort: off\n", + "# stdlib\n", + "import os\n", + "import sys\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.util import (\n", + " find_base_dir_with_tox_ini,\n", + " get_caller_file_path,\n", + " is_interpreter_jupyter,\n", + ")\n", + "\n", + "\n", + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(\n", + " base_dir, \"notebooks/scenarios/bigquery/upgradability/0.9.1_helpers\"\n", + " )\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()\n", + "from syft import test_settings\n", + "\n", + "# third party\n", + "from email_helpers import get_email_server\n", + "# isort: on" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "environment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "_, smtp_server = get_email_server(reset=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"\n", + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Submit images and build pools" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Add registry" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.worker_pools" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.worker_pools.get_all()) == 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "external_registry = test_settings.get(\"external_registry\", default=\"docker.io\")\n", + "external_registry" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = high_client.api.services.image_registry.add(external_registry)\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "image_registry_list = high_client.api.services.image_registry.get_all()\n", + "image_registry_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "local_registry = image_registry_list[0]\n", + "local_registry" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Upload Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dockerfile_list = high_client.images.get_all()\n", + "dockerfile_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "base_worker_image = next(\n", + " (\n", + " image\n", + " for image in dockerfile_list\n", + " if image.is_prebuilt and \"syft-backend\" in str(image.config)\n", + " ),\n", + " None,\n", + ")\n", + "base_worker_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "base_worker_image.image_identifier" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "worker_dockerfile = f\"\"\"\n", + "FROM {str(base_worker_image.image_identifier)}\n", + "\n", + "RUN uv pip install db-dtypes google-cloud-bigquery\n", + "\n", + "\"\"\".strip()\n", + "worker_dockerfile" + ] + }, + { + "cell_type": "code", 
+ "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "docker_config = sy.DockerWorkerConfig(dockerfile=worker_dockerfile)\n", + "assert docker_config.dockerfile == worker_dockerfile" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "submit_result = high_client.api.services.worker_image.submit(\n", + " worker_config=docker_config\n", + ")\n", + "submit_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dockerfile_list = high_client.images.get_all()\n", + "dockerfile_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "worker_image = next(\n", + " (\n", + " image\n", + " for image in dockerfile_list\n", + " if not image.is_prebuilt and image.config.dockerfile == worker_dockerfile\n", + " ),\n", + " None,\n", + ")\n", + "worker_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(worker_image.config.dockerfile)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# worker_docker_tag = f\"openmined/bigquery:{sy.__version__}\"\n", + "# worker_docker_tag" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Build image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "docker_tag = str(base_worker_image.image_identifier).replace(\n", + " \"backend\", \"worker-bigquery\"\n", + ")\n", + "docker_tag" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + " docker_build_result = high_client.api.services.worker_image.build(\n", + " image_uid=worker_image.id,\n", + " tag=docker_tag,\n", + " registry_uid=local_registry.id,\n", + " )\n", + " print(docker_build_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + " push_result = high_client.api.services.worker_image.push(worker_image.id)\n", + " print(push_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dockerfile_list = high_client.images.get_all()\n", + "dockerfile_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "docker_config = sy.PrebuiltWorkerConfig(tag=docker_tag)\n", + "docker_config" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# overwrite it for now Mongo ignore\n", + "result = high_client.api.services.worker_image.submit(worker_config=docker_config)\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dockerfile_list = high_client.images.get_all()\n", + "dockerfile_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: fix\n", + "# something is wrong here, sometimes it has the non prebuilt one\n", + "# other times it only has the one we built; in python there are multiple\n", + "# for now lets just use which ever one has worker-bigquery in its\n", + "# identifier so we can create a k8s worker pool" + ] + }, + { + "cell_type": "code", 
+ "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "worker_image = next(\n", + " (\n", + " image\n", + " for image in dockerfile_list\n", + " if \"worker-bigquery\" in str(image.image_identifier)\n", + " ),\n", + " None,\n", + ")\n", + "worker_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert worker_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "worker_pool_name = \"bigquery-pool\"\n", + "custom_pool_pod_annotations = {\"bigquery-custom-pool\": \"Pod annotation for bigquery\"}\n", + "custom_pool_pod_labels = {\"bigquery-custom-pool\": \"Pod_label_for_bigquery\"}\n", + "\n", + "num_workers = int(os.environ.get(\"NUM_TEST_WORKERS\", 1))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Launch pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = high_client.api.services.worker_pool.launch(\n", + " pool_name=worker_pool_name,\n", + " image_uid=worker_image.id,\n", + " num_workers=1,\n", + " pod_annotations=custom_pool_pod_annotations,\n", + " pod_labels=custom_pool_pod_labels,\n", + ")\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Scale pool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if environment == \"remote\":\n", + " result = high_client.worker_pools.scale(\n", + " number=num_workers, pool_name=worker_pool_name\n", + " )\n", + " print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.worker_pools.get_all()) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.settings.allow_guest_signup(enable=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.api.services.user.get_all()) == 2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/011-users-emails-passwords.ipynb b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/011-users-emails-passwords.ipynb new file mode 100644 index 00000000000..460840e4b87 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/011-users-emails-passwords.ipynb @@ -0,0 +1,675 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] 
= \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"\n", + "# # !pip install aiosmtpd\n", + "# # !uv pip install aiosmtpd" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# isort: off\n", + "# stdlib\n", + "import os\n", + "\n", + "import sys\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.util import (\n", + " find_base_dir_with_tox_ini,\n", + " get_caller_file_path,\n", + " is_interpreter_jupyter,\n", + ")\n", + "\n", + "\n", + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(\n", + " base_dir, \"notebooks/scenarios/bigquery/upgradability/0.9.1_helpers\"\n", + " )\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()\n", + "# third party\n", + "from email_helpers import SENDER\n", + "from email_helpers import create_user\n", + "from email_helpers import get_email_server\n", + "from email_helpers import make_user\n", + "from email_helpers import save_users\n", + "# isort: on" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"\n", + "\n", + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "environment" + ] + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "# Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "high_client.users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: this should show SyftSuccess?\n", + "high_client.api.services.settings.enable_notifications(\n", + " email_sender=SENDER,\n", + " email_server=\"localhost\",\n", + " email_port=\"9025\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# # TODO: this should show SyftSuccess?\n", + "# high_client.api.services.settings.disable_notifications()" + ] + }, + { + "cell_type": "markdown", + "id": 
"10", + "metadata": {}, + "source": [ + "# Register users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "num_users = int(os.environ.get(\"NUM_TEST_USERS\", 5))\n", + "print(f\"registering {num_users} users\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "users = []\n", + "email_disable_index = 0\n", + "reset_password_index = 1\n", + "for i in range(num_users):\n", + " user = make_user()\n", + " user._email_server = email_server\n", + " create_user(high_client, user)\n", + " user.client = high_client\n", + " if email_disable_index == i:\n", + " user.email_disabled = True\n", + " if reset_password_index == i:\n", + " user.reset_password = True\n", + " users.append(user)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "save_users(users)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import asyncio\n", + "\n", + "await asyncio.sleep(5)" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "## Verify Emails are sent" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# everyone gets a welcome email\n", + "server_name = high_client.name\n", + "for user in users:\n", + " emails = user.emails\n", + " assert len(emails) == 1\n", + " welcome_email = user.emails[0]\n", + " assert welcome_email.email_from == SENDER\n", + " assert len(welcome_email.email_to) == 1\n", + " assert welcome_email.email_to[0] == user.email\n", + " assert f\"Welcome to {server_name}\" in welcome_email.email_content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "# one user disables notifications\n", + "# one user requests a password reset\n", + "no_email_user = None\n", + "reset_password_user = None\n", + "for user in users:\n", + " user.client = high_client # get user client\n", + " if user.email_disabled:\n", + " no_email_user = user\n", + " # disable for this user only\n", + " user.client.api.notifications.deactivate()\n", + "\n", + " if user.reset_password:\n", + " # ask admin for forgot password flow\n", + " user.client.guest().forgot_password(email=user.email)\n", + " assert \"Password Reset Requested\" in user.emails[1].email_content\n", + " reset_password_user = user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "ds0 = users[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "ds0_user = ds0.client.account\n", + "ds0_user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "# with sy.raises(\n", + "# sy.SyftException(public_message=\"*tried to update user*\"\n", + "# ), show=True): this is different on k8s no idea why\n", + "with sy.raises(sy.SyftException, show=True):\n", + " ds0.client.users.update(uid=ds0_user.id, role=\"admin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "# with sy.raises(sy.SyftException(public_message=\"*tried to update user*\"), 
show=True):\n", + "with sy.raises(sy.SyftException, show=True):\n", + " ds0_user.update(role=\"admin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: test disabling and re-enabling all notifications" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "# high_client.api.services.settings.disable_notifications()\n", + "# high_client.api.services.settings.enable_notifications()" + ] + }, + { + "cell_type": "markdown", + "id": "24", + "metadata": {}, + "source": [ + "## Test reset password" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "# # This is necessary as it sets the new token value in user.reset_token\n", + "token = reset_password_user.get_token()\n", + "token" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": {}, + "outputs": [], + "source": [ + "# This is necessary as it sets the new password value in user.new_password\n", + "passw = reset_password_user.make_new_password()\n", + "passw" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "assert token\n", + "assert passw" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "output = reset_password_user.client.guest().reset_password(\n", + " token=reset_password_user.reset_token, new_password=reset_password_user.new_password\n", + ")\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(output, sy.SyftSuccess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30", + "metadata": {}, + "outputs": [], + "source": [ + "# relogin\n", + "reset_password_user.relogin()\n", + "# reset_password_user.client = reset_password_user.client" + ] + }, + { + "cell_type": "markdown", + "id": "31", + "metadata": {}, + "source": [ + "## Reset password second time" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user.client.guest().forgot_password(email=reset_password_user.email)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33", + "metadata": {}, + "outputs": [], + "source": [ + "output = reset_password_user.client.guest().reset_password(\n", + " token=reset_password_user.get_token(),\n", + " new_password=reset_password_user.make_new_password(),\n", + ")\n", + "output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(output, sy.SyftSuccess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35", + "metadata": {}, + "outputs": [], + "source": [ + "# print(f\"token:\\t\\t {reset_password_user.reset_token}\\n\\\n", + "# password:\\t {reset_password_user.password}\\n\\\n", + "# new password:\\t {reset_password_user.new_password}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user.update_password()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "37", + "metadata": {}, + "outputs": [], + "source": [ + "# print(f\"token:\\t\\t 
{reset_password_user.reset_token}\\n\\\n", + "# password:\\t {reset_password_user.password}\\n\\\n", + "# new password:\\t {reset_password_user.new_password}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38", + "metadata": {}, + "outputs": [], + "source": [ + "# relogin\n", + "reset_password_user.relogin()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39", + "metadata": {}, + "outputs": [], + "source": [ + "save_users(users)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "40", + "metadata": {}, + "outputs": [], + "source": [ + "reset_password_user" + ] + }, + { + "cell_type": "markdown", + "id": "41", + "metadata": {}, + "source": [ + "## Reduce token expiration and try resetting" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42", + "metadata": {}, + "outputs": [], + "source": [ + "# Variable is poorly named, token expiration time is in seconds and not minutes\n", + "high_client.api.services.settings.update(pwd_token_config={\"token_exp_min\": 3})\n", + "high_client.refresh()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43", + "metadata": {}, + "outputs": [], + "source": [ + "# reset_password_user.client.guest().forgot_password(email=reset_password_user.email)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44", + "metadata": {}, + "outputs": [], + "source": [ + "# Wait 3 seconds to ensure token expires\n", + "await asyncio.sleep(3)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45", + "metadata": {}, + "outputs": [], + "source": [ + "# This should throw a SyftError because we waited too long\n", + "# output = reset_password_user.client.guest().reset_password(\n", + "# token=reset_password_user.get_token(),\n", + "# new_password=reset_password_user.make_new_password(),\n", + "# )\n", + "# output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "46", + "metadata": {}, + "outputs": [], + "source": [ + "# assert isinstance(output, sy.SyftError)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "47", + "metadata": {}, + "outputs": [], + "source": [ + "# # relogin\n", + "# with sy.raises(sy.SyftException, show=True):\n", + "# reset_password_user.relogin()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "48", + "metadata": {}, + "outputs": [], + "source": [ + "# Set things back to the the default settings\n", + "high_client.api.services.settings.update(pwd_token_config={\"token_exp_min\": 1800})\n", + "high_client.refresh()" + ] + }, + { + "cell_type": "markdown", + "id": "49", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50", + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51", + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "52", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git 
a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/020-configure-api.ipynb b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/020-configure-api.ipynb new file mode 100644 index 00000000000..70df8a30d8e --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/020-configure-api.ipynb @@ -0,0 +1,658 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# set to use the live APIs\n", + "# os.environ[\"TEST_BIGQUERY_APIS_LIVE\"] = \"True\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# isort: off\n", + "# stdlib\n", + "import os\n", + "import sys\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.util import (\n", + " find_base_dir_with_tox_ini,\n", + " get_caller_file_path,\n", + " is_interpreter_jupyter,\n", + ")\n", + "\n", + "\n", + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(\n", + " base_dir, \"notebooks/scenarios/bigquery/upgradability/0.9.1_helpers\"\n", + " )\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()\n", + "from syft import test_settings\n", + "\n", + "# third party\n", + "from apis import make_schema\n", + "from apis import make_submit_query\n", + "from apis import make_test_query\n", + "\n", + "# run email server\n", + "from email_helpers import get_email_server\n", + "# isort: on" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "this_worker_pool_name = \"bigquery-pool\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": 
[ + "assert len(high_client.worker_pools.get_all()) == 2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create `test_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# !pip list | grep bigquery" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# !uv pip install db-dtypes google-cloud-bigquery" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Look up the worker pools and identify the name of the one that has the required packages\n", + "# After, bind the endpoint to that workerpool\n", + "high_client.worker_pools" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mock_func = make_test_query(\n", + " settings={\n", + " \"rate_limiter_enabled\": True,\n", + " \"calls_per_min\": 10,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "private_func = make_test_query(\n", + " settings={\n", + " \"rate_limiter_enabled\": False,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_endpoint = sy.TwinAPIEndpoint(\n", + " path=\"bigquery.test_query\",\n", + " description=\"This endpoint allows to query Bigquery storage via SQL queries.\",\n", + " private_function=private_func,\n", + " mock_function=mock_func,\n", + " worker_pool=this_worker_pool_name,\n", + ")\n", + "\n", + "high_client.custom_api.add(endpoint=new_endpoint)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Update `test_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Here, we update the endpoint to timeout after 100s (rather the default of 60s)\n", + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.test_query\", endpoint_timeout=120\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.test_query\", hide_mock_definition=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Test `test_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dataset_1 = test_settings.get(\"dataset_1\", default=\"dataset_1\")\n", + "dataset_2 = test_settings.get(\"dataset_2\", default=\"dataset_2\")\n", + "table_1 = test_settings.get(\"table_1\", default=\"table_1\")\n", + "table_2 = test_settings.get(\"table_2\", default=\"table_2\")\n", + "table_2_col_id = test_settings.get(\"table_2_col_id\", default=\"table_id\")\n", + "table_2_col_score = test_settings.get(\"table_2_col_score\", default=\"colname\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test mock version\n", + "result = high_client.api.services.bigquery.test_query.mock(\n", + " sql_query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 10\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 10" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "# Test private version\n", + "result = high_client.api.services.bigquery.test_query.private(\n", + " sql_query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 10\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 10" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test mock version for wrong queries\n", + "with sy.raises(\n", + " sy.SyftException(public_message=\"*must be qualified with a dataset*\"), show=True\n", + "):\n", + " _ = high_client.api.services.bigquery.test_query.mock(\n", + " sql_query=\"SELECT * FROM invalid_table LIMIT 1\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test private version\n", + "result = high_client.api.services.bigquery.test_query.private(\n", + " sql_query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 1\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Inspect endpoint state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Inspect the context state on an endpoint\n", + "state = high_client.api.services.bigquery.test_query.mock.context.state\n", + "state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "len(state[ADMIN_EMAIL])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(state[ADMIN_EMAIL]) >= 2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create `schema` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "schema_function = make_schema(\n", + " settings={\n", + " \"calls_per_min\": 5,\n", + " },\n", + " worker_pool=this_worker_pool_name,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.add(endpoint=schema_function)\n", + "high_client.refresh()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Test `schema` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Testing schema\n", + "result = high_client.api.services.bigquery.schema()\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(result) == 23" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create `submit_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "submit_query_function = make_submit_query(\n", + " settings={}, worker_pool=this_worker_pool_name\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.add(endpoint=submit_query_function)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.api.update(\n", + " 
endpoint_path=\"bigquery.submit_query\", hide_mock_definition=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.api_endpoints()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.custom_api.api_endpoints()) == 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.bigquery.test_query" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.bigquery.submit_query" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Test `submit_query` endpoint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Testing submit query\n", + "result = high_client.api.services.bigquery.submit_query(\n", + " func_name=\"my_func\",\n", + " query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 1\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test emails" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# assert (\n", + "# \"Job Failed\"\n", + "# in email_server.get_emails_for_user(user_email=ADMIN_EMAIL)[0].email_content\n", + "# )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# email_server.get_emails_for_user(user_email=\"admin@bigquery.org\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: change this to be all admins or configure which ones etc" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# assert len(email_server.get_emails_for_user(user_email=\"admin@bigquery.org\")) > 0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# assert \"Query submitted\" in result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/021-create-jobs.ipynb b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/021-create-jobs.ipynb new file mode 100644 index 00000000000..87a3033d9d8 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/021-create-jobs.ipynb @@ -0,0 +1,515 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + 
"metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "# import os\n", + "\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# use_live_bigquery = False\n", + "# os.environ[\"TEST_BIGQUERY_APIS_LIVE\"] = str(use_live_bigquery)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "# isort: off\n", + "# stdlib\n", + "from collections import Counter\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.util import (\n", + " find_base_dir_with_tox_ini,\n", + " get_caller_file_path,\n", + " is_interpreter_jupyter,\n", + ")\n", + "\n", + "\n", + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(\n", + " base_dir, \"notebooks/scenarios/bigquery/upgradability/0.9.1_helpers\"\n", + " )\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()\n", + "from syft.service.job.job_stash import JobStatus\n", + "\n", + "# third party\n", + "from email_helpers import get_email_server\n", + "\n", + "# isort: on\n", + "# third party\n", + "from job_helpers import TestJob\n", + "from job_helpers import create_jobs\n", + "from job_helpers import extract_code_path\n", + "from job_helpers import save_jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n", + "environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "SERVER_PORT = \"8080\"\n", + "SERVER_URL = f\"http://localhost:{SERVER_PORT}\"\n", + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "# Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=SERVER_PORT,\n", + " n_consumers=4, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server(reset=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + 
"source": [ + "# third party\n", + "from email_helpers import load_users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "[user.email for user in high_client.users.get_all()]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(high_client)" + ] + }, + { + "cell_type": "markdown", + "id": "12", + "metadata": {}, + "source": [ + "# Create jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": [ + "# Inspect job data (requests for these jobs to be created)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "num_jobs = int(os.environ.get(\"NUM_TEST_JOBS\", 10))\n", + "\n", + "jobs_data = create_jobs(users, total_jobs=num_jobs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "counts = Counter([j.job_type for j in jobs_data])\n", + "for k, v in counts.most_common():\n", + " print(f\"{k}: #{v}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "print(f\"{len(jobs_data)=}\")\n", + "\n", + "for job in jobs_data:\n", + " print(f\"{job.job_type=}, {job.should_succeed=}, {job.should_submit=}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "assert len(jobs_data) == num_jobs\n", + "assert all(isinstance(j, TestJob) for j in jobs_data)\n", + "assert all(job.client is not None for job in jobs_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs_data)" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": {}, + "source": [ + "# Submit jobs\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "admin_emails_before = len(email_server.get_emails_for_user(\"admin@bigquery.org\"))\n", + "print(f\"{admin_emails_before=}\")" + ] + }, + { + "cell_type": "markdown", + "id": "23", + "metadata": {}, + "source": [ + "## Test Succesful jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "jobs_submit_should_succeed = [j for j in jobs_data if j.should_submit]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "for job in jobs_submit_should_succeed:\n", + " client = job.client\n", + " response = client.api.services.bigquery.submit_query(\n", + " func_name=job.func_name, query=job.query\n", + " )\n", + " job.code_path = extract_code_path(response)" + ] + }, + { + "cell_type": "markdown", + "id": "26", + "metadata": {}, + "source": [ + "## Test failures" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "jobs_submit_should_fail = [j for j in jobs_data if not j.should_submit]" + ] + }, + { + "cell_type": "code", + "execution_count": null, 
+ "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "for job in jobs_submit_should_fail:\n", + " client = job.client\n", + "\n", + " with sy.raises(sy.SyftException):\n", + " client.api.services.bigquery.submit_query(\n", + " func_name=job.func_name, query=job.query\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "for job in jobs_data:\n", + " print(f\"Job {job.func_name:.20} {job.should_submit=}, {job.is_submitted=}\")\n", + "\n", + "assert all(job.is_submitted == job.should_submit for job in jobs_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30", + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs_data)" + ] + }, + { + "cell_type": "markdown", + "id": "31", + "metadata": {}, + "source": [ + "## Test: cannot execute submitted jobs yet" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32", + "metadata": {}, + "outputs": [], + "source": [ + "submitted_jobs = [job for job in jobs_data if job.should_submit]\n", + "job_execution_fns = [getattr(job.client.code, job.code_path) for job in submitted_jobs]\n", + "assert len(submitted_jobs) # failsafe for next tests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33", + "metadata": {}, + "outputs": [], + "source": [ + "for fn in job_execution_fns:\n", + " # blocking\n", + " with sy.raises(\n", + " sy.SyftException(public_message=\"*Your code is waiting for approval*\")\n", + " ):\n", + " result = fn()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34", + "metadata": {}, + "outputs": [], + "source": [ + "for fn in job_execution_fns:\n", + " # nonblocking\n", + " result_job = fn(blocking=False)\n", + " result_job.wait()\n", + " assert isinstance(result_job.result, sy.SyftError)\n", + " assert result_job.status == JobStatus.ERRORED" + ] + }, + { + "cell_type": "markdown", + "id": "35", + "metadata": {}, + "source": [ + "# Verify that admin has emails for submitted requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36", + "metadata": {}, + "outputs": [], + "source": [ + "num_should_submit = sum(j.should_submit for j in jobs_data)\n", + "admin_emails_after = len(email_server.get_emails_for_user(\"admin@bigquery.org\"))\n", + "print(\"admin emails after\", admin_emails_after)\n", + "assert admin_emails_after >= admin_emails_before + num_should_submit\n", + "# assert len(users_emails) > after_number_of_emails\n", + "# assert len(users_emails) == after_number_of_emails + 1" + ] + }, + { + "cell_type": "markdown", + "id": "37", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38", + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39", + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "40", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "syft_3.12", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff 
--git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/040-do-review-requests.ipynb b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/040-do-review-requests.ipynb new file mode 100644 index 00000000000..da95c7e5431 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/040-do-review-requests.ipynb @@ -0,0 +1,342 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# isort: off\n", + "# stdlib\n", + "import random\n", + "import os\n", + "import sys\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.util import (\n", + " find_base_dir_with_tox_ini,\n", + " get_caller_file_path,\n", + " is_interpreter_jupyter,\n", + ")\n", + "\n", + "\n", + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(\n", + " base_dir, \"notebooks/scenarios/bigquery/upgradability/0.9.1_helpers\"\n", + " )\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()\n", + "from syft.service.job.job_stash import Job\n", + "\n", + "# third party\n", + "from email_helpers import get_email_server\n", + "from job_helpers import approve_by_running\n", + "from job_helpers import get_job_emails\n", + "from job_helpers import get_request_for_job_info\n", + "# isort: on" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Start server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Review requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "from email_helpers import load_users\n", + "from job_helpers import load_jobs\n", + "from job_helpers import save_jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "high_client.requests.get_all_pending()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(high_client)\n", + "jobs_data = load_jobs(users, high_client)\n", + "all_requests = high_client.requests\n", + "submitted_jobs_data = [job for job in jobs_data if job.is_submitted]\n", + "n_emails_per_job_user = {\n", + " k: len(v)\n", + " for k, v in get_job_emails(submitted_jobs_data, high_client, email_server).items()\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO we should record whether it was approved or deposited\n", + "# and test doing both in either order as there might be a bug when\n", + "# force overwriting\n", + "# also changing deny to approve and back again" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Run or deny" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "submitted_jobs_data_should_succeed = [\n", + " j for j in submitted_jobs_data if j.should_succeed\n", + "]\n", + "submitted_jobs_data_should_fail = [\n", + " j for j in submitted_jobs_data if not j.should_succeed\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in submitted_jobs_data_should_succeed:\n", + " request = get_request_for_job_info(all_requests, job)\n", + " if random.randrange(2):\n", + " choice = \"approved with deposit_result\"\n", + " response = approve_by_running(request)\n", + " assert isinstance(response, Job)\n", + " else:\n", + " choice = \"approved\"\n", + " response = request.approve()\n", + " assert isinstance(response, sy.SyftSuccess)\n", + " print(f\"Job {job.func_name} should succeed: {job.should_succeed} and was {choice}\")\n", + " job.admin_reviewed = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in submitted_jobs_data_should_fail:\n", + " request = get_request_for_job_info(all_requests, job)\n", + " response = request.deny(\n", + " reason=f\"Your request {job.func_name} looks wrong, try again.\"\n", + " )\n", + " assert isinstance(response, sy.SyftSuccess)\n", + " assert not job.should_succeed\n", + " job.admin_reviewed = True" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Verify that users have new emails" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_n_emails_per_job_user = {\n", + " k: len(v)\n", + " for k, v in get_job_emails(submitted_jobs_data, high_client, email_server).items()\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# for user_email, new_count in new_n_emails_per_job_user.items():\n", + "# old_count = n_emails_per_job_user[user_email]\n", + "# assert new_count > old_count" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Save state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.requests.get_all_approved()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + 
"outputs": [], + "source": [ + "high_client.requests.get_all_rejected()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/050-ds-get-results.ipynb b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/050-ds-get-results.ipynb new file mode 100644 index 00000000000..81e0e288a43 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/050-ds-get-results.ipynb @@ -0,0 +1,263 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", + "# os.environ[\"DEV_MODE\"] = \"True\"\n", + "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# isort: off\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.util.util import (\n", + " find_base_dir_with_tox_ini,\n", + " get_caller_file_path,\n", + " is_interpreter_jupyter,\n", + ")\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "\n", + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(\n", + " base_dir, \"notebooks/scenarios/bigquery/upgradability/0.9.1_helpers\"\n", + " )\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()\n", + "# third party\n", + "from email_helpers import get_email_server\n", + "from email_helpers import load_users\n", + "from job_helpers import load_jobs\n", + "from job_helpers import save_jobs\n", + "# isort: on" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch server & login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "high_client = sy.login(\n", + " url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Download results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(high_client)\n", + "jobs = load_jobs(users, high_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# submitted_jobs = [job for job in jobs if job.is_submitted]\n", + "reviewed_jobs = [job for job in jobs if job.admin_reviewed]\n", + "reviewed_jobs_should_succeed = [j for j in reviewed_jobs if j.should_succeed]\n", + "reviewed_jobs_should_fail = [j for j in reviewed_jobs if not j.should_succeed]\n", + "\n", + "print(\n", + " f\"{len(reviewed_jobs)=}, {len(reviewed_jobs_should_succeed)=}, {len(reviewed_jobs_should_fail)=}\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: test jobs that were never approved\n", + "# they seem to give weird errors like\n", + "# \"You uploaded an ActionObject that is not yet in the blob storage\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_succeed:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + "\n", + " if isinstance(res, sy.SyftError):\n", + " raise sy.SyftException(public_message=\"Expected success, got error\")\n", + "\n", + " result = res.get()\n", + " job.result_as_expected = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_fail:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + "\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + " if isinstance(res, sy.SyftError):\n", + " job.result_as_expected = True\n", + " else:\n", + " raise sy.SyftException(public_message=f\"failed, job didnt raise {type(j)}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "save_jobs(jobs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO fix\n", + "expected_jobs = [job for job in jobs if job.result_as_expected]\n", + "print(f\"got expected_jobs: {len(expected_jobs)} == reviewed_jobs: {len(reviewed_jobs)}\")\n", + "assert len(reviewed_jobs) == len(expected_jobs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "smtp_server.stop()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + 
"name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/emails.json b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/emails.json new file mode 100644 index 00000000000..dc22a271960 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/emails.json @@ -0,0 +1,107 @@ +{ + "admin@bigquery.org": [ + { + "email_from": "noreply@openmined.org", + "email_to": ["admin@bigquery.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============5231718646336112954==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: admin@bigquery.org\r\nSubject: Datasite bigquery-high-migrations: A New Request (6091) has been received!\r\n\r\n--===============5231718646336112954==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n

Request Notification -- Hello, A new request has been submitted and requires your attention. Please review the details below: Request Details -- ID: 60916f4d58504b7f91508b9e11e2fe03; Submitted By: Michael Adams; Date: 2024-09-23 10:22:57; Status: PENDING; Changes: UserCodeStatusChange. Use request = client.api.services.request.get_by_uid(uid=sy.UID(\"60916f4d58504b7f91508b9e11e2fe03\")) to get this specific request, or view all requests with client.requests. If you did not expect this request or have concerns about it, please contact our support team immediately. This is an automated message, please do not reply directly to this email. For assistance, please contact our support team.\r\n--===============5231718646336112954==--\r\n" + },
[The remaining email_content values in emails.json reuse the same "Request Notification" HTML template from noreply@openmined.org; only the fields listed below differ.]
+ "admin@bigquery.org": eight further "A New Request (...) has been received!" notifications, Status PENDING, Changes UserCodeStatusChange --
+   (a850) a850407831c64b928c4733db4393bb84, Lisa Clark, 2024-09-23 10:23:08
+   (e0cf) e0cf4aec1e80443fb49b54ab1ee460c3, Anthony Simpson, 2024-09-23 10:23:18
+   (291e) 291ec8c54d594443ba5cdd39176c506d, Ian Ray, 2024-09-23 10:23:28
+   (3573) 35736f1adf34471a8d805e53d29883ed, Marie Russo, 2024-09-23 10:23:39
+   (14f6) 14f66d6a120e4c3aadb123b2e8dd8805, Michael Adams, 2024-09-23 10:23:49
+   (1418) 141875e5343a48c8aef17f642a8c91e7, Lisa Clark, 2024-09-23 10:23:59
+   (8fae) 8fae4312f60742b093c89a968d2a1453, Anthony Simpson, 2024-09-23 10:24:09
+   (b910) b91086795fda4b2aa8978ae1d4449f78, Ian Ray, 2024-09-23 10:24:20
+ "lisa-clark-fake@openmined.org": status-update notifications --
+   (a850) "has been approved", Status APPROVED, Changes UserCodeStatusChange, 2024-09-23 10:23:08
+   (a850) "has been approved", Status APPROVED, Changes UserCodeStatusChange,ActionStoreChange, 2024-09-23 10:23:08
+   (1418) "has been denied", Status REJECTED, Changes UserCodeStatusChange, 2024-09-23 10:23:59
+ "anthony-simpson-fake@openmined.org": status-update notifications --
+   (e0cf) "has been approved", Status APPROVED, Changes UserCodeStatusChange, 2024-09-23 10:23:18
+   (8fae) "has been denied", Status REJECTED, Changes UserCodeStatusChange, 2024-09-23 10:24:09
+ "ian-ray-fake@openmined.org": status-update notifications --
+   (291e) "has been approved", Status APPROVED, Changes UserCodeStatusChange, 2024-09-23 10:23:28
+   (291e) "has been approved", Status APPROVED, Changes UserCodeStatusChange,ActionStoreChange, 2024-09-23 10:23:28
+   (b910) "has been approved", Status APPROVED, Changes UserCodeStatusChange, 2024-09-23 10:24:20
+   (b910) "has been approved", Status APPROVED, Changes UserCodeStatusChange,ActionStoreChange, 2024-09-23 10:24:20
+ "marie-russo-fake@openmined.org": status-update notifications --
+   (3573) "has been approved", Status APPROVED, Changes UserCodeStatusChange, 2024-09-23 10:23:39
+} diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/emails_k8s.json b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/emails_k8s.json new file mode 100644 index 00000000000..0a009e522e3 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/emails_k8s.json @@ -0,0 +1,107 @@
[emails_k8s.json is the same fixture shape recorded against Datasite syft-dev-server; each entry below is an "A New Request (...) has been received!" notification from noreply@openmined.org to admin@bigquery.org, Status PENDING, Changes UserCodeStatusChange.]
+   (c052) c05221ad44e24dbbaea0be3f588731e7, Richard Porter, 2024-10-11 12:16:56
+   (8151) 81516ec734ba4aa5a6d8935c4bf74c79, Christine Peterson, 2024-10-11 12:17:02
+   (d422) d422c049c36e4318a8c6565d468be834, Jon Wilson, 2024-10-11 12:17:09
+   (f8ea) f8ea222576e045e0b6d930ef0f33e797, Tyrone Flores, 2024-10-11 12:17:15
\r\n \r\n \r\n--===============7147581347992530150==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["admin@bigquery.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============1854129607512735293==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: admin@bigquery.org\r\nSubject: Datasite syft-dev-server: A New Request (716f) has been received!\r\n\r\n--===============1854129607512735293==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

A new request has been submitted and requires your attention.\r\n Please review the details below:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 716f174008c94bcf8bd936e3852eeb3b

\r\n

\r\n Submitted By:\r\n Brian Bradford\r\n

\r\n

Date: 2024-10-11 12:17:21

\r\n

Status:

\r\n PENDING\r\n
\r\n

Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"716f174008c94bcf8bd936e3852eeb3b\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============1854129607512735293==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["admin@bigquery.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============8691567137314204328==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: admin@bigquery.org\r\nSubject: Datasite syft-dev-server: A New Request (3ade) has been received!\r\n\r\n--===============8691567137314204328==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

A new request has been submitted and requires your attention.\r\n Please review the details below:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 3ade7c04c4f0464fa8dc7076f6a24708

\r\n

\r\n Submitted By:\r\n Richard Porter\r\n

\r\n

Date: 2024-10-11 12:17:28

\r\n

Status:

\r\n PENDING\r\n
\r\n

Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"3ade7c04c4f0464fa8dc7076f6a24708\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============8691567137314204328==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["admin@bigquery.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============6271599080268850531==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: admin@bigquery.org\r\nSubject: Datasite syft-dev-server: A New Request (9b8d) has been received!\r\n\r\n--===============6271599080268850531==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

A new request has been submitted and requires your attention.\r\n Please review the details below:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 9b8d27b470984e93adb00f9223ca76c5

\r\n

\r\n Submitted By:\r\n Christine Peterson\r\n

\r\n

Date: 2024-10-11 12:17:35

\r\n

Status:

\r\n PENDING\r\n
\r\n

Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"9b8d27b470984e93adb00f9223ca76c5\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============6271599080268850531==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["admin@bigquery.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============3318936042735950465==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: admin@bigquery.org\r\nSubject: Datasite syft-dev-server: A New Request (7211) has been received!\r\n\r\n--===============3318936042735950465==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

A new request has been submitted and requires your attention.\r\n Please review the details below:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 7211bae27fc54b05a38e8a07d1b3a656

\r\n

\r\n Submitted By:\r\n Jon Wilson\r\n

\r\n

Date: 2024-10-11 12:17:41

\r\n

Status:

\r\n PENDING\r\n
\r\n

Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"7211bae27fc54b05a38e8a07d1b3a656\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============3318936042735950465==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["admin@bigquery.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============6286247889141890900==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: admin@bigquery.org\r\nSubject: Datasite syft-dev-server: A New Request (4558) has been received!\r\n\r\n--===============6286247889141890900==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

A new request has been submitted and requires your attention.\r\n Please review the details below:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 45580e2c053f448baf10c8d7cb8cbc05

\r\n

\r\n Submitted By:\r\n Tyrone Flores\r\n

\r\n

Date: 2024-10-11 12:17:48

\r\n

Status:

\r\n PENDING\r\n
\r\n

Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"45580e2c053f448baf10c8d7cb8cbc05\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============6286247889141890900==--\r\n" + } + ], + "christine-peterson-fake@openmined.org": [ + { + "email_from": "noreply@openmined.org", + "email_to": ["christine-peterson-fake@openmined.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============3572756550182992666==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: christine-peterson-fake@openmined.org\r\nSubject: Datasite syft-dev-server: Your request (8151) has been approved. \r\n\r\n--===============3572756550182992666==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

The status of your recent request has been updated.\r\n Below is the latest information regarding it:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 81516ec734ba4aa5a6d8935c4bf74c79

\r\n

\r\n Submitted By:\r\n Christine Peterson christine-peterson-fake@openmined.org\r\n

\r\n

Date: 2024-10-11 12:17:02

\r\n

Status:

\r\n APPROVED\r\n
\r\n

\r\n Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"81516ec734ba4aa5a6d8935c4bf74c79\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============3572756550182992666==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["christine-peterson-fake@openmined.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============8523461382233039019==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: christine-peterson-fake@openmined.org\r\nSubject: Datasite syft-dev-server: Your request (9b8d) has been denied. \r\n\r\n--===============8523461382233039019==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

The status of your recent request has been updated.\r\n Below is the latest information regarding it:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 9b8d27b470984e93adb00f9223ca76c5

\r\n

\r\n Submitted By:\r\n Christine Peterson christine-peterson-fake@openmined.org\r\n

\r\n

Date: 2024-10-11 12:17:35

\r\n

Status:

\r\n REJECTED\r\n
\r\n

\r\n Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"9b8d27b470984e93adb00f9223ca76c5\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============8523461382233039019==--\r\n" + } + ], + "jon-wilson-fake@openmined.org": [ + { + "email_from": "noreply@openmined.org", + "email_to": ["jon-wilson-fake@openmined.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============8578183122068990029==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: jon-wilson-fake@openmined.org\r\nSubject: Datasite syft-dev-server: Your request (d422) has been approved. \r\n\r\n--===============8578183122068990029==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

The status of your recent request has been updated.\r\n Below is the latest information regarding it:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: d422c049c36e4318a8c6565d468be834

\r\n

\r\n Submitted By:\r\n Jon Wilson jon-wilson-fake@openmined.org\r\n

\r\n

Date: 2024-10-11 12:17:09

\r\n

Status:

\r\n APPROVED\r\n
\r\n

\r\n Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"d422c049c36e4318a8c6565d468be834\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============8578183122068990029==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["jon-wilson-fake@openmined.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============0206824959708758077==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: jon-wilson-fake@openmined.org\r\nSubject: Datasite syft-dev-server: Your request (d422) has been approved. \r\n\r\n--===============0206824959708758077==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

The status of your recent request has been updated.\r\n Below is the latest information regarding it:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: d422c049c36e4318a8c6565d468be834

\r\n

\r\n Submitted By:\r\n Jon Wilson jon-wilson-fake@openmined.org\r\n

\r\n

Date: 2024-10-11 12:17:09

\r\n

Status:

\r\n APPROVED\r\n
\r\n

\r\n Changes:\r\n UserCodeStatusChange,ActionStoreChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"d422c049c36e4318a8c6565d468be834\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============0206824959708758077==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["jon-wilson-fake@openmined.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============5534833348616141732==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: jon-wilson-fake@openmined.org\r\nSubject: Datasite syft-dev-server: Your request (7211) has been denied. \r\n\r\n--===============5534833348616141732==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

The status of your recent request has been updated.\r\n Below is the latest information regarding it:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 7211bae27fc54b05a38e8a07d1b3a656

\r\n

\r\n Submitted By:\r\n Jon Wilson jon-wilson-fake@openmined.org\r\n

\r\n

Date: 2024-10-11 12:17:41

\r\n

Status:

\r\n REJECTED\r\n
\r\n

\r\n Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"7211bae27fc54b05a38e8a07d1b3a656\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============5534833348616141732==--\r\n" + } + ], + "tyrone-flores-fake@openmined.org": [ + { + "email_from": "noreply@openmined.org", + "email_to": ["tyrone-flores-fake@openmined.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============3003277406568338591==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: tyrone-flores-fake@openmined.org\r\nSubject: Datasite syft-dev-server: Your request (f8ea) has been approved. \r\n\r\n--===============3003277406568338591==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

The status of your recent request has been updated.\r\n Below is the latest information regarding it:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: f8ea222576e045e0b6d930ef0f33e797

\r\n

\r\n Submitted By:\r\n Tyrone Flores tyrone-flores-fake@openmined.org\r\n

\r\n

Date: 2024-10-11 12:17:15

\r\n

Status:

\r\n APPROVED\r\n
\r\n

\r\n Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"f8ea222576e045e0b6d930ef0f33e797\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============3003277406568338591==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["tyrone-flores-fake@openmined.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============7397070137918696406==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: tyrone-flores-fake@openmined.org\r\nSubject: Datasite syft-dev-server: Your request (f8ea) has been approved. \r\n\r\n--===============7397070137918696406==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

The status of your recent request has been updated.\r\n Below is the latest information regarding it:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: f8ea222576e045e0b6d930ef0f33e797

\r\n

\r\n Submitted By:\r\n Tyrone Flores tyrone-flores-fake@openmined.org\r\n

\r\n

Date: 2024-10-11 12:17:15

\r\n

Status:

\r\n APPROVED\r\n
\r\n

\r\n Changes:\r\n UserCodeStatusChange,ActionStoreChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"f8ea222576e045e0b6d930ef0f33e797\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============7397070137918696406==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["tyrone-flores-fake@openmined.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============4208462321236607557==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: tyrone-flores-fake@openmined.org\r\nSubject: Datasite syft-dev-server: Your request (4558) has been approved. \r\n\r\n--===============4208462321236607557==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

The status of your recent request has been updated.\r\n Below is the latest information regarding it:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 45580e2c053f448baf10c8d7cb8cbc05

\r\n

\r\n Submitted By:\r\n Tyrone Flores tyrone-flores-fake@openmined.org\r\n

\r\n

Date: 2024-10-11 12:17:48

\r\n

Status:

\r\n APPROVED\r\n
\r\n

\r\n Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"45580e2c053f448baf10c8d7cb8cbc05\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============4208462321236607557==--\r\n" + }, + { + "email_from": "noreply@openmined.org", + "email_to": ["tyrone-flores-fake@openmined.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============6070189532547198528==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: tyrone-flores-fake@openmined.org\r\nSubject: Datasite syft-dev-server: Your request (4558) has been approved. \r\n\r\n--===============6070189532547198528==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

The status of your recent request has been updated.\r\n Below is the latest information regarding it:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 45580e2c053f448baf10c8d7cb8cbc05

\r\n

\r\n Submitted By:\r\n Tyrone Flores tyrone-flores-fake@openmined.org\r\n

\r\n

Date: 2024-10-11 12:17:48

\r\n

Status:

\r\n APPROVED\r\n
\r\n

\r\n Changes:\r\n UserCodeStatusChange,ActionStoreChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"45580e2c053f448baf10c8d7cb8cbc05\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============6070189532547198528==--\r\n" + } + ], + "brian-bradford-fake@openmined.org": [ + { + "email_from": "noreply@openmined.org", + "email_to": ["brian-bradford-fake@openmined.org"], + "email_content": "Content-Type: multipart/alternative; boundary=\"===============7786129856721107806==\"\r\nMIME-Version: 1.0\r\nFrom: noreply@openmined.org\r\nTo: brian-bradford-fake@openmined.org\r\nSubject: Datasite syft-dev-server: Your request (716f) has been approved. \r\n\r\n--===============7786129856721107806==\r\nContent-Type: text/html; charset=\"us-ascii\"\r\nMIME-Version: 1.0\r\nContent-Transfer-Encoding: 7bit\r\n\r\n\r\n \r\n Access Request Notification\r\n \r\n \r\n \r\n
\r\n
\r\n Request Notification\r\n
\r\n
\r\n

Hello,

\r\n

The status of your recent request has been updated.\r\n Below is the latest information regarding it:

\r\n\r\n
\r\n
Request Details
\r\n
\r\n\r\n

ID: 716f174008c94bcf8bd936e3852eeb3b

\r\n

\r\n Submitted By:\r\n Brian Bradford brian-bradford-fake@openmined.org\r\n

\r\n

Date: 2024-10-11 12:17:21

\r\n

Status:

\r\n APPROVED\r\n
\r\n

\r\n Changes:\r\n UserCodeStatusChange\r\n

\r\n\r\n

Use:
\r\n \r\n request = client.api.services.request.get_by_uid(uid=sy.UID(\"716f174008c94bcf8bd936e3852eeb3b\"))\r\n
\r\n to get this specific request.\r\n

\r\n\r\n

Or you can view all requests with:
\r\n \r\n client.requests\r\n \r\n

\r\n
\r\n
\r\n

If you did not expect this request or have concerns about it,\r\n please contact our support team immediately.

\r\n
\r\n
\r\n This is an automated message, please do not reply directly to this email.
\r\n For assistance, please contact our support team.\r\n
\r\n
\r\n \r\n \r\n--===============7786129856721107806==--\r\n" + } + ] +} diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/jobs.json b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/jobs.json new file mode 100644 index 00000000000..4f9d247146a --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/jobs.json @@ -0,0 +1,180 @@ +{ + "michael-adams-fake@openmined.org": [ + { + "user_email": "michael-adams-fake@openmined.org", + "func_name": "simple_query_e92e5a", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM dataset_2.table_1\n GROUP BY table_id\n LIMIT 717808", + "job_type": "simple_query", + "settings": { + "dataset": "dataset_2", + "table": "table_1", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 717808 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_e92e5a", + "admin_reviewed": true, + "result_as_expected": true + }, + { + "user_email": "michael-adams-fake@openmined.org", + "func_name": "simple_query_4e10ab", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM dataset_1.table_1\n GROUP BY table_id\n LIMIT 512806", + "job_type": "simple_query", + "settings": { + "dataset": "dataset_1", + "table": "table_1", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 512806 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_4e10ab", + "admin_reviewed": true, + "result_as_expected": true + } + ], + "lisa-clark-fake@openmined.org": [ + { + "user_email": "lisa-clark-fake@openmined.org", + "func_name": "simple_query_5374dd", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM dataset_1.table_1\n GROUP BY table_id\n LIMIT 55958", + "job_type": "simple_query", + "settings": { + "dataset": "dataset_1", + "table": "table_1", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 55958 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_5374dd", + "admin_reviewed": true, + "result_as_expected": true + }, + { + "user_email": "lisa-clark-fake@openmined.org", + "func_name": "wrong_syntax_query_cf08ee", + "query": "SELECT * FROM table INCORRECT SYNTAX", + "job_type": "wrong_syntax_query", + "settings": {}, + "should_succeed": false, + "should_submit": true, + "code_path": "wrong_syntax_query_cf08ee", + "admin_reviewed": true, + "result_as_expected": true + } + ], + "anthony-simpson-fake@openmined.org": [ + { + "user_email": "anthony-simpson-fake@openmined.org", + "func_name": "simple_query_e46828", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM dataset_2.table_2\n GROUP BY table_id\n LIMIT 346093", + "job_type": "simple_query", + "settings": { + "dataset": "dataset_2", + "table": "table_2", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 346093 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_e46828", + "admin_reviewed": true, + "result_as_expected": true + }, + { + "user_email": "anthony-simpson-fake@openmined.org", + "func_name": "job_too_much_text_47c92e", + "query": 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "job_type": "job_too_much_text", + "settings": {}, + "should_succeed": false, + "should_submit": true, + "code_path": "job_too_much_text_47c92e", + "admin_reviewed": true, + "result_as_expected": true + } + ], + "ian-ray-fake@openmined.org": [ + { + "user_email": "ian-ray-fake@openmined.org", + "func_name": "simple_query_57f511", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM dataset_1.table_1\n GROUP BY table_id\n LIMIT 199607", + "job_type": "simple_query", + "settings": { + "dataset": "dataset_1", + "table": "table_1", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 199607 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_57f511", + "admin_reviewed": true, + "result_as_expected": true + }, + { + "user_email": "ian-ray-fake@openmined.org", + "func_name": "job_long_name_6a5b10aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM dataset_2.table_1\n GROUP BY table_id\n LIMIT 138346", + "job_type": "job_long_name", + "settings": { + "dataset": "dataset_2", + "table": "table_1", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 138346 + }, + "should_succeed": true, + "should_submit": true, + "code_path": 
"job_long_name_6a5b10aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "admin_reviewed": true, + "result_as_expected": true + } + ], + "marie-russo-fake@openmined.org": [ + { + "user_email": "marie-russo-fake@openmined.org", + "func_name": "simple_query_079c08", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM dataset_1.table_1\n GROUP BY table_id\n LIMIT 381248", + "job_type": "simple_query", + "settings": { + "dataset": "dataset_1", + "table": "table_1", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 381248 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_079c08", + "admin_reviewed": true, + "result_as_expected": true + }, + { + "user_email": "marie-russo-fake@openmined.org", + "func_name": "job_funcname_xss_d77c1a", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM dataset_1.table_1\n GROUP BY table_id\n LIMIT 640290", + "job_type": "job_funcname_xss", + "settings": { + "dataset": "dataset_1", + "table": "table_1", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 640290 + }, + "should_succeed": true, + "should_submit": false, + "code_path": null, + "admin_reviewed": false, + "result_as_expected": null + } + ] +} diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/jobs_k8s.json b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/jobs_k8s.json new file mode 100644 index 00000000000..96c9ebcd075 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/jobs_k8s.json @@ -0,0 +1,180 @@ +{ + "richard-porter-fake@openmined.org": [ + { + "user_email": "richard-porter-fake@openmined.org", + "func_name": "simple_query_61dd0e", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM data_10gb.subreddits\n GROUP BY table_id\n LIMIT 713078", + "job_type": "simple_query", + "settings": { + "dataset": "data_10gb", + "table": "subreddits", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 713078 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_61dd0e", + "admin_reviewed": true, + "result_as_expected": true + }, + { + "user_email": "richard-porter-fake@openmined.org", + "func_name": "simple_query_7e0b14", + "query": "SELECT subreddit_id, AVG(score) AS average_score\n FROM data_10gb.comments\n GROUP BY subreddit_id\n LIMIT 770329", + "job_type": "simple_query", + "settings": { + "dataset": "data_10gb", + "table": "comments", + "groupby_col": "subreddit_id", + "score_col": "score", + "limit": 770329 + }, + "should_succeed": true, + "should_submit": true, + 
"code_path": "simple_query_7e0b14", + "admin_reviewed": true, + "result_as_expected": true + } + ], + "christine-peterson-fake@openmined.org": [ + { + "user_email": "christine-peterson-fake@openmined.org", + "func_name": "simple_query_6e409e", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM test_1gb.subreddits\n GROUP BY table_id\n LIMIT 969621", + "job_type": "simple_query", + "settings": { + "dataset": "test_1gb", + "table": "subreddits", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 969621 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_6e409e", + "admin_reviewed": true, + "result_as_expected": true + }, + { + "user_email": "christine-peterson-fake@openmined.org", + "func_name": "wrong_syntax_query_201be7", + "query": "SELECT * FROM table INCORRECT SYNTAX", + "job_type": "wrong_syntax_query", + "settings": {}, + "should_succeed": false, + "should_submit": true, + "code_path": "wrong_syntax_query_201be7", + "admin_reviewed": true, + "result_as_expected": true + } + ], + "jon-wilson-fake@openmined.org": [ + { + "user_email": "jon-wilson-fake@openmined.org", + "func_name": "simple_query_cac747", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM data_10gb.subreddits\n GROUP BY table_id\n LIMIT 865657", + "job_type": "simple_query", + "settings": { + "dataset": "data_10gb", + "table": "subreddits", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 865657 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_cac747", + "admin_reviewed": true, + "result_as_expected": true + }, + { + "user_email": "jon-wilson-fake@openmined.org", + "func_name": "job_too_much_text_7d0b7e", + "query": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "job_type": "job_too_much_text", + "settings": {}, + "should_succeed": false, + "should_submit": true, + "code_path": "job_too_much_text_7d0b7e", + "admin_reviewed": true, + "result_as_expected": true + } + ], + "tyrone-flores-fake@openmined.org": [ + { + "user_email": "tyrone-flores-fake@openmined.org", + "func_name": "simple_query_9ab2f5", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM test_1gb.subreddits\n GROUP BY table_id\n LIMIT 897138", + "job_type": "simple_query", + "settings": { + "dataset": "test_1gb", + "table": "subreddits", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 897138 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_9ab2f5", + "admin_reviewed": true, + "result_as_expected": true + }, + { + 
"user_email": "tyrone-flores-fake@openmined.org", + "func_name": "job_long_name_d6277aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "query": "SELECT subreddit_id, AVG(score) AS average_score\n FROM data_10gb.comments\n GROUP BY subreddit_id\n LIMIT 591345", + "job_type": "job_long_name", + "settings": { + "dataset": "data_10gb", + "table": "comments", + "groupby_col": "subreddit_id", + "score_col": "score", + "limit": 591345 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "job_long_name_d6277aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "admin_reviewed": true, + "result_as_expected": true + } + ], + "brian-bradford-fake@openmined.org": [ + { + "user_email": "brian-bradford-fake@openmined.org", + "func_name": "simple_query_edeb0b", + "query": "SELECT subreddit_id, AVG(score) AS average_score\n FROM data_10gb.comments\n GROUP BY subreddit_id\n LIMIT 133654", + "job_type": "simple_query", + "settings": { + "dataset": "data_10gb", + "table": "comments", + "groupby_col": "subreddit_id", + "score_col": "score", + "limit": 133654 + }, + "should_succeed": true, + "should_submit": true, + "code_path": "simple_query_edeb0b", + "admin_reviewed": true, + "result_as_expected": true + }, + { + "user_email": "brian-bradford-fake@openmined.org", + "func_name": "job_funcname_xss_338b43", + "query": "SELECT table_id, AVG(colname) AS average_score\n FROM test_1gb.subreddits\n GROUP BY table_id\n LIMIT 64464", + "job_type": "job_funcname_xss", + "settings": { + "dataset": "test_1gb", + "table": "subreddits", + "groupby_col": "table_id", + "score_col": "colname", + "limit": 64464 + }, + "should_succeed": 
true, + "should_submit": false, + "code_path": null, + "admin_reviewed": false, + "result_as_expected": null + } + ] +} diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/users.json b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/users.json new file mode 100644 index 00000000000..c2ef3885a77 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/users.json @@ -0,0 +1,52 @@ +[ + { + "name": "Michael Adams", + "email": "michael-adams-fake@openmined.org", + "password": "qRPt9Ua0_6", + "role": "ServiceRole.DATA_SCIENTIST", + "new_password": null, + "email_disabled": true, + "reset_password": false, + "reset_token": null + }, + { + "name": "Lisa Clark", + "email": "lisa-clark-fake@openmined.org", + "password": "tC9fZrku_o", + "role": "ServiceRole.DATA_SCIENTIST", + "new_password": null, + "email_disabled": false, + "reset_password": true, + "reset_token": "D9893xYYM1HO" + }, + { + "name": "Anthony Simpson", + "email": "anthony-simpson-fake@openmined.org", + "password": ")@28JBEb4c", + "role": "ServiceRole.DATA_SCIENTIST", + "new_password": null, + "email_disabled": false, + "reset_password": false, + "reset_token": null + }, + { + "name": "Ian Ray", + "email": "ian-ray-fake@openmined.org", + "password": "rA9miV^sX$", + "role": "ServiceRole.DATA_SCIENTIST", + "new_password": null, + "email_disabled": false, + "reset_password": false, + "reset_token": null + }, + { + "name": "Marie Russo", + "email": "marie-russo-fake@openmined.org", + "password": "$4Asq)sSVb", + "role": "ServiceRole.DATA_SCIENTIST", + "new_password": null, + "email_disabled": false, + "reset_password": false, + "reset_token": null + } +] diff --git a/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/users_k8s.json b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/users_k8s.json new file mode 100644 index 00000000000..a41455fa61d --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks/users_k8s.json @@ -0,0 +1,52 @@ +[ + { + "name": "Richard Porter", + "email": "richard-porter-fake@openmined.org", + "password": "Fmo4!MxcZ(", + "role": "ServiceRole.DATA_SCIENTIST", + "new_password": null, + "email_disabled": true, + "reset_password": false, + "reset_token": null + }, + { + "name": "Christine Peterson", + "email": "christine-peterson-fake@openmined.org", + "password": "4J13O(x&!G", + "role": "ServiceRole.DATA_SCIENTIST", + "new_password": null, + "email_disabled": false, + "reset_password": true, + "reset_token": "DgSgUZ2gOpvm" + }, + { + "name": "Jon Wilson", + "email": "jon-wilson-fake@openmined.org", + "password": "h5TEQw#t%4", + "role": "ServiceRole.DATA_SCIENTIST", + "new_password": null, + "email_disabled": false, + "reset_password": false, + "reset_token": null + }, + { + "name": "Tyrone Flores", + "email": "tyrone-flores-fake@openmined.org", + "password": "&#CJFvxb3_", + "role": "ServiceRole.DATA_SCIENTIST", + "new_password": null, + "email_disabled": false, + "reset_password": false, + "reset_token": null + }, + { + "name": "Brian Bradford", + "email": "brian-bradford-fake@openmined.org", + "password": "crZ#4Heh6e", + "role": "ServiceRole.DATA_SCIENTIST", + "new_password": null, + "email_disabled": false, + "reset_password": false, + "reset_token": null + } +] diff --git a/notebooks/scenarios/bigquery/upgradability/1-dump-database-to-file.ipynb b/notebooks/scenarios/bigquery/upgradability/1-dump-database-to-file.ipynb new file mode 100644 index 00000000000..4ed6ee40e26 --- /dev/null +++ 
b/notebooks/scenarios/bigquery/upgradability/1-dump-database-to-file.ipynb @@ -0,0 +1,162 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "from pathlib import Path\n", + "\n", + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "ADMIN_EMAIL, ADMIN_PW = \"admin2@bigquery.org\", \"bqpw2\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")\n", + "client = sy.login(url=\"http://localhost:8080\", email=ADMIN_EMAIL, password=ADMIN_PW)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# client = sy.login(email=\"info@openmined.org\", password=\"changethis\", port=8080)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "# Check if this server has data on it\n", + "# assert len(client.users.get_all()) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "migration_data = client.get_migration_data(include_blobs=True)\n", + "migration_data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "assert migration_data.includes_blobs\n", + "assert migration_data.num_action_objects > 0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "# migration_data_dir = Path(os.getenv(\"MIGRATION_DATA_DIR\", \".\"))\n", + "migration_data_dir = Path(\".\")\n", + "migration_data_dir.mkdir(exist_ok=True)\n", + "\n", + "blob_path = migration_data_dir / \"migration.blob\"\n", + "yaml_path = migration_data_dir / \"migration.yaml\"\n", + "\n", + "blob_path.unlink(missing_ok=True)\n", + "yaml_path.unlink(missing_ok=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "migration_data.save(blob_path, yaml_path=yaml_path)\n", + "\n", + "assert blob_path.exists()\n", + "assert yaml_path.exists()\n", + "\n", + "print(f\"Saved migration data to {str(blob_path.resolve())}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "if server.server_type.value == \"python\":\n", + " server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "syft_3.12", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/bigquery/upgradability/2-migrate-for-scenarios-k8s.ipynb 
b/notebooks/scenarios/bigquery/upgradability/2-migrate-for-scenarios-k8s.ipynb new file mode 100644 index 00000000000..a8ad3c880ba --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/2-migrate-for-scenarios-k8s.ipynb @@ -0,0 +1,732 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "from os import environ as env\n", + "from pathlib import Path\n", + "import sys\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.service.action.action_object import AnyActionObject\n", + "from syft.service.user.user_roles import ServiceRole\n", + "from syft.util.test_helpers.email_helpers import load_users\n", + "from syft.util.test_helpers.job_helpers import create_simple_query_job\n", + "from syft.util.test_helpers.job_helpers import create_wrong_syntax_query\n", + "from syft.util.test_helpers.job_helpers import load_jobs\n", + "from syft.util.util import find_base_dir_with_tox_ini\n", + "from syft.util.util import get_caller_file_path\n", + "from syft.util.util import is_interpreter_jupyter" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Prepare file" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "1. Start the cluster\n", + "- switch to pysyftclone, and go to 0.9.1 and \n", + " - export DEVSPACE_PROFILE=bigquery-scenario-tests\n", + " - tox -e dev.k8s.destroy\n", + " - tox -e dev.k8s.start\n", + " - tox -e dev.k8s.deploy\n", + "2. Prepare the file\n", + "- go to normal pysyft dev (tox will create the right environment with 0.9.1 for you), edit some files\n", + "\n", + "`tox -e migration.scenarios.k8s.prepare`\n", + "\n", + "this will create migration.yaml\n", + "\n", + "3. Start new cluster\n", + "- copy migration_k8s.yaml (in scenario notebooks), to grid/helm/examples/dev as migration.yaml\n", + "- export DEVSPACE_PROFILE=migrated-datasite\n", + "- tox -e dev.k8s.start\n", + "- tox -e dev.k8s.deploy\n", + "\n", + "4. Run this File\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#! 
cp ./migration_k8s.yaml /Users/koen/workspace/PySyft/packages/grid/helm/examples/dev/migration.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To prepare, run with the right profile\n", + "\n", + "```\n", + "export DEVSPACE_PROFILE=bigquery-scenario-tests\n", + "tox -e dev.k8s.start\n", + "tox -e dev.k8s.deploy\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(base_dir, \"test_helpers\")\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + " notebook_helper_path = os.path.join(\n", + " base_dir, \"notebooks/scenarios/bigquery/upgradability/0.9.1_helpers\"\n", + " )\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ROOT_EMAIL = \"info@openmined.org\"\n", + "ROOT_PASSWORD = \"changethis\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# when in k8s these are the default values\n", + "ROOT_EMAIL = \"admin@bigquery.org\"\n", + "ROOT_PASSWORD = \"bqpw\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# in case we are not in k8s we set them here for orchestra to use\n", + "env[\"DEFAULT_ROOT_EMAIL\"] = ROOT_EMAIL\n", + "env[\"DEFAULT_ROOT_PASSWORD\"] = ROOT_PASSWORD" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Connect email" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "from email_helpers import get_email_server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "email_server, smtp_server = get_email_server()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " reset=True,\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")\n", + "\n", + "client = sy.login(url=\"http://localhost:8080\", email=ROOT_EMAIL, password=ROOT_PASSWORD)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Check if this is a new server\n", + "migration_data = client.get_migration_data()\n", + "\n", + "# assert len(migration_data.store_objects[User]) == 1\n", + "# assert UserCode not in migration_data.store_objects" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "migration_data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Load migration data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "migration_data_dir = Path(os.getenv(\"MIGRATION_DATA_DIR\", \".\"))\n", + "blob_path = migration_data_dir / \"migration_k8s.blob\"\n", + "yaml_path = migration_data_dir / \"migration_k8s.yaml\"\n", + "\n", + "print(f\"Loading migration data from {str(blob_path.resolve())}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res = client.load_migration_data(blob_path)\n", + "assert isinstance(res, sy.SyftSuccess), res.message" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# from syft.service.migration.object_migration_state import MigrationData\n", + "\n", + "# migration_data = MigrationData.from_file(blob_path)\n", + "\n", + "# worker_pools = migration_data.get_items_by_canonical_name(\"WorkerPool\")\n", + "\n", + "# pool = worker_pools[0]\n", + "\n", + "# images = migration_data.get_items_by_canonical_name(\"SyftWorkerImage\")\n", + "# image_id = pool.image_id\n", + "# old_image = [img for img in images if img.id == image_id][0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# old_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# old_image.is_prebuilt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# worker_pools[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!docker build -f bigquery.dockerfile -t test ." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!docker image tag test k3d-registry.localhost:5800/openmined/test:dev-latest" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!docker push k3d-registry.localhost:5800/openmined/test:dev-latest" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_config = sy.PrebuiltWorkerConfig(\n", + " tag=\"k3d-registry.localhost:5800/openmined/test:dev-latest\", description=\"\"\n", + ")\n", + "\n", + "print(\"submitting new prebuilt image...\")\n", + "result = client.api.services.worker_image.submit(worker_config=new_config)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "image_uid = result.value.id # or client.images[i].id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = client.api.services.worker_pool.launch(\n", + " pool_name=\"bigquery-pool\",\n", + " image_uid=image_uid,\n", + " num_workers=1,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# sy.upgrade_custom_workerpools(client, blob_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Emails" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "client.register(\n", + " name=\"abcdef\",\n", + " email=\"ab@de.org\",\n", + " password=\"abc\",\n", + " password_verify=\"abc\",\n", + " institution=\"comp\",\n", + " website=\"www.a.com\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(email_server.load_emails()[\"ab@de.org\"]) == 1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Post migration tests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(client, path=\"0.9.1_notebooks/users_k8s.json\")\n", + "jobs = load_jobs(users, client, filepath=\"0.9.1_notebooks/jobs_k8s.json\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Check users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_users = client.users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_users[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_user_names = [\n", + " user.name for user in server_users if user.role == ServiceRole.DATA_SCIENTIST\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "user_names = [user.name for user in users] + [\"abcdef\"] # new registered user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert set(server_user_names) == set(user_names)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Old jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# submitted_jobs = [job for job in jobs if job.is_submitted]\n", + 
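After submitting the prebuilt worker config and launching the pool in the cells above, it can be worth asserting that the pool and image are actually visible to the admin client before moving on to the email and post-migration checks. A small sketch using only calls that already appear in these notebooks (`worker_pools.get_all()`, `images.get_all()`); `client` and `image_uid` are the objects created above:

```python
# Sketch: confirm the freshly launched pool and submitted image are registered.
pool_names = [pool.name for pool in client.worker_pools.get_all()]
assert "bigquery-pool" in pool_names, f"pool not found, got: {pool_names}"

# The submitted prebuilt image should be listed among the server images.
image_ids = [image.id for image in client.images.get_all()]
assert image_uid in image_ids
```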
"reviewed_jobs = [job for job in jobs if job.admin_reviewed]\n", + "reviewed_jobs_should_succeed = [j for j in reviewed_jobs if j.should_succeed]\n", + "reviewed_jobs_should_fail = [j for j in reviewed_jobs if not j.should_succeed]\n", + "\n", + "print(\n", + " f\"{len(reviewed_jobs)=}, {len(reviewed_jobs_should_succeed)=}, {len(reviewed_jobs_should_fail)=}\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_succeed:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + "\n", + " if isinstance(res, sy.SyftError):\n", + " raise sy.SyftException(public_message=res.message)\n", + "\n", + " result = res.get()\n", + " job.result_as_expected = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_fail:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + "\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + " if isinstance(res, sy.SyftError):\n", + " job.result_as_expected = True\n", + " else:\n", + " raise sy.SyftException(public_message=f\"failed, job didnt raise {type(j)}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expected_jobs = [job for job in jobs if job.result_as_expected]\n", + "print(f\"got expected_jobs: {len(expected_jobs)} == reviewed_jobs: {len(reviewed_jobs)}\")\n", + "assert len(reviewed_jobs) == len(expected_jobs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Use old DS to go through the flow again" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_client = users[0].client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(ds_client.api.services.api.api_endpoints()) == 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job = create_simple_query_job(users[0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "response = ds_client.api.services.bigquery.submit_query(\n", + " func_name=job.func_name, query=job.query\n", + ")\n", + "response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(response, AnyActionObject)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for request in client.requests:\n", + " if request.code.service_func_name == job.func_name:\n", + " request.approve()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job_res = getattr(ds_client.code, job.func_name)(blocking=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job_res.wait()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "from pandas import DataFrame\n", + "\n", + "assert isinstance(job_res.result.get(), DataFrame)" + 
] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "wrong_syntax_job = create_wrong_syntax_query(users[0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "response = ds_client.api.services.bigquery.submit_query(\n", + " func_name=wrong_syntax_job.func_name, query=wrong_syntax_job.query\n", + ")\n", + "response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(response, AnyActionObject)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for request in client.requests:\n", + " if request.code.service_func_name == wrong_syntax_job.func_name:\n", + " request.approve()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job_res = getattr(ds_client.code, wrong_syntax_job.func_name)(blocking=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(job_res.wait(), sy.SyftError)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if server.server_type.value == \"python\":\n", + " server.land()" + ] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/upgradability/2-migrate-for-scenarios.ipynb b/notebooks/scenarios/bigquery/upgradability/2-migrate-for-scenarios.ipynb new file mode 100644 index 00000000000..bd4f5cae2f5 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/2-migrate-for-scenarios.ipynb @@ -0,0 +1,508 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "from os import environ as env\n", + "from pathlib import Path\n", + "import sys\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft.service.action.action_object import AnyActionObject\n", + "from syft.service.user.user_roles import ServiceRole\n", + "from syft.util.test_helpers.email_helpers import load_users\n", + "from syft.util.test_helpers.job_helpers import create_simple_query_job\n", + "from syft.util.test_helpers.job_helpers import create_wrong_syntax_query\n", + "from syft.util.test_helpers.job_helpers import load_jobs\n", + "from syft.util.util import find_base_dir_with_tox_ini\n", + "from syft.util.util import get_caller_file_path\n", + "from syft.util.util import is_interpreter_jupyter" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(base_dir, \"test_helpers\")\n", + 
" sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# when in k8s these are the default values\n", + "ROOT_EMAIL = \"admin@bigquery.org\"\n", + "ROOT_PASSWORD = \"bqpw\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# in case we are not in k8s we set them here for orchestra to use\n", + "env[\"DEFAULT_ROOT_EMAIL\"] = ROOT_EMAIL\n", + "env[\"DEFAULT_ROOT_PASSWORD\"] = ROOT_PASSWORD" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high-migrations\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " reset=True,\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")\n", + "\n", + "client = sy.login(url=\"http://localhost:8080\", email=ROOT_EMAIL, password=ROOT_PASSWORD)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Check if this is a new server\n", + "migration_data = client.get_migration_data()\n", + "\n", + "# assert len(migration_data.store_objects[User]) == 1\n", + "# assert UserCode not in migration_data.store_objects" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "migration_data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Load migration data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pwd" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "migration_data_dir = Path(os.getenv(\"MIGRATION_DATA_DIR\", \".\"))\n", + "blob_path = migration_data_dir / \"migration.blob\"\n", + "yaml_path = migration_data_dir / \"migration.yaml\"\n", + "\n", + "print(f\"Loading migration data from {str(blob_path.resolve())}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res = client.load_migration_data(blob_path)\n", + "assert isinstance(res, sy.SyftSuccess), res.message" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sy.upgrade_custom_workerpools(client, blob_path, mode=\"auto\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Post migration tests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(client, path=\"0.9.1_notebooks/users.json\")\n", + "jobs = load_jobs(users, client, filepath=\"0.9.1_notebooks/jobs.json\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Check users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_users = client.users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_users[0]" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_user_names = [\n", + " user.name for user in server_users if user.role == ServiceRole.DATA_SCIENTIST\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "user_names = [user.name for user in users]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert server_user_names == user_names" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Old jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# submitted_jobs = [job for job in jobs if job.is_submitted]\n", + "reviewed_jobs = [job for job in jobs if job.admin_reviewed]\n", + "reviewed_jobs_should_succeed = [j for j in reviewed_jobs if j.should_succeed]\n", + "reviewed_jobs_should_fail = [j for j in reviewed_jobs if not j.should_succeed]\n", + "\n", + "print(\n", + " f\"{len(reviewed_jobs)=}, {len(reviewed_jobs_should_succeed)=}, {len(reviewed_jobs_should_fail)=}\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_succeed:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + "\n", + " if isinstance(res, sy.SyftError):\n", + " raise sy.SyftException(public_message=res.message)\n", + "\n", + " result = res.get()\n", + " job.result_as_expected = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_fail:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + "\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + " if isinstance(res, sy.SyftError):\n", + " job.result_as_expected = True\n", + " else:\n", + " raise sy.SyftException(public_message=f\"failed, job didnt raise {type(j)}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expected_jobs = [job for job in jobs if job.result_as_expected]\n", + "print(f\"got expected_jobs: {len(expected_jobs)} == reviewed_jobs: {len(reviewed_jobs)}\")\n", + "assert len(reviewed_jobs) == len(expected_jobs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Use old DS to go through the flow again" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_client = users[0].client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(ds_client.api.services.api.api_endpoints()) == 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job = create_simple_query_job(users[0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "response = ds_client.api.services.bigquery.submit_query(\n", + " func_name=job.func_name, query=job.query\n", + ")\n", + "response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(response, 
AnyActionObject)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for request in client.requests:\n", + " if request.code.service_func_name == job.func_name:\n", + " request.approve()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job_res = getattr(ds_client.code, job.func_name)(blocking=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job_res.wait()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "from pandas import DataFrame\n", + "\n", + "assert isinstance(job_res.result.get(), DataFrame)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "wrong_syntax_job = create_wrong_syntax_query(users[0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "response = ds_client.api.services.bigquery.submit_query(\n", + " func_name=wrong_syntax_job.func_name, query=wrong_syntax_job.query\n", + ")\n", + "response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(response, AnyActionObject)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for request in client.requests:\n", + " if request.code.service_func_name == wrong_syntax_job.func_name:\n", + " request.approve()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job_res = getattr(ds_client.code, wrong_syntax_job.func_name)(blocking=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(job_res.wait(), sy.SyftError)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if server.server_type.value == \"python\":\n", + " server.land()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "syft_3.12", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/scenarios/bigquery/upgradability/bigquery.dockerfile b/notebooks/scenarios/bigquery/upgradability/bigquery.dockerfile new file mode 100644 index 00000000000..700247b6756 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/bigquery.dockerfile @@ -0,0 +1,3 @@ +FROM k3d-registry.localhost:5800/openmined/syft-backend:dev-latest + +RUN uv pip install db-dtypes google-cloud-bigquery \ No newline at end of file diff --git a/notebooks/scenarios/bigquery/upgradability/migration.blob b/notebooks/scenarios/bigquery/upgradability/migration.blob new file mode 100644 index 00000000000..3e0d4bfe6b0 Binary files /dev/null and b/notebooks/scenarios/bigquery/upgradability/migration.blob differ diff --git a/notebooks/scenarios/bigquery/upgradability/migration.yaml b/notebooks/scenarios/bigquery/upgradability/migration.yaml new file mode 100644 index 00000000000..179e57a3135 --- /dev/null +++ 
b/notebooks/scenarios/bigquery/upgradability/migration.yaml @@ -0,0 +1,6 @@ +server: + env: + - name: SERVER_UID + value: c1bdbee427ff4068ad8a9ebc151c75a2 + - name: SERVER_PRIVATE_KEY + value: 3d9252d25b8c78ddd64915e580016b0a88c5d6372fa138b88a5aaf61bfbb806e diff --git a/notebooks/scenarios/bigquery/upgradability/migration_k8s.blob b/notebooks/scenarios/bigquery/upgradability/migration_k8s.blob new file mode 100644 index 00000000000..4dc9c5030da Binary files /dev/null and b/notebooks/scenarios/bigquery/upgradability/migration_k8s.blob differ diff --git a/notebooks/scenarios/bigquery/upgradability/migration_k8s.yaml b/notebooks/scenarios/bigquery/upgradability/migration_k8s.yaml new file mode 100644 index 00000000000..f68c2a6e971 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/migration_k8s.yaml @@ -0,0 +1,6 @@ +server: + env: + - name: SERVER_UID + value: d7d40650a3d44e33b485f8493eb73bc8 + - name: SERVER_PRIVATE_KEY + value: bb2620054749852c46ed30dffeda2e89e7c6c71e399c905b348d303f90c38180 diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/01-setup-high-low-datasites.ipynb b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/01-setup-high-low-datasites.ipynb new file mode 100644 index 00000000000..65a947371db --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/01-setup-high-low-datasites.ipynb @@ -0,0 +1,241 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "from syft import test_settings\n", + "\n", + "print(f\"syft version: {sy.__version__}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " reset=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=\"auto\",\n", + ")\n", + "\n", + "server_high = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " server_side_type=\"high\",\n", + " dev_mode=True,\n", + " reset=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=\"auto\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "low_client = server_low.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client = server_high.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "low_client.worker_pools.get_all()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.worker_pools.get_all()) == 1\n", + "assert len(low_client.worker_pools.get_all()) == 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def launch_worker_pool(client, pool_name):\n", + " if pool_name not in [x.name for x in client.worker_pools]:\n", + " external_registry = test_settings.get(\"external_registry\", default=\"docker.io\")\n", + " worker_docker_tag = f\"openmined/bigquery:{sy.__version__}\"\n", + " result = client.api.services.worker_image.submit(\n", + " worker_config=sy.PrebuiltWorkerConfig(\n", + " 
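The `migration.yaml` / `migration_k8s.yaml` files added above are tiny values overlays that pin `SERVER_UID` and `SERVER_PRIVATE_KEY`, so the upgraded deployment keeps the identity of the server the blob was exported from. A sketch for inspecting such an overlay before copying it into the Helm examples directory — assumes PyYAML is installed; the key layout matches the files shown above:

```python
# Sketch: read the env entries from a migration overlay before deploying.
# third party
import yaml

with open("migration_k8s.yaml") as f:
    overlay = yaml.safe_load(f)

env_entries = {item["name"]: item["value"] for item in overlay["server"]["env"]}
print("SERVER_UID:", env_entries["SERVER_UID"])
# The private key is secret material; avoid printing it in real runs.
assert "SERVER_PRIVATE_KEY" in env_entries
```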
tag=f\"{external_registry}/{worker_docker_tag}\"\n", + " )\n", + " )\n", + " worker_image = client.images.get_all()[1]\n", + " result = client.api.services.image_registry.add(external_registry)\n", + " result = client.api.services.worker_pool.launch(\n", + " pool_name=pool_name,\n", + " image_uid=worker_image.id,\n", + " num_workers=1,\n", + " )\n", + " return result\n", + " else:\n", + " print(\"Pool already exists\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pool_name = \"bigquery-pool\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "launch_worker_pool(high_client, pool_name)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "launch_worker_pool(low_client, pool_name)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# result = high_client.worker_pools.scale(number=5, pool_name=pool_name)\n", + "# result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.worker_pools.get_all()) == 2\n", + "assert len(low_client.worker_pools.get_all()) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "base_worker_image = high_client.images.get_all()[0]\n", + "base_worker_image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "low_client.register(\n", + " email=\"data_scientist@openmined.org\",\n", + " password=\"verysecurepassword\",\n", + " password_verify=\"verysecurepassword\",\n", + " name=\"John Doe\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.settings.allow_guest_signup(enable=False)\n", + "low_client.settings.allow_guest_signup(enable=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert (\n", + " len(low_client.api.services.user.get_all()) == 2\n", + "), \"Only DS and Admin should be at low side\"\n", + "assert (\n", + " len(high_client.api.services.user.get_all()) == 1\n", + "), \"Only Admin should be at high side\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_high.land()\n", + "server_low.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/02-configure-api-and-sync.ipynb b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/02-configure-api-and-sync.ipynb new file mode 100644 index 00000000000..8a8ff673625 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/02-configure-api-and-sync.ipynb @@ -0,0 +1,606 @@ +{ + "cells": [ + { + "cell_type": "code", 
+ "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install db-dtypes google-cloud-bigquery" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# stdlib\n", + "\n", + "# third party\n", + "# set to use the live APIs\n", + "# import os\n", + "# os.environ[\"TEST_BIGQUERY_APIS_LIVE\"] = \"True\"\n", + "from apis import make_schema\n", + "from apis import make_submit_query\n", + "from apis import make_test_query" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "from syft import test_settings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=\"auto\",\n", + ")\n", + "\n", + "server_high = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " server_side_type=\"high\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=\"auto\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "low_client = server_low.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "high_client = server_high.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "assert len(high_client.worker_pools.get_all()) == 2\n", + "assert len(low_client.worker_pools.get_all()) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "this_worker_pool_name = \"bigquery-pool\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# !pip list | grep bigquery" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# !pip install db-dtypes google-cloud-bigquery" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Twin endpoints" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "mock_func = make_test_query(\n", + " settings={\n", + " \"rate_limiter_enabled\": True,\n", + " \"calls_per_min\": 10,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "private_func = make_test_query(\n", + " settings={\n", + " \"rate_limiter_enabled\": False,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "new_endpoint = sy.TwinAPIEndpoint(\n", + " path=\"bigquery.test_query\",\n", + " description=\"This endpoint allows to query Bigquery storage via SQL queries.\",\n", + " private_function=private_func,\n", + " mock_function=mock_func,\n", + " worker_pool=this_worker_pool_name,\n", + ")\n", + "\n", + "high_client.custom_api.add(endpoint=new_endpoint)" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# Here, we update the endpoint to timeout after 100s (rather the default of 60s)\n", + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.test_query\", endpoint_timeout=120\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.test_query\", hide_mock_definition=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "schema_function = make_schema(\n", + " settings={\n", + " \"calls_per_min\": 5,\n", + " },\n", + " worker_pool=this_worker_pool_name,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "high_client.custom_api.add(endpoint=schema_function)\n", + "high_client.refresh()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "dataset_1 = test_settings.get(\"dataset_1\", default=\"dataset_1\")\n", + "dataset_2 = test_settings.get(\"dataset_2\", default=\"dataset_2\")\n", + "table_1 = test_settings.get(\"table_1\", default=\"table_1\")\n", + "table_2 = test_settings.get(\"table_2\", default=\"table_2\")\n", + "table_2_col_id = test_settings.get(\"table_2_col_id\", default=\"table_id\")\n", + "table_2_col_score = test_settings.get(\"table_2_col_score\", default=\"colname\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test mock version\n", + "result = high_client.api.services.bigquery.test_query.mock(\n", + " sql_query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 10\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.bigquery.schema()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "submit_query_function = make_submit_query(\n", + " settings={}, worker_pool=this_worker_pool_name\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.add(endpoint=submit_query_function)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.api.services.api.update(\n", + " endpoint_path=\"bigquery.submit_query\", hide_mock_definition=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_client.custom_api.api_endpoints()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.custom_api.api_endpoints()) == 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert (\n", + " high_client.api.services.bigquery.test_query\n", + " and high_client.api.services.bigquery.submit_query\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test mock version\n", + "result = high_client.api.services.bigquery.test_query.mock(\n", + " sql_query=f\"SELECT * FROM 
{dataset_1}.{table_1} LIMIT 10\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bug with the new Error PR: message printed multiple times. TODO clean up the duplicate exception messages.\n", + "\n", + "# Test mock version for wrong queries\n", + "with sy.raises(\n", + " sy.SyftException(public_message=\"*must be qualified with a dataset*\"), show=True\n", + "):\n", + " high_client.api.services.bigquery.test_query.mock(\n", + " sql_query=\"SELECT * FROM invalid_table LIMIT 1\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test private version\n", + "result = high_client.api.services.bigquery.test_query.private(\n", + " sql_query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 10\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Testing submit query\n", + "result = high_client.api.services.bigquery.submit_query(\n", + " func_name=\"my_func\",\n", + " query=f\"SELECT * FROM {dataset_1}.{table_1} LIMIT 1\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert \"Query submitted\" in result\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job = high_client.code.my_func(blocking=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job.result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job.wait()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.client.syncing import compare_clients\n", + "from syft.service.job.job_stash import Job\n", + "from syft.service.job.job_stash import JobStatus" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def is_job_to_sync(batch):\n", + " if batch.status != \"NEW\":\n", + " return False\n", + " if not isinstance(batch.root.high_obj, Job):\n", + " return False\n", + " job = batch.root.high_obj\n", + " return job.status in (JobStatus.ERRORED, JobStatus.COMPLETED)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def sync_new_objects(\n", + " from_client, to_client, dry_run: bool = True, private_data: bool = False\n", + "):\n", + " sim = \"Simulating \" if dry_run else \"\"\n", + " priv = \"WITH PRIVATE DATA\" if private_data else \"\"\n", + " print(f\"{sim}Syncing from {from_client.name} to {to_client.name} {priv}\")\n", + " changes = []\n", + " diff = compare_clients(\n", + " from_client=from_client, to_client=to_client, hide_usercode=False\n", + " )\n", + " if isinstance(diff, sy.SyftError):\n", + " return diff\n", + "\n", + " for batch in diff.batches:\n", + " try:\n", + " if is_job_to_sync(batch) or batch.status == \"NEW\":\n", + " w = batch.resolve(build_state=False)\n", + " if private_data:\n", + " w.click_share_all_private_data()\n", + " if not dry_run:\n", + " w.click_sync()\n", + " change_text = f\"Synced {batch.status} {batch.root_type.__name__}\"\n", + " if not dry_run:\n", + " changes.append(change_text)\n", + " else:\n", + " print(f\"Would have run: {change_text}\")\n", + 
" except Exception as e:\n", + " print(\"sync_new_objects\", e)\n", + " raise e\n", + " return changes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = sync_new_objects(high_client, low_client)\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = sync_new_objects(high_client, low_client, dry_run=False)\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert [\n", + " \"Synced NEW TwinAPIEndpoint\",\n", + " \"Synced NEW TwinAPIEndpoint\",\n", + " \"Synced NEW TwinAPIEndpoint\",\n", + "] == result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# widget = sy.sync(from_client=high_client, to_client=low_client, hide_usercode=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# # TODO: ignore private function from high side in diff\n", + "# widget" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# widget.click_sync(0)\n", + "# widget.click_sync(1)\n", + "# widget.click_sync(2)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Some internal helper methods\n", + "\n", + "# widget._share_all()\n", + "# widget._sync_all()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_high.land()\n", + "server_low.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/03-ds-submit-request.ipynb b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/03-ds-submit-request.ipynb new file mode 100644 index 00000000000..8d6c0665dc7 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/03-ds-submit-request.ipynb @@ -0,0 +1,269 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "from syft import test_settings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Only low side server and login as DS" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=\"auto\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_client = server_low.login(\n", + " email=\"data_scientist@openmined.org\", password=\"verysecurepassword\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Low side research" + ] + }, + { + "cell_type": "code", + "execution_count": 
null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(ds_client.custom_api.api_endpoints()) == 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dataset_1 = test_settings.get(\"dataset_1\", default=\"dataset_1\")\n", + "dataset_2 = test_settings.get(\"dataset_2\", default=\"dataset_2\")\n", + "table_1 = test_settings.get(\"table_1\", default=\"table_1\")\n", + "table_2 = test_settings.get(\"table_2\", default=\"table_2\")\n", + "table_2_col_id = test_settings.get(\"table_2_col_id\", default=\"table_id\")\n", + "table_2_col_score = test_settings.get(\"table_2_col_score\", default=\"colname\")\n", + "query_limit_size = test_settings.get(\"query_limit_size\", default=10000)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = ds_client.api.services.bigquery.test_query.mock(\n", + " sql_query=f\"SELECT * from {dataset_2}.{table_2} limit 10\"\n", + ")\n", + "assert len(result) == 10" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException, show=True):\n", + " ds_client.api.services.bigquery.test_query.private(\n", + " sql_query=f\"SELECT * from {dataset_2}.{table_2} limit 10\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res = ds_client.api.services.bigquery.schema()\n", + "# third party\n", + "import pandas as pd\n", + "\n", + "assert isinstance(res.get(), pd.DataFrame)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "FUNC_NAME = \"large_sample\"\n", + "LARGE_SAMPLE_QUERY = f\"SELECT * FROM {dataset_2}.{table_2} LIMIT {query_limit_size}\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mock_res = ds_client.api.services.bigquery.test_query(sql_query=LARGE_SAMPLE_QUERY)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "submission = ds_client.api.services.bigquery.submit_query(\n", + " func_name=FUNC_NAME, query=LARGE_SAMPLE_QUERY\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_code_path(response):\n", + " # stdlib\n", + " import re\n", + "\n", + " pattern = r\"client\\.code\\.(\\w+)\\(\\)\"\n", + " match = re.search(pattern, str(response))\n", + " if match:\n", + " extracted_code = match.group(1)\n", + " return extracted_code\n", + " return None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# why are we randomizing things here?\n", + "func_name = extract_code_path(submission)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "api_method = getattr(ds_client.code, func_name, None)\n", + "api_method" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# todo: this is very noisy, but it actually passes\n", + "with sy.raises(\n", + " sy.SyftException(\n", + " public_message=\"*Please wait for the admin to allow the execution of this code*\"\n", + " ),\n", + " show=True,\n", + "):\n", + " result = api_method(blocking=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": 
{}, + "outputs": [], + "source": [ + "assert \"large_sample\" in func_name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "api_method_2 = getattr(ds_client.code, func_name, None)\n", + "api_method_2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(\n", + " sy.SyftException(public_message=\"*Your code is waiting for approval*\"), show=True\n", + "):\n", + " result = api_method_2()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/04-do-review-requests.ipynb b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/04-do-review-requests.ipynb new file mode 100644 index 00000000000..a4a632e2f13 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/04-do-review-requests.ipynb @@ -0,0 +1,422 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "from syft.service.code.user_code import UserCode\n", + "from syft.service.request.request import Request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=\"auto\",\n", + ")\n", + "\n", + "server_high = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " server_side_type=\"high\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=\"auto\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "low_client = server_low.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "high_client = server_high.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# # todo: this is way too noisy\n", + "# widget = sy.sync(from_client=low_client, to_client=high_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# widget" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# sync the users new request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.client.syncing import compare_clients" + ] + }, + { + "cell_type": "code", + 
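`extract_code_path` in the data-scientist notebook above simply pulls the generated function name out of the submission response, which renders a call hint of the form `client.code.<func_name>()`. A quick illustration with a made-up response string (the real response embeds a randomized suffix):

```python
# Sketch: how extract_code_path recovers the generated function name.
# stdlib
import re


def extract_code_path(response) -> str | None:
    pattern = r"client\.code\.(\w+)\(\)"
    match = re.search(pattern, str(response))
    return match.group(1) if match else None


# Hypothetical response text for illustration only.
sample_response = "Query submitted. Call client.code.large_sample_a1b2c3() to run it."
assert extract_code_path(sample_response) == "large_sample_a1b2c3"
```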
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.service.job.job_stash import Job\n", + "from syft.service.job.job_stash import JobStatus" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def is_job_to_sync(batch):\n", + " if batch.status != \"NEW\":\n", + " return False\n", + " if not isinstance(batch.root.high_obj, Job):\n", + " return False\n", + " job = batch.root.high_obj\n", + " return job.status in (JobStatus.ERRORED, JobStatus.COMPLETED)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def sync_new_objects(\n", + " from_client, to_client, dry_run: bool = True, private_data: bool = False\n", + "):\n", + " sim = \"Simulating \" if dry_run else \"\"\n", + " priv = \"WITH PRIVATE DATA\" if private_data else \"\"\n", + " print(f\"{sim}Syncing from {from_client.name} to {to_client.name} {priv}\")\n", + " changes = []\n", + " diff = compare_clients(\n", + " from_client=from_client, to_client=to_client, hide_usercode=False\n", + " )\n", + " if isinstance(diff, sy.SyftError):\n", + " return diff\n", + "\n", + " for batch in diff.batches:\n", + " try:\n", + " if is_job_to_sync(batch) or batch.status == \"NEW\":\n", + " w = batch.resolve(build_state=False)\n", + " if private_data:\n", + " w.click_share_all_private_data()\n", + " if not dry_run:\n", + " w.click_sync()\n", + " change_text = f\"Synced {batch.status} {batch.root_type.__name__}\"\n", + " if not dry_run:\n", + " changes.append(change_text)\n", + " else:\n", + " print(f\"Would have run: {change_text}\")\n", + " except Exception as e:\n", + " print(\"sync_new_objects\", e)\n", + " raise e\n", + " return changes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sync_new_objects(low_client, high_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = sync_new_objects(low_client, high_client, dry_run=False, private_data=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert \"Synced NEW UserCode\" in result\n", + "assert \"Synced NEW Request\" in result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert len(high_client.code.get_all()) == 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "requests = high_client.requests\n", + "requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "user_request = None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for request in requests:\n", + " if \"large_sample\" in getattr(\n", + " getattr(request, \"code\", None), \"service_func_name\", None\n", + " ):\n", + " user_request = request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert user_request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def execute_request(client, request) -> dict:\n", + " if not isinstance(request, Request):\n", + " return \"This is not a request\"\n", + "\n", + " code = request.code\n", + " if not 
isinstance(code, UserCode):\n", + " return \"No usercode found\"\n", + "\n", + " func_name = request.code.service_func_name\n", + " api_func = getattr(client.code, func_name, None)\n", + " if api_func is None:\n", + " return \"Code name was not found on the client.\"\n", + "\n", + " job = api_func(blocking=False)\n", + " return job" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job = execute_request(high_client, user_request)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job.wait()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# # todo: this is way too noisy\n", + "# widget = sy.sync(from_client=high_client, to_client=low_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# widget" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sync_new_objects(high_client, low_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = sync_new_objects(high_client, low_client, dry_run=False, private_data=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert \"Synced NEW Job\" in result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "requests = low_client.requests\n", + "requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "user_request = None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for request in requests:\n", + " if \"large_sample\" in getattr(\n", + " getattr(request, \"code\", None), \"service_func_name\", None\n", + " ):\n", + " user_request = request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "user_request.status" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert \"approved\" in str(user_request.status).lower()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_high.land()\n", + "server_low.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/05-ds-get-results.ipynb b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/05-ds-get-results.ipynb new file mode 100644 index 00000000000..6981f1e7105 --- /dev/null +++ 
b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/05-ds-get-results.ipynb @@ -0,0 +1,131 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "import pandas as pd\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "from syft import test_settings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=\"auto\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_client = server_low.login(\n", + " email=\"data_scientist@openmined.org\", password=\"verysecurepassword\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "api_method = None\n", + "for code in ds_client.code:\n", + " if \"large_sample\" in code.service_func_name:\n", + " api_method = getattr(ds_client.code, code.service_func_name, None)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "job = api_method(blocking=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res = job.wait().get()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(res, pd.DataFrame)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "query_limit_size = test_settings.get(\"query_limit_size\", default=10000)\n", + "assert len(res) == query_limit_size" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server_low.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/packages/grid/packer/cloud-config/meta-data b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/__init__.py similarity index 100% rename from packages/grid/packer/cloud-config/meta-data rename to notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/__init__.py diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/__init__.py b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/__init__.py new file mode 100644 index 00000000000..7231b580696 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/__init__.py @@ -0,0 +1,23 @@ +# stdlib +import os + +# syft absolute +from syft.util.util import str_to_bool + +# relative +from .submit_query import make_submit_query + +env_var = "TEST_BIGQUERY_APIS_LIVE" +use_live = str_to_bool(str(os.environ.get(env_var, "False"))) +env_name = "Live" if use_live else "Mock" +print(f"Using {env_name} API Code, this 
will query BigQuery. ${env_var}=={use_live}") + + +if use_live: + # relative + from .live.schema import make_schema + from .live.test_query import make_test_query +else: + # relative + from .mock.schema import make_schema + from .mock.test_query import make_test_query diff --git a/packages/log.txt b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/live/__init__.py similarity index 100% rename from packages/log.txt rename to notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/live/__init__.py diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/live/schema.py b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/live/schema.py new file mode 100644 index 00000000000..5b39d9d9066 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/live/schema.py @@ -0,0 +1,108 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy +from syft import test_settings + +# relative +from ..rate_limiter import is_within_rate_limit + + +def make_schema(settings: dict, worker_pool: str) -> Callable: + updated_settings = { + "calls_per_min": 5, + "rate_limiter_enabled": True, + "credentials": test_settings.gce_service_account.to_dict(), + "region": test_settings.gce_region, + "project_id": test_settings.gce_project_id, + "dataset_1": test_settings.dataset_1, + "table_1": test_settings.table_1, + "table_2": test_settings.table_2, + } | settings + + @sy.api_endpoint( + path="bigquery.schema", + description="This endpoint allows for visualising the metadata of tables available in BigQuery.", + settings=updated_settings, + helper_functions=[ + is_within_rate_limit + ], # Adds ratelimit as this is also a method available to data scientists + worker_pool=worker_pool, + ) + def live_schema( + context, + ) -> str: + # stdlib + import datetime + + # third party + from google.cloud import bigquery # noqa: F811 + from google.oauth2 import service_account + import pandas as pd + + # syft absolute + from syft import SyftException + + # Auth for Bigquer based on the workload identity + credentials = service_account.Credentials.from_service_account_info( + context.settings["credentials"] + ) + scoped_credentials = credentials.with_scopes( + ["https://www.googleapis.com/auth/cloud-platform"] + ) + + client = bigquery.Client( + credentials=scoped_credentials, + location=context.settings["region"], + ) + + # Store a dict with the calltimes for each user, via the email. + if context.settings["rate_limiter_enabled"]: + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." 
+ ) + context.state[context.user.email].append(datetime.datetime.now()) + + try: + # Formats the data schema in a data frame format + # Warning: the only supported format types are primitives, np.ndarrays and pd.DataFrames + + data_schema = [] + for table_id in [ + f"{context.settings['dataset_1']}.{context.settings['table_1']}", + f"{context.settings['dataset_1']}.{context.settings['table_2']}", + ]: + table = client.get_table(table_id) + for schema in table.schema: + data_schema.append( + { + "project": str(table.project), + "dataset_id": str(table.dataset_id), + "table_id": str(table.table_id), + "schema_name": str(schema.name), + "schema_field": str(schema.field_type), + "description": str(table.description), + "num_rows": str(table.num_rows), + } + ) + return pd.DataFrame(data_schema) + + except Exception as e: + # not a bigquery exception + if not hasattr(e, "_errors"): + output = f"got exception e: {type(e)} {str(e)}" + raise SyftException( + public_message=f"An error occured executing the API call {output}" + ) + + # Should add appropriate error handling for what should be exposed to the data scientists. + raise SyftException( + public_message="An error occured executing the API call, please contact the domain owner." + ) + + return live_schema diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/live/test_query.py b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/live/test_query.py new file mode 100644 index 00000000000..344879dcb62 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/live/test_query.py @@ -0,0 +1,113 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy +from syft import test_settings + +# relative +from ..rate_limiter import is_within_rate_limit + + +def make_test_query(settings) -> Callable: + updated_settings = { + "calls_per_min": 10, + "rate_limiter_enabled": True, + "credentials": test_settings.gce_service_account.to_dict(), + "region": test_settings.gce_region, + "project_id": test_settings.gce_project_id, + } | settings + + # these are the same if you allow the rate limiter to be turned on and off + @sy.api_endpoint_method( + settings=updated_settings, + helper_functions=[is_within_rate_limit], + ) + def live_test_query( + context, + sql_query: str, + ) -> str: + # stdlib + import datetime + + # third party + from google.cloud import bigquery # noqa: F811 + from google.oauth2 import service_account + + # syft absolute + from syft import SyftException + + # Auth for Bigquer based on the workload identity + credentials = service_account.Credentials.from_service_account_info( + context.settings["credentials"] + ) + scoped_credentials = credentials.with_scopes( + ["https://www.googleapis.com/auth/cloud-platform"] + ) + + client = bigquery.Client( + credentials=scoped_credentials, + location=context.settings["region"], + ) + + # Store a dict with the calltimes for each user, via the email. + if context.settings["rate_limiter_enabled"]: + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." 
+ ) + context.state[context.user.email].append(datetime.datetime.now()) + + try: + rows = client.query_and_wait( + sql_query, + project=context.settings["project_id"], + ) + + if rows.total_rows > 1_000_000: + raise SyftException( + public_message="Please only write queries that gather aggregate statistics" + ) + + return rows.to_dataframe() + + except Exception as e: + # not a bigquery exception + if not hasattr(e, "_errors"): + output = f"got exception e: {type(e)} {str(e)}" + raise SyftException( + public_message=f"An error occured executing the API call {output}" + ) + + # Treat all errors that we would like to be forwarded to the data scientists + # By default, any exception is only visible to the data owner. + + if e._errors[0]["reason"] in [ + "badRequest", + "blocked", + "duplicate", + "invalidQuery", + "invalid", + "jobBackendError", + "jobInternalError", + "notFound", + "notImplemented", + "rateLimitExceeded", + "resourceInUse", + "resourcesExceeded", + "tableUnavailable", + "timeout", + ]: + raise SyftException( + public_message="Error occured during the call: " + + e._errors[0]["message"] + ) + else: + raise SyftException( + public_message="An error occured executing the API call, please contact the domain owner." + ) + + return live_test_query diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/__init__.py b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/data.py b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/data.py new file mode 100644 index 00000000000..82262bf7a01 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/data.py @@ -0,0 +1,268 @@ +# stdlib +from math import nan + +schema_dict = { + "project": { + 0: "example-project", + 1: "example-project", + 2: "example-project", + 3: "example-project", + 4: "example-project", + 5: "example-project", + 6: "example-project", + 7: "example-project", + 8: "example-project", + 9: "example-project", + 10: "example-project", + 11: "example-project", + 12: "example-project", + 13: "example-project", + 14: "example-project", + 15: "example-project", + 16: "example-project", + 17: "example-project", + 18: "example-project", + 19: "example-project", + 20: "example-project", + 21: "example-project", + 22: "example-project", + }, + "dataset_id": { + 0: "test_1gb", + 1: "test_1gb", + 2: "test_1gb", + 3: "test_1gb", + 4: "test_1gb", + 5: "test_1gb", + 6: "test_1gb", + 7: "test_1gb", + 8: "test_1gb", + 9: "test_1gb", + 10: "test_1gb", + 11: "test_1gb", + 12: "test_1gb", + 13: "test_1gb", + 14: "test_1gb", + 15: "test_1gb", + 16: "test_1gb", + 17: "test_1gb", + 18: "test_1gb", + 19: "test_1gb", + 20: "test_1gb", + 21: "test_1gb", + 22: "test_1gb", + }, + "table_id": { + 0: "posts", + 1: "posts", + 2: "posts", + 3: "posts", + 4: "posts", + 5: "posts", + 6: "posts", + 7: "comments", + 8: "comments", + 9: "comments", + 10: "comments", + 11: "comments", + 12: "comments", + 13: "comments", + 14: "comments", + 15: "comments", + 16: "comments", + 17: "comments", + 18: "comments", + 19: "comments", + 20: "comments", + 21: "comments", + 22: "comments", + }, + "schema_name": { + 0: "int64_field_0", + 1: "id", + 2: "name", + 3: "subscribers_count", + 4: "permalink", + 5: "nsfw", + 6: "spam", + 7: "int64_field_0", + 8: "id", + 9: "body", + 10: "parent_id", + 11: "created_at", 
+ 12: "last_modified_at", + 13: "gilded", + 14: "permalink", + 15: "score", + 16: "comment_id", + 17: "post_id", + 18: "author_id", + 19: "spam", + 20: "deleted", + 21: "upvote_raio", + 22: "collapsed_in_crowd_control", + }, + "schema_field": { + 0: "INTEGER", + 1: "STRING", + 2: "STRING", + 3: "INTEGER", + 4: "STRING", + 5: "FLOAT", + 6: "BOOLEAN", + 7: "INTEGER", + 8: "STRING", + 9: "STRING", + 10: "STRING", + 11: "INTEGER", + 12: "INTEGER", + 13: "BOOLEAN", + 14: "STRING", + 15: "INTEGER", + 16: "STRING", + 17: "STRING", + 18: "STRING", + 19: "BOOLEAN", + 20: "BOOLEAN", + 21: "FLOAT", + 22: "BOOLEAN", + }, + "description": { + 0: "None", + 1: "None", + 2: "None", + 3: "None", + 4: "None", + 5: "None", + 6: "None", + 7: "None", + 8: "None", + 9: "None", + 10: "None", + 11: "None", + 12: "None", + 13: "None", + 14: "None", + 15: "None", + 16: "None", + 17: "None", + 18: "None", + 19: "None", + 20: "None", + 21: "None", + 22: "None", + }, + "num_rows": { + 0: "2000000", + 1: "2000000", + 2: "2000000", + 3: "2000000", + 4: "2000000", + 5: "2000000", + 6: "2000000", + 7: "2000000", + 8: "2000000", + 9: "2000000", + 10: "2000000", + 11: "2000000", + 12: "2000000", + 13: "2000000", + 14: "2000000", + 15: "2000000", + 16: "2000000", + 17: "2000000", + 18: "2000000", + 19: "2000000", + 20: "2000000", + 21: "2000000", + 22: "2000000", + }, +} + + +query_dict = { + "int64_field_0": { + 0: 4, + 1: 5, + 2: 10, + 3: 16, + 4: 17, + 5: 23, + 6: 24, + 7: 25, + 8: 27, + 9: 40, + }, + "id": { + 0: "t5_via1x", + 1: "t5_cv9gn", + 2: "t5_8p2tq", + 3: "t5_8fcro", + 4: "t5_td5of", + 5: "t5_z01fv", + 6: "t5_hmqjk", + 7: "t5_1flyj", + 8: "t5_5rwej", + 9: "t5_uurcv", + }, + "name": { + 0: "/channel/mylittlepony", + 1: "/channel/polyamory", + 2: "/channel/Catholicism", + 3: "/channel/cordcutters", + 4: "/channel/stevenuniverse", + 5: "/channel/entitledbitch", + 6: "/channel/engineering", + 7: "/channel/nottheonion", + 8: "/channel/FoodPorn", + 9: "/channel/puppysmiles", + }, + "subscribers_count": { + 0: 4323081, + 1: 2425929, + 2: 4062607, + 3: 7543226, + 4: 2692168, + 5: 2709080, + 6: 8766144, + 7: 2580984, + 8: 7784809, + 9: 3715991, + }, + "permalink": { + 0: "/channel//channel/mylittlepony", + 1: "/channel//channel/polyamory", + 2: "/channel//channel/Catholicism", + 3: "/channel//channel/cordcutters", + 4: "/channel//channel/stevenuniverse", + 5: "/channel//channel/entitledbitch", + 6: "/channel//channel/engineering", + 7: "/channel//channel/nottheonion", + 8: "/channel//channel/FoodPorn", + 9: "/channel//channel/puppysmiles", + }, + "nsfw": { + 0: nan, + 1: nan, + 2: nan, + 3: nan, + 4: nan, + 5: nan, + 6: nan, + 7: nan, + 8: nan, + 9: nan, + }, + "spam": { + 0: False, + 1: False, + 2: False, + 3: False, + 4: False, + 5: False, + 6: False, + 7: False, + 8: False, + 9: False, + }, +} diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/schema.py b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/schema.py new file mode 100644 index 00000000000..a95e04f2f1d --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/schema.py @@ -0,0 +1,52 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy + +# relative +from ..rate_limiter import is_within_rate_limit +from .data import schema_dict + + +def make_schema(settings, worker_pool) -> Callable: + updated_settings = { + "calls_per_min": 5, + "rate_limiter_enabled": True, + "schema_dict": schema_dict, + } | settings + + @sy.api_endpoint( + 
path="bigquery.schema", + description="This endpoint allows for visualising the metadata of tables available in BigQuery.", + settings=updated_settings, + helper_functions=[is_within_rate_limit], + worker_pool=worker_pool, + ) + def mock_schema( + context, + ) -> str: + # syft absolute + from syft import SyftException + + # Store a dict with the calltimes for each user, via the email. + if context.settings["rate_limiter_enabled"]: + # stdlib + import datetime + + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." + ) + context.state[context.user.email].append(datetime.datetime.now()) + + # third party + import pandas as pd + + df = pd.DataFrame(context.settings["schema_dict"]) + return df + + return mock_schema diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/test_query.py b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/test_query.py new file mode 100644 index 00000000000..ae028a8cf36 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/mock/test_query.py @@ -0,0 +1,138 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy + +# relative +from ..rate_limiter import is_within_rate_limit +from .data import query_dict + + +def extract_limit_value(sql_query: str) -> int: + # stdlib + import re + + limit_pattern = re.compile(r"\bLIMIT\s+(\d+)\b", re.IGNORECASE) + match = limit_pattern.search(sql_query) + if match: + return int(match.group(1)) + return None + + +def is_valid_sql(query: str) -> bool: + # stdlib + import sqlite3 + + # Prepare an in-memory SQLite database + conn = sqlite3.connect(":memory:") + cursor = conn.cursor() + + try: + # Use the EXPLAIN QUERY PLAN command to get the query plan + cursor.execute(f"EXPLAIN QUERY PLAN {query}") + except sqlite3.Error as e: + if "no such table" in str(e).lower(): + return True + return False + finally: + conn.close() + + +def adjust_dataframe_rows(df, target_rows: int): + # third party + import pandas as pd + + current_rows = len(df) + + if target_rows > current_rows: + # Repeat rows to match target_rows + repeat_times = (target_rows + current_rows - 1) // current_rows + df_expanded = pd.concat([df] * repeat_times, ignore_index=True).head( + target_rows + ) + else: + # Truncate rows to match target_rows + df_expanded = df.head(target_rows) + + return df_expanded + + +def make_test_query(settings: dict) -> Callable: + updated_settings = { + "calls_per_min": 10, + "rate_limiter_enabled": True, + "query_dict": query_dict, + } | settings + + # these are the same if you allow the rate limiter to be turned on and off + @sy.api_endpoint_method( + settings=updated_settings, + helper_functions=[ + is_within_rate_limit, + extract_limit_value, + is_valid_sql, + adjust_dataframe_rows, + ], + ) + def mock_test_query( + context, + sql_query: str, + ) -> str: + # stdlib + import datetime + + # third party + from google.api_core.exceptions import BadRequest + + # syft absolute + from syft import SyftException + + # Store a dict with the calltimes for each user, via the email. 
+ if context.settings["rate_limiter_enabled"]: + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." + ) + context.state[context.user.email].append(datetime.datetime.now()) + + bad_table = "invalid_table" + bad_post = ( + "BadRequest: 400 POST " + "https://bigquery.googleapis.com/bigquery/v2/projects/project-id/" + "queries?prettyPrint=false: " + ) + if bad_table in sql_query: + try: + raise BadRequest( + f'{bad_post} Table "{bad_table}" must be qualified ' + "with a dataset (e.g. dataset.table)." + ) + except Exception as e: + raise SyftException( + public_message=f"*must be qualified with a dataset*. {e}" + ) + + if not context.code.is_valid_sql(sql_query): + raise BadRequest( + f'{bad_post} Syntax error: Unexpected identifier "{sql_query}" at [1:1]' + ) + + # third party + import pandas as pd + + limit = context.code.extract_limit_value(sql_query) + if limit > 1_000_000: + raise SyftException( + public_message="Please only write queries that gather aggregate statistics" + ) + + base_df = pd.DataFrame(context.settings["query_dict"]) + + df = context.code.adjust_dataframe_rows(base_df, limit) + return df + + return mock_test_query diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/rate_limiter.py b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/rate_limiter.py new file mode 100644 index 00000000000..8ce319b61f4 --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/rate_limiter.py @@ -0,0 +1,16 @@ +def is_within_rate_limit(context) -> bool: + """Rate limiter for custom API calls made by users.""" + # stdlib + import datetime + + state = context.state + settings = context.settings + email = context.user.email + + current_time = datetime.datetime.now() + calls_last_min = [ + 1 if (current_time - call_time).seconds < 60 else 0 + for call_time in state[email] + ] + + return sum(calls_last_min) < settings.get("calls_per_min", 5) diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/submit_query.py b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/submit_query.py new file mode 100644 index 00000000000..a0125ee009b --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/apis/submit_query.py @@ -0,0 +1,42 @@ +# syft absolute +import syft as sy + + +def make_submit_query(settings, worker_pool): + updated_settings = {"user_code_worker": worker_pool} | settings + + @sy.api_endpoint( + path="bigquery.submit_query", + description="API endpoint that allows you to submit SQL queries to run on the private data.", + worker_pool=worker_pool, + settings=updated_settings, + ) + def submit_query( + context, + func_name: str, + query: str, + ) -> str: + # syft absolute + import syft as sy + + @sy.syft_function( + name=func_name, + input_policy=sy.MixedInputPolicy( + endpoint=sy.Constant( + val=context.admin_client.api.services.bigquery.test_query + ), + query=sy.Constant(val=query), + client=context.admin_client, + ), + worker_pool_name=context.settings["user_code_worker"], + ) + def execute_query(query: str, endpoint): + res = endpoint(sql_query=query) + return res + + request = context.user_client.code.request_code_execution(execute_query) + context.admin_client.requests.set_tags(request, ["autosync"]) + + return f"Query submitted {request}. 
Use `client.code.{func_name}()` to run your query" + + return submit_query diff --git a/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/sync_helpers.py b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/sync_helpers.py new file mode 100644 index 00000000000..e1d558016ba --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/0.9.1_notebooks/sync_helpers.py @@ -0,0 +1,190 @@ +# third party +from tqdm import tqdm + +# syft absolute +import syft as sy +from syft.client.datasite_client import DatasiteClient +from syft.client.syncing import compare_clients +from syft.service.code.user_code import UserCode +from syft.service.job.job_stash import Job +from syft.service.job.job_stash import JobStatus +from syft.service.request.request import Request +from syft.service.request.request import RequestStatus +from syft.service.sync.diff_state import ObjectDiffBatch +from syft.types.result import Err + + +def deny_requests_without_autosync_tag(client_low: DatasiteClient): + # Deny all requests that are not autosync + requests = client_low.requests.get_all() + if isinstance(requests, sy.SyftError): + print(requests) + return + + denied_requests = [] + for request in tqdm(requests): + if request.status != RequestStatus.PENDING: + continue + if "autosync" not in request.tags: + request.deny( + reason="This request has been denied automatically. " + "Please use the designated API to submit your request." + ) + denied_requests.append(request.id) + print(f"Denied {len(denied_requests)} requests without autosync tag") + + +def is_request_to_sync(batch: ObjectDiffBatch) -> bool: + # True if this is a new low-side request + # TODO add condition for sql requests/usercodes + low_request = batch.root.low_obj + return ( + isinstance(low_request, Request) + and batch.status == "NEW" + and "autosync" in low_request.tags + ) + + +def is_job_to_sync(batch: ObjectDiffBatch): + # True if this is a new high-side job that is either COMPLETED or ERRORED + if batch.status != "NEW": + return False + if not isinstance(batch.root.high_obj, Job): + return False + job = batch.root.high_obj + return job.status in (JobStatus.ERRORED, JobStatus.COMPLETED) + + +def execute_requests( + client_high: DatasiteClient, request_ids: list[sy.UID] +) -> dict[sy.UID, Job]: + jobs_by_request_id = {} + for request_id in request_ids: + request = client_high.requests.get_by_uid(request_id) + if not isinstance(request, Request): + continue + + code = request.code + if not isinstance(code, UserCode): + continue + + func_name = request.code.service_func_name + api_func = getattr(client_high.code, func_name, None) + if api_func is None: + continue + + job = api_func(blocking=False) + jobs_by_request_id[request_id] = job + + return jobs_by_request_id + + +def deny_failed_jobs( + client_low: DatasiteClient, + jobs: list[Job], +) -> None: + # NOTE no syncing is needed, requests are denied on the low side + denied_requests = [] + + for job in jobs: + if job.status != JobStatus.ERRORED: + continue + + error_result = job.result + if isinstance(error_result, Err): + error_msg = error_result.err_value + else: + error_msg = "An unknown error occurred, please check the Job logs for more information." 
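+ # Look up the low-side request tied to this job's user code and deny it,
+ # forwarding the extracted error message as the denial reason.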
+ + code_id = job.user_code_id + if code_id is None: + continue + requests = client_low.requests.get_by_usercode_id(code_id) + if isinstance(requests, list) and len(requests) > 0: + request = requests[0] + request.deny(reason=f"Execution failed: {error_msg}") + denied_requests.append(request.id) + else: + print(f"Failed to deny request for job {job.id}") + + print(f"Denied {len(denied_requests)} failed requests") + + +def sync_finished_jobs( + client_low: DatasiteClient, + client_high: DatasiteClient, +) -> dict[sy.UID, sy.SyftError | sy.SyftSuccess] | sy.SyftError: + sync_job_results = {} + synced_jobs = [] + diff = compare_clients( + from_client=client_high, to_client=client_low, include_types=["job"] + ) + if isinstance(diff, sy.SyftError): + print(diff) + return diff + + for batch in diff.batches: + if is_job_to_sync(batch): + job = batch.root.high_obj + + w = batch.resolve(build_state=False) + share_result = w.click_share_all_private_data() + if isinstance(share_result, sy.SyftError): + sync_job_results[job.id] = share_result + continue + sync_result = w.click_sync() + + synced_jobs.append(job) + sync_job_results[job.id] = sync_result + + print(f"Sharing {len(sync_job_results)} new results") + deny_failed_jobs(client_low, synced_jobs) + return sync_job_results + + +def sync_new_requests( + client_low: DatasiteClient, + client_high: DatasiteClient, +) -> dict[sy.UID, sy.SyftSuccess | sy.SyftError] | sy.SyftError: + sync_request_results = {} + diff = compare_clients( + from_client=client_low, to_client=client_high, include_types=["request"] + ) + if isinstance(diff, sy.SyftError): + print(diff) + return sync_request_results + print(f"{len(diff.batches)} request batches found") + for batch in tqdm(diff.batches): + if is_request_to_sync(batch): + request_id = batch.root.low_obj.id + w = batch.resolve(build_state=False) + result = w.click_sync() + sync_request_results[request_id] = result + return sync_request_results + + +def sync_and_execute_new_requests( + client_low: DatasiteClient, client_high: DatasiteClient +) -> None: + sync_results = sync_new_requests(client_low, client_high) + if isinstance(sync_results, sy.SyftError): + print(sync_results) + return + + request_ids = [ + uid for uid, res in sync_results.items() if isinstance(res, sy.SyftSuccess) + ] + print(f"Synced {len(request_ids)} new requests") + + jobs_by_request = execute_requests(client_high, request_ids) + print(f"Started {len(jobs_by_request)} new jobs") + + +def auto_sync(client_low: DatasiteClient, client_high: DatasiteClient) -> None: + print("Starting auto sync") + print("Denying non tagged jobs") + deny_requests_without_autosync_tag(client_low) + print("Syncing and executing") + sync_and_execute_new_requests(client_low, client_high) + sync_finished_jobs(client_low, client_high) + print("Finished auto sync") diff --git a/notebooks/scenarios/bigquery/upgradability/sync/1-dump-database-to-file.ipynb b/notebooks/scenarios/bigquery/upgradability/sync/1-dump-database-to-file.ipynb new file mode 100644 index 00000000000..09b02ee5ddd --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/1-dump-database-to-file.ipynb @@ -0,0 +1,201 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "from pathlib import Path\n", + "\n", + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + 
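"# Launch the low- and high-side datasites whose state will be dumped to file\n", +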
"server_low = sy.orchestra.launch(\n", + " name=\"bigquery-low\",\n", + " server_side_type=\"low\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=\"auto\",\n", + ")\n", + "\n", + "server_high = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " server_side_type=\"high\",\n", + " dev_mode=True,\n", + " n_consumers=1,\n", + " create_producer=True,\n", + " port=\"auto\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "low_client = server_low.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "high_client = server_high.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "# Dump low side" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "migration_data_dir = Path(os.getenv(\"MIGRATION_DATA_DIR\", \".\"))\n", + "migration_data_dir.mkdir(exist_ok=True)\n", + "\n", + "low_blob_path = migration_data_dir / \"migration_low.blob\"\n", + "low_yaml_path = migration_data_dir / \"migration_low.yaml\"\n", + "\n", + "low_blob_path.unlink(missing_ok=True)\n", + "low_yaml_path.unlink(missing_ok=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# Dump low state\n", + "\n", + "low_migration_data = low_client.get_migration_data(include_blobs=True)\n", + "low_migration_data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "low_migration_data.save(path=low_blob_path, yaml_path=low_yaml_path)" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "# Dump high side" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "high_blob_path = migration_data_dir / \"migration_high.blob\"\n", + "high_yaml_path = migration_data_dir / \"migration_high.yaml\"\n", + "\n", + "high_blob_path.unlink(missing_ok=True)\n", + "high_yaml_path.unlink(missing_ok=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "high_migration_data = high_client.get_migration_data(include_blobs=True)\n", + "high_migration_data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "high_migration_data.save(path=high_blob_path, yaml_path=high_yaml_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "for path in (low_blob_path, low_yaml_path, high_blob_path, high_yaml_path):\n", + " assert path.exists(), f\"Migration file {path} does not exist\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "if server_high.server_type.value == \"python\":\n", + " server_high.land()\n", + "\n", + "if server_low.server_type.value == \"python\":\n", + " server_low.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + 
"codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/bigquery/upgradability/sync/2-migrate-for-scenarios.ipynb b/notebooks/scenarios/bigquery/upgradability/sync/2-migrate-for-scenarios.ipynb new file mode 100644 index 00000000000..326eb8c62cd --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/2-migrate-for-scenarios.ipynb @@ -0,0 +1,403 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "from os import environ as env\n", + "from pathlib import Path\n", + "\n", + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import sys\n", + "\n", + "# syft absolute\n", + "from syft.util.util import find_base_dir_with_tox_ini\n", + "from syft.util.util import get_caller_file_path\n", + "from syft.util.util import is_interpreter_jupyter\n", + "\n", + "\n", + "def add_helper_path_to_python_path() -> None:\n", + " current_path = \".\"\n", + "\n", + " # jupyter uses \".\" which resolves to the notebook\n", + " if not is_interpreter_jupyter():\n", + " # python uses the file which has from syft import test_settings in it\n", + " import_path = get_caller_file_path()\n", + " if import_path:\n", + " current_path = import_path\n", + "\n", + " base_dir = find_base_dir_with_tox_ini(current_path)\n", + " notebook_helper_path = os.path.join(base_dir, \"notebooks/notebook_helpers\")\n", + " sys.path.append(notebook_helper_path)\n", + "\n", + "\n", + "add_helper_path_to_python_path()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "from email_helpers import load_users\n", + "from job_helpers import load_jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# when in k8s these are the default values\n", + "ROOT_EMAIL = \"admin@bigquery.org\"\n", + "ROOT_PASSWORD = \"bqpw\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# in case we are not in k8s we set them here for orchestra to use\n", + "env[\"DEFAULT_ROOT_EMAIL\"] = ROOT_EMAIL\n", + "env[\"DEFAULT_ROOT_PASSWORD\"] = ROOT_PASSWORD" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Login" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"bigquery-high\",\n", + " dev_mode=True,\n", + " server_side_type=\"high\",\n", + " reset=True,\n", + " port=\"8080\",\n", + " n_consumers=1, # How many workers to be spawned\n", + " create_producer=True, # Can produce more workers\n", + ")\n", + "\n", + "client = sy.login(url=\"http://localhost:8080\", email=ROOT_EMAIL, password=ROOT_PASSWORD)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Check if this is a new server\n", + "migration_data = client.get_migration_data()\n", + "\n", + "# assert len(migration_data.store_objects[User]) == 1\n", + "# assert UserCode not in migration_data.store_objects" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "migration_data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Load migration data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pwd" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# migration_data_dir = Path(os.getenv(\"MIGRATION_DATA_DIR\", \".\"))\n", + "migration_data_dir = Path(\"/home/teo/OpenMined/PySyft/.tox/.tmp/migration\")\n", + "blob_path = migration_data_dir / \"migration.blob\"\n", + "yaml_path = migration_data_dir / \"migration.yaml\"\n", + "\n", + "print(f\"Loading migration data from {str(blob_path.resolve())}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res = client.load_migration_data(blob_path)\n", + "assert isinstance(res, sy.SyftSuccess), res.message" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sy.upgrade_custom_workerpools(client, blob_path, mode=\"auto\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Post migration tests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "TODO:\n", + " * verify users\n", + " * login\n", + " * check every role\n", + " * mostly check on lengths\n", + " * can a DS see the results of their old jobs/logs\n", + " * still use the api schema both mock \n", + " * still submit a new query via submit_query\n", + " * can admin still approve and approve_by_running, deny\n", + " * check on old broken queries\n", + " * create a new broken query\n", + " * can ds get the results of the new queries\n", + " * emails should work now\n", + " * test in k8s (both L2 and L0)\n", + " * test in L0 (migrate both nodes?)\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "users = load_users(client, path=\"0.9.1_notebooks/users.json\")\n", + "jobs = load_jobs(users, client, filepath=\"0.9.1_notebooks/jobs.json\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Check users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO fix users??" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "client.users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "users" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Old jobs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# submitted_jobs = [job for job in jobs if job.is_submitted]\n", + "reviewed_jobs = [job for job in jobs if job.admin_reviewed]\n", + "reviewed_jobs_should_succeed = [j for j in reviewed_jobs if j.should_succeed]\n", + "reviewed_jobs_should_fail = [j for j in reviewed_jobs if not j.should_succeed]\n", + "\n", + "print(\n", + " f\"{len(reviewed_jobs)=}, {len(reviewed_jobs_should_succeed)=}, {len(reviewed_jobs_should_fail)=}\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_succeed:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + "\n", + " if isinstance(res, sy.SyftError):\n", + " raise sy.SyftException(public_message=\"Expected success, got error\")\n", + "\n", + " result = res.get()\n", + " job.result_as_expected = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for job in reviewed_jobs_should_fail:\n", + " print(f\"> Checking job: {job.job_type} {job.func_name} for user {job.user_email}\")\n", + " api_method = job.code_method\n", + "\n", + " j = api_method(blocking=False)\n", + " res = j.wait()\n", + " if isinstance(res, sy.SyftError):\n", + " job.result_as_expected = True\n", + " else:\n", + " raise sy.SyftException(public_message=f\"failed, job didnt raise {type(j)}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expected_jobs = [job for job in jobs if job.result_as_expected]\n", + "print(f\"got expected_jobs: {len(expected_jobs)} == reviewed_jobs: {len(reviewed_jobs)}\")\n", + "assert len(reviewed_jobs) == len(expected_jobs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Use old DS to go through the flow again" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_client = users[0].client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Check on emails now?" 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "syft_3.12", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/scenarios/bigquery/upgradability/sync/migration_high.blob b/notebooks/scenarios/bigquery/upgradability/sync/migration_high.blob new file mode 100644 index 00000000000..4143e59c7af Binary files /dev/null and b/notebooks/scenarios/bigquery/upgradability/sync/migration_high.blob differ diff --git a/notebooks/scenarios/bigquery/upgradability/sync/migration_high.yaml b/notebooks/scenarios/bigquery/upgradability/sync/migration_high.yaml new file mode 100644 index 00000000000..c3819a0c46a --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/migration_high.yaml @@ -0,0 +1,6 @@ +server: + env: + - name: SERVER_UID + value: fbdf5a287e58454cbbd3fac4ad744d37 + - name: SERVER_PRIVATE_KEY + value: fcfd09deed32e3574558b6719fed46e0b8fd957d59608e9d8b42ef07c6080d3e diff --git a/notebooks/scenarios/bigquery/upgradability/sync/migration_low.blob b/notebooks/scenarios/bigquery/upgradability/sync/migration_low.blob new file mode 100644 index 00000000000..6abeef2b057 Binary files /dev/null and b/notebooks/scenarios/bigquery/upgradability/sync/migration_low.blob differ diff --git a/notebooks/scenarios/bigquery/upgradability/sync/migration_low.yaml b/notebooks/scenarios/bigquery/upgradability/sync/migration_low.yaml new file mode 100644 index 00000000000..c950671118c --- /dev/null +++ b/notebooks/scenarios/bigquery/upgradability/sync/migration_low.yaml @@ -0,0 +1,6 @@ +server: + env: + - name: SERVER_UID + value: 4a471a09f56b4a1d809c0a7614074283 + - name: SERVER_PRIVATE_KEY + value: 3c095c07c94d7f7aec863d61641c71c467cee08cf9a44120a9cb7a493def22cc diff --git a/notebooks/scenarios/enclave/01-primary-datasite-setup.ipynb b/notebooks/scenarios/enclave/01-primary-datasite-setup.ipynb new file mode 100644 index 00000000000..1c62302ef37 --- /dev/null +++ b/notebooks/scenarios/enclave/01-primary-datasite-setup.ipynb @@ -0,0 +1,38 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- upload model tensor\n", + "# -- create user account\n", + "# -- phase 2 add model hosting\n", + "# -- phase 3 run on kubernetes" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/enclave/02-manual-enclave-setup.ipynb b/notebooks/scenarios/enclave/02-manual-enclave-setup.ipynb new file mode 100644 index 00000000000..d7c71feb737 --- /dev/null +++ b/notebooks/scenarios/enclave/02-manual-enclave-setup.ipynb @@ -0,0 +1,38 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- create enclave server\n", + "# -- attach to primary datasite\n", + "# -- phase 2 launch python enclave dynamically instead\n", + 
"# -- phase 3 run on cloud enclave with k3d (dynamically after)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/enclave/03-secondary-datasite-setup.ipynb b/notebooks/scenarios/enclave/03-secondary-datasite-setup.ipynb new file mode 100644 index 00000000000..b056a95d85e --- /dev/null +++ b/notebooks/scenarios/enclave/03-secondary-datasite-setup.ipynb @@ -0,0 +1,37 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- upload inference tensor\n", + "# -- phase 2 inference eval dataset\n", + "# -- create user account" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/enclave/04-data-scientist-join.ipynb b/notebooks/scenarios/enclave/04-data-scientist-join.ipynb new file mode 100644 index 00000000000..6209ba8f370 --- /dev/null +++ b/notebooks/scenarios/enclave/04-data-scientist-join.ipynb @@ -0,0 +1,51 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- connect to datasites\n", + "# -- associate datasites?\n", + "# -- list enclaves\n", + "# -- find datasets\n", + "# -- execution policies\n", + "# -- phase 2 - add a hf model and custom worker image to execution policy\n", + "# -- phase 3 eager data scientist inference inputs in InputPolicy\n", + "# -- create usercode sum(a, b)\n", + "# -- submit project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/enclave/05-datasites-review.ipynb b/notebooks/scenarios/enclave/05-datasites-review.ipynb new file mode 100644 index 00000000000..c4774bafacf --- /dev/null +++ b/notebooks/scenarios/enclave/05-datasites-review.ipynb @@ -0,0 +1,41 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- review project\n", + "# -- inspect code\n", + "# -- step through execution policy\n", + "# -- query enclave attestation\n", + "# -- approve execution\n", + "# -- phase 2 - once approved everywhere, setup custom image on enclave\n", + "# -- phase 3 - once approved deploy with terraform etc" + ] + } + ], + "metadata": { + "kernelspec": { + 
"display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/enclave/06-manual-execution.ipynb b/notebooks/scenarios/enclave/06-manual-execution.ipynb new file mode 100644 index 00000000000..5ebec63ad46 --- /dev/null +++ b/notebooks/scenarios/enclave/06-manual-execution.ipynb @@ -0,0 +1,46 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- get project\n", + "# -- check project status\n", + "# -- run code\n", + "# -- get result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/enclave/07-audit-project-logs.ipynb b/notebooks/scenarios/enclave/07-audit-project-logs.ipynb new file mode 100644 index 00000000000..dfd9925e1c2 --- /dev/null +++ b/notebooks/scenarios/enclave/07-audit-project-logs.ipynb @@ -0,0 +1,36 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- datasite owners view logs from enclave on datasite\n", + "# -- step through execution policy at each step who did what" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/enclave/08-enclave-shutdown.ipynb b/notebooks/scenarios/enclave/08-enclave-shutdown.ipynb new file mode 100644 index 00000000000..f6811bc0415 --- /dev/null +++ b/notebooks/scenarios/enclave/08-enclave-shutdown.ipynb @@ -0,0 +1,35 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- primary terminates enclave" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/enclave/README.md b/notebooks/scenarios/enclave/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/notebooks/scenarios/getting-started/.gitignore b/notebooks/scenarios/getting-started/.gitignore new 
file mode 100644 index 00000000000..e96c8f2ef1a --- /dev/null +++ b/notebooks/scenarios/getting-started/.gitignore @@ -0,0 +1,2 @@ +secrets.json +*.csv \ No newline at end of file diff --git a/notebooks/scenarios/getting-started/.ruff.toml b/notebooks/scenarios/getting-started/.ruff.toml new file mode 100644 index 00000000000..4df73d6ade3 --- /dev/null +++ b/notebooks/scenarios/getting-started/.ruff.toml @@ -0,0 +1,2 @@ +[lint] +ignore = ["E501"] diff --git a/notebooks/scenarios/getting-started/01-installing-syft.ipynb b/notebooks/scenarios/getting-started/01-installing-syft.ipynb new file mode 100644 index 00000000000..79a51b4cadc --- /dev/null +++ b/notebooks/scenarios/getting-started/01-installing-syft.ipynb @@ -0,0 +1,82 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- pip install syft\n", + "# -- conda? (pycapnp lib bug??)\n", + "# -- accessing betas\n", + "# -- checking the version you have installed\n", + "# -- using the same version as the server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install -U syft" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.9,<0.9.2\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)\n", + "print(sy.__version__)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/getting-started/02-running-python-server.ipynb b/notebooks/scenarios/getting-started/02-running-python-server.ipynb new file mode 100644 index 00000000000..aa1950c62c8 --- /dev/null +++ b/notebooks/scenarios/getting-started/02-running-python-server.ipynb @@ -0,0 +1,124 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- configuration options for demo / dev mode etc\n", + "# -- how to see the web server is running on port x\n", + "# -- how to make a server accessible to other users (brief explanation of networking)\n", + "# -- Optional: for testing purposes you can use bore: https://github.com/ekzhang/bore for free\n", + "# -- Note: production mode is recommended to use kubernetes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.9,<0.9.2\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": 
{}, + "outputs": [], + "source": [ + "# run a local webserver\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "# what minimum settings do we need to be able to run jobs?\n", + "# create_producer=True,\n", + "# n_consumers=1," + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/getting-started/03-configuring-datasite.ipynb b/notebooks/scenarios/getting-started/03-configuring-datasite.ipynb new file mode 100644 index 00000000000..58294a9f86f --- /dev/null +++ b/notebooks/scenarios/getting-started/03-configuring-datasite.ipynb @@ -0,0 +1,316 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.9,<0.9.2\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'\n", + "\n", + "# syft absolute\n", + "import syft as sy # noqa: E402\n", + "from syft import test_settings # noqa: E402\n", + "\n", + "sy.requires(SYFT_VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# run a local webserver\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "# email" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "smtp_token = test_settings.get(\"smtp_token\", default=\"ADD_POSTMARK_TOKEN\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "smtp_username, smtp_password, smtp_server, smtp_port = (\n", + " smtp_token,\n", + " smtp_token,\n", + " \"smtp.postmarkapp.com\",\n", + " \"25\",\n", + ")\n", + "# configure email settings\n", + "client.api.services.settings.enable_notifications(\n", + " email_username=smtp_username,\n", + " email_password=smtp_password,\n", + " email_sender=\"madhava@openmined.org\",\n", + " email_server=smtp_server,\n", + " email_port=smtp_port,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "ns = 
client.api.services.notifications.settings()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "ns.email_enabled" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "ns.notifiers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "s = client.api.services.settings.get()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "s" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "client.api.services.settings.set(s)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "client.api.services.settings.update(\n", + " name=\"a\",\n", + " organization=\"b\",\n", + " description=\"c\",\n", + " admin_email=\"info2@openmined.org\",\n", + " association_request_auto_approval=True,\n", + " eager_execution_enabled=False,\n", + " notifications_enabled=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "md = \"\"\"\n", + " An h1 header\n", + "============\n", + "\n", + "Paragraphs are separated by a blank line.\n", + "\n", + "2nd paragraph. *Italic*, **bold**, and `monospace`. Itemized lists\n", + "look like:\n", + "\n", + " * this one\n", + " * that one\n", + " * the other one\n", + "\n", + "Note that --- not considering the asterisk --- the actual text\n", + "content starts at 4-columns in.\n", + "\n", + "> Block quotes are\n", + "> written like so.\n", + ">\n", + "> They can span multiple paragraphs,\n", + "> if you like.\n", + "\n", + "Use 3 dashes for an em-dash. Use 2 dashes for ranges (ex., \"it's all\n", + "in chapters 12--14\"). Three dots ... will be converted to an ellipsis.\n", + "Unicode is supported. 
☺\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "client.api.services.settings.welcome_preview(markdown=md)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "client.api.services.settings.welcome_customize(markdown=md)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "client.api.services.settings.welcome_show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "guest_client = client.login_as_guest()\n", + "guest_client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "guest_client.register(name=\"a\", email=\"a@b.com\", password=\"c\", password_verify=\"c\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "client.api.services.settings.allow_guest_signup(enable=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "guest_client.register(name=\"a\", email=\"a@b.com\", password=\"c\", password_verify=\"c\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "guest_client.login(email=\"a@b.com\", password=\"c\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/getting-started/04-uploading-data.ipynb b/notebooks/scenarios/getting-started/04-uploading-data.ipynb new file mode 100644 index 00000000000..56d457c984c --- /dev/null +++ b/notebooks/scenarios/getting-started/04-uploading-data.ipynb @@ -0,0 +1,323 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- what kinds of data are supported\n", + "# -- how to structure your data\n", + "# -- mock data and how to create some\n", + "# -- how much data you can store (Note: k8s requires blob storage configuration)\n", + "# -- adding metadata and uploading\n", + "# -- how to change the data later" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.9,<0.9.2\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# run 
a local webserver\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import os\n", + "\n", + "if not os.path.exists(\"ages_dataset.csv\"):\n", + " !curl -O https://openminedblob.blob.core.windows.net/csvs/ages_dataset.csv" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "import pandas as pd\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "\n", + "age_df = pd.read_csv(\"ages_dataset.csv\")\n", + "age_df = age_df.dropna(how=\"any\")\n", + "age_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "# TODO: also move to dataset repo\n", + "import os\n", + "\n", + "if not os.path.exists(\"ages_mock_dataset.csv\"):\n", + " !curl -O https://openminedblob.blob.core.windows.net/csvs/ages_mock_dataset.csv" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "age_mock_df = pd.read_csv(\"ages_mock_dataset.csv\")\n", + "age_mock_df = age_mock_df.dropna(how=\"any\")\n", + "age_mock_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# How an asset for low side and high-side would be defined:\n", + "main_contributor = sy.Contributor(\n", + " name=\"Jeffrey Salazar\", role=\"Dataset Creator\", email=\"jsala@ailab.com\"\n", + ")\n", + "\n", + "asset = sy.Asset(\n", + " name=\"asset_name\",\n", + " data=age_df, # real dataframe\n", + " mock=age_mock_df, # mock dataframe\n", + " contributors=[main_contributor],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "description_template = \"\"\"### About the dataset\n", + "This extensive dataset provides a rich collection of demographic and life events records for individuals across multiple countries. It covers a wide range of indicators and attributes related to personal information, birth and death events, gender, occupation, and associated countries. The dataset offers valuable insights into population dynamics and various aspects of human life, enabling comprehensive analyses and cross-country comparisons. The dataset is the largest one on notable deceased people and includes individ- uals from a variety of social groups, including but not limited to 107k females, 90k researchers, and 124 non-binary indi- viduals, spread across more than 300 contemporary or histor- ical regions.\n", + "\n", + "### Dataset usage policy\n", + "This dataset is subject to compliance with internal data use and mis-use policies at our organisation. 
The following rules apply:\n", + "- only aggregate statistics can be released from data computation\n", + "- data subjects should never be identifiable through the data computation outcomes\n", + "- a fixed privacy budget of eps=5 must be preserved by each researcher\n", + "\n", + "### Data collection and pre-processing\n", + "The dataset is based on open data hosted by Wikimedia Foundation.\n", + "\n", + "**Age**\n", + "Whenever possible, age was calculated based on the birth and death year mentioned in the description of the individual.\n", + "\n", + "**Gender**\n", + "Gender was available in the original dataset for 50% of participants. For the remaining, it was added from predictions based on name, country and century in which they lived. (97.51% accuracy and 98.89% F1-score)\n", + "\n", + "**Occupation**\n", + "The occupation was available in the original dataset for 66% of the individuals. For the remaining, it was added from predictions from a multiclass text classificator model. (93.4% accuracy for 84% of the dataset)\n", + "\n", + "More details about the features can be found by reading the paper.\n", + "\n", + "### Key features\n", + "1. **Id**: Unique identifier for each individual.\n", + "2. **Name**: Name of the person.\n", + "3. **Short description**: Brief description or summary of the individual.\n", + "4. **Gender**: Gender/s of the individual.\n", + "5. **Country**: Countries/Kingdoms of residence and/or origin.\n", + "6. **Occupation**: Occupation or profession of the individual.\n", + "7. **Birth year**: Year of birth for the individual.\n", + "8. **Death year**: Year of death for the individual.\n", + "9. **Manner of death**: Details about the circumstances or manner of death.\n", + "10. **Age of death**: Age at the time of death for the individual.\n", + "11. **Associated Countries**: Modern Day Countries associated with the individual.\n", + "12. **Associated Country Coordinates (Lat/Lon)**: Modern Day Latitude and longitude coordinates of the associated countries.\n", + "13. 
**Associated Country Life Expectancy**: Life expectancy of the associated countries.\n", + "\n", + "### Use cases\n", + "- Analyze demographic trends and birth rates in different countries.\n", + "- Investigate factors affecting life expectancy and mortality rates.\n", + "- Study the relationship between gender and occupation across regions.\n", + "- Explore correlations between age of death and associated country attributes.\n", + "- Examine patterns of migration and associated countries' life expectancy.\n", + "\n", + "\n", + "### Getting started\n", + "\n", + "```\n", + "!curl -O https://openminedblob.blob.core.windows.net/csvs/ages_dataset.csv\n", + "\n", + "age_df = pd.read_csv(\"ages_dataset.csv\")\n", + "```\n", + "\n", + "### Execution environment\n", + "The data is hosted in a remote compute environment with the following specifications:\n", + "- X CPU cores\n", + "- 1 GPU of type Y\n", + "- Z RAM\n", + "- A additional available storage\n", + "\n", + "### Citation\n", + "Annamoradnejad, Issa; Annamoradnejad, Rahimberdi (2022), “Age dataset: A structured general-purpose dataset on life, work, and death of 1.22 million distinguished people”, In Workshop Proceedings of the 16th International AAAI Conference on Web and Social Media (ICWSM), doi: 10.36190/2022.82\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "dataset = sy.Dataset(\n", + " name=\"Dataset name\",\n", + " description=description_template,\n", + " asset_list=[asset],\n", + " contributors=[main_contributor],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# Uploading the dataset\n", + "client.upload_dataset(dataset)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "client.datasets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "d = client.datasets[0]\n", + "d" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "a = d.assets[0]\n", + "a" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "a.mock" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "a.data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/getting-started/05-adding-users.ipynb b/notebooks/scenarios/getting-started/05-adding-users.ipynb new file mode 100644 index 00000000000..00baa8b9ada --- /dev/null +++ b/notebooks/scenarios/getting-started/05-adding-users.ipynb @@ -0,0 +1,290 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- how to enable / disable user 
registration\n", + "# -- how to create users\n", + "# -- how to reset user passwords" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# logging in as root client with default credentials\n", + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "# register a new user\n", + "client.register(\n", + " name=\"John Doe\", email=\"john@email.com\", password=\"pass\", password_verify=\"pass\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# Returns a list of all the existing users in the domain\n", + "client.users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "# Selecting an user by index\n", + "client.users[1].name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "# Selecting an user by filtering\n", + "search_results = client.users.search(email=\"john@email.com\")\n", + "search_results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "searched_user = search_results[0]\n", + "searched_user.name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# update email\n", + "searched_user.set_email(email=\"updatedjohn@email.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "searched_user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "# update password\n", + "searched_user.set_password(new_password=\"newpass\", confirm=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "client.users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "# Update info of an existing user\n", + "searched_user.update(\n", + " name=\"Updated Jane Doe\",\n", + " institution=\"My institution\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "client.users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "# validate login for new user\n", + "server.login(email=\"updatedjohn@email.com\", password=\"newpass\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# Delete user\n", + "client.users.delete(searched_user.id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "client.users" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "# Enable user registration\n", + "client.settings.allow_guest_signup(enable=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "guest_user = server.login_as_guest()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "# register the account\n", + "guest_user.register(\n", + " email=\"scientist@test.com\",\n", + " password=\"123\",\n", + " password_verify=\"123\",\n", + " name=\"Curious Scientist\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "client.users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/getting-started/06-working-with-remote-data.ipynb b/notebooks/scenarios/getting-started/06-working-with-remote-data.ipynb new file mode 100644 index 00000000000..b1bf008c081 --- /dev/null +++ b/notebooks/scenarios/getting-started/06-working-with-remote-data.ipynb @@ -0,0 +1,804 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- browsing datasets\n", + "# -- getting a pointer\n", + "# -- mock vs private\n", + "# -- Pointer UIDs\n", + "# -- choosing an input policy\n", + "# -- choosing an output policy\n", + "# -- using the syft function decorator\n", + "# -- testing code locally\n", + "# -- submitting code for approval\n", + "# -- code is denied\n", + "# -- changing code and re-uploading a new version" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "user_client = server.login(email=\"scientist@test.com\", password=\"123\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "user_client.datasets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# Not sure about getting a pointer, what needs to be added?" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "user_client.datasets[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "user_client.datasets[0].assets[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "mock_data = user_client.datasets[0].assets[0].mock\n", + "mock_data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "private_data = user_client.datasets[0].assets[0].data\n", + "private_data" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "# Printing this because mock and private data are completely different" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "private_data = admin_client.datasets[0].assets[0].data\n", + "private_data" + ] + }, + { + "cell_type": "markdown", + "id": "12", + "metadata": {}, + "source": [ + "# Standard and custom Input/Output Policies and syft function decorator" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "asset = user_client.datasets[0].assets[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "@sy.syft_function_single_use(ages_data=asset)\n", + "def how_are_people_dying_statistics(ages_data):\n", + " df = ages_data\n", + " avg_age_death_gender = (\n", + " df.groupby(\"Gender\")[\"Age of death\"].mean().reset_index(name=\"Avg_Age_of_Death\")\n", + " )\n", + " manner_of_death_count = (\n", + " df.groupby(\"Manner of death\")\n", + " .size()\n", + " .reset_index(name=\"Count\")\n", + " .sort_values(by=\"Count\", ascending=False)\n", + " )\n", + "\n", + " return (manner_of_death_count, avg_age_death_gender)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [], + "source": [ + "# stdlib\n", + "from typing import Any\n", + "\n", + "# third party\n", + "from result import Err\n", + "from result import Ok\n", + "\n", + "# syft absolute\n", + "from syft.client.api import AuthedServiceContext\n", + "from syft.client.api import ServerIdentity\n", + "\n", + "\n", + "class CustomExactMatch(sy.CustomInputPolicy):\n", + " def __init__(self, *args: Any, **kwargs: Any) -> None:\n", + " pass\n", + "\n", + " def filter_kwargs(self, kwargs, context):\n", + " # stdlib\n", + "\n", + " try:\n", + " allowed_inputs = self.allowed_ids_only(\n", + " allowed_inputs=self.inputs, kwargs=kwargs, context=context\n", + " )\n", + " results = self.retrieve_from_db(\n", + " allowed_inputs=allowed_inputs,\n", + " context=context,\n", + " )\n", + " except Exception as e:\n", + " return Err(str(e))\n", + " return results\n", + "\n", + " def retrieve_from_db(self, allowed_inputs, context):\n", + " # syft absolute\n", + " from syft import ServerType\n", + " from syft.service.action.action_object import TwinMode\n", + "\n", + " action_service = context.server.get_service(\"actionservice\")\n", + " code_inputs = {}\n", + "\n", + " # When we are retrieving the code from the database, we need to use the server's\n", + " # verify key as the credentials. 
This is because when we approve the code, we\n", + " # we allow the private data to be used only for this specific code.\n", + " # but we are not modifying the permissions of the private data\n", + "\n", + " root_context = AuthedServiceContext(\n", + " server=context.server, credentials=context.server.verify_key\n", + " )\n", + " if context.server.server_type == ServerType.DATASITE:\n", + " for var_name, arg_id in allowed_inputs.items():\n", + " kwarg_value = action_service._get(\n", + " context=root_context,\n", + " uid=arg_id,\n", + " twin_mode=TwinMode.NONE,\n", + " has_permission=True,\n", + " )\n", + " if kwarg_value.is_err():\n", + " return Err(kwarg_value.err())\n", + " code_inputs[var_name] = kwarg_value.ok()\n", + " else:\n", + " raise Exception(\n", + " f\"Invalid Server Type for Code Submission:{context.server.server_type}\"\n", + " )\n", + " return Ok(code_inputs)\n", + "\n", + " def allowed_ids_only(\n", + " self,\n", + " allowed_inputs,\n", + " kwargs,\n", + " context,\n", + " ):\n", + " # syft absolute\n", + " from syft import ServerType\n", + " from syft import UID\n", + "\n", + " if context.server.server_type == ServerType.DATASITE:\n", + " server_identity = ServerIdentity(\n", + " server_name=context.server.name,\n", + " server_id=context.server.id,\n", + " verify_key=context.server.signing_key.verify_key,\n", + " )\n", + " allowed_inputs = allowed_inputs.get(server_identity, {})\n", + " else:\n", + " raise Exception(\n", + " f\"Invalid Server Type for Code Submission:{context.server.server_type}\"\n", + " )\n", + " filtered_kwargs = {}\n", + " for key in allowed_inputs.keys():\n", + " if key in kwargs:\n", + " value = kwargs[key]\n", + " uid = value\n", + " if not isinstance(uid, UID):\n", + " uid = getattr(value, \"id\", None)\n", + "\n", + " if uid != allowed_inputs[key]:\n", + " raise Exception(\n", + " f\"Input with uid: {uid} for `{key}` not in allowed inputs: {allowed_inputs}\"\n", + " )\n", + " filtered_kwargs[key] = value\n", + " return filtered_kwargs\n", + "\n", + " def _is_valid(\n", + " self,\n", + " context,\n", + " usr_input_kwargs,\n", + " code_item_id,\n", + " ):\n", + " filtered_input_kwargs = self.filter_kwargs(\n", + " kwargs=usr_input_kwargs,\n", + " context=context,\n", + " )\n", + "\n", + " if filtered_input_kwargs.is_err():\n", + " return filtered_input_kwargs\n", + "\n", + " filtered_input_kwargs = filtered_input_kwargs.ok()\n", + "\n", + " expected_input_kwargs = set()\n", + " for _inp_kwargs in self.inputs.values():\n", + " for k in _inp_kwargs.keys():\n", + " if k not in usr_input_kwargs:\n", + " return Err(f\"Function missing required keyword argument: '{k}'\")\n", + " expected_input_kwargs.update(_inp_kwargs.keys())\n", + "\n", + " permitted_input_kwargs = list(filtered_input_kwargs.keys())\n", + " not_approved_kwargs = set(expected_input_kwargs) - set(permitted_input_kwargs)\n", + " if len(not_approved_kwargs) > 0:\n", + " return Err(\n", + " f\"Input arguments: {not_approved_kwargs} to the function are not approved yet.\"\n", + " )\n", + " return Ok(True)\n", + "\n", + "\n", + "def allowed_ids_only(\n", + " self,\n", + " allowed_inputs,\n", + " kwargs,\n", + " context,\n", + "):\n", + " # syft absolute\n", + " from syft import ServerType\n", + " from syft import UID\n", + " from syft.client.api import ServerIdentity\n", + "\n", + " if context.server.server_type == ServerType.DATASITE:\n", + " server_identity = ServerIdentity(\n", + " server_name=context.server.name,\n", + " server_id=context.server.id,\n", + " 
verify_key=context.server.signing_key.verify_key,\n", + " )\n", + " allowed_inputs = allowed_inputs.get(server_identity, {})\n", + " else:\n", + " raise Exception(\n", + " f\"Invalid Server Type for Code Submission:{context.server.server_type}\"\n", + " )\n", + " filtered_kwargs = {}\n", + " for key in allowed_inputs.keys():\n", + " if key in kwargs:\n", + " value = kwargs[key]\n", + " uid = value\n", + " if not isinstance(uid, UID):\n", + " uid = getattr(value, \"id\", None)\n", + "\n", + " if uid != allowed_inputs[key]:\n", + " raise Exception(\n", + " f\"Input with uid: {uid} for `{key}` not in allowed inputs: {allowed_inputs}\"\n", + " )\n", + " filtered_kwargs[key] = value\n", + " return filtered_kwargs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [], + "source": [ + "class RepeatedCallPolicy(sy.CustomOutputPolicy):\n", + " n_calls: int = 0\n", + " downloadable_output_args: list[str] = []\n", + " state: dict[Any, Any] = {}\n", + "\n", + " def __init__(self, n_calls=1, downloadable_output_args: list[str] = None):\n", + " self.downloadable_output_args = (\n", + " downloadable_output_args if downloadable_output_args is not None else []\n", + " )\n", + " self.n_calls = n_calls\n", + " self.state = {\"counts\": 0}\n", + "\n", + " def public_state(self):\n", + " return self.state[\"counts\"]\n", + "\n", + " def update_policy(self, context, outputs):\n", + " self.state[\"counts\"] += 1\n", + "\n", + " def apply_to_output(self, context, outputs, update_policy=True):\n", + " if hasattr(outputs, \"syft_action_data\"):\n", + " outputs = outputs.syft_action_data\n", + " output_dict = {}\n", + " if self.state[\"counts\"] < self.n_calls:\n", + " for output_arg in self.downloadable_output_args:\n", + " output_dict[output_arg] = outputs[output_arg]\n", + " if update_policy:\n", + " self.update_policy(context, outputs)\n", + " else:\n", + " return None\n", + " return output_dict\n", + "\n", + " def _is_valid(self, context):\n", + " return self.state[\"counts\"] < self.n_calls" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "@sy.syft_function(\n", + " input_policy=CustomExactMatch(ages_data=asset),\n", + " output_policy=RepeatedCallPolicy(n_calls=10, downloadable_output_args=[\"y\"]),\n", + ")\n", + "def how_are_people_dying_statistics_custom(ages_data):\n", + " df = ages_data\n", + " avg_age_death_gender = (\n", + " df.groupby(\"Gender\")[\"Age of death\"].mean().reset_index(name=\"Avg_Age_of_Death\")\n", + " )\n", + " manner_of_death_count = (\n", + " df.groupby(\"Manner of death\")\n", + " .size()\n", + " .reset_index(name=\"Count\")\n", + " .sort_values(by=\"Count\", ascending=False)\n", + " )\n", + "\n", + " return (manner_of_death_count, avg_age_death_gender)" + ] + }, + { + "cell_type": "markdown", + "id": "18", + "metadata": {}, + "source": [ + "# Test on mock data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "pointer = how_are_people_dying_statistics(ages_data=asset)\n", + "result = pointer.get()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "result[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "result[1]" + ] + }, + { + "cell_type": "markdown", + "id": 
"22", + "metadata": {}, + "source": [ + "# Submit code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "# Create a new project\n", + "new_project = sy.Project(\n", + " name=\"The project about death\",\n", + " description=\"Hi, I want to calculate some statistics on how folks are dying\",\n", + " members=[user_client],\n", + ")\n", + "new_project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "result = new_project.create_code_request(how_are_people_dying_statistics, user_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": {}, + "outputs": [], + "source": [ + "project = new_project.send()\n", + "project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.service.request.request import RequestStatus\n", + "\n", + "retrieved_project = user_client.get_project(name=\"The project about death\")\n", + "assert retrieved_project\n", + "assert len(retrieved_project.events) == 1\n", + "assert isinstance(\n", + " retrieved_project.events[0], sy.service.project.project.ProjectRequest\n", + ")\n", + "assert retrieved_project.events[0].request.status == RequestStatus.PENDING" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "retrieved_project.requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "result = user_client.code.how_are_people_dying_statistics(ages_data=asset)\n", + "result" + ] + }, + { + "cell_type": "markdown", + "id": "30", + "metadata": {}, + "source": [ + "# Code is denied" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31", + "metadata": {}, + "outputs": [], + "source": [ + "admin_client.projects" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32", + "metadata": {}, + "outputs": [], + "source": [ + "project_view = admin_client.projects[0]\n", + "project_view.requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33", + "metadata": {}, + "outputs": [], + "source": [ + "request = project_view.requests[0]\n", + "request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34", + "metadata": {}, + "outputs": [], + "source": [ + "func = request.code\n", + "func" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35", + "metadata": {}, + "outputs": [], + "source": [ + "func.show_code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36", + "metadata": {}, + "outputs": [], + "source": [ + "asset_view = func.assets[0]\n", + "asset_view.data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "37", + "metadata": {}, + "outputs": [], + "source": [ + "result = request.deny(\n", + " reason=(\n", + " \"The Submitted UserCode is too grim in it's study. \\\n", + " Go study something else. Like music or something. 
\\\n", + " Also, please try other policies.\"\n", + " )\n", + ")\n", + "result" + ] + }, + { + "cell_type": "markdown", + "id": "38", + "metadata": {}, + "source": [ + "# Change code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39", + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [], + "source": [ + "@sy.syft_function(\n", + " input_policy=CustomExactMatch(ages_data=asset),\n", + " output_policy=RepeatedCallPolicy(n_calls=10, downloadable_output_args=[\"y\"]),\n", + ")\n", + "def how_are_people_dying_statistics(ages_data):\n", + " df = ages_data\n", + " df[\"Lifespan\"] = df[\"Death year\"] - df[\"Birth year\"]\n", + " longest_lifespan = df.sort_values(by=\"Lifespan\", ascending=False).head(1)[\n", + " [\"Name\", \"Lifespan\"]\n", + " ]\n", + "\n", + " return longest_lifespan\n", + "\n", + "\n", + "@sy.syft_function(\n", + " input_policy=CustomExactMatch(ages_data=asset),\n", + " output_policy=RepeatedCallPolicy(n_calls=10, downloadable_output_args=[\"y\"]),\n", + ")\n", + "def how_long_are_people_living_statistics(ages_data):\n", + " df = ages_data\n", + " df[\"Lifespan\"] = df[\"Death year\"] - df[\"Birth year\"]\n", + " longest_lifespan = df.sort_values(by=\"Lifespan\", ascending=False).head(1)[\n", + " [\"Name\", \"Lifespan\"]\n", + " ]\n", + "\n", + " return longest_lifespan" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "40", + "metadata": {}, + "outputs": [], + "source": [ + "# Two ways to add code for execution\n", + "user_client.code.request_code_execution(code=how_are_people_dying_statistics)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "41", + "metadata": {}, + "outputs": [], + "source": [ + "retrieved_project = user_client.get_project(name=\"The project about death\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42", + "metadata": {}, + "outputs": [], + "source": [ + "result = retrieved_project.create_code_request(\n", + " how_long_are_people_living_statistics, user_client\n", + ")\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43", + "metadata": {}, + "outputs": [], + "source": [ + "user_client.code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44", + "metadata": {}, + "outputs": [], + "source": [ + "user_client.code_history" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45", + "metadata": {}, + "outputs": [], + "source": [ + "retrieved_project.requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "46", + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "47", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/getting-started/07-reviewing-user-code.ipynb b/notebooks/scenarios/getting-started/07-reviewing-user-code.ipynb new file mode 100644 index 00000000000..3055bdfab22 --- /dev/null +++ b/notebooks/scenarios/getting-started/07-reviewing-user-code.ipynb @@ -0,0 +1,245 @@ +{ + 
"cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- notifications of code requests\n", + "# -- requests queue\n", + "# -- reviewing code\n", + "# -- carefully testing code\n", + "# -- approve / deny code\n", + "# -- substituting a result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "# user_client = server.login(email=\"scientist@test.com\", password=\"123\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "mock_data = admin_client.datasets[0].assets[0].mock\n", + "pvt_data = admin_client.datasets[0].assets[0].data" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "# View code and aspects of code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "admin_client.projects" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "project = admin_client.projects[0]\n", + "project.requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "request = project.requests[1]\n", + "request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "func = request.code\n", + "func" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "func.show_code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "func.input_policy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "func.output_policy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "users_function = func.run" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "mock_result = users_function(ages_data=mock_data)\n", + "mock_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "real_result = users_function(ages_data=pvt_data)\n", + "real_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "result = request.approve()\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(result, sy.SyftSuccess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "project.requests" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "type(func.input_policy)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/getting-started/08-running-code.ipynb b/notebooks/scenarios/getting-started/08-running-code.ipynb new file mode 100644 index 00000000000..47dbecaa381 --- /dev/null +++ b/notebooks/scenarios/getting-started/08-running-code.ipynb @@ -0,0 +1,171 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- executing approved code\n", + "# -- working with jobs\n", + "# -- refreshing\n", + "# -- viewing logs\n", + "# -- getting final result\n", + "# -- success" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "user_client = server.login(email=\"scientist@test.com\", password=\"123\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "asset = user_client.datasets[0].assets[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "retrieved_project = user_client.get_project(name=\"The project about death\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "retrieved_project.requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "job = user_client.code.how_are_people_dying_statistics(ages_data=asset, blocking=False)\n", + "job" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "job.wait()" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "### Other exploration " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "user_client.code[1]" + ] + }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, + "source": [ + "### Figure out how to programmatically get access to the function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "result_pointer = user_client.code.how_are_people_dying_statistics(ages_data=asset)\n", + "result_pointer" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + 
"metadata": {}, + "outputs": [], + "source": [ + "result_pointer = user_client.code.how_long_are_people_living_statistics(ages_data=asset)\n", + "result_pointer" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/getting-started/README.md b/notebooks/scenarios/getting-started/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/notebooks/scenarios/reverse-tunnel/01-why-reverse-tunnel.ipynb b/notebooks/scenarios/reverse-tunnel/01-why-reverse-tunnel.ipynb new file mode 100644 index 00000000000..0d664464fb1 --- /dev/null +++ b/notebooks/scenarios/reverse-tunnel/01-why-reverse-tunnel.ipynb @@ -0,0 +1,37 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- include cleaned up diagram?\n", + "# -- NAT Firewall problem\n", + "# -- Solution: rathole" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/reverse-tunnel/02-creating-gateway.ipynb b/notebooks/scenarios/reverse-tunnel/02-creating-gateway.ipynb new file mode 100644 index 00000000000..53244333934 --- /dev/null +++ b/notebooks/scenarios/reverse-tunnel/02-creating-gateway.ipynb @@ -0,0 +1,36 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- helm install\n", + "# -- deploy gateway k8s to azure" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/reverse-tunnel/03-network-configuration.ipynb b/notebooks/scenarios/reverse-tunnel/03-network-configuration.ipynb new file mode 100644 index 00000000000..95924fddfd1 --- /dev/null +++ b/notebooks/scenarios/reverse-tunnel/03-network-configuration.ipynb @@ -0,0 +1,37 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- ingress\n", + "# -- open port for rathole server\n", + "# -- serverport vs websockets" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + 
"version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/reverse-tunnel/04-setup-datasite-with-tunnel.ipynb b/notebooks/scenarios/reverse-tunnel/04-setup-datasite-with-tunnel.ipynb new file mode 100644 index 00000000000..fa266bb60c1 --- /dev/null +++ b/notebooks/scenarios/reverse-tunnel/04-setup-datasite-with-tunnel.ipynb @@ -0,0 +1,36 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- deploy local docker k3d\n", + "# -- enable rathole reverse tunnel" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/reverse-tunnel/05-connect-to-gateway-over-tunnel.ipynb b/notebooks/scenarios/reverse-tunnel/05-connect-to-gateway-over-tunnel.ipynb new file mode 100644 index 00000000000..0af789725dc --- /dev/null +++ b/notebooks/scenarios/reverse-tunnel/05-connect-to-gateway-over-tunnel.ipynb @@ -0,0 +1,36 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- run connection request\n", + "# -- approve request" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/reverse-tunnel/06-proxy-clients.ipynb b/notebooks/scenarios/reverse-tunnel/06-proxy-clients.ipynb new file mode 100644 index 00000000000..ca07aecc592 --- /dev/null +++ b/notebooks/scenarios/reverse-tunnel/06-proxy-clients.ipynb @@ -0,0 +1,36 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- how to list datasites on gateway\n", + "# -- getting a proxy client" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/reverse-tunnel/07-blob-storage-streaming.ipynb b/notebooks/scenarios/reverse-tunnel/07-blob-storage-streaming.ipynb new file mode 100644 index 00000000000..0473d5eeb76 --- /dev/null +++ b/notebooks/scenarios/reverse-tunnel/07-blob-storage-streaming.ipynb @@ -0,0 +1,35 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- checking upload and download to blob storage work" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + 
}, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/reverse-tunnel/08-debugging.ipynb b/notebooks/scenarios/reverse-tunnel/08-debugging.ipynb new file mode 100644 index 00000000000..a098afd525a --- /dev/null +++ b/notebooks/scenarios/reverse-tunnel/08-debugging.ipynb @@ -0,0 +1,40 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- include cleaned up diagram?\n", + "# -- tunnel config file and config maps\n", + "# -- determining ip addresses and testing ports are available\n", + "# -- running curl from inside the containers\n", + "# -- the internal host / header on the gateway\n", + "# -- checking logs" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/reverse-tunnel/README.md b/notebooks/scenarios/reverse-tunnel/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/notebooks/tutorials/data-engineer/01-setting-up-dev-mode.ipynb b/notebooks/tutorials/data-engineer/01-setting-up-dev-mode.ipynb deleted file mode 100644 index c0805affa3b..00000000000 --- a/notebooks/tutorials/data-engineer/01-setting-up-dev-mode.ipynb +++ /dev/null @@ -1,371 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Setting up Dev Mode" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "If you would like to work on the PySyft codebase, you can set up PySyft in dev mode. You will need to clone the repository, install syft locally and run the code you installed" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "## Cloning the Repo" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "First, we start by cloning the repo" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "If you have an SSH key enabled in your github account, use" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "`git clone git@github.com:OpenMined/PySyft.git`" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "Otherwise use" - ] - }, - { - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "`git clone https://github.com/OpenMined/PySyft.git`" - ] - }, - { - "cell_type": "markdown", - "id": "8", - "metadata": {}, - "source": [ - "## Installing Syft" - ] - }, - { - "cell_type": "markdown", - "id": "9", - "metadata": {}, - "source": [ - "To install Syft `cd` into the directory in which you cloned PySyft and type\n", - "\n", - "```bash\n", - "pip install -e packages/syft\n", - "```\n", - "\n", - "This installs `syft` in editable mode, such any change in code are reflected in your environment." 
- ] - }, - { - "cell_type": "markdown", - "id": "10", - "metadata": {}, - "source": [ - "## Running Tox Tests" - ] - }, - { - "cell_type": "markdown", - "id": "11", - "metadata": {}, - "source": [ - "[Tox](https://tox.wiki/en/latest/) is a project that \"aims to automate and standardize testing in Python\". For PySyft development, it is used to simplify testing and setting up several environment in a way that works for every developer working on PySyft. You can list the commands that you can execute using `tox-l`, which will give a result similar to this" - ] - }, - { - "cell_type": "markdown", - "id": "12", - "metadata": {}, - "source": [ - "```\n", - "> tox -l\n", - "\n", - "hagrid.publish\n", - "lint\n", - "stack.test.integration\n", - "syft.docs\n", - "syft.jupyter\n", - "syft.publish\n", - "syft.test.security\n", - "syft.test.unit\n", - "syft.test.notebook\n", - "stack.test.notebook\n", - "stack.test.integration.enclave.oblv\n", - "stack.test.vm\n", - "frontend.test.unit\n", - "frontend.test.e2e\n", - "frontend.generate.types\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "13", - "metadata": {}, - "source": [ - "This shows us the list of environments that are specified for PySyft. To see what these environments do, have a look at the `tox.ini` file in the main PySyft repo." - ] - }, - { - "cell_type": "markdown", - "id": "14", - "metadata": {}, - "source": [ - "You can run an environment using `tox -e `. For instance, to run the unit tests, run" - ] - }, - { - "cell_type": "markdown", - "id": "15", - "metadata": {}, - "source": [ - "```\n", - "tox -e syft.test.unit\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "16", - "metadata": {}, - "source": [ - "This tox environment is relatively simple, and just uses pytest to run all the tests for the syft packages. However, some environments are more complicated, and run a series of commands that start multiple processes, docker containers and set up a lot of infrastructure before running the tests. The good thing is that with tox, you dont need to worry about that, you can just run the commands." - ] - }, - { - "cell_type": "markdown", - "id": "17", - "metadata": {}, - "source": [ - "## Using Jupyter Environment" - ] - }, - { - "cell_type": "markdown", - "id": "18", - "metadata": {}, - "source": [ - "Pysyft has a tox command to set up a local jupyter notebook environment, which is useful for development." - ] - }, - { - "cell_type": "markdown", - "id": "19", - "metadata": {}, - "source": [ - "```\n", - "tox -e syft.jupyter\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "20", - "metadata": {}, - "source": [ - "PySyft makes extensive use of jupyter notebook, and a lot of developers use it for experiments when writing code. It can be useful to setup a local gitignore (only for you, not pushed to git) to have a playground where you can experiment, without needing to push files to git, or change the .gitignore. You can do this by adding a folder to your `.git/info/exclude` file, which works similar to the `.gitignore` file, e.g. 
if we add\n", - "```\n", - "notebooks/experimental/* \n", - "```\n", - "to `.git/info/exclude`, git wont sync the changes to the `experimental` folder to github\n", - "\n", - "`Note:` For developers in MS Windows, before development make sure that your development path does not contain any white spaces in between.\n", - "\n", - "Example:\n", - " \n", - "**Invalid Path:** `D:/test space/new env/openmined/PySyft`\n", - "\n", - "**Valid Path:** `D:/test-space/new_env/openmined/PySyft`\n", - "\n", - "The issue with paths containing spaces causing problems on Windows is due to the way that Windows handles file paths, but as long as the development path is free of white spaces, you are good to go. This is not a specific issue related to PySyft." - ] - }, - { - "cell_type": "markdown", - "id": "21", - "metadata": {}, - "source": [ - "## Working with Python Domain" - ] - }, - { - "cell_type": "markdown", - "id": "22", - "metadata": {}, - "source": [ - "PySyft enables a network of computers to connect to each other and do privacy preserving data analysis. The Nodes in the network that hold some data are called `Domains`. When we develop with PySyft, it is very common to start a domain as the first step. `PySyft` makes it very easy to develop against a domain in a notebook by providing an interface (`sy.orchestra`) that allows you to start a domain with a webserver in a notebook in the background, which is a lightweight version of a Domain that would be used in production. You can specify options such as what kind of database you are using, whether you want to use networking and how many processes you want to use. You can launch a Domain by simply executing:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "node = sy.orchestra.launch(\n", - " name=\"dev-mode-example-domain-1\", port=8020, reset=True, dev_mode=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "25", - "metadata": {}, - "source": [ - "If we dont need a webserver (for development this is true in many cases), we can omit the port and use. \n", - "```\n", - "node = sy.orchestra.launch(name=\"dev-mode-example-domain-1\", dev_mode=True, reset=True)\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "26", - "metadata": {}, - "source": [ - "**One of the benefits of not using a port is that you can use a debugger and set breakpoints within api calls. This makes debugging way faster in many cases**" - ] - }, - { - "cell_type": "markdown", - "id": "27", - "metadata": {}, - "source": [ - "Now, we are ready to start using the domain. The domain comes with standard login credentials for the admin (just for development)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "28", - "metadata": {}, - "outputs": [], - "source": [ - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "markdown", - "id": "29", - "metadata": {}, - "source": [ - "Once you are logged in, you are ready to start using the domain, for instance for creating a dataset (this one is empty, just as a example)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "30", - "metadata": {}, - "outputs": [], - "source": [ - "dataset = sy.Dataset(name=\"my dataset\", asset_list=[])\n", - "client.upload_dataset(dataset)" - ] - }, - { - "cell_type": "markdown", - "id": "31", - "metadata": {}, - "source": [ - "Lastly to stop or terminate your Domain, we can execute the following command:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "32", - "metadata": {}, - "outputs": [], - "source": [ - "node.land()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "33", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": true - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-engineer/02-deployment-types.ipynb b/notebooks/tutorials/data-engineer/02-deployment-types.ipynb deleted file mode 100644 index b4c43e5929d..00000000000 --- a/notebooks/tutorials/data-engineer/02-deployment-types.ipynb +++ /dev/null @@ -1,378 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Deployment Types" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "## Dev Python Domain\n" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "Syft supports creating a Python domain in editable mode.\n", - "This is used mainly for experimental and development purposes.\n", - "In __Dev Python Domain__ the domain instance runs locally using the SQLite as the main storage.\n", - "This enables faster development and requires less recources to operate.\n", - "\n", - "The __Dev Python Domain__ supports two options:\n", - "1. Memory node - full `syft` functionality __locally__, SQLite as a local storage.\n", - "2. Webserver node - full `syft` functionality with API \n", - "\n", - "__When you need this?__
\n", - "_When you want to develop Syft or try-out new funcitonality from separate branch._\n", - "\n", - "__Prerequistes:__
\n", - "1. Syft repository pulled from Github - [github.com/OpenMined/PySyft](https://github.com/OpenMined/PySyft)\n", - "\n", - "For broader explanation refer to the notebook [01-setting-dev-mode.ipynb](https://github.com/OpenMined/PySyft/blob/dev/notebooks/tutorials/data-engineer/01-setting-up-dev-mode.ipynb)\n", - "\n", - "To launch the local __Dev Python Domain__ use the following steps:" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "#### 1.1 Launch Dev Memory Node" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "memory_node = sy.Orchestra.launch(\n", - " name=\"Arbitrary Dev Node\",\n", - " dev_mode=True,\n", - " reset=True,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "assert memory_node is not None" - ] - }, - { - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "#### 1.2 Launch Dev Webserver Node" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "webserver_node = sy.Orchestra.launch(\n", - " name=\"Arbitrary Webserver Dev Node\", dev_mode=True, reset=True, port=8081\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "assert webserver_node is not None" - ] - }, - { - "cell_type": "markdown", - "id": "10", - "metadata": {}, - "source": [ - "#### 2. Login Into Nodes" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "memory_node_client = memory_node.login(\n", - " email=\"info@openmined.org\", password=\"changethis\"\n", - ")\n", - "memory_node_client" - ] - }, - { - "cell_type": "markdown", - "id": "12", - "metadata": {}, - "source": [ - "#### 3. Landing Memory and Webserver Node" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13", - "metadata": {}, - "outputs": [], - "source": [ - "memory_node.land()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": {}, - "outputs": [], - "source": [ - "webserver_node.land()" - ] - }, - { - "cell_type": "markdown", - "id": "15", - "metadata": {}, - "source": [ - "----" - ] - }, - { - "cell_type": "markdown", - "id": "16", - "metadata": {}, - "source": [ - "## Single Container / Enclave (TBD)" - ] - }, - { - "cell_type": "markdown", - "id": "17", - "metadata": {}, - "source": [ - "Single Container deployment is used when fast and painless deployment of `syft` with all essential functionality is needed. This deployment type contains the `syft` and SQLite as a light-weight database in a single container.\n", - "\n", - "__When you need this?__
\n", - "_When you quickly want to test syft in a single container._\n", - "\n", - "__Prerequistes:__
\n", - "1. Syft repository pulled from Github - [github.com/OpenMined/PySyft](https://github.com/OpenMined/PySyft)\n", - "1. Docker Installed - [docs.docker.com/get-docker](https://docs.docker.com/get-docker/)\n" - ] - }, - { - "cell_type": "markdown", - "id": "18", - "metadata": {}, - "source": [ - "#### Deploy Syft in Single Container Mode" - ] - }, - { - "cell_type": "markdown", - "id": "19", - "metadata": {}, - "source": [ - "Enter the PySyft Repository and run the following command\n", - "\n", - "`docker run -it -e DEFAULT_ROOT_PASSWORD=secret -e PORT=8080 -p 8080:8080 openmined/grid-enclave:0.8.1`\n", - "\n", - "----" - ] - }, - { - "cell_type": "markdown", - "id": "20", - "metadata": {}, - "source": [ - "## Full Container Stack" - ] - }, - { - "cell_type": "markdown", - "id": "21", - "metadata": {}, - "source": [ - "Syft can operate as a container stack. This setting consider deployment of following containers:\n", - " - Backend - contains `Syft` and corresponding logic to execute code in _sync_ manner\n", - " - Backend Stream - contains `Syft` and logic to queue message in RabbitMQ\n", - " - Celery Worker - contains `Syft` and logic to execute message received from RabbitMQ\n", - " - RabbitMQ - receives messages from Backend Stream and passes them into Celery Worker\n", - " - Redis - each `syft` object has a `UUID`, and stored in Redis as a `key`/`value` pair\n", - " - Mongo - Stores non-private metadata that are related to `grid` operation, such as __RBAC__ or `BLOB`s metadata \n", - " - SeaweedFS - Stores the `BLOB`s, compatible with Amazon S3 protocols\n", - " - Jaeger - distributed end-to-end tracing\n", - "\n", - "__When you need this?__
\n", - "_When you need a Syft domain/gateway node locally._\n", - "\n", - "__Prerequistes:__
\n", - "1. Syft installed - [pypi.org/project/syft](https://pypi.org/project/syft/)\n", - "1. Hagrid installed - [pypi.org/project/syft](https://pypi.org/project/syft/)\n", - "1. Docker Installed - [docs.docker.com/get-docker](https://docs.docker.com/get-docker/)\n", - "\n", - "\n", - "Easiest way to launch the Full Container Stack is the `hagrid` cli tool.\n", - "\n", - "Basic syntax of Hagrdi deployment command is the following:
\n", - "> `hagrid launch to :`\n", - "\n", - "To deploy the full container stack use the following command:
\n", - "\n", - "> `hagrid launch test_domain domain to docker:8081`\n", - "\n", - "For detailed explanation of Full Container Stack deployment refer to the notebook [05-deploy-stack.ipynb](https://github.com/OpenMined/PySyft/blob/dev/notebooks/tutorials/data-engineer/05-deploy-stack.ipynb)" - ] - }, - { - "cell_type": "markdown", - "id": "22", - "metadata": {}, - "source": [ - "----" - ] - }, - { - "cell_type": "markdown", - "id": "23", - "metadata": {}, - "source": [ - "## VM Container Host" - ] - }, - { - "cell_type": "markdown", - "id": "24", - "metadata": {}, - "source": [ - "Ability to easily deploy `syft` stack to __anywhere__. By anywhere we mean an existing linux server accessible via `ssh` connection. `hagrid` cli tool can do all the hard work for us, by defining the desired system state using `ansible` and deploying all containers (defined in the previous section).\n", - "\n", - "__When you need this?__
\n", - "_When you need to deploy Syft domain/gateway node on a remote host, whether Virtual Machine or real Linux server._\n", - "\n", - "__Prerequistes:__
\n", - "1. Syft installed - [pypi.org/project/syft](https://pypi.org/project/syft/)\n", - "2. Hagrid installed - [pypi.org/project/syft](https://pypi.org/project/syft/)\n", - "3. VM accessible via SSH\n", - "\n", - "Deploy Syft `domain`/`network` node to the remote VM using following command:\n", - "\n", - "> `hagrid launch test_domain domain to 100.0.0.1 --username=ubuntu --auth-type=key --key-path=~/.ssh/hagrid_ssh_key`\n", - "\n", - "All flags marked with `--` are optional, if not provided `hagrid` will interactively ask you to provide all necessary details. More details on `hagrid` usage can be found in following notebook [03-hagrid.ipynb](https://github.com/OpenMined/PySyft/blob/dev/notebooks/tutorials/data-engineer/03-hagrid.ipynb)\n", - "\n", - "If you want to deploy to Cloud providers reffer to corresponding notebook:\n", - "- Azure - [06-deploy-to-azure.ipynb](https://github.com/OpenMined/PySyft/blob/dev/notebooks/tutorials/data-engineer/06-deploy-to-azure.ipynb)\n", - "- GCP - [07-deploy-to-gcp.ipynb](https://github.com/OpenMined/PySyft/blob/dev/notebooks/tutorials/data-engineer/07-deploy-to-gcp.ipynb)\n", - "- AWS - [08-deploy-to-aws.ipynb](https://github.com/OpenMined/PySyft/blob/dev/notebooks/tutorials/data-engineer/08-deploy-to-aws.ipynb)\n", - "\n", - ">__Note__: VM Container Host supports deployment _only from Linux or MacOS_ machines, since it requires `ansible`
that is not supported by Windows \n" - ] - }, - { - "cell_type": "markdown", - "id": "25", - "metadata": {}, - "source": [ - "----" - ] - }, - { - "cell_type": "markdown", - "id": "26", - "metadata": {}, - "source": [ - "## Gateway Nodes" - ] - }, - { - "cell_type": "markdown", - "id": "27", - "metadata": {}, - "source": [ - "Gateway Nodes are used to interconnect multiple `domain` nodes.\n", - "Essentially, `gateway` nodes use the same containers and code, although with different configurations.\n", - "`gateway` nodes do not have the Frontend and Blob storage. \n", - "\n", - "__When you need this?__
\n", - "_When you need to interconnect two or more domain nodes._\n", - "\n", - "__Prerequistes:__
\n", - "1. Syft installed - [pypi.org/project/syft](https://pypi.org/project/syft/)\n", - "1. Hagrid installed - [pypi.org/project/syft](https://pypi.org/project/syft/)\n", - "1. Docker installed or SSH connection to VM\n", - "\n", - "The `hagrid` cli can be used to deploy the `gateway` nodes, as a local container stack deployment or remote VM host deployment.\n", - "\n", - "To deploy `gateway` node us the following command:
\n", - "> `hagrid launch gateway to :`\n", - "\n", - "Example of launching the `gateway` node called `test-gateway`:
\n", - "> `hagrid launch test-gateway gateway to docker:9082`\n" - ] - }, - { - "cell_type": "markdown", - "id": "28", - "metadata": {}, - "source": [ - "----" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-engineer/03-hagrid.ipynb b/notebooks/tutorials/data-engineer/03-hagrid.ipynb deleted file mode 100644 index 3ad7cf9c25d..00000000000 --- a/notebooks/tutorials/data-engineer/03-hagrid.ipynb +++ /dev/null @@ -1,73 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# HAGrid" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Installing HAGrid" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "## Python PATH" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "## Debugging HAGrid" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "## Ansible and Windows" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-engineer/04-deploy-container.ipynb b/notebooks/tutorials/data-engineer/04-deploy-container.ipynb deleted file mode 100644 index dd016d74ae5..00000000000 --- a/notebooks/tutorials/data-engineer/04-deploy-container.ipynb +++ /dev/null @@ -1,107 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Deploying a Container" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Docker 1-liner" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "```\n", - "$ docker run -it -e DEFAULT_ROOT_PASSWORD=secret -e PORT=8080 -p 8080:8080 openmined/grid-enclave:0.8.2.b0\n", - "```" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "## Azure CLI" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "$ az group create --name test-container --location eastus" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "$ az container create --resource-group test-container --name syft --image openmined/grid-enclave:0.8.2.b0 --dns-name-label syft-demo --ports 80 --environment-variables PORT=80 DEFAULT_ROOT_PASSWORD=secret" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "## From HAGrid" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - 
"id": "7", - "metadata": {}, - "source": [ - "## Volume Mounts" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-engineer/05-deploy-stack.ipynb b/notebooks/tutorials/data-engineer/05-deploy-stack.ipynb deleted file mode 100644 index 2ac0fcc7dff..00000000000 --- a/notebooks/tutorials/data-engineer/05-deploy-stack.ipynb +++ /dev/null @@ -1,81 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Deploy the Stack" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Docker Compose" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "## HAGrid" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "## Build Source" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "## Volume Mounts" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "## Docker Networks" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-engineer/06-deploy-to-azure.ipynb b/notebooks/tutorials/data-engineer/06-deploy-to-azure.ipynb deleted file mode 100644 index 397d3f1016b..00000000000 --- a/notebooks/tutorials/data-engineer/06-deploy-to-azure.ipynb +++ /dev/null @@ -1,114 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Deploy to Azure" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Installing CLI Tool" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "## Authorizing CLI Tool" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "## Deploying a Single Container" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "$ az group create --name test-container --location eastus" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "$ az container create --resource-group test-container --name syft --image openmined/grid-enclave:0.8.2.b0 --dns-name-label syft-demo --ports 80 --environment-variables PORT=80 DEFAULT_ROOT_PASSWORD=secret" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "## Deploying a Domain" - ] - 
}, - { - "attachments": {}, - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "## Checking Firewall Rules" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "8", - "metadata": {}, - "source": [ - "## Logging in via SSH" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-engineer/07-deploy-to-gcp.ipynb b/notebooks/tutorials/data-engineer/07-deploy-to-gcp.ipynb deleted file mode 100644 index 827f1d5e129..00000000000 --- a/notebooks/tutorials/data-engineer/07-deploy-to-gcp.ipynb +++ /dev/null @@ -1,73 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Deploy to Google Cloud Platform (GCP)" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Installing CLI Tool" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "## Authorizing CLI Tool" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "## Deploying a Domain" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "## Checking Firewall Rules" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "## Logging in via SSH" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-engineer/08-deploy-to-aws.ipynb b/notebooks/tutorials/data-engineer/08-deploy-to-aws.ipynb deleted file mode 100644 index 7b8a28ec777..00000000000 --- a/notebooks/tutorials/data-engineer/08-deploy-to-aws.ipynb +++ /dev/null @@ -1,152 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Deploy to AWS" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Installing CLI Tool" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "Please refer to the docs for installing the AWS CLI tool: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html. It has instructions for the different operating systems such as Mac, Windows and Linux" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "## Authorizing CLI Tool" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "Please go through this for setting up the CLI: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-quickstart.html. \n", - "\n", - "A common/quick way is to use to authenticate using IAM user credentials. 
Please refer to this doc for the steps involved: https://docs.aws.amazon.com/cli/latest/userguide/cli-authentication-user.html" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "## Deploying a Domain" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "Use `hagrid launch {domain_name} domain to aws [--no-provision]` command to launch your domain to an AWS EC2 instance. The --no-provision flag is optional and can be used if you do not want to provision all the resources using ansible (If you're not familiar with this, just ignore this flag) " - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "You would be prompted with a series of questions.\n", - "\n", - "Please specify the region where you want your EC2 instance to be deployed.\n", - "\n", - "Please specify a name for the security group to be created. A security group is used to control the inbound and outbound traffic to/from the EC2 instance. Please check https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html for more information.\n", - "Then specify the IP addresses to be white-listed for incoming traffic to the EC2 instance. Please ensure that you enter it in CIDR notation. The default is 0.0.0.0/0 which means that all inbound traffic is allowed.\n", - "On these IP addresses, we open the following ports: 80, 443, 22.\n", - "\n", - "Then, please specify the EC2 instance type. By default, it is t2.xlarge.\n", - "\n", - "We need an EC2 key pair in order to SSH into the instance. If you already have a key-pair, please specify the name and the path where it is stored. Otherwise, if you do not have one, we will create one with the given name and store it in the path you specify. (Note: creating a keypair might not work properly with windows powershell).\n", - "\n", - "\n", - "Then specify the repo and branch for the source code. You can leave it as the default.\n", - "\n", - "\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "id": "8", - "metadata": {}, - "source": [ - "## Checking Firewall Rules" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "9", - "metadata": {}, - "source": [ - "You could go to the AWS console, and navigate to the region where you deployed your instance. Search for EC2 and go over to the Security Groups tab (or directly search for Security Group). In the list of security groups, identify the one you created using the name. If you go inside, you would see the inbound and outbound rules." 
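If you prefer the command line over the console, the same inbound rules can be inspected programmatically. This is a rough illustrative sketch (not part of the original notebook); it assumes boto3 is installed, AWS credentials are configured, and the region and security group name are placeholders you would substitute:

```python
# Hypothetical check of the security group created during deployment (names are placeholders)
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")
resp = ec2.describe_security_groups(
    Filters=[{"Name": "group-name", "Values": ["my-hagrid-security-group"]}]
)
for sg in resp["SecurityGroups"]:
    for rule in sg["IpPermissions"]:
        cidrs = [r["CidrIp"] for r in rule.get("IpRanges", [])]
        # expect ports 22, 80 and 443 open to the CIDR you whitelisted
        print(rule.get("FromPort"), rule.get("ToPort"), cidrs)
```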
- ] - }, - { - "cell_type": "markdown", - "id": "10", - "metadata": {}, - "source": [ - "## Logging in via SSH" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "11", - "metadata": {}, - "source": [ - "Please refer to the steps in the doc to connect to your EC2 instance using SSH: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html" - ] - }, - { - "cell_type": "markdown", - "id": "12", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-engineer/09-deploying-enclave.ipynb b/notebooks/tutorials/data-engineer/09-deploying-enclave.ipynb deleted file mode 100644 index 11c0fba438e..00000000000 --- a/notebooks/tutorials/data-engineer/09-deploying-enclave.ipynb +++ /dev/null @@ -1,41 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Deploying an Enclave" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-engineer/10-custom-deployment.ipynb b/notebooks/tutorials/data-engineer/10-custom-deployment.ipynb deleted file mode 100644 index 11b2f707b35..00000000000 --- a/notebooks/tutorials/data-engineer/10-custom-deployment.ipynb +++ /dev/null @@ -1,97 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Custom Deployment" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## What you need" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "### Container Engine" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "### File Mounts" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "### Network Access" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "### Python Client" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "### Red Hat and Podman" - ] - }, - { - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "### Kubernetes" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - 
}, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-engineer/11-installing-and-upgrading-via-helm.ipynb b/notebooks/tutorials/data-engineer/11-installing-and-upgrading-via-helm.ipynb deleted file mode 100644 index 4775672f760..00000000000 --- a/notebooks/tutorials/data-engineer/11-installing-and-upgrading-via-helm.ipynb +++ /dev/null @@ -1,364 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Installing using Helm" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Add Helm Repo" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "```bash\n", - "helm repo add openmined https://openmined.github.io/PySyft/helm\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "## Update Repo" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "```bash\n", - "helm repo update openmined\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "## Search for available Chart versions" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "### Search for available versions¶" - ] - }, - { - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "```bash\n", - "helm search repo openmined/syft --versions --devel\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "8", - "metadata": {}, - "source": [ - "### Set the version to install" - ] - }, - { - "cell_type": "markdown", - "id": "9", - "metadata": {}, - "source": [ - "```bash\n", - "export SYFT_VERSION=\"\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "10", - "metadata": {}, - "source": [ - "## Setup a registry" - ] - }, - { - "cell_type": "markdown", - "id": "11", - "metadata": {}, - "source": [ - "One needs to setup a registry either locally or on the cloud. To set one up locally, one can follow the following commands." 
- ] - }, - { - "cell_type": "markdown", - "id": "12", - "metadata": {}, - "source": [ - "```bash\n", - "k3d registry create registry.localhost --port 12345 -v `pwd`/k3d-registry:/var/lib/registry || true\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "13", - "metadata": {}, - "source": [ - "Setup a load balancer\n", - "\n", - "```bash\n", - "NODE_NAME=syft NODE_PORT=8080 && \\\n", - "k3d cluster create syft -p \"$NODE_PORT:80@loadbalancer\" --registry-use k3d-registry.localhost || true \\\n", - "k3d cluster start syft\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "14", - "metadata": {}, - "source": [ - "## Install using Helm" - ] - }, - { - "cell_type": "markdown", - "id": "15", - "metadata": {}, - "source": [ - "```bash\n", - "helm install my-domain openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className=traefik\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "16", - "metadata": {}, - "source": [ - "# Upgrading using Helm" - ] - }, - { - "cell_type": "markdown", - "id": "17", - "metadata": {}, - "source": [ - "## Add Helm Repo" - ] - }, - { - "cell_type": "markdown", - "id": "18", - "metadata": {}, - "source": [ - "```bash\n", - "helm repo add openmined https://openmined.github.io/PySyft/helm\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "19", - "metadata": {}, - "source": [ - "## Update Repo" - ] - }, - { - "cell_type": "markdown", - "id": "20", - "metadata": {}, - "source": [ - "```bash\n", - "helm repo update openmined\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "21", - "metadata": {}, - "source": [ - "## Search for available Helm Chart versions" - ] - }, - { - "cell_type": "markdown", - "id": "22", - "metadata": {}, - "source": [ - "### Search for available versions" - ] - }, - { - "cell_type": "markdown", - "id": "23", - "metadata": {}, - "source": [ - "```bash\n", - "helm search repo openmined/syft --versions --devel\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "24", - "metadata": {}, - "source": [ - "### Set the target version" - ] - }, - { - "cell_type": "markdown", - "id": "25", - "metadata": {}, - "source": [ - "```bash\n", - "export TARGET_VERSION=\"\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "26", - "metadata": {}, - "source": [ - "## Get the current Helm release values (User Defined)" - ] - }, - { - "cell_type": "markdown", - "id": "27", - "metadata": {}, - "source": [ - "Set the release name and namespace\n", - "\n", - "```bash\n", - "export RELEASE_NAME=\"\"\n", - "export NAMESPACE=\"\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "28", - "metadata": {}, - "source": [ - "```bash\n", - "helm get values $RELEASE_NAME -n $NAMESPACE -o yaml > values.yaml\n", - "```\n", - "\n", - "
\n", - "\n", - "Use this file in the argument to helm upgrade command, for example:\n", - "\n", - "\n", - "`-f /home/user/values.yaml`\n", - "\n", - "\n", - "Save the path to a variable:\n", - "\n", - "```bash\n", - "export PATH_TO_VALUES=/home/user/values.yaml\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "29", - "metadata": {}, - "source": [ - "## Upgrade the Helm Chart" - ] - }, - { - "cell_type": "markdown", - "id": "30", - "metadata": {}, - "source": [ - "### Find out the number of nodes in the cluster." - ] - }, - { - "cell_type": "markdown", - "id": "31", - "metadata": {}, - "source": [ - "```bash\n", - "kubectl describe sts --namespace $NAMESPACE | grep 'Replicas'\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "32", - "metadata": {}, - "source": [ - "### Upgrade the Helm chart." - ] - }, - { - "cell_type": "markdown", - "id": "33", - "metadata": {}, - "source": [ - "```bash\n", - "helm upgrade $RELEASE_NAME openmined/syft \\\n", - " --version $TARGET_VERSION \\\n", - " -f $PATH_TO_VALUES \\\n", - " --namespace $NAMESPACE\n", - "```" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": false - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-owner/01-uploading-private-data.ipynb b/notebooks/tutorials/data-owner/01-uploading-private-data.ipynb index 02ed5576cb0..71567b558b0 100644 --- a/notebooks/tutorials/data-owner/01-uploading-private-data.ipynb +++ b/notebooks/tutorials/data-owner/01-uploading-private-data.ipynb @@ -25,7 +25,7 @@ "metadata": {}, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -50,8 +50,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"private-data-example-domain-1\", port=\"auto\", reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"private-data-example-datasite-1\", port=\"auto\", reset=True\n", ")" ] }, @@ -82,7 +82,7 @@ "source": [ "# syft absolute\n", "\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -124,8 +124,14 @@ "metadata": {}, "outputs": [], "source": [ + "dataset_markdown_description = \"\"\"\n", + "### Contents\n", + "Numpy arrays of length 3 with integers ranging from 1 - 3.\n", + "\"\"\"\n", "dataset = sy.Dataset(\n", " name=\"my dataset\",\n", + " summary=\"Contains private and mock versions of data\",\n", + " description=dataset_markdown_description,\n", " asset_list=[\n", " sy.Asset(name=\"my asset\", data=np.array([1, 2, 3]), mock=np.array([1, 1, 1]))\n", " ],\n", @@ -316,9 +322,9 @@ "metadata": {}, "outputs": [], "source": [ - "# Cleanup local domain server\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "# Cleanup 
local datasite server\n", + "if server.server_type.value == \"python\":\n", + " server.land()" ] } ], diff --git a/notebooks/tutorials/data-owner/02-account-management.ipynb b/notebooks/tutorials/data-owner/02-account-management.ipynb index a4e64b74698..85e7b2fdfeb 100644 --- a/notebooks/tutorials/data-owner/02-account-management.ipynb +++ b/notebooks/tutorials/data-owner/02-account-management.ipynb @@ -23,7 +23,7 @@ "metadata": {}, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -48,8 +48,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"account-management-example-domain-1\", port=8041, reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"account-management-example-datasite-1\", port=8041, reset=True\n", ")" ] }, @@ -78,10 +78,8 @@ "source": [ "# syft absolute\n", "from syft.service.user.user import ServiceRole\n", - "from syft.service.user.user import UserCreate\n", - "from syft.service.user.user import UserUpdate\n", "\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -107,9 +105,7 @@ "metadata": {}, "outputs": [], "source": [ - "client.users.create(\n", - " UserCreate(email=\"newuser@openmined.org\", name=\"John Doe\", password=\"pw\")\n", - ")" + "client.users.create(email=\"newuser@openmined.org\", name=\"John Doe\", password=\"pw\")" ] }, { @@ -209,7 +205,7 @@ "outputs": [], "source": [ "updated_user = client.users.update(\n", - " new_user.id, UserUpdate(role=ServiceRole.DATA_SCIENTIST, password=\"123\")\n", + " uid=new_user.id, role=ServiceRole.DATA_SCIENTIST, password=\"123\"\n", ")" ] }, @@ -238,7 +234,7 @@ "metadata": {}, "outputs": [], "source": [ - "ds_client = node.login(email=\"newuser@openmined.org\", password=\"123\")" + "ds_client = server.login(email=\"newuser@openmined.org\", password=\"123\")" ] }, { @@ -328,7 +324,7 @@ "metadata": {}, "outputs": [], "source": [ - "new_user = node.login(email=\"joker@test.com\", password=\"joker123\")" + "new_user = server.login(email=\"joker@test.com\", password=\"joker123\")" ] }, { @@ -356,12 +352,13 @@ "metadata": {}, "outputs": [], "source": [ - "new_user.register(\n", - " email=\"batman@test.com\",\n", - " password=\"batman123\",\n", - " password_verify=\"batman123\",\n", - " name=\"Batman\",\n", - ")" + "with sy.raises(sy.SyftException, show=True):\n", + " new_user.register(\n", + " email=\"batman@test.com\",\n", + " password=\"batman123\",\n", + " password_verify=\"batman123\",\n", + " name=\"Batman\",\n", + " )" ] }, { @@ -432,12 +429,13 @@ "metadata": {}, "outputs": [], "source": [ - "new_user.register(\n", - " email=\"harley@test.com\",\n", - " password=\"harley123\",\n", - " password_verify=\"harley123\",\n", - " name=\"Harley\",\n", - ")" + "with sy.raises(sy.SyftException, show=True):\n", + " new_user.register(\n", + " email=\"harley@test.com\",\n", + " password=\"harley123\",\n", + " password_verify=\"harley123\",\n", + " name=\"Harley\",\n", + " )" ] }, { @@ -464,14 +462,17 @@ "source": [ "client.users" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "46", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, "language_info": { "codemirror_mode": { "name": 
"ipython", @@ -482,7 +483,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.3" + "version": "3.12.5" }, "toc": { "base_numbering": 1, diff --git a/notebooks/tutorials/data-owner/03-messages-and-requests.ipynb b/notebooks/tutorials/data-owner/03-messages-and-requests.ipynb index 5a59e9724f0..e888c57f044 100644 --- a/notebooks/tutorials/data-owner/03-messages-and-requests.ipynb +++ b/notebooks/tutorials/data-owner/03-messages-and-requests.ipynb @@ -23,7 +23,7 @@ "metadata": {}, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -48,8 +48,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"messages-requests-example-domain-1-do\", port=7021, reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"messages-requests-example-datasite-1-do\", port=7021, reset=True, dev_mode=True\n", ")" ] }, @@ -76,7 +76,7 @@ "metadata": {}, "outputs": [], "source": [ - "admin_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -122,7 +122,7 @@ "metadata": {}, "outputs": [], "source": [ - "guest_client = node.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = server.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -200,22 +200,32 @@ "metadata": {}, "outputs": [], "source": [ - "project = new_project.start()\n", + "project = new_project.send()\n", "project" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "16", "metadata": {}, + "outputs": [], "source": [ - "## Messaging" + "project" ] }, { "cell_type": "markdown", "id": "17", "metadata": {}, + "source": [ + "## Messaging" + ] + }, + { + "cell_type": "markdown", + "id": "18", + "metadata": {}, "source": [ "### Check New Messages" ] @@ -223,7 +233,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -232,7 +242,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "### Send a Message" @@ -240,7 +250,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "21", "metadata": {}, "source": [ "### Mark as Read or Unread" @@ -249,7 +259,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -260,7 +270,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -270,7 +280,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -280,7 +290,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -290,7 +300,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -299,7 +309,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "27", "metadata": {}, "source": [ "## Requests" @@ -307,7 +317,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "28", "metadata": { "tags": [] }, @@ -318,7 +328,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -328,7 +338,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "29", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -339,7 +349,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -349,7 +359,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -358,69 +368,8 @@ }, { "cell_type": "markdown", - "id": "32", - "metadata": {}, - "source": [ - "### Substituting" - ] - }, - { - "cell_type": "code", - "execution_count": null, "id": "33", "metadata": {}, - "outputs": [], - "source": [ - "mean_request = admin_client.requests[-2]\n", - "mean_request" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "34", - "metadata": {}, - "outputs": [], - "source": [ - "admin_asset = admin_client.datasets[0].assets[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "35", - "metadata": {}, - "outputs": [], - "source": [ - "result = mean_request.code.unsafe_function(data=admin_asset)\n", - "result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "36", - "metadata": {}, - "outputs": [], - "source": [ - "mean_request.accept_by_depositing_result(result)\n", - "mean_request" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "37", - "metadata": {}, - "outputs": [], - "source": [ - "admin_client.projects[0].requests" - ] - }, - { - "cell_type": "markdown", - "id": "38", - "metadata": {}, "source": [ "### Rejecting" ] @@ -428,7 +377,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "34", "metadata": {}, "outputs": [], "source": [ @@ -439,7 +388,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -449,7 +398,7 @@ { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -459,7 +408,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "37", "metadata": {}, "outputs": [], "source": [ @@ -469,7 +418,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "38", "metadata": {}, "outputs": [], "source": [] @@ -491,7 +440,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.12.4" }, "toc": { "base_numbering": 1, diff --git a/notebooks/tutorials/data-owner/05-syft-services-api.ipynb b/notebooks/tutorials/data-owner/05-syft-services-api.ipynb index 7c3f409105a..d891bc16cdf 100644 --- a/notebooks/tutorials/data-owner/05-syft-services-api.ipynb +++ b/notebooks/tutorials/data-owner/05-syft-services-api.ipynb @@ -23,7 +23,7 @@ "metadata": {}, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -56,8 +56,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"services-api-example-domain-1\", port=\"auto\", reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"services-api-example-datasite-1\", port=\"auto\", reset=True\n", ")" ] }, @@ -78,7 +78,7 @@ "source": [ "# syft absolute\n", "\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { diff --git a/notebooks/tutorials/data-scientist/02-finding-datasets.ipynb 
b/notebooks/tutorials/data-scientist/02-finding-datasets.ipynb index 30dcf080d8c..bfae6dbdb2d 100644 --- a/notebooks/tutorials/data-scientist/02-finding-datasets.ipynb +++ b/notebooks/tutorials/data-scientist/02-finding-datasets.ipynb @@ -21,7 +21,7 @@ "id": "2", "metadata": {}, "source": [ - "## Connecting to a Domain" + "## Connecting to a Datasite" ] }, { diff --git a/notebooks/tutorials/data-scientist/03-working-with-private-datasets.ipynb b/notebooks/tutorials/data-scientist/03-working-with-private-datasets.ipynb index acf4ec170df..70eb66b6d23 100644 --- a/notebooks/tutorials/data-scientist/03-working-with-private-datasets.ipynb +++ b/notebooks/tutorials/data-scientist/03-working-with-private-datasets.ipynb @@ -23,7 +23,7 @@ "metadata": {}, "outputs": [], "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", "package_string = f'\"syft{SYFT_VERSION}\"'\n", "# %pip install {package_string} -q" ] @@ -48,8 +48,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"private-datasets-example-domain-1\", port=8062, reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"private-datasets-example-datasite-1\", port=8062, reset=True\n", ")" ] }, @@ -76,7 +76,7 @@ "metadata": {}, "outputs": [], "source": [ - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -147,7 +147,7 @@ "metadata": {}, "outputs": [], "source": [ - "guest_client = node.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = server.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -437,7 +437,7 @@ "metadata": {}, "outputs": [], "source": [ - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -534,6 +534,14 @@ "metadata": {}, "outputs": [], "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "54", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -552,7 +560,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.12.4" }, "toc": { "base_numbering": 1, diff --git a/notebooks/tutorials/data-scientist/04-action-graph.ipynb b/notebooks/tutorials/data-scientist/04-action-graph.ipynb deleted file mode 100644 index 7092ea0f1fc..00000000000 --- a/notebooks/tutorials/data-scientist/04-action-graph.ipynb +++ /dev/null @@ -1,97 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Action Graph" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Current Limitations" - ] - }, - { - "cell_type": "markdown", - "id": "2", - "metadata": {}, - "source": [ - "### Using mocks locally" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "### JAX autograd functions" - ] - }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [ - "## Viewing the Graph" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "## Numpy Tutorials" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "## Pandas Tutorials" - ] - }, - { - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "## JAX Tutorials" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-scientist/04-syft-functions.ipynb b/notebooks/tutorials/data-scientist/04-syft-functions.ipynb new file mode 100644 index 00000000000..74724c12fee --- /dev/null +++ b/notebooks/tutorials/data-scientist/04-syft-functions.ipynb @@ -0,0 +1,666 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Syft Functions" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "## Install" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'\n", + "# %pip install {package_string} -q" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"syft-functions-example-datasite-1\", port=7022, reset=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "Lets login with our root user." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "\n", + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "Create a dummy dataset for experimenting" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "import numpy as np\n", + "\n", + "dataset = sy.Dataset(\n", + " name=\"my dataset\",\n", + " asset_list=[\n", + " sy.Asset(name=\"my asset\", data=np.array([1, 2, 3]), mock=np.array([1, 1, 1]))\n", + " ],\n", + ")\n", + "admin_client.upload_dataset(dataset)" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "Create a new user to use as a data scientist account" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "admin_client.register(\n", + " name=\"Jane Doe\",\n", + " email=\"jane@caltech.edu\",\n", + " password=\"abc123\",\n", + " password_verify=\"abc123\",\n", + " institution=\"Caltech\",\n", + " website=\"https://www.caltech.edu/\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "guest_client = server.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + ] + }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + "source": [ + "## Defining a Syft Function" + ] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": [ + "Let's say you want to compute the mean of some numbers remotely with PySyft. How do you do that? Pretty easy actually:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "@sy.syft_function_single_use()\n", + "def func():\n", + " # run some computation\n", + " data = list(range(100))\n", + " return sum(data) / 100" + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, + "source": [ + "## Input Policies" + ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "That's great but what if we want to run this function with some parameters? Maybe even some private data (why do remote data science without remote data?). Here's where Input Policies come into play. Their purpose is to define what rules will we follow when it comes to the inputs of a syft function. 
At the moment we provide what we call an `ExactMatch` policy which allows data scientists to specify a private asset they would like to use, just like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "asset = guest_client.datasets[0].assets[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "@sy.syft_function(\n", + " input_policy=sy.ExactMatch(data=asset),\n", + " output_policy=sy.SingleExecutionExactOutput(),\n", + ")\n", + "def mean(data):\n", + " return sum(data) / len(data)" + ] + }, + { + "cell_type": "markdown", + "id": "22", + "metadata": {}, + "source": [ + "## Output Policies" + ] + }, + { + "cell_type": "markdown", + "id": "23", + "metadata": {}, + "source": [ + "You have probably noticed that in the last example we also specified the output policy. Its purpose is to control the release of information for a given function and the parameters that come with that release. For example, even if a data owner and a data scientist agree on the content of a function run on a datasite and on what private data it can be run on, their work might not be done yet. They might still negotiate how many times that function can be run, whether the data scientist can access the output directly, or what happens before the output is released (maybe we add some noise, as in differential privacy). At the moment we provide policies that allow the data scientist to ask for a certain number of runs of a function, but the one you will find most often is `SingleExecutionExactOutput`, which asks for a single use of a function. We use it so often that we added the `syft_function_single_use` decorator, which applies that output policy by default. What is also convenient is that you can pass the inputs for an input policy directly to this decorator to get a shorter version like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "# same functionality as before, just faster to write\n", + "\n", + "\n", + "@sy.syft_function_single_use(data=asset)\n", + "def mean(data): # noqa: F811\n", + " return sum(data) / len(data)" + ] + }, + { + "cell_type": "markdown", + "id": "25", + "metadata": {}, + "source": [ + "We are working on extending the functionalities of these policies to truly accomplish the goals we have in mind for them. However, if you have a specific use case in mind and can't wait to use it in your remote data science pipeline, check the custom policies notebook that teaches you how to implement your own input and output policies (and also reuse other users' submitted policies)!" + ] + }, + { + "cell_type": "markdown", + "id": "26", + "metadata": {}, + "source": [ + "## Testing it Locally" + ] + }, + { + "cell_type": "markdown", + "id": "27", + "metadata": {}, + "source": [ + "\"Right, so we have defined a function for remote use, but can I run it locally?\" - you probably ask\n", + "\n", + "Yeah, of course you can! 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "func()" + ] + }, + { + "cell_type": "markdown", + "id": "29", + "metadata": {}, + "source": [ + "\"Sure, but what about functions on the assets? That can't work!\"\n", + "\n", + "YEAH IT CAN!!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30", + "metadata": {}, + "outputs": [], + "source": [ + "mean(data=asset)" + ] + }, + { + "cell_type": "markdown", + "id": "31", + "metadata": {}, + "source": [ + "If you paid attention when we defined the dataset, you probably noticed that for the asset we have added we specified both **the private data and the mock data, and this runs on the mock data**. We use the mock data to test function on the data scientist side. This mock data requires no special access or permissions, because it is public data. This can be data that only matches the structure of the private data or might even be synthetic data if the data owner provides it. Its main goal is to help data scientists to test their functions locally before submitting a request to filter noisy requests in the process. If you would like to learn more about the data owner experience, please check out the notebooks under the tutorials section." + ] + }, + { + "cell_type": "markdown", + "id": "32", + "metadata": {}, + "source": [ + "## Submitting it for Approval" + ] + }, + { + "cell_type": "markdown", + "id": "33", + "metadata": {}, + "source": [ + "Now that we are sure our function works at intended on the mock data, we are ready to submit a request. The cleanest way to do that is to first create a project and attach your request there." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34", + "metadata": {}, + "outputs": [], + "source": [ + "# Create a project\n", + "new_project = sy.Project(\n", + " name=\"My Cool Project\",\n", + " description=\"\"\"Hi, I want to calculate the mean of your private data,\\\n", + " pretty please!\"\"\",\n", + " members=[guest_client],\n", + ")\n", + "new_project" + ] + }, + { + "cell_type": "markdown", + "id": "35", + "metadata": {}, + "source": [ + "Now let's add a code request to the project:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36", + "metadata": {}, + "outputs": [], + "source": [ + "new_project.create_code_request(mean, guest_client)" + ] + }, + { + "cell_type": "markdown", + "id": "37", + "metadata": {}, + "source": [ + "Now we can start our project by simply running " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38", + "metadata": {}, + "outputs": [], + "source": [ + "project = new_project.send()\n", + "project" + ] + }, + { + "cell_type": "markdown", + "id": "39", + "metadata": {}, + "source": [ + "## Checking Approval" + ] + }, + { + "cell_type": "markdown", + "id": "40", + "metadata": {}, + "source": [ + "Very cool, now let's run our function with private data!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "41", + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException, show=True):\n", + " guest_client.code.mean(data=asset)" + ] + }, + { + "cell_type": "markdown", + "id": "42", + "metadata": {}, + "source": [ + "Right! Our code was not approved, so we should wait for the review from the data owner. 
Since we also deployed the datasite, we will do that quickly here, but for more details on what is happening, check the data owner sections under tutorials:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43", + "metadata": {}, + "outputs": [], + "source": [ + "request = admin_client.notifications[-1].link.requests[0]\n", + "request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44", + "metadata": {}, + "outputs": [], + "source": [ + "request.code" + ] + }, + { + "cell_type": "markdown", + "id": "45", + "metadata": {}, + "source": [ + "Now that we have inspected the code, we can approve it:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "46", + "metadata": {}, + "outputs": [], + "source": [ + "request.approve()" + ] + }, + { + "cell_type": "markdown", + "id": "47", + "metadata": {}, + "source": [ + "## Executing your Function" + ] + }, + { + "cell_type": "markdown", + "id": "48", + "metadata": {}, + "source": [ + "Good, now we are finally ready to run the function on private data:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "49", + "metadata": {}, + "outputs": [], + "source": [ + "res = guest_client.code.mean(data=asset)\n", + "res" + ] + }, + { + "cell_type": "markdown", + "id": "50", + "metadata": {}, + "source": [ + "Notice that the result we see is still `1.0` which looks like the result on the mock data. That is because it actually is! The object returned is an `ActionObject` which here behaves like a pointer for the data on the datasite:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51", + "metadata": {}, + "outputs": [], + "source": [ + "isinstance(res, sy.ActionObject)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "52", + "metadata": {}, + "outputs": [], + "source": [ + "type(res)" + ] + }, + { + "cell_type": "markdown", + "id": "53", + "metadata": {}, + "source": [ + "If the data owner does not accept the request, they instead call" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "54", + "metadata": {}, + "outputs": [], + "source": [ + "request.deny(reason=\"you cannot have access\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "55", + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(sy.SyftException, show=True):\n", + " guest_client.code.mean(data=asset)" + ] + }, + { + "cell_type": "markdown", + "id": "56", + "metadata": {}, + "source": [ + "in that case our call raises a `SyftException`" + ] + }, + { + "cell_type": "markdown", + "id": "57", + "metadata": {}, + "source": [ + "## Downloading Results" + ] + }, + { + "cell_type": "markdown", + "id": "58", + "metadata": {}, + "source": [ + "To get the real data we need one more step:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59", + "metadata": {}, + "outputs": [], + "source": [ + "real_res = res.get()\n", + "real_res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60", + "metadata": {}, + "outputs": [], + "source": [ + "assert real_res == 2.0" + ] + }, + { + "cell_type": "markdown", + "id": "61", + "metadata": {}, + "source": [ + "We can check the type of the result to see that it's real data:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62", + "metadata": {}, + "outputs": [], + "source": [ + "type(real_res)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "63", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": 
{ + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.5" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": false, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": true + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/tutorials/data-scientist/05-messaging-and-requests.ipynb b/notebooks/tutorials/data-scientist/05-messaging-and-requests.ipynb new file mode 100644 index 00000000000..def56b583d9 --- /dev/null +++ b/notebooks/tutorials/data-scientist/05-messaging-and-requests.ipynb @@ -0,0 +1,383 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Messaging and Requests" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "## Install" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.9,<1.0.0\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'\n", + "# %pip install {package_string} -q" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"messages-requests-example-datasite-1-ds\", port=7023, reset=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "For the purpose of this tutorial we are creating a very simple dataset, which is created and owner by the root client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "import numpy as np\n", + "\n", + "dataset = sy.Dataset(\n", + " name=\"my dataset\",\n", + " asset_list=[\n", + " sy.Asset(name=\"my asset\", data=np.array([1, 2, 3]), mock=np.array([1, 1, 1]))\n", + " ],\n", + ")\n", + "admin_client.upload_dataset(dataset)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "admin_client.register(\n", + " name=\"Jane Doe\",\n", + " email=\"jane@caltech.edu\",\n", + " password=\"abc123\",\n", + " password_verify=\"abc123\",\n", + " institution=\"Caltech\",\n", + " website=\"https://www.caltech.edu/\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "guest_client = server.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "asset = guest_client.datasets[0].assets[0]" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "@sy.syft_function_single_use()\n", + "def func():\n", + " # stdlib\n", + " import random\n", + "\n", + " data = list(range(100))\n", + " return sum(data) / 100 + random.random()\n", + "\n", + "\n", + "@sy.syft_function_single_use(data=asset)\n", + "def mean(data):\n", + " # stdlib\n", + " import random\n", + "\n", + " return sum(data) / len(data) + +random.random()\n", + "\n", + "\n", + "@sy.syft_function_single_use(data=asset)\n", + "def reveal_data(data):\n", + " return data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "# Create a project\n", + "new_project = sy.Project(\n", + " name=\"My Cool Project\",\n", + " description=\"\"\"Hi, I want to calculate the mean of your private data,\\\n", + " pretty please!\"\"\",\n", + " members=[guest_client],\n", + ")\n", + "new_project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "new_project.create_code_request(func, guest_client)\n", + "new_project.create_code_request(mean, guest_client)\n", + "new_project.create_code_request(reveal_data, guest_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "project = new_project.send()\n", + "project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "dangerous_request = admin_client.projects[0].requests[-1]\n", + "dangerous_request.deny(reason=\"Dangerous request, you just want to reveal the data!\")" + ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "## Messaging" + ] + }, + { + "cell_type": "markdown", + "id": "18", + "metadata": {}, + "source": [ + "list notifications using client.notifications (messages sent and requests)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "guest_client.notifications" + ] + }, + { + "cell_type": "markdown", + "id": "20", + "metadata": {}, + "source": [ + "## Common Permission Errors" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": {}, + "source": [ + "approve request that you dont have permission for example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "guest_client.projects" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "guest_client.projects[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "func_request = guest_client.projects[0].requests[0]\n", + "func_request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "with sy.raises(Exception):\n", + " func_request.approve()" + ] + }, + { + "cell_type": "markdown", + "id": "26", + "metadata": {}, + "source": [ + "## Requesting Changes" + ] + }, + { + "cell_type": "markdown", + "id": "27", + "metadata": {}, + "source": [ + "request permission to an object via a pointer" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "asset = guest_client.datasets[0].assets[0]\n", + 
"asset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "request = asset.pointer.request(guest_client)\n", + "request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": false, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": true + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/tutorials/data-scientist/05-syft-functions.ipynb b/notebooks/tutorials/data-scientist/05-syft-functions.ipynb deleted file mode 100644 index da524a933e1..00000000000 --- a/notebooks/tutorials/data-scientist/05-syft-functions.ipynb +++ /dev/null @@ -1,662 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Syft Functions" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Install" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", - "package_string = f'\"syft{SYFT_VERSION}\"'\n", - "# %pip install {package_string} -q" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "\n", - "sy.requires(SYFT_VERSION)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "node = sy.orchestra.launch(\n", - " name=\"syft-functions-example-domain-1\", port=7022, reset=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "## Setup" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "Lets login with our root user." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "\n", - "admin_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "markdown", - "id": "8", - "metadata": {}, - "source": [ - "Create a dummy dataset for experimenting" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "# third party\n", - "import numpy as np\n", - "\n", - "dataset = sy.Dataset(\n", - " name=\"my dataset\",\n", - " asset_list=[\n", - " sy.Asset(name=\"my asset\", data=np.array([1, 2, 3]), mock=np.array([1, 1, 1]))\n", - " ],\n", - ")\n", - "admin_client.upload_dataset(dataset)" - ] - }, - { - "cell_type": "markdown", - "id": "10", - "metadata": {}, - "source": [ - "Create a new user to use as a data scientist account" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "admin_client.register(\n", - " name=\"Jane Doe\",\n", - " email=\"jane@caltech.edu\",\n", - " password=\"abc123\",\n", - " password_verify=\"abc123\",\n", - " institution=\"Caltech\",\n", - " website=\"https://www.caltech.edu/\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "guest_client = node.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" - ] - }, - { - "cell_type": "markdown", - "id": "13", - "metadata": {}, - "source": [ - "## Defining a Syft Function" - ] - }, - { - "cell_type": "markdown", - "id": "14", - "metadata": {}, - "source": [ - "Let's say you want to compute the mean of some numbers remotely with PySyft. How do you do that? Pretty easy actually:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": {}, - "outputs": [], - "source": [ - "@sy.syft_function_single_use()\n", - "def func():\n", - " # run some computation\n", - " data = list(range(100))\n", - " return sum(data) / 100" - ] - }, - { - "cell_type": "markdown", - "id": "16", - "metadata": {}, - "source": [ - "## Input Policies" - ] - }, - { - "cell_type": "markdown", - "id": "17", - "metadata": {}, - "source": [ - "That's great but what if we want to run this function with some parameters? Maybe even some private data (why do remote data science without remote data?). Here's where Input Policies come into play. Their purpose is to define what rules will we follow when it comes to the inputs of a syft function. 
At the moment we provide what we call an `ExactMatch` policy which allows data scientists to specify a private asset they would like to use, just like this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18", - "metadata": {}, - "outputs": [], - "source": [ - "asset = guest_client.datasets[0].assets[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19", - "metadata": {}, - "outputs": [], - "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", - "package_string = f'\"syft{SYFT_VERSION}\"'\n", - "# %pip install {package_string} -q" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "\n", - "sy.requires(SYFT_VERSION)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21", - "metadata": {}, - "outputs": [], - "source": [ - "@sy.syft_function(\n", - " input_policy=sy.ExactMatch(data=asset),\n", - " output_policy=sy.SingleExecutionExactOutput(),\n", - ")\n", - "def mean(data):\n", - " return sum(data) / len(data)" - ] - }, - { - "cell_type": "markdown", - "id": "22", - "metadata": {}, - "source": [ - "## Output Policies" - ] - }, - { - "cell_type": "markdown", - "id": "23", - "metadata": {}, - "source": [ - "You have probably noticed that in the last example we also specified the output policy. Its purpose has to do with the release of information for a given function and controlling the parameters that this release comes with. For example, if a data owner and a data scientist agree on the content of a function run on a domain and on what private data that can be run on, their work might not be done yet. They might negotiate how many times that function can be run, whether or not the data scientist can have access or what happens before releasing the output (maybe we add some noise like in the case of differential privacy). At the moment we have policies that allow data scientist to ask for a certain amount of runs on function, but the ones you will find most often is `SingleExecutionExactOutput` that ask for a single use on a function. We have used it so much that we came with the `syft_function_single_use` decorator that use by default that output policy. What is also cool is that you can pass the input for an input policy to this decorator to get a shorter version like this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": {}, - "outputs": [], - "source": [ - "# same functionality as before, just faster to write\n", - "\n", - "\n", - "@sy.syft_function_single_use(data=asset)\n", - "def mean(data): # noqa: F811\n", - " return sum(data) / len(data)" - ] - }, - { - "cell_type": "markdown", - "id": "25", - "metadata": {}, - "source": [ - "We are working on extending the functionalities of these policies to truly accomplish the goals we have in mind for them. However, if you have a specific use case in mind and can't wait to use it in your remote data science pipeline, check the custom policies notebook that teaches you how to implement your own input and output policies (and also reuse other users' submitted policies)!" - ] - }, - { - "cell_type": "markdown", - "id": "26", - "metadata": {}, - "source": [ - "## Testing it Locally" - ] - }, - { - "cell_type": "markdown", - "id": "27", - "metadata": {}, - "source": [ - "\"Right, so we have defined a function for remote use, but can I run it locally?\" - you probably ask\n", - "\n", - "Yeah, of course you can! 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "28", - "metadata": {}, - "outputs": [], - "source": [ - "func()" - ] - }, - { - "cell_type": "markdown", - "id": "29", - "metadata": {}, - "source": [ - "\"Sure, but what about functions on the assets? That can't work!\"\n", - "\n", - "YEAH IT CAN!!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "30", - "metadata": {}, - "outputs": [], - "source": [ - "mean(data=asset)" - ] - }, - { - "cell_type": "markdown", - "id": "31", - "metadata": {}, - "source": [ - "If you paid attention when we defined the dataset, you probably noticed that for the asset we have added we specified both **the private data and the mock data, and this runs on the mock data**. We use the mock data to test function on the data scientist side. This mock data requires no special access or permissions, because it is public data. This can be data that only matches the structure of the private data or might even be synthetic data if the data owner provides it. Its main goal is to help data scientists to test their functions locally before submitting a request to filter noisy requests in the process. If you would like to learn more about the data owner experience, please check out the notebooks under the tutorials section." - ] - }, - { - "cell_type": "markdown", - "id": "32", - "metadata": {}, - "source": [ - "## Submitting it for Approval" - ] - }, - { - "cell_type": "markdown", - "id": "33", - "metadata": {}, - "source": [ - "Now that we are sure our function works at intended on the mock data, we are ready to submit a request. The cleanest way to do that is to first create a project and attach your request there." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "34", - "metadata": {}, - "outputs": [], - "source": [ - "# Create a project\n", - "new_project = sy.Project(\n", - " name=\"My Cool Project\",\n", - " description=\"\"\"Hi, I want to calculate the mean of your private data,\\\n", - " pretty please!\"\"\",\n", - " members=[guest_client],\n", - ")\n", - "new_project" - ] - }, - { - "cell_type": "markdown", - "id": "35", - "metadata": {}, - "source": [ - "Now let's add a code request to the project:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "36", - "metadata": {}, - "outputs": [], - "source": [ - "new_project.create_code_request(mean, guest_client)" - ] - }, - { - "cell_type": "markdown", - "id": "37", - "metadata": {}, - "source": [ - "Now we can start our project by simply running " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "38", - "metadata": {}, - "outputs": [], - "source": [ - "project = new_project.start()\n", - "project" - ] - }, - { - "cell_type": "markdown", - "id": "39", - "metadata": {}, - "source": [ - "## Checking Approval" - ] - }, - { - "cell_type": "markdown", - "id": "40", - "metadata": {}, - "source": [ - "Very cool, now let's run our function with private data!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "41", - "metadata": {}, - "outputs": [], - "source": [ - "guest_client.code.mean(data=asset)" - ] - }, - { - "cell_type": "markdown", - "id": "42", - "metadata": {}, - "source": [ - "Right! Our code was not approved, so we should wait for the review from the data owner. 
As we also deployed the domain, we will do that quickly here, but for more details on what is happening check the data owner sections under tutorials:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "43", - "metadata": {}, - "outputs": [], - "source": [ - "request = admin_client.notifications[-1].link.requests[0]\n", - "request" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "44", - "metadata": {}, - "outputs": [], - "source": [ - "request.code" - ] - }, - { - "cell_type": "markdown", - "id": "45", - "metadata": {}, - "source": [ - "Now that we have inspected the code, we can approve it" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "46", - "metadata": {}, - "outputs": [], - "source": [ - "request.approve()" - ] - }, - { - "cell_type": "markdown", - "id": "47", - "metadata": {}, - "source": [ - "## Executing your Function" - ] - }, - { - "cell_type": "markdown", - "id": "48", - "metadata": {}, - "source": [ - "Good, now we are finally ready to run the function on private data:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "49", - "metadata": {}, - "outputs": [], - "source": [ - "res = guest_client.code.mean(data=asset)\n", - "res" - ] - }, - { - "cell_type": "markdown", - "id": "50", - "metadata": {}, - "source": [ - "Notice that the result we see is still `1.0` which looks like the result on the mock data. That is because it actually is! The object returned is an `ActionObject` which here behaves like a pointer for the data on the domain:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "51", - "metadata": {}, - "outputs": [], - "source": [ - "isinstance(res, sy.ActionObject)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "52", - "metadata": {}, - "outputs": [], - "source": [ - "type(res)" - ] - }, - { - "cell_type": "markdown", - "id": "53", - "metadata": {}, - "source": [ - "If we do not accept the result, the data owner calls" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "54", - "metadata": {}, - "outputs": [], - "source": [ - "request.deny(reason=\"you cannot have access\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "55", - "metadata": {}, - "outputs": [], - "source": [ - "res_denied = guest_client.code.mean(data=asset)\n", - "res_denied" - ] - }, - { - "cell_type": "markdown", - "id": "56", - "metadata": {}, - "source": [ - "in that case our call returns a `SyftError`" - ] - }, - { - "cell_type": "markdown", - "id": "57", - "metadata": {}, - "source": [ - "## Downloading Results" - ] - }, - { - "cell_type": "markdown", - "id": "58", - "metadata": {}, - "source": [ - "To get the real data we need one more step:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "59", - "metadata": {}, - "outputs": [], - "source": [ - "real_res = res.get()\n", - "real_res" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "60", - "metadata": {}, - "outputs": [], - "source": [ - "assert real_res == 2.0" - ] - }, - { - "cell_type": "markdown", - "id": "61", - "metadata": {}, - "source": [ - "We can check the type of the result to see it's real data:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "62", - "metadata": {}, - "outputs": [], - "source": [ - "type(real_res)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - 
"codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": true - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/data-scientist/07-custom-policies.ipynb b/notebooks/tutorials/data-scientist/06-custom-policies.ipynb similarity index 100% rename from notebooks/tutorials/data-scientist/07-custom-policies.ipynb rename to notebooks/tutorials/data-scientist/06-custom-policies.ipynb diff --git a/notebooks/tutorials/data-scientist/06-messaging-and-requests.ipynb b/notebooks/tutorials/data-scientist/06-messaging-and-requests.ipynb deleted file mode 100644 index 3fbe3bfc055..00000000000 --- a/notebooks/tutorials/data-scientist/06-messaging-and-requests.ipynb +++ /dev/null @@ -1,382 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Messaging and Requests" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Install" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n", - "package_string = f'\"syft{SYFT_VERSION}\"'\n", - "# %pip install {package_string} -q" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "\n", - "sy.requires(SYFT_VERSION)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "node = sy.orchestra.launch(\n", - " name=\"messages-requests-example-domain-1-ds\", port=7023, reset=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "## Setup" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "For the purpose of this tutorial we are creating a very simple dataset, which is created and owner by the root client" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "admin_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "# third party\n", - "import numpy as np\n", - "\n", - "dataset = sy.Dataset(\n", - " name=\"my dataset\",\n", - " asset_list=[\n", - " sy.Asset(name=\"my asset\", data=np.array([1, 2, 3]), mock=np.array([1, 1, 1]))\n", - " ],\n", - ")\n", - "admin_client.upload_dataset(dataset)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "admin_client.register(\n", - " name=\"Jane Doe\",\n", - " email=\"jane@caltech.edu\",\n", - " password=\"abc123\",\n", - " password_verify=\"abc123\",\n", - " institution=\"Caltech\",\n", - " website=\"https://www.caltech.edu/\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "guest_client = 
node.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "asset = guest_client.datasets[0].assets[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "@sy.syft_function_single_use()\n", - "def func():\n", - " # stdlib\n", - " import random\n", - "\n", - " data = list(range(100))\n", - " return sum(data) / 100 + random.random()\n", - "\n", - "\n", - "@sy.syft_function_single_use(data=asset)\n", - "def mean(data):\n", - " # stdlib\n", - " import random\n", - "\n", - " return sum(data) / len(data) + +random.random()\n", - "\n", - "\n", - "@sy.syft_function_single_use(data=asset)\n", - "def reveal_data(data):\n", - " return data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13", - "metadata": {}, - "outputs": [], - "source": [ - "# Create a project\n", - "new_project = sy.Project(\n", - " name=\"My Cool Project\",\n", - " description=\"\"\"Hi, I want to calculate the mean of your private data,\\\n", - " pretty please!\"\"\",\n", - " members=[guest_client],\n", - ")\n", - "new_project" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": {}, - "outputs": [], - "source": [ - "new_project.create_code_request(func, guest_client)\n", - "new_project.create_code_request(mean, guest_client)\n", - "new_project.create_code_request(reveal_data, guest_client)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": {}, - "outputs": [], - "source": [ - "project = new_project.start()\n", - "project" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "16", - "metadata": {}, - "outputs": [], - "source": [ - "dangerous_request = admin_client.projects[0].requests[-1]\n", - "dangerous_request.deny(reason=\"Dangerous request, you just want to reveal the data!\")" - ] - }, - { - "cell_type": "markdown", - "id": "17", - "metadata": {}, - "source": [ - "## Messaging" - ] - }, - { - "cell_type": "markdown", - "id": "18", - "metadata": {}, - "source": [ - "list notifications using client.notifications (messages sent and requests)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19", - "metadata": {}, - "outputs": [], - "source": [ - "guest_client.notifications" - ] - }, - { - "cell_type": "markdown", - "id": "20", - "metadata": {}, - "source": [ - "## Common Permission Errors" - ] - }, - { - "cell_type": "markdown", - "id": "21", - "metadata": {}, - "source": [ - "approve request that you dont have permission for example" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22", - "metadata": {}, - "outputs": [], - "source": [ - "guest_client.projects" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23", - "metadata": {}, - "outputs": [], - "source": [ - "guest_client.projects[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": {}, - "outputs": [], - "source": [ - "func_request = guest_client.projects[0].requests[0]\n", - "func_request" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25", - "metadata": {}, - "outputs": [], - "source": [ - "func_request.approve()" - ] - }, - { - "cell_type": "markdown", - "id": "26", - "metadata": {}, - "source": [ - "## Requesting Changes" - ] - }, - { - "cell_type": "markdown", - "id": "27", - "metadata": {}, - 
"source": [ - "request permission to an object via a pointer" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "28", - "metadata": {}, - "outputs": [], - "source": [ - "asset = guest_client.datasets[0].assets[0]\n", - "asset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29", - "metadata": {}, - "outputs": [], - "source": [ - "request = asset.pointer.request(guest_client)\n", - "request" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "30", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.8" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": true - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tutorials/deployments/00-deployment-types.ipynb b/notebooks/tutorials/deployments/00-deployment-types.ipynb new file mode 100644 index 00000000000..b9283a0c94c --- /dev/null +++ b/notebooks/tutorials/deployments/00-deployment-types.ipynb @@ -0,0 +1,99 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Introduction to PySyft Deployment Options\n", + "\n", + "PySyft offers various deployment options catering to different needs and environments. Each deployment option provides a unique set of advantages, allowing users to seamlessly integrate PySyft into their workflows, whether for local development, production deployment, or experimentation in cloud environments. Below, we explore the different deployment options supported by PySyft and provide insights into when each option is most suitable." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. Local Python Deployment\n", + "\n", + "This deployment option runs PySyft locally within a Python environment. It is lightweight and runs everything in-memory, making it ideal for quick prototyping and testing.\n", + "\n", + "**Recommended For:** \n", + "- Development and testing on resource-constrained systems without Docker support.\n", + "- Rapid experimentation with PySyft APIs.\n", + "\n", + "Follow [01-deploy-python.ipynb](./01-deploy-python.ipynb) for deployment instructions." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Single Container Deployment\n", + "\n", + "In this deployment, PySyft is encapsulated within a single Docker container, providing better isolation and portability compared to the local Python deployment.\n", + "\n", + "**Recommended For:**\n", + "- Resource-constrained systems with Docker support.\n", + "- Standardizing PySyft deployment across different environments.\n", + "\n", + "Follow [02-deploy-container.ipynb](./02-deploy-container.ipynb) for deployment instructions." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. 
Kubernetes Deployment\n", + "\n", + "This deployment option orchestrates the entire PySyft stack on a Kubernetes cluster, enabling scalable and efficient deployment in cloud or on-premises environments. Various Kubernetes configurations are available for deployment flexibility.\n", + "\n", + "**Recommended For:**\n", + "- Production-grade deployments requiring scalability and fault tolerance.\n", + "- Cloud-native environments where Kubernetes is the preferred orchestration tool.\n", + "\n", + " **[a. Local k3d Cluster Deployment](./03-deploy-k8s-k3d.ipynb)**\n", + " - Quick setup for local development and testing using a lightweight Kubernetes cluster.\n", + "\n", + " **[b. Azure Deployment](./04-deploy-k8s-azure.ipynb)**\n", + " - Deployment on Microsoft Azure cloud infrastructure for scalable and reliable operation.\n", + "\n", + " **[c. GCP Deployment](./05-deploy-k8s-gcp.ipynb)**\n", + " - Deployment on Google Cloud Platform for seamless integration with GCP services.\n", + "\n", + " **[d. AWS Deployment](./06-deploy-k8s-aws.ipynb)**\n", + " - Deployment on Amazon Web Services for robust and flexible cloud-based deployment." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4. Devspace Deployment\n", + "\n", + "This deployment option utilizes Devspace to streamline the development process for PySyft. It provides features such as local image building, port-forwarding, volume mounting, hot-reloading, and debugging to enhance the development experience.\n", + "\n", + "**Recommended For:**\n", + "- Developers contributing to PySyft codebase.\n", + "- Simplifying local development setup and debugging processes.\n", + "\n", + "Follow [07-deploy-devspace.ipynb](./07-deploy-devspace.ipynb) for deployment instructions." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Choosing the Right Deployment Option\n", + "\n", + "Selecting the appropriate deployment option depends on factors such as development objectives, resource constraints, scalability requirements, and familiarity with the deployment environment. For quick experimentation and local development, the local Python deployment or single container deployment may suffice. However, for production-grade deployments requiring scalability and reliability, Kubernetes deployment is recommended. Developers actively contributing to PySyft can benefit from the Devspace deployment option for a streamlined development experience." + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/tutorials/deployments/01-deploy-python.ipynb b/notebooks/tutorials/deployments/01-deploy-python.ipynb new file mode 100644 index 00000000000..0dedeb13ea5 --- /dev/null +++ b/notebooks/tutorials/deployments/01-deploy-python.ipynb @@ -0,0 +1,191 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Local in-memory python deployment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Introduction\n", + "\n", + "One of the quickest way to try out PySyft is to install the pre-built python package on your local environment using pip. The python package is lightweight and runs the PySyft stack in-memory.\n", + "\n", + "**Recommended For:**\n", + "- Development and testing on resource-constrained systems without Docker support.\n", + "- Rapid experimentation with PySyft APIs." 
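[Editor's aside] As a quick preview of the steps this notebook walks through one by one below, a condensed end-to-end sketch could look like this (the datasite name and port are illustrative and taken from the cells further down; it assumes `pip install syft` has already been run):

```python
# Minimal local quickstart sketch for the in-memory Python deployment.
import syft as sy

# Start a lightweight in-memory datasite with a local webserver.
server = sy.orchestra.launch(
    name="dev-mode-example-datasite-1", port=8020, reset=True, dev_mode=True
)

# Log in with the default development credentials.
client = server.login(email="info@openmined.org", password="changethis")

# Upload an (empty) example dataset.
dataset = sy.Dataset(name="my dataset", asset_list=[])
client.upload_dataset(dataset)

# Stop the datasite when done.
server.land()
```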
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prerequisites\n", + "Before we begin, ensure you have the following prerequisites installed on your system:\n", + "1. Python (3.10 - 3.12)\n", + "2. pip (or uv)\n", + "3. venv (optional, but recommended)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Deployment Steps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Installing Syft\n", + "1. Create and activate a python virtual environment (Optional, but recommended)\n", + " ```bash\n", + " python -m venv venv/\n", + " source venv/bin/activate\n", + " ```\n", + "\n", + "2. Install PySyft\n", + " ```bash\n", + " pip install syft\n", + " ```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Working with Python Datasite\n", + "\n", + "`PySyft` makes it very easy to develop against a datasite in a notebook by providing the `sy.orchestra` interface. It allows you to start a datasite with a webserver in a notebook in the background, which is a lightweight version of a Datasite that would be used in production. You can specify options such as what kind of database you are using, whether you want to use networking and how many processes you want to use. You can launch a Datasite by simply executing:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server = sy.orchestra.launch(\n", + " name=\"dev-mode-example-datasite-1\", port=8020, reset=True, dev_mode=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If we don't need a webserver (for development this is true in many cases), we can omit the port and instead use\n", + "\n", + "```python\n", + "server = sy.orchestra.launch(name=\"dev-mode-example-datasite-1\", dev_mode=True, reset=True)\n", + "```\n", + "\n", + "One of the benefits of not using a port is that you can use a debugger and set breakpoints within api calls. This makes debugging way faster in many cases.\n", + "\n", + "Now, we are ready to start using the datasite. The datasite comes with test login credentials for the admin." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once you are logged in, you are ready to start using the datasite, for instance for creating a dataset (this one is empty, just as a example)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dataset = sy.Dataset(name=\"my dataset\", asset_list=[])\n", + "client.upload_dataset(dataset)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Lastly to stop or terminate your Datasite, we can execute the following command:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Steps\n", + "Congratulations! You have successfully deployed a local in-memory PySyft stack using python. 
Now, you can explore its capabilities and use cases through our API example notebooks:\n", + "\n", + "📝 [API Example Notebooks](../../api)\n", + "- [00-load-data.ipynb](../../api/0.8/00-load-data.ipynb)\n", + "- [01-submit-code.ipynb](../../api/0.8/01-submit-code.ipynb)\n", + "- [02-review-code-and-approve.ipynb](../../api/0.8/02-review-code-and-approve.ipynb)\n", + "- [03-data-scientist-download-result.ipynb](../../api/0.8/03-data-scientist-download-result.ipynb)\n", + "- [04-pytorch-example.ipynb](../../api/0.8/04-pytorch-example.ipynb)\n", + "- [05-custom-policy.ipynb](../../api/0.8/05-custom-policy.ipynb)\n", + "- [06-multiple-code-requests.ipynb](../../api/0.8/06-multiple-code-requests.ipynb)\n", + "- [07-datasite-register-control-flow.ipynb](../../api/0.8/07-datasite-register-control-flow.ipynb)\n", + "- [08-code-version.ipynb](../../api/0.8/08-code-version.ipynb)\n", + "- [09-blob-storage.ipynb](../../api/0.8/09-blob-storage.ipynb)\n", + "- [10-container-images.ipynb](../../api/0.8/10-container-images.ipynb)\n", + "- [11-container-images-k8s.ipynb](../../api/0.8/11-container-images-k8s.ipynb)\n", + "\n", + "Feel free to explore these notebooks to get started with PySyft and unlock its full potential for privacy-preserving machine learning!" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "PySyft", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/tutorials/deployments/02-deploy-container.ipynb b/notebooks/tutorials/deployments/02-deploy-container.ipynb new file mode 100644 index 00000000000..6c8b5bbfdb8 --- /dev/null +++ b/notebooks/tutorials/deployments/02-deploy-container.ipynb @@ -0,0 +1,171 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Single container deployment" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "source": [ + "## Introduction\n", + "\n", + "In this deployment, PySyft is encapsulated within a single Docker container, providing better isolation and portability compared to the local Python deployment.\n", + "\n", + "**Recommended For:**\n", + "- Resource-constrained systems with Docker support.\n", + "- Standardizing PySyft deployment across different environments." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prerequisites\n", + "Before we begin, ensure you have [Docker](https://docs.docker.com/install/) installed on your system." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Deployment steps\n", + "\n", + "You can execute the below command in your terminal to run the PySyft stack within a single docker container on port `8080`." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Set Your Preferred Syft Version\n", + "\n", + "```sh\n", + "SYFT_VERSION=\"\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "``` bash\n", + "docker run -it \\\n", + " -e SERVER_NAME=syft-example-datasite-1 \\\n", + " -e SERVER_TYPE=datasite \\\n", + " -e N_CONSUMERS=1 \\\n", + " -e SINGLE_CONTAINER_MODE=true \\\n", + " -e CREATE_PRODUCER=true \\\n", + " -e INMEMORY_WORKERS=true \\\n", + " -p 8080:80 --add-host=host.docker.internal:host-gateway \\\n", + " --name syft-example-datasite-1 openmined/syft-backend:$SYFT_VERSION\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Working with the single container deployment\n", + "\n", + "PySyft makes it very simple to connect to any existing Syft cluster by providing the `sy.orchestra` interface. You can connect to the datasite by executing these steps in your jupyter notebook:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```python3\n", + "# syft absolute\n", + "import syft as sy\n", + "\n", + "server = sy.orchestra.launch(name=\"syft-example-datasite-1\", deploy_to=\"remote\")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This will return a server handle by connecting to `http://localhost:8080` which is the default host and port where your docker container will be running. You can connect to a different host and port by setting the environment variables `SERVER_URL` and `SERVER_PORT`.\n", + "```python\n", + "import os\n", + "\n", + "os.environ[\"SERVER_URL\"] = \"\"\n", + "os.environ[\"SERVER_PORT\"] = \"\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we are ready to start using the datasite. The datasite comes with default login credentials for the admin." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```python3\n", + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once you are logged in, you are ready to start using the datasite, for instance for creating a dataset (this one is empty, just as an example)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```python3\n", + "dataset = sy.Dataset(name=\"my dataset\", asset_list=[])\n", + "client.upload_dataset(dataset)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Steps\n", + "Congratulations! You have successfully deployed PySyft in a single Docker container. 
Now, you can explore its capabilities and use cases through our API example notebooks:\n", + "\n", + "📝 [API Example Notebooks](../../api)\n", + "- [00-load-data.ipynb](../../api/0.8/00-load-data.ipynb)\n", + "- [01-submit-code.ipynb](../../api/0.8/01-submit-code.ipynb)\n", + "- [02-review-code-and-approve.ipynb](../../api/0.8/02-review-code-and-approve.ipynb)\n", + "- [03-data-scientist-download-result.ipynb](../../api/0.8/03-data-scientist-download-result.ipynb)\n", + "- [04-pytorch-example.ipynb](../../api/0.8/04-pytorch-example.ipynb)\n", + "- [05-custom-policy.ipynb](../../api/0.8/05-custom-policy.ipynb)\n", + "- [06-multiple-code-requests.ipynb](../../api/0.8/06-multiple-code-requests.ipynb)\n", + "- [07-datasite-register-control-flow.ipynb](../../api/0.8/07-datasite-register-control-flow.ipynb)\n", + "- [08-code-version.ipynb](../../api/0.8/08-code-version.ipynb)\n", + "- [09-blob-storage.ipynb](../../api/0.8/09-blob-storage.ipynb)\n", + "- [10-container-images.ipynb](../../api/0.8/10-container-images.ipynb)\n", + "- [11-container-images-k8s.ipynb](../../api/0.8/11-container-images-k8s.ipynb)\n", + "\n", + "Feel free to explore these notebooks to get started with PySyft and unlock its full potential for privacy-preserving machine learning!" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/tutorials/deployments/03-deploy-k8s-k3d.ipynb b/notebooks/tutorials/deployments/03-deploy-k8s-k3d.ipynb new file mode 100644 index 00000000000..a92f7987e68 --- /dev/null +++ b/notebooks/tutorials/deployments/03-deploy-k8s-k3d.ipynb @@ -0,0 +1,231 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Deploying PySyft on a Local Kubernetes Cluster" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "source": [ + "## Introduction\n", + "Welcome to our quick start guide for deploying PySyft on a local Kubernetes cluster! PySyft is a powerful framework for privacy-preserving machine learning, and deploying it on Kubernetes allows an easy way to quickly try out the full PySyft stack on your own system. This guide will walk you through the process step by step." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "source": [ + "## Prerequisites\n", + "Before we begin, ensure you have the following prerequisites installed on your system:\n", + "1. [Docker](https://docs.docker.com/install/): Docker is required to create and manage containers.\n", + "2. [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl): kubectl is the command-line tool for interacting with Kubernetes clusters.\n", + "3. [k3d](https://k3d.io/v5.6.3/#installation): k3d is used to create local Kubernetes clusters.\n", + "4. [Helm](https://helm.sh/docs/intro/install/): Helm is the package manager for Kubernetes, used to install and manage applications on Kubernetes clusters." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Deployment Steps\n", + "\n", + "### 1. Create a Local Kubernetes Cluster\n", + "First, create a local Kubernetes cluster named \"syft\" using k3d:\n", + "```sh\n", + "k3d cluster create syft -p \"8080:80@loadbalancer\"\n", + "```\n", + "\n", + "### 2. 
Add and Update Helm Repo for Syft\n", + "Add the Helm repository for PySyft and update it:\n", + "```sh\n", + "helm repo add openmined https://openmined.github.io/PySyft/helm\n", + "helm repo update openmined\n", + "```\n", + "\n", + "### 3. Search for Available Syft Versions\n", + "Explore available versions of PySyft using Helm:\n", + "```sh\n", + "helm search repo openmined/syft --versions --devel\n", + "```\n", + "\n", + "### 4. Set Your Preferred Syft Chart Version\n", + "Set the version of PySyft you want to install:\n", + "```sh\n", + "SYFT_VERSION=\"\"\n", + "```\n", + "\n", + "### 5. Provision Helm Charts\n", + "Install PySyft on the Kubernetes cluster with your preferred version:\n", + "```sh\n", + "helm install my-syft openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className=\"traefik\"\n", + "```\n", + "\n", + "
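To confirm that the charts were provisioned successfully, you can check the Helm release and wait for the pods to become ready. This is a minimal sketch, assuming the release name `my-syft` and the `syft` namespace used above; the exact pod names may vary between chart versions:

```sh
# Show the status of the Helm release installed in step 5
helm status my-syft --namespace syft

# Watch the Syft pods in the cluster created in step 1 until they are all Running
kubectl --context=k3d-syft get pods --namespace syft --watch
```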
\n", + "💡 Tip 1:\n", + "\n", + "If you want to deploy your Kubernetes cluster in a resource-constrained environment, use the following flags to override the default configurations. Please note that you will need at least 1 CPU and 2 GB of RAM on Docker, and some tests may not work in such low-resource environments:\n", + "\n", + "```sh\n", + "helm install my-syft openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className=\"traefik\" --set server.resourcesPreset=null --set seaweedfs.resourcesPreset=null --set postgres.resourcesPreset=null --set registry.resourcesPreset=null --set proxy.resourcesPreset=null --set frontend.resourcesPreset=null\n", + "```\n", + "\n", + "
\n", + "\n", + "
\n", + "💡 Tip 2:\n", + "\n", + "If you would like to set your own default password even for the production style deployment, use the following command:\n", + "\n", + "```sh\n", + "helm install my-syft openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className=\"traefik\" --set global.randomizedSecrets=false --set server.secret.defaultRootPassword=\"changethis\" --set seaweedfs.secret.s3RootPassword=\"admin\" --set postgres.secret.rootPassword=\"example\"\n", + "```\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "source": [ + "## Working with the local Kubernetes deployment\n", + "\n", + "PySyft makes it very simple to connect to your existing Syft cluster by providing the `sy.orchestra` interface. You can connect to the datasite by executing these steps in your jupyter notebook:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```python\n", + "# syft absolute\n", + "import syft as sy\n", + "\n", + "server = sy.orchestra.launch(name=\"syft-example-datasite-1\", deploy_to=\"remote\")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This will return a server handle by connecting to `http://localhost:8080` which is the default host and port where your kubernetes cluster will be running. You can connect to a different host and port by setting the environment variables `SERVER_URL` and `SERVER_PORT`.\n", + "\n", + "```python\n", + "import os\n", + "\n", + "os.environ[\"SERVER_URL\"] = \"\"\n", + "os.environ[\"SERVER_PORT\"] = \"\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we are ready to start using the datasite. Since helm is a product grade deployement stack, the datasite comes with a randomized password for the default email credentials for the admin. Either run with Step 5 with your custom password or to extract the randomized password using `kubectl`, run the following command (in case you use a custom cluster name in step 1, replace `--context=k3d-$CLUSTER_NAME` appropriately): \n", + "\n", + "```sh\n", + "kubectl --context=k3d-syft get secret backend-secret -n syft -o jsonpath='{.data.defaultRootPassword}' | base64 --decode\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "and use the password instead of \"changethis\" below:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```python\n", + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once you are logged in, you are ready to start using the datasite, for instance for creating a dataset (this one is empty, just as a example)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```python3\n", + "dataset = sy.Dataset(name=\"my dataset\", asset_list=[])\n", + "client.upload_dataset(dataset)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Steps\n", + "Congratulations! You have successfully deployed PySyft on your local Kubernetes cluster. 
Now, you can explore its capabilities and use cases through our API example notebooks:\n", + "\n", + "📝 [API Example Notebooks](../../api)\n", + "- [00-load-data.ipynb](../../api/0.8/00-load-data.ipynb)\n", + "- [01-submit-code.ipynb](../../api/0.8/01-submit-code.ipynb)\n", + "- [02-review-code-and-approve.ipynb](../../api/0.8/02-review-code-and-approve.ipynb)\n", + "- [03-data-scientist-download-result.ipynb](../../api/0.8/03-data-scientist-download-result.ipynb)\n", + "- [04-pytorch-example.ipynb](../../api/0.8/04-pytorch-example.ipynb)\n", + "- [05-custom-policy.ipynb](../../api/0.8/05-custom-policy.ipynb)\n", + "- [06-multiple-code-requests.ipynb](../../api/0.8/06-multiple-code-requests.ipynb)\n", + "- [07-datasite-register-control-flow.ipynb](../../api/0.8/07-datasite-register-control-flow.ipynb)\n", + "- [08-code-version.ipynb](../../api/0.8/08-code-version.ipynb)\n", + "- [09-blob-storage.ipynb](../../api/0.8/09-blob-storage.ipynb)\n", + "- [10-container-images.ipynb](../../api/0.8/10-container-images.ipynb)\n", + "- [11-container-images-k8s.ipynb](../../api/0.8/11-container-images-k8s.ipynb)\n", + "\n", + "Feel free to explore these notebooks to get started with PySyft and unlock its full potential for privacy-preserving machine learning!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "PySyft", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/tutorials/deployments/04-deploy-k8s-azure.ipynb b/notebooks/tutorials/deployments/04-deploy-k8s-azure.ipynb new file mode 100644 index 00000000000..71c158afddd --- /dev/null +++ b/notebooks/tutorials/deployments/04-deploy-k8s-azure.ipynb @@ -0,0 +1,18 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "TODO" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/tutorials/deployments/05-deploy-k8s-gcp.ipynb b/notebooks/tutorials/deployments/05-deploy-k8s-gcp.ipynb new file mode 100644 index 00000000000..71c158afddd --- /dev/null +++ b/notebooks/tutorials/deployments/05-deploy-k8s-gcp.ipynb @@ -0,0 +1,18 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "TODO" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/tutorials/deployments/06-deploy-k8s-aws.ipynb b/notebooks/tutorials/deployments/06-deploy-k8s-aws.ipynb new file mode 100644 index 00000000000..71c158afddd --- /dev/null +++ b/notebooks/tutorials/deployments/06-deploy-k8s-aws.ipynb @@ -0,0 +1,18 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "TODO" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/tutorials/deployments/07-deploy-devspace.ipynb b/notebooks/tutorials/deployments/07-deploy-devspace.ipynb new file mode 100644 index 00000000000..1033814036b --- /dev/null +++ b/notebooks/tutorials/deployments/07-deploy-devspace.ipynb @@ -0,0 +1,137 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + 
"source": [ + "# Developing over PySyft\n", + "This guide is meant for developers trying to deploy PySyft locally on a Kubernetes cluster. PySyft is a powerful framework for privacy-preserving data analysis and machine learning, and deploying it on Kubernetes allows an easy way to quickly try out the full PySyft stack on your own system. This guide will walk you through the process step by step. \n", + "\n", + "Follow [00-deployment-types.ipynb](./00-deployment-types.ipynb) for other deployment options and an overview of different deployment types." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "source": [ + "## Prerequisites\n", + "Before we begin, ensure you have the following prerequisites installed on your system. You may need a package management systems like [brew](https://brew.sh/) for installation of dependencies and packages. \n", + "* [devspace](https://www.devspace.sh/docs/getting-started/installation?x0=3): DevSpace lets you automate all build and deployment steps and enables interactive modification.\n", + "* [Docker](https://docs.docker.com/install/): Docker is required to create and manage containers.\n", + "* [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl): kubectl is the command-line tool for interacting with Kubernetes clusters.\n", + "* [k3d](https://k3d.io/v5.6.3/#installation): k3d is used to create local Kubernetes clusters.\n", + "* [tox](https://pypi.org/project/tox/): We use tox as a command executor. \n", + "* [uv](): This allows for faster tox environment builds and speeds up dev time. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "source": [ + "## Dev Environment set-up" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "source": [ + "In order to perform an isolated setup, create a virtual environment. Below are some instructions to get started (example `environment.yml` and `requirements.txt` file are [provided below](#dependency-files)):\n", + "| Conda | Venv |\n", + "|:---|:---|\n", + "| 1. Create `environment.yml` with name `syft-dev`
2. Run `conda env create -f environment.yml`
3. Run `conda activate syft-dev` | 1. Create a `requirements.txt` file
2. Run `python -m venv syft-dev`
3. Run `.\\syft-dev\\Scripts\\activate` for Windows or
`source syft-dev/bin/activate` for Mac/Linux
4. `pip install -r requirements.txt`
5. `pip install -e \"packages/syft[ dev ]\"` |\n", + "\n", + "If you would like to do a non-isolated install then install the dependencies using [brew](https://brew.sh/) as follows: \n", + "* `brew install helm devspace k9s k3d`\n", + "* `pip install -e 'packages/syft[dev]'`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "source": [ + "## Running the k8s cluster\n", + "\n", + "We have a number of tox commands that help with the setup of the local k8s cluster.\n", + "\n", + "* For starting the cluster & registry (run this once initially or after every hard reset)\n", + " - `tox -e dev.k8s.start` to start a local registry + cluster and patch hosts file\n", + "* For deploying syft (run this each time you update the code or helm charts)\n", + " - `tox -e dev.k8s.deploy` to deploy syft on the cluster\n", + " - `tox -e dev.k8s.hotreload` to deploy syft on the cluster with hot reloading and port forwards (run once and keep editing that code)\n", + "* For cleaning up (run as required)\n", + " - `tox -e dev.k8s.cleanup` only removes the syft deployment but keep the cluster running (soft reset, re-run using `dev.k8s.deploy`)\n", + " - `tox -e dev.k8s.destroy` deletes the syft deployment, cluster, but keeps the registry running (mid reset, re-run using `dev.k8s.start`)\n", + " - `tox -e dev.k8s.destroyall` deletes the syft deployment, cluster and registry (hard reset, run re-run using `dev.k8s.start`)\n", + "\n", + "You can use the command `k9s` or `lazydocker` (will require installing lazydocker) to view your cluster.\n", + "\n", + "\n", + "#### Launching Different Server Types\n", + "Alternatively, you can also use the following commands to launch different server types:\n", + "* Launching a Datasite: `CLUSTER_NAME=testdatasite1 CLUSTER_HTTP_PORT=9082 tox -e dev.k8s.launch.datasite`\n", + "* Launching a Gateway: `CLUSTER_NAME=testgateway1 CLUSTER_HTTP_PORT=9081 tox -e dev.k8s.launch.gateway`\n", + "* Launching a Enclave: `CLUSTER_NAME=testenclave1 CLUSTER_HTTP_PORT=9083 tox -e dev.k8s.launch.enclave`\n", + "* Launching Servers with `hotreload` using tox posargs: `tox -e dev.k8s.launch.datasite -- hotreload`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Steps\n", + "Congratulations! You have successfully deployed PySyft on your local Kubernetes cluster. 
Now, you can explore its capabilities and use cases through our API example notebooks:\n", + "\n", + "📝 [API Example Notebooks](../../api)\n", + "- [00-load-data.ipynb](../../api/0.8/00-load-data.ipynb)\n", + "- [01-submit-code.ipynb](../../api/0.8/01-submit-code.ipynb)\n", + "- [02-review-code-and-approve.ipynb](../../api/0.8/02-review-code-and-approve.ipynb)\n", + "- [03-data-scientist-download-result.ipynb](../../api/0.8/03-data-scientist-download-result.ipynb)\n", + "- [04-jax-example.ipynb](../../api/0.8/04-jax-example.ipynb)\n", + "- [05-custom-policy.ipynb](../../api/0.8/05-custom-policy.ipynb)\n", + "- [06-multiple-code-requests.ipynb](../../api/0.8/06-multiple-code-requests.ipynb)\n", + "- [07-datasite-register-control-flow.ipynb](../../api/0.8/07-datasite-register-control-flow.ipynb)\n", + "- [08-code-version.ipynb](../../api/0.8/08-code-version.ipynb)\n", + "- [09-blob-storage.ipynb](../../api/0.8/09-blob-storage.ipynb)\n", + "- [10-container-images.ipynb](../../api/0.8/10-container-images.ipynb)\n", + "- [11-container-images-k8s.ipynb](../../api/0.8/11-container-images-k8s.ipynb)\n", + "\n", + "Feel free to explore these notebooks to get started with PySyft and unlock its full potential for privacy-preserving machine learning!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Dependency files\n", + "Example `environment.yml` and `requirement.txt` files:\n", + "\n", + "| Conda | Venv |\n", + "|:---|:---|\n", + "| `environment.yml`
name: syft-dev
channels:
- conda-forge
dependencies:
- python=3.12
- jupyter
- ipython
- ipykernel
- pandas
- pre-commit
- black
- mypy
- flake8
- ruff
- isort
- pip
- pip:
- pipdeptree
- johnnydep
- tox
- tox-uv
- uvloop
- \"-e packages/syft[dev]\"
 | `requirements.txt` 
 # Standard dependencies
jupyter
ipython
ipykernel
pandas
pre-commit
black
mypy
flake8
ruff
isort

# Pip dependencies
pipdeptree
johnnydep
tox
tox-uv
uvloop
 |\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
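Once one of the server types above is running (for example, a datasite launched with `CLUSTER_NAME=testdatasite1 CLUSTER_HTTP_PORT=9082 tox -e dev.k8s.launch.datasite`), you can connect to it from a notebook just like in the other deployment guides. The snippet below is a minimal sketch under a few assumptions: the datasite is reachable on `localhost` port `9082`, and the dev deployment still uses the default admin credentials.

```python
# stdlib
import os

# syft absolute
import syft as sy

# Assumed host and port for the dev datasite launched via tox above
os.environ["SERVER_URL"] = "http://localhost"
os.environ["SERVER_PORT"] = "9082"

server = sy.orchestra.launch(name="testdatasite1", deploy_to="remote")
client = server.login(email="info@openmined.org", password="changethis")
```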
diff --git a/notebooks/tutorials/enclaves/Enclave-single-notebook-DO-DS.ipynb b/notebooks/tutorials/enclaves/Enclave-single-notebook-DO-DS.ipynb
deleted file mode 100644
index 6c7a85ab0a5..00000000000
--- a/notebooks/tutorials/enclaves/Enclave-single-notebook-DO-DS.ipynb
+++ /dev/null
@@ -1,734 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# third party\n",
-    "from recordlinkage.datasets import load_febrl4\n",
-    "\n",
-    "# syft absolute\n",
-    "import syft as sy"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "1",
-   "metadata": {},
-   "source": [
-    "# Create Nodes and connect to gateway"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "2",
-   "metadata": {},
-   "source": [
-    "create enclave node"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "3",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Local Python Node\n",
-    "enclave_node = sy.orchestra.launch(\n",
-    "    name=\"Enclave\",\n",
-    "    node_type=sy.NodeType.ENCLAVE,\n",
-    "    local_db=True,\n",
-    "    dev_mode=True,\n",
-    "    reset=True,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "4",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# syft absolute\n",
-    "from syft.abstract_node import NodeType\n",
-    "\n",
-    "assert enclave_node.python_node.node_type == NodeType.ENCLAVE"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "5",
-   "metadata": {},
-   "source": [
-    "Create canada node & italy node"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "6",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ca_node = sy.orchestra.launch(name=\"Canada\", local_db=True, reset=True, dev_mode=True)\n",
-    "it_node = sy.orchestra.launch(name=\"Italy\", local_db=True, reset=True, dev_mode=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "7",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "assert ca_node.python_node.node_type == NodeType.DOMAIN\n",
-    "assert it_node.python_node.node_type == NodeType.DOMAIN"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "8",
-   "metadata": {},
-   "source": [
-    "Create gateway Node"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "9",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "gateway_node = sy.orchestra.launch(\n",
-    "    name=\"gateway\",\n",
-    "    node_type=sy.NodeType.GATEWAY,\n",
-    "    local_db=True,\n",
-    "    reset=True,\n",
-    "    dev_mode=True,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "10",
-   "metadata": {},
-   "source": [
-    "Connect nodes to gateway"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "11",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "enclave_guest_client = enclave_node.client\n",
-    "ca_guest_client = ca_node.client\n",
-    "it_guest_client = it_node.client"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "12",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# syft absolute\n",
-    "from syft.client.domain_client import DomainClient\n",
-    "from syft.client.enclave_client import EnclaveClient\n",
-    "from syft.client.gateway_client import GatewayClient\n",
-    "\n",
-    "assert isinstance(enclave_guest_client, EnclaveClient)\n",
-    "assert isinstance(ca_guest_client, DomainClient)\n",
-    "assert isinstance(it_guest_client, DomainClient)\n",
-    "assert isinstance(gateway_node.client, GatewayClient)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "13",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# syft absolute\n",
-    "# Connect enclave to gateway\n",
-    "from syft.service.response import SyftSuccess\n",
-    "\n",
-    "res = enclave_guest_client.connect_to_gateway(handle=gateway_node)\n",
-    "assert isinstance(res, SyftSuccess)\n",
-    "res"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "14",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Connect Canada to gateway\n",
-    "res = ca_guest_client.connect_to_gateway(handle=gateway_node)\n",
-    "assert isinstance(res, SyftSuccess)\n",
-    "res"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "15",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Connect Italy to gateway\n",
-    "res = it_guest_client.connect_to_gateway(handle=gateway_node)\n",
-    "assert isinstance(res, SyftSuccess)\n",
-    "res"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "16",
-   "metadata": {},
-   "source": [
-    "# DOs"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "17",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "do_ca_client = ca_node.login(email=\"info@openmined.org\", password=\"changethis\")\n",
-    "do_it_client = it_node.login(email=\"info@openmined.org\", password=\"changethis\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "18",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# syft absolute\n",
-    "from syft.client.domain_client import DomainClient\n",
-    "\n",
-    "assert isinstance(do_ca_client, DomainClient)\n",
-    "assert isinstance(do_it_client, DomainClient)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "19",
-   "metadata": {},
-   "source": [
-    "## Upload dataset"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "20",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Using public datasets from  Freely Extensible Biomedical Record Linkage (Febrl)\n",
-    "canada_census_data, italy_census_data = load_febrl4()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "21",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for dataset, client, country in zip(\n",
-    "    [canada_census_data, italy_census_data],\n",
-    "    [do_ca_client, do_it_client],\n",
-    "    [\"Canada\", \"Italy\"],\n",
-    "):\n",
-    "    private_data, mock_data = dataset[:2500], dataset[2500:]\n",
-    "    dataset = sy.Dataset(\n",
-    "        name=f\"{country} - FEBrl Census Data\",\n",
-    "        description=\"abc\",\n",
-    "        asset_list=[\n",
-    "            sy.Asset(\n",
-    "                name=\"census_data\",\n",
-    "                mock=mock_data,\n",
-    "                data=private_data,\n",
-    "                shape=private_data.shape,\n",
-    "                mock_is_real=True,\n",
-    "            )\n",
-    "        ],\n",
-    "    )\n",
-    "    client.upload_dataset(dataset)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "22",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "assert len(do_ca_client.datasets.get_all()) == 1\n",
-    "assert len(do_it_client.datasets.get_all()) == 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "23",
-   "metadata": {},
-   "source": [
-    "## create accounts for DS"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "24",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for client in [do_ca_client, do_it_client]:\n",
-    "    res = client.register(\n",
-    "        name=\"Sheldon\",\n",
-    "        email=\"sheldon@caltech.edu\",\n",
-    "        password=\"changethis\",\n",
-    "        password_verify=\"changethis\",\n",
-    "    )\n",
-    "    assert isinstance(res, SyftSuccess)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "25",
-   "metadata": {},
-   "source": [
-    "# DS"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "26",
-   "metadata": {},
-   "source": [
-    "## Login into gateway as guest"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "27",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ds_gateway_client = gateway_node.client"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "28",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Explore the domains and enclaves connected to the gateway\n",
-    "ds_gateway_client.domains"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "29",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Log into canada as proxy_client\n",
-    "ds_ca_proxy_client = ds_gateway_client.domains[0]\n",
-    "ds_ca_proxy_client = ds_ca_proxy_client.login(\n",
-    "    email=\"sheldon@caltech.edu\", password=\"changethis\"\n",
-    ")\n",
-    "assert ds_ca_proxy_client.name == \"Canada\"\n",
-    "assert ds_ca_proxy_client.connection.proxy_target_uid == do_ca_client.id\n",
-    "assert isinstance(ds_ca_proxy_client, DomainClient)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "30",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Log into italy as proxy_client\n",
-    "ds_it_proxy_client = ds_gateway_client.domains[1]\n",
-    "ds_it_proxy_client = ds_it_proxy_client.login(\n",
-    "    email=\"sheldon@caltech.edu\", password=\"changethis\"\n",
-    ")\n",
-    "assert ds_it_proxy_client.name == \"Italy\"\n",
-    "assert ds_it_proxy_client.connection.proxy_target_uid == do_it_client.id\n",
-    "assert isinstance(ds_it_proxy_client, DomainClient)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "31",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Create an account and log into enclave as proxy client\n",
-    "ds_enclave_proxy_client = ds_gateway_client.enclaves[0]\n",
-    "ds_enclave_proxy_client = ds_enclave_proxy_client.login(\n",
-    "    email=\"sheldon@caltech.edu\", password=\"changethis\", name=\"Sheldon\", register=True\n",
-    ")\n",
-    "assert ds_enclave_proxy_client.name == \"Enclave\"\n",
-    "assert ds_enclave_proxy_client.connection.proxy_target_uid == enclave_guest_client.id\n",
-    "assert isinstance(ds_enclave_proxy_client, EnclaveClient)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "32",
-   "metadata": {},
-   "source": [
-    "## Find datasets"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "33",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "canada_census_data = ds_ca_proxy_client.datasets[-1].assets[0]\n",
-    "italy_census_data = ds_it_proxy_client.datasets[-1].assets[0]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "34",
-   "metadata": {},
-   "source": [
-    "## Create Request"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "35",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "@sy.syft_function_single_use(\n",
-    "    canada_census_data=canada_census_data,\n",
-    "    italy_census_data=italy_census_data,\n",
-    "    share_results_with_owners=True,\n",
-    ")\n",
-    "def compute_census_matches(canada_census_data, italy_census_data):\n",
-    "    # third party\n",
-    "    import recordlinkage\n",
-    "\n",
-    "    # Index step\n",
-    "    indexer = recordlinkage.Index()\n",
-    "    indexer.block(\"given_name\")\n",
-    "\n",
-    "    candidate_links = indexer.index(canada_census_data, italy_census_data)\n",
-    "\n",
-    "    # Comparison step\n",
-    "    compare_cl = recordlinkage.Compare()\n",
-    "\n",
-    "    compare_cl.exact(\"given_name\", \"given_name\", label=\"given_name\")\n",
-    "    compare_cl.string(\n",
-    "        \"surname\", \"surname\", method=\"jarowinkler\", threshold=0.85, label=\"surname\"\n",
-    "    )\n",
-    "    compare_cl.exact(\"date_of_birth\", \"date_of_birth\", label=\"date_of_birth\")\n",
-    "    compare_cl.exact(\"suburb\", \"suburb\", label=\"suburb\")\n",
-    "    compare_cl.exact(\"state\", \"state\", label=\"state\")\n",
-    "    compare_cl.string(\"address_1\", \"address_1\", threshold=0.85, label=\"address_1\")\n",
-    "\n",
-    "    features = compare_cl.compute(\n",
-    "        candidate_links, canada_census_data, italy_census_data\n",
-    "    )\n",
-    "\n",
-    "    # Classification step\n",
-    "    matches = features[features.sum(axis=1) > 3]\n",
-    "\n",
-    "    return len(matches)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "36",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Check result of mock data execution\n",
-    "mock_result = compute_census_matches(\n",
-    "    canada_census_data=canada_census_data.mock,\n",
-    "    italy_census_data=italy_census_data.mock,\n",
-    ")\n",
-    "mock_result"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "37",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "req = ds_enclave_proxy_client.request_code_execution(compute_census_matches)\n",
-    "req"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "38",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "assert isinstance(req, sy.service.request.request.Request)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "39",
-   "metadata": {},
-   "source": [
-    "# DOs"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "40",
-   "metadata": {},
-   "source": [
-    "## Approve"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "41",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for client in [do_ca_client, do_it_client]:\n",
-    "    res = client.requests[-1].approve()\n",
-    "    assert isinstance(res, SyftSuccess)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "42",
-   "metadata": {},
-   "source": [
-    "# DS"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "43",
-   "metadata": {},
-   "source": [
-    "##  Get result"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "44",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "status = ds_enclave_proxy_client.code.get_all()[-1].status\n",
-    "status"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "45",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for st, _ in status.status_dict.values():\n",
-    "    assert st == sy.service.request.request.UserCodeStatus.APPROVED"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "46",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ds_enclave_proxy_client.code[-1].output_policy"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "47",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "result_pointer = ds_enclave_proxy_client.code.compute_census_matches(\n",
-    "    canada_census_data=canada_census_data, italy_census_data=italy_census_data\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "48",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "result_pointer"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "49",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "result_pointer.syft_action_data == 858"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "50",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "real_result = result_pointer.get()\n",
-    "real_result"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "51",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "assert real_result == 813"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "52",
-   "metadata": {},
-   "source": [
-    "# DO"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "53",
-   "metadata": {},
-   "source": [
-    "## Can also get the result"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "54",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "request = do_ca_client.requests[0]\n",
-    "request"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "55",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "result_ptr = request.get_results()\n",
-    "result_ptr"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "56",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "assert result_ptr.syft_action_data == 813"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "57",
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.16"
-  },
-  "toc": {
-   "base_numbering": 1,
-   "nav_menu": {},
-   "number_sections": true,
-   "sideBar": true,
-   "skip_h1_title": false,
-   "title_cell": "Table of Contents",
-   "title_sidebar": "Contents",
-   "toc_cell": false,
-   "toc_position": {
-    "height": "calc(100% - 180px)",
-    "left": "10px",
-    "top": "150px",
-    "width": "358.398px"
-   },
-   "toc_section_display": true,
-   "toc_window_display": true
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/notebooks/tutorials/enclaves/Enclave-single-notebook-high-low-network.ipynb b/notebooks/tutorials/enclaves/Enclave-single-notebook-high-low-network.ipynb
deleted file mode 100644
index 95df68875ba..00000000000
--- a/notebooks/tutorials/enclaves/Enclave-single-notebook-high-low-network.ipynb
+++ /dev/null
@@ -1,1086 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# third party\n",
-    "from recordlinkage.datasets import load_febrl4\n",
-    "\n",
-    "# syft absolute\n",
-    "import syft as sy"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "1",
-   "metadata": {},
-   "source": [
-    "# Create Nodes"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "2",
-   "metadata": {},
-   "source": [
-    "## Staging Low side"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "3",
-   "metadata": {},
-   "source": [
-    "create enclave node"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "4",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "embassador_node_low = sy.Orchestra.launch(\n",
-    "    name=\"ambassador node\",\n",
-    "    node_side_type=\"low\",\n",
-    "    local_db=True,\n",
-    "    reset=True,\n",
-    "    #     enable_warnings=True,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "5",
-   "metadata": {},
-   "source": [
-    "Create canada node & italy node"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "6",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ca_node_low = sy.Orchestra.launch(\n",
-    "    name=\"canada-1\",\n",
-    "    node_side_type=\"low\",\n",
-    "    local_db=True,\n",
-    "    reset=True,\n",
-    "    #     enable_warnings=True,\n",
-    ")\n",
-    "it_node_low = sy.Orchestra.launch(\n",
-    "    name=\"italy-1\",\n",
-    "    node_side_type=\"low\",\n",
-    "    local_db=True,\n",
-    "    reset=True,\n",
-    "    #     enable_warnings=True,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "7",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "gateway_node_low = sy.orchestra.launch(\n",
-    "    name=\"gateway-1\",\n",
-    "    node_type=\"gateway\",\n",
-    "    node_side_type=\"low\",\n",
-    "    local_db=True,\n",
-    "    reset=True,\n",
-    "    dev_mode=True,\n",
-    "    #                                        enable_warnings=True\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "8",
-   "metadata": {},
-   "source": [
-    "## High side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "9",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "enclave_node_high = sy.orchestra.launch(\n",
-    "    name=\"enclave node\",\n",
-    "    node_type=\"enclave\",\n",
-    "    reset=True,\n",
-    "    #     enable_warnings=True,\n",
-    ")\n",
-    "ca_node_high = sy.Orchestra.launch(\n",
-    "    name=\"canada-2\",\n",
-    "    local_db=True,\n",
-    "    reset=True,\n",
-    "    #     enable_warnings=True,\n",
-    ")\n",
-    "it_node_high = sy.Orchestra.launch(\n",
-    "    name=\"italy-2\",\n",
-    "    local_db=True,\n",
-    "    reset=True,\n",
-    "    #     enable_warnings=True,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "10",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "gateway_node_high = sy.orchestra.launch(\n",
-    "    name=\"gateway-2\",\n",
-    "    node_type=\"gateway\",\n",
-    "    local_db=True,\n",
-    "    reset=True,\n",
-    "    dev_mode=True,\n",
-    "    #     enable_warnings=True,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "11",
-   "metadata": {},
-   "source": [
-    "# DOs"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "12",
-   "metadata": {},
-   "source": [
-    "## Login"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "13",
-   "metadata": {},
-   "source": [
-    "### Staging Low side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "14",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "do_ca_client_low = ca_node_low.login(email=\"info@openmined.org\", password=\"changethis\")\n",
-    "do_it_client_low = it_node_low.login(email=\"info@openmined.org\", password=\"changethis\")\n",
-    "embassador_client_low = embassador_node_low.login(\n",
-    "    email=\"info@openmined.org\", password=\"changethis\"\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "15",
-   "metadata": {},
-   "source": [
-    "### Production High side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "16",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "do_ca_client_high = ca_node_high.login(\n",
-    "    email=\"info@openmined.org\", password=\"changethis\"\n",
-    ")\n",
-    "do_it_client_high = it_node_high.login(\n",
-    "    email=\"info@openmined.org\", password=\"changethis\"\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "17",
-   "metadata": {},
-   "source": [
-    "## Connect to network"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "18",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# TODO: add security layer here"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "19",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "enclave_client_high = enclave_node_high.client"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "20",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# gateway_root_client.register(name=\"\", email=\"info@openmined.org\", password=\"changethis\")\n",
-    "# gateway_root_client.register(name=\"\", email=\"info@openmined.org\", password=\"changethis\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "21",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "res = do_ca_client_low.connect_to_gateway(\n",
-    "    handle=gateway_node_low\n",
-    ")  # add credentials here"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "22",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "res = do_ca_client_low.connect_to_gateway(\n",
-    "    handle=gateway_node_low\n",
-    ")  # add credentials here\n",
-    "res = do_it_client_low.connect_to_gateway(\n",
-    "    handle=gateway_node_low\n",
-    ")  # add credentials here"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "23",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "res = do_ca_client_high.connect_to_gateway(handle=gateway_node_high)\n",
-    "res = do_it_client_high.connect_to_gateway(handle=gateway_node_high)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "24",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "## Also for ambassador"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "25",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# TODO: who is going to be responsible for connecting the enclave to the gateway\n",
-    "res = enclave_client_high.connect_to_gateway(handle=gateway_node_high)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "26",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "res = embassador_client_low.connect_to_gateway(\n",
-    "    handle=gateway_node_low\n",
-    ")  # add credentials here"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "27",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "## Upload dataset"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "28",
-   "metadata": {},
-   "source": [
-    "### Staging Low side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "29",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Using public datasets from  Freely Extensible Biomedical Record Linkage (Febrl)\n",
-    "canada_census_data_low, italy_census_data_low = load_febrl4()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "30",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for dataset, client, country in zip(\n",
-    "    [canada_census_data_low, italy_census_data_low],\n",
-    "    [do_ca_client_low, do_it_client_low],\n",
-    "    [\"Canada\", \"Italy\"],\n",
-    "):\n",
-    "    private_data, mock_data = dataset[:2500], dataset[2500:]\n",
-    "    dataset = sy.Dataset(\n",
-    "        name=f\"{country} - FEBrl Census Data\",\n",
-    "        description=\"abc\",\n",
-    "        asset_list=[\n",
-    "            sy.Asset(\n",
-    "                name=\"census_data\",\n",
-    "                mock=mock_data,\n",
-    "                data=private_data,\n",
-    "                shape=private_data.shape,\n",
-    "                mock_is_real=True,\n",
-    "            )\n",
-    "        ],\n",
-    "    )\n",
-    "    client.upload_dataset(dataset)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "31",
-   "metadata": {},
-   "source": [
-    "### Production High side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "32",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Using public datasets from  Freely Extensible Biomedical Record Linkage (Febrl)\n",
-    "canada_census_data_high, italy_census_data_high = load_febrl4()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "33",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for dataset, client, country in zip(\n",
-    "    [canada_census_data_high, italy_census_data_high],\n",
-    "    [do_ca_client_high, do_it_client_high],\n",
-    "    [\"Canada\", \"Italy\"],\n",
-    "):\n",
-    "    private_data, mock_data = dataset[:2500], dataset[2500:]\n",
-    "    dataset = sy.Dataset(\n",
-    "        name=f\"{country} - FEBrl Census Data\",\n",
-    "        description=\"abc\",\n",
-    "        asset_list=[\n",
-    "            sy.Asset(\n",
-    "                name=\"census_data\",\n",
-    "                mock=mock_data,\n",
-    "                data=private_data,\n",
-    "                shape=private_data.shape,\n",
-    "                mock_is_real=True,\n",
-    "            )\n",
-    "        ],\n",
-    "    )\n",
-    "    client.upload_dataset(dataset)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "34",
-   "metadata": {},
-   "source": [
-    "## create accounts for DS"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "35",
-   "metadata": {},
-   "source": [
-    "### Staging Low side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "36",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for client in [do_ca_client_low, do_it_client_low]:\n",
-    "    client.register(\n",
-    "        name=\"Sheldon\",\n",
-    "        email=\"sheldon@caltech.edu\",\n",
-    "        password=\"changethis\",\n",
-    "        password_verify=\"changethis\",\n",
-    "    )"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "37",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "embassador_client_low.register(\n",
-    "    name=\"Sheldon\",\n",
-    "    email=\"sheldon@caltech.edu\",\n",
-    "    password=\"changethis\",\n",
-    "    password_verify=\"changethis\",\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "38",
-   "metadata": {},
-   "source": [
-    "## Create account for embassador"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "39",
-   "metadata": {},
-   "source": [
-    "### Production High Side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "40",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for client in [do_ca_client_high, do_it_client_high]:\n",
-    "    client.register(\n",
-    "        name=\"Sheldon\",\n",
-    "        email=\"sheldon@caltech.edu\",\n",
-    "        password=\"changethis\",\n",
-    "        password_verify=\"changethis\",\n",
-    "    )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "41",
-   "metadata": {},
-   "source": [
-    "# DS Low Side"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "42",
-   "metadata": {},
-   "source": [
-    "## DS Get proxy clients"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "43",
-   "metadata": {},
-   "source": [
-    "### Staging Low side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "44",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ds_gateway_client_low = gateway_node_low.client"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "45",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "assert len(ds_gateway_client_low.domains) == 3\n",
-    "ds_gateway_client_low.domains"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "46",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ds_ca_proxy_client_low = ds_gateway_client_low.domains[1].login(\n",
-    "    email=\"sheldon@caltech.edu\", password=\"changethis\"\n",
-    ")\n",
-    "ds_it_proxy_client_low = ds_gateway_client_low.domains[2].login(\n",
-    "    email=\"sheldon@caltech.edu\", password=\"changethis\"\n",
-    ")\n",
-    "ds_amb_proxy_client_low = ds_gateway_client_low.domains[0].login(\n",
-    "    email=\"sheldon@caltech.edu\", password=\"changethis\"\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "47",
-   "metadata": {},
-   "source": [
-    "## Find datasets"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "48",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "canada_census_data = ds_ca_proxy_client_low.datasets[-1].assets[0]\n",
-    "italy_census_data = ds_it_proxy_client_low.datasets[-1].assets[0]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "49",
-   "metadata": {},
-   "source": [
-    "## Create Request"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "50",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "@sy.syft_function_single_use(\n",
-    "    canada_census_data=canada_census_data, italy_census_data=italy_census_data\n",
-    ")\n",
-    "def compute_census_matches(canada_census_data, italy_census_data):\n",
-    "    # third party\n",
-    "    import recordlinkage\n",
-    "\n",
-    "    # Index step\n",
-    "    indexer = recordlinkage.Index()\n",
-    "    indexer.block(\"given_name\")\n",
-    "\n",
-    "    candidate_links = indexer.index(canada_census_data, italy_census_data)\n",
-    "\n",
-    "    # Comparison step\n",
-    "    compare_cl = recordlinkage.Compare()\n",
-    "\n",
-    "    compare_cl.exact(\"given_name\", \"given_name\", label=\"given_name\")\n",
-    "    compare_cl.string(\n",
-    "        \"surname\", \"surname\", method=\"jarowinkler\", threshold=0.85, label=\"surname\"\n",
-    "    )\n",
-    "    compare_cl.exact(\"date_of_birth\", \"date_of_birth\", label=\"date_of_birth\")\n",
-    "    compare_cl.exact(\"suburb\", \"suburb\", label=\"suburb\")\n",
-    "    compare_cl.exact(\"state\", \"state\", label=\"state\")\n",
-    "    compare_cl.string(\"address_1\", \"address_1\", threshold=0.85, label=\"address_1\")\n",
-    "\n",
-    "    features = compare_cl.compute(\n",
-    "        candidate_links, canada_census_data, italy_census_data\n",
-    "    )\n",
-    "\n",
-    "    # Classification step\n",
-    "    matches = features[features.sum(axis=1) > 3]\n",
-    "\n",
-    "    return len(matches)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "51",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Checking result of mock data execution\n",
-    "mock_result = compute_census_matches(\n",
-    "    canada_census_data=canada_census_data.mock,\n",
-    "    italy_census_data=italy_census_data.mock,\n",
-    ")\n",
-    "mock_result"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "52",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ds_amb_proxy_client_low.code.request_code_execution(compute_census_matches)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "53",
-   "metadata": {},
-   "source": [
-    "# Ambassador flow"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "54",
-   "metadata": {},
-   "source": [
-    "## Check Code Staging Low Side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "55",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "embassador_client_low.requests[0].code"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "56",
-   "metadata": {},
-   "source": [
-    "## Login to Production High Side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "57",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "amb_gateway_client_high = gateway_node_high.client"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "58",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "assert len(amb_gateway_client_high.domains) == 2\n",
-    "amb_gateway_client_high.domains"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "59",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "amb_ca_proxy_client_high = amb_gateway_client_high.domains[1].login(\n",
-    "    email=\"sheldon@caltech.edu\", password=\"changethis\"\n",
-    ")\n",
-    "amb_it_proxy_client_high = amb_gateway_client_high.domains[0].login(\n",
-    "    email=\"sheldon@caltech.edu\", password=\"changethis\"\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "60",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "assert len(amb_gateway_client_high.enclaves) == 1"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "61",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "amb_ca_proxy_client_high"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "62",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "amb_enclave_proxy_client_high = amb_gateway_client_high.enclaves[0].login(\n",
-    "    name=\"Sheldon\", email=\"sheldon@caltech.edu\", password=\"changethis\", register=True\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "63",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# # this also creates a guest client\n",
-    "# embassador_client_high = enclave_node_high.login(email=\"info@openmined.org\", password=\"changethis\",\n",
-    "#                                                  name=\"Signor Ambassador\", register=True)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "64",
-   "metadata": {},
-   "source": [
-    "## Find Datasets Production High side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "65",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "canada_census_data_high = amb_ca_proxy_client_high.datasets[-1].assets[0]\n",
-    "italy_census_data_high = amb_it_proxy_client_high.datasets[-1].assets[0]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "66",
-   "metadata": {},
-   "source": [
-    "Copy code from the request"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "67",
-   "metadata": {},
-   "source": [
-    "## Submit code Production High side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "68",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "@sy.syft_function_single_use(\n",
-    "    canada_census_data=canada_census_data_high, italy_census_data=italy_census_data_high\n",
-    ")\n",
-    "def compute_census_matches_high(canada_census_data, italy_census_data):\n",
-    "    # third party\n",
-    "    import recordlinkage\n",
-    "\n",
-    "    # Index step\n",
-    "    indexer = recordlinkage.Index()\n",
-    "    indexer.block(\"given_name\")\n",
-    "\n",
-    "    candidate_links = indexer.index(canada_census_data, italy_census_data)\n",
-    "\n",
-    "    # Comparison step\n",
-    "    compare_cl = recordlinkage.Compare()\n",
-    "\n",
-    "    compare_cl.exact(\"given_name\", \"given_name\", label=\"given_name\")\n",
-    "    compare_cl.string(\n",
-    "        \"surname\", \"surname\", method=\"jarowinkler\", threshold=0.85, label=\"surname\"\n",
-    "    )\n",
-    "    compare_cl.exact(\"date_of_birth\", \"date_of_birth\", label=\"date_of_birth\")\n",
-    "    compare_cl.exact(\"suburb\", \"suburb\", label=\"suburb\")\n",
-    "    compare_cl.exact(\"state\", \"state\", label=\"state\")\n",
-    "    compare_cl.string(\"address_1\", \"address_1\", threshold=0.85, label=\"address_1\")\n",
-    "\n",
-    "    features = compare_cl.compute(\n",
-    "        candidate_links, canada_census_data, italy_census_data\n",
-    "    )\n",
-    "\n",
-    "    # Classification step\n",
-    "    matches = features[features.sum(axis=1) > 3]\n",
-    "\n",
-    "    return len(matches)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "69",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Checking result of mock data execution\n",
-    "mock_result = compute_census_matches_high(\n",
-    "    canada_census_data=canada_census_data_high.mock,\n",
-    "    italy_census_data=italy_census_data_high.mock,\n",
-    ")\n",
-    "mock_result"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "70",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# note that this is not embassador_client_high.**code**.request_code_execution\n",
-    "amb_enclave_proxy_client_high.request_code_execution(compute_census_matches_high)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "71",
-   "metadata": {},
-   "source": [
-    "## DOs Approve Production High Side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "72",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "do_ca_client_high.requests[0].approve()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "73",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "do_it_client_high.requests[0].approve()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "74",
-   "metadata": {},
-   "source": [
-    "## Embassdor gets result from Production High Side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "75",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "amb_enclave_proxy_client_high.code[-1].status"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "76",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "result_pointer = amb_enclave_proxy_client_high.code.compute_census_matches_high(\n",
-    "    canada_census_data=canada_census_data_high,\n",
-    "    italy_census_data=italy_census_data_high,\n",
-    ")\n",
-    "\n",
-    "result_pointer"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "77",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "real_result = result_pointer.get()\n",
-    "real_result"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "78",
-   "metadata": {},
-   "source": [
-    "## Ambassador Deposits Result"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "79",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "embassador_client_low.requests[0].accept_by_depositing_result(real_result)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "80",
-   "metadata": {},
-   "source": [
-    "# DS"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "81",
-   "metadata": {},
-   "source": [
-    "##  Get result from Staging Low Side"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "82",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ds_amb_proxy_client_low.code[-1].status"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "83",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "result_pointer = ds_amb_proxy_client_low.code.compute_census_matches(\n",
-    "    canada_census_data=canada_census_data,\n",
-    "    italy_census_data=italy_census_data,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "84",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "result_pointer"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "85",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "real_result = result_pointer.get()\n",
-    "real_result"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "86",
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.16"
-  },
-  "toc": {
-   "base_numbering": 1,
-   "nav_menu": {},
-   "number_sections": true,
-   "sideBar": true,
-   "skip_h1_title": false,
-   "title_cell": "Table of Contents",
-   "title_sidebar": "Contents",
-   "toc_cell": false,
-   "toc_position": {
-    "height": "calc(100% - 180px)",
-    "left": "10px",
-    "top": "150px",
-    "width": "358.391px"
-   },
-   "toc_section_display": true,
-   "toc_window_display": true
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/notebooks/tutorials/hello-syft/01-hello-syft.ipynb b/notebooks/tutorials/hello-syft/01-hello-syft.ipynb
index b7354b469b1..cc2da4bf58f 100644
--- a/notebooks/tutorials/hello-syft/01-hello-syft.ipynb
+++ b/notebooks/tutorials/hello-syft/01-hello-syft.ipynb
@@ -56,7 +56,7 @@
    },
    "outputs": [],
    "source": [
-    "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n",
+    "SYFT_VERSION = \">=0.9,<1.0.0\"\n",
     "package_string = f'\"syft{SYFT_VERSION}\"'\n",
     "# %pip install {package_string} -q"
    ]
@@ -83,7 +83,7 @@
    "source": [
     "## Launch a dummy server \n",
     "\n",
-    "In this tutorial, for the sake of demonstration, we will be using in-memory workers as dummy servers. For details of deploying a server on your own using `syft` and `hagrid`, please refer to the `quickstart` tutorials."
+    "In this tutorial, for the sake of demonstration, we will be using in-memory workers as dummy servers. For details of deploying a server on your own using `syft`."
    ]
   },
   {
@@ -93,9 +93,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"hello-syft-usa-server\", port=9000, reset=True)\n",
-    "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")\n",
-    "root_domain_client.register(\n",
+    "server = sy.orchestra.launch(name=\"hello-syft-usa-server\", port=9000, reset=True)\n",
+    "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n",
+    "root_datasite_client.register(\n",
     "    name=\"Jane Doe\",\n",
     "    email=\"janedoe@caltech.edu\",\n",
     "    password=\"abc123\",\n",
@@ -104,7 +104,7 @@
     "    website=\"https://www.caltech.edu/\",\n",
     ")\n",
     "\n",
-    "ds_client = node.login(email=\"janedoe@caltech.edu\", password=\"abc123\")"
+    "ds_client = server.login(email=\"janedoe@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -114,7 +114,7 @@
    "source": [
     "## Data owner - Part 1\n",
     "\n",
-    "### Upload Data to Domain"
+    "### Upload Data to Datasite"
    ]
   },
   {
@@ -162,7 +162,7 @@
     "        )\n",
     "    ],\n",
     ")\n",
-    "root_domain_client.upload_dataset(dataset)"
+    "root_datasite_client.upload_dataset(dataset)"
    ]
   },
   {
@@ -319,7 +319,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "root_domain_client.requests"
+    "root_datasite_client.requests"
    ]
   },
   {
@@ -329,7 +329,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "request = root_domain_client.requests[0]"
+    "request = root_datasite_client.requests[0]"
    ]
   },
   {
@@ -412,7 +412,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "get_mean_age_user_function = usercode.unsafe_function"
+    "get_mean_age_user_function = usercode.run"
    ]
   },
   {
@@ -432,7 +432,7 @@
    "id": "39",
    "metadata": {},
    "source": [
-    "### Share the real result with the Data Scientist"
+    "### Approving the request"
    ]
   },
   {
@@ -442,9 +442,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "result = request.accept_by_depositing_result(real_result)\n",
-    "print(result)\n",
-    "assert isinstance(result, sy.SyftSuccess)"
+    "result = request.approve()\n",
+    "assert isinstance(result, sy.SyftSuccess)\n",
+    "result"
    ]
   },
   {
@@ -454,7 +454,7 @@
    "source": [
     "## Data Scientist - Part 2\n",
     "\n",
-    "### Fetch Real Result"
+    "### Computing the Real Result"
    ]
   },
   {
@@ -519,26 +519,67 @@
    "id": "48",
    "metadata": {},
    "source": [
-    "Once you are done with this tutorial, you can safely shut down the servers as following,"
+    "## Final note: autocomplete"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "49",
+   "metadata": {},
+   "source": [
+    "Earlier in this tutorial, we used services defined on the client, such as `ds_client.code.request_code_execution`. To find out more about the available methods, like `.request_code_execution()`, and services, like `client.code` you can use autocomplete, simply type `ds_client.code.` or `ds_client.services.` for an example."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "49",
+   "id": "50",
    "metadata": {},
    "outputs": [],
    "source": [
-    "node.land()"
+    "# autocompletion, but programtic. To test it out, just type client.services. instead in a new cell\n",
+    "autocompleter = get_ipython().Completer\n",
+    "_, completions1 = autocompleter.complete(text=\"ds_client.code.\")\n",
+    "_, completions2 = autocompleter.complete(text=\"ds_client.services.\")\n",
+    "_, completions3 = autocompleter.complete(text=\"ds_client.api.services.\")\n",
+    "_, completions4 = autocompleter.complete(text=\"ds_client.api.\")"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "50",
+   "id": "51",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "assert all(\n",
+    "    [\n",
+    "        \"ds_client.code.get_all\" in completions1,\n",
+    "        \"ds_client.services.code\" in completions2,\n",
+    "        \"ds_client.api.services.code\" in completions3,\n",
+    "        \"ds_client.api.code\" in completions4,\n",
+    "        \"ds_client.api.parse_raw\" not in completions4,  # no pydantic completions on api\n",
+    "    ]\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "52",
+   "metadata": {},
+   "source": [
+    "Once you are done with this tutorial, you can safely shut down the servers as following,"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "53",
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "server.land()"
+   ]
   }
  ],
  "metadata": {
@@ -557,7 +598,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.4"
+   "version": "3.10.13"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/model-auditing/colab/01-user-log.ipynb b/notebooks/tutorials/model-auditing/colab/01-user-log.ipynb
index f53c1374203..e97442b5dac 100644
--- a/notebooks/tutorials/model-auditing/colab/01-user-log.ipynb
+++ b/notebooks/tutorials/model-auditing/colab/01-user-log.ipynb
@@ -13,7 +13,7 @@
    "id": "1",
    "metadata": {},
    "source": [
-    "In this tutorial, we show how external parties can audit internal AI systems without accessing them — mitigating privacy, security, and IP costs and risks. **This tutorial uses syft 0.8.2.b0, with a domain setup that does not use networking, to run the tutorial with networking read more in section 1.1.1**\n",
+    "In this tutorial, we show how external parties can audit internal AI systems without accessing them — mitigating privacy, security, and IP costs and risks. **This tutorial uses syft 0.8.2.b0, with a datasite setup that does not use networking, to run the tutorial with networking read more in section 1.1.1**\n",
     "\n",
     "You can read more about this tutorial and the follow up tutorials here on the [blog post](https://blog.openmined.org/)."
    ]
@@ -44,7 +44,7 @@
    "outputs": [],
    "source": [
     "# install syft\n",
-    "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n",
+    "SYFT_VERSION = \">=0.9,<1.0.0\"\n",
     "package_string = f'\"syft{SYFT_VERSION}\"'\n",
     "%pip install {package_string} -q"
    ]
@@ -70,7 +70,7 @@
    "id": "6",
    "metadata": {},
    "source": [
-    "### Launch PySyft domain server"
+    "### Launch PySyft datasite server"
    ]
   },
   {
@@ -78,7 +78,7 @@
    "id": "7",
    "metadata": {},
    "source": [
-    "To start we launch a `PySyft` domain server. This is the backend that stores the private data."
+    "To start we launch a `PySyft` datasite server. This is the backend that stores the private data."
    ]
   },
   {
@@ -90,7 +90,7 @@
    },
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"syft-domain\", reset=True)"
+    "server = sy.orchestra.launch(name=\"syft-datasite\", reset=True)"
    ]
   },
   {
@@ -98,19 +98,19 @@
    "id": "9",
    "metadata": {},
    "source": [
-    "There are 3 ways to launch a `PySyft` domain\n",
+    "There are 3 ways to launch a `PySyft` datasite\n",
     "\n",
     "**A) From a notebook, with simulated networking \\*\\*THIS NOTEBOOK\\*\\***\n",
     "  - Apart from the network calls, this uses exactly the same code as other setups\n",
-    "  - run orchestra **without a port**: `sy.orchestra.launch(name=\"syft-domain\")`\n",
+    "  - run orchestra **without a port**: `sy.orchestra.launch(name=\"syft-datasite\")`\n",
     "  \n",
     "**B) From a notebook with networking (also supports docker)**\n",
     "  - This spawns a separate process that starts a uvicorn webserver\n",
-    "  - run orchestra **with a port**:`sy.orchestra.launch(name=\"syft-domain\", port=8080)`\n",
+    "  - run orchestra **with a port**:`sy.orchestra.launch(name=\"syft-datasite\", port=8080)`\n",
     "  \n",
     "**C) From the command line (supports docker/kubernetes)**\n",
     "  - setup for production\n",
-    "  - run `syft launch` or `hagrid launch` from the terminal\n",
+    "  - run `syft launch`  from the terminal\n",
     "  \n",
     "  \n",
     "We are using the **A)** here, as it is the only option available using google colab, switching to a real webserver is as easy as running this notebook in jupyter locally and adding a port. Read more about deployment on our [README.md](https://github.com/OpenMined/PySyft) and other setups for syft [here](https://github.com/OpenMined/PySyft/tree/dev/notebooks/tutorials/data-engineer)"
@@ -129,7 +129,7 @@
    "id": "11",
    "metadata": {},
    "source": [
-    "We can now login to our domain using the default admin credentials. In production we would change these."
+    "We can now login to our datasite using the default admin credentials. In production we would change these."
    ]
   },
   {
@@ -141,7 +141,7 @@
    },
    "outputs": [],
    "source": [
-    "mo_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "mo_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -149,7 +149,7 @@
    "id": "13",
    "metadata": {},
    "source": [
-    "### Configure node to allow user registration"
+    "### Configure server to allow user registration"
    ]
   },
   {
@@ -157,7 +157,7 @@
    "id": "14",
    "metadata": {},
    "source": [
-    "For this tutorial we allow other users to create their own account. New accounts will get limited permissions and will only be able to see the mock version of any datasets we upload to the domain."
+    "For this tutorial we allow other users to create their own account. New accounts will get limited permissions and will only be able to see the mock version of any datasets we upload to the datasite."
    ]
   },
   {
@@ -223,7 +223,7 @@
    "id": "21",
    "metadata": {},
    "source": [
-    "To upload our dataset to the domain we need to wrap it in a `Syft Dataset` object. We can add some metadata to the object."
+    "To upload our dataset to the datasite we need to wrap it in a `Syft Dataset` object. We can add some metadata to the object."
    ]
   },
   {
@@ -296,13 +296,13 @@
    },
    "outputs": [],
    "source": [
-    "auditor_client = node.register(\n",
+    "auditor_client = server.register(\n",
     "    name=\"Peter Jones\",\n",
     "    email=\"pjones@aisb.org\",\n",
     "    password=\"password1234\",\n",
     "    password_verify=\"password1234\",\n",
     ")\n",
-    "auditor_client = node.login(email=\"pjones@aisb.org\", password=\"password1234\")"
+    "auditor_client = server.login(email=\"pjones@aisb.org\", password=\"password1234\")"
    ]
   },
   {
@@ -481,7 +481,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "project = audit_project.start()\n",
+    "project = audit_project.send()\n",
     "project"
    ]
   },
@@ -572,7 +572,7 @@
    "id": "53",
    "metadata": {},
    "source": [
-    "Once the model owner feels confident that this code is not malicious, we can run the function on the real data."
+    "Once the model owner feels confident that this code is not malicious, we can run the function on the real data to inspect the result."
    ]
   },
   {
@@ -592,7 +592,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "real_result = request.code.unsafe_function(data=asset.data)\n",
+    "real_result = request.code.run(data=asset.data)\n",
     "real_result"
    ]
   },
@@ -601,7 +601,7 @@
    "id": "56",
    "metadata": {},
    "source": [
-    "This gives us a result which we can attach to the request"
+    "If everything looks good, we can approve the request"
    ]
   },
   {
@@ -611,7 +611,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "request.accept_by_depositing_result(real_result)"
+    "request.approve()"
    ]
   },
   {
@@ -619,7 +619,7 @@
    "id": "58",
    "metadata": {},
    "source": [
-    "## Auditor Receives Final Results"
+    "## Auditor computes Final Results"
    ]
   },
   {
@@ -661,14 +661,6 @@
     "  \n",
     ""
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "63",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
@@ -687,7 +679,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.10.13"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/model-training/00-data-owner-upload-data.ipynb b/notebooks/tutorials/model-training/00-data-owner-upload-data.ipynb
index 3bc8bc69c2e..ccc142c1fac 100644
--- a/notebooks/tutorials/model-training/00-data-owner-upload-data.ipynb
+++ b/notebooks/tutorials/model-training/00-data-owner-upload-data.ipynb
@@ -26,7 +26,7 @@
    "id": "1",
    "metadata": {},
    "source": [
-    "## 1. Launch the domain, upload the data"
+    "## 1. Launch the datasite, upload the data"
    ]
   },
   {
@@ -36,8 +36,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"mnist-domain\", dev_mode=True, reset=True)\n",
-    "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "server = sy.orchestra.launch(name=\"mnist-torch-datasite\", dev_mode=True, reset=True)\n",
+    "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -371,11 +371,6 @@
   }
  ],
  "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
@@ -386,7 +381,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.12.5"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/model-training/01-data-scientist-submit-code.ipynb b/notebooks/tutorials/model-training/01-data-scientist-submit-code.ipynb
index 4d245cd6f06..cdd6d19c35b 100644
--- a/notebooks/tutorials/model-training/01-data-scientist-submit-code.ipynb
+++ b/notebooks/tutorials/model-training/01-data-scientist-submit-code.ipynb
@@ -4,11 +4,12 @@
    "cell_type": "code",
    "execution_count": null,
    "id": "0",
-   "metadata": {},
+   "metadata": {
+    "metadata": {}
+   },
    "outputs": [],
    "source": [
     "# third party\n",
-    "import jax\n",
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
     "\n",
@@ -21,18 +22,20 @@
    "id": "1",
    "metadata": {},
    "source": [
-    "## 1. DS logins to the domain with the credentials created by the DO"
+    "## 1. DS logins to the datasite with the credentials created by the DO"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "2",
-   "metadata": {},
+   "metadata": {
+    "metadata": {}
+   },
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"mnist-domain\", dev_mode=True)\n",
-    "ds_client = node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")"
+    "server = sy.orchestra.launch(name=\"mnist-torch-datasite\", dev_mode=True)\n",
+    "ds_client = server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")"
    ]
   },
   {
@@ -40,14 +43,16 @@
    "id": "3",
    "metadata": {},
    "source": [
-    "### Inspect the datasets on the domain"
+    "### Inspect the datasets on the datasite"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "4",
-   "metadata": {},
+   "metadata": {
+    "metadata": {}
+   },
    "outputs": [],
    "source": [
     "datasets = ds_client.datasets.get_all()\n",
@@ -104,7 +109,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "assert training_images.data is None"
+    "assert training_images.data is None\n",
+    "training_labels.data"
    ]
   },
   {
@@ -182,90 +188,82 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def mnist_3_linear_layers(mnist_images, mnist_labels):\n",
-    "    # import the packages\n",
-    "    # stdlib\n",
-    "    import itertools\n",
-    "    import time\n",
-    "\n",
+    "def mnist_3_linear_layers_torch(mnist_images, mnist_labels):\n",
     "    # third party\n",
-    "    from jax import grad\n",
-    "    from jax import jit\n",
-    "    from jax import random\n",
-    "    from jax.example_libraries import optimizers\n",
-    "    from jax.example_libraries import stax\n",
-    "    from jax.example_libraries.stax import Dense\n",
-    "    from jax.example_libraries.stax import LogSoftmax\n",
-    "    from jax.example_libraries.stax import Relu\n",
-    "    import jax.numpy as jnp\n",
-    "    import numpy.random as npr\n",
-    "\n",
-    "    # define the neural network\n",
-    "    init_random_params, predict = stax.serial(\n",
-    "        Dense(1024), Relu, Dense(1024), Relu, Dense(10), LogSoftmax\n",
-    "    )\n",
+    "    import torch\n",
+    "    import torch.nn as nn\n",
+    "    import torch.optim as optim\n",
+    "    from torch.utils.data import TensorDataset\n",
     "\n",
-    "    # initialize the random parameters\n",
-    "    rng = random.PRNGKey(0)\n",
-    "    _, init_params = init_random_params(rng, (-1, 784))\n",
-    "\n",
-    "    # the hyper parameters\n",
-    "    num_epochs = 10\n",
-    "    batch_size = 4\n",
-    "    num_train = mnist_images.shape[0]\n",
-    "    num_complete_batches, leftover = divmod(num_train, batch_size)\n",
-    "    num_batches = num_complete_batches + bool(leftover)\n",
-    "    step_size = 0.001\n",
-    "    momentum_mass = 0.9\n",
-    "\n",
-    "    # initialize the optimizer\n",
-    "    opt_init, opt_update, get_params = optimizers.momentum(\n",
-    "        step_size, mass=momentum_mass\n",
+    "    # Convert NumPy arrays to PyTorch tensors\n",
+    "    images_tensor = torch.tensor(mnist_images, dtype=torch.float32)\n",
+    "    labels_tensor = torch.tensor(mnist_labels, dtype=torch.float32)\n",
+    "    # Create a PyTorch dataset using TensorDataset\n",
+    "    custom_dataset = TensorDataset(images_tensor, labels_tensor)\n",
+    "    # Define the data loader\n",
+    "    train_loader = torch.utils.data.DataLoader(\n",
+    "        custom_dataset, batch_size=4, shuffle=True\n",
     "    )\n",
-    "    opt_state = opt_init(init_params)\n",
-    "    itercount = itertools.count()\n",
     "\n",
-    "    @jit\n",
-    "    def update(i, opt_state, batch):\n",
-    "        params = get_params(opt_state)\n",
-    "        return opt_update(i, grad(loss)(params, batch), opt_state)\n",
+    "    # Define the neural network class\n",
+    "    class MLP(nn.Module):\n",
+    "        def __init__(self):\n",
+    "            super().__init__()\n",
+    "            self.fc1 = nn.Linear(784, 1024)\n",
+    "            self.fc2 = nn.Linear(1024, 1024)\n",
+    "            self.fc3 = nn.Linear(1024, 10)\n",
     "\n",
-    "    def data_stream():\n",
-    "        \"\"\"\n",
-    "        Create a batch of data picked randomly\n",
-    "        \"\"\"\n",
-    "        rng = npr.RandomState(0)\n",
-    "        while True:\n",
-    "            perm = rng.permutation(num_train)\n",
-    "            for i in range(num_batches):\n",
-    "                batch_idx = perm[i * batch_size : (i + 1) * batch_size]\n",
-    "                yield mnist_images[batch_idx], mnist_labels[batch_idx]\n",
+    "        def forward(self, x):\n",
+    "            x = x.view(-1, 784)\n",
+    "            x = torch.relu(self.fc1(x))\n",
+    "            x = torch.relu(self.fc2(x))\n",
+    "            x = torch.log_softmax(self.fc3(x), dim=1)\n",
+    "            return x\n",
     "\n",
-    "    def loss(params, batch):\n",
-    "        inputs, targets = batch\n",
-    "        preds = predict(params, inputs)\n",
-    "        return -jnp.mean(jnp.sum(preds * targets, axis=1))\n",
+    "    # Define the model, optimizer, and loss function\n",
+    "    model = MLP()\n",
+    "    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n",
+    "    criterion = nn.CrossEntropyLoss()\n",
     "\n",
-    "    def accuracy(params, batch):\n",
-    "        inputs, targets = batch\n",
-    "        target_class = jnp.argmax(targets, axis=1)\n",
-    "        predicted_class = jnp.argmax(predict(params, inputs), axis=1)\n",
-    "        return jnp.mean(predicted_class == target_class)\n",
+    "    # Function to calculate accuracy\n",
+    "    def accuracy(model, data_loader):\n",
+    "        correct = 0\n",
+    "        total = 0\n",
+    "        with torch.no_grad():\n",
+    "            for data in data_loader:\n",
+    "                inputs, labels = data\n",
+    "                outputs = model(inputs)\n",
+    "                _, predicted = torch.max(outputs.data, 1)\n",
+    "                total += labels.size(0)\n",
+    "                correct += (predicted == torch.argmax(labels, dim=1)).sum().item()\n",
+    "        return correct / total\n",
     "\n",
-    "    batches = data_stream()\n",
+    "    # Train the model\n",
+    "    num_epochs = 20\n",
     "    train_accs = []\n",
-    "    print(\"\\nStarting training...\")\n",
     "    for epoch in range(num_epochs):\n",
-    "        start_time = time.time()\n",
-    "        for _ in range(num_batches):\n",
-    "            opt_state = update(next(itercount), opt_state, next(batches))\n",
-    "        epoch_time = time.time() - start_time\n",
-    "        params = get_params(opt_state)\n",
-    "        train_acc = accuracy(params, (mnist_images, mnist_labels))\n",
-    "        print(f\"Epoch {epoch} in {epoch_time:0.2f} sec\")\n",
-    "        print(f\"Training set accuracy {train_acc}\")\n",
-    "        train_accs.append(train_acc)\n",
+    "        running_loss = 0.0\n",
+    "        for _, data in enumerate(train_loader, 0):\n",
+    "            inputs, labels = data\n",
+    "            optimizer.zero_grad()\n",
+    "            outputs = model(inputs)\n",
+    "            loss = criterion(outputs, labels)\n",
+    "            loss.backward()\n",
+    "            optimizer.step()\n",
+    "            running_loss += loss.item()\n",
+    "        print(\n",
+    "            f\"Epoch {epoch + 1}, Loss: {(running_loss / len(train_loader)):.4f}\",\n",
+    "            end=\". \",\n",
+    "        )\n",
+    "        # Calculate accuracy on the training set\n",
+    "        train_accuracy = accuracy(model, train_loader)\n",
+    "        train_accs.append(train_accuracy)\n",
+    "        print(f\"Training set accuracy: {train_accuracy}%\")\n",
+    "\n",
+    "    # Get model parameters\n",
+    "    params = model.state_dict()\n",
     "\n",
+    "    # Return training accuracy and model parameters\n",
     "    return train_accs, params"
    ]
   },
@@ -276,7 +274,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "train_accs, params = mnist_3_linear_layers(\n",
+    "train_accs, params = mnist_3_linear_layers_torch(\n",
     "    mnist_images=mock_images, mnist_labels=mock_labels\n",
     ")"
    ]
@@ -286,7 +284,7 @@
    "id": "19",
    "metadata": {},
    "source": [
-    "#### Inspect the training accuracies and the shape of the model's parameters"
+    "#### Inspect the training accuracies and the model's parameters"
    ]
   },
   {
@@ -306,7 +304,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "jax.tree_map(lambda x: x.shape, params)"
+    "assert isinstance(params, dict)"
    ]
   },
   {
@@ -338,90 +336,82 @@
     "    ),\n",
     "    output_policy=sy.SingleExecutionExactOutput(),\n",
     ")\n",
-    "def mnist_3_linear_layers(mnist_images, mnist_labels):\n",
-    "    # import the packages\n",
-    "    # stdlib\n",
-    "    import itertools\n",
-    "    import time\n",
-    "\n",
+    "def mnist_3_linear_layers_torch(mnist_images, mnist_labels):\n",
     "    # third party\n",
-    "    from jax import grad\n",
-    "    from jax import jit\n",
-    "    from jax import random\n",
-    "    from jax.example_libraries import optimizers\n",
-    "    from jax.example_libraries import stax\n",
-    "    from jax.example_libraries.stax import Dense\n",
-    "    from jax.example_libraries.stax import LogSoftmax\n",
-    "    from jax.example_libraries.stax import Relu\n",
-    "    import jax.numpy as jnp\n",
-    "    import numpy.random as npr\n",
+    "    import torch\n",
+    "    import torch.nn as nn\n",
+    "    import torch.optim as optim\n",
+    "    from torch.utils.data import TensorDataset\n",
     "\n",
-    "    # define the neural network\n",
-    "    init_random_params, predict = stax.serial(\n",
-    "        Dense(1024), Relu, Dense(1024), Relu, Dense(10), LogSoftmax\n",
+    "    # Convert NumPy arrays to PyTorch tensors\n",
+    "    images_tensor = torch.tensor(mnist_images, dtype=torch.float32)\n",
+    "    labels_tensor = torch.tensor(mnist_labels, dtype=torch.float32)\n",
+    "    # Create a PyTorch dataset using TensorDataset\n",
+    "    custom_dataset = TensorDataset(images_tensor, labels_tensor)\n",
+    "    # Define the data loader\n",
+    "    train_loader = torch.utils.data.DataLoader(\n",
+    "        custom_dataset, batch_size=4, shuffle=True\n",
     "    )\n",
     "\n",
-    "    # initialize the random parameters\n",
-    "    rng = random.PRNGKey(0)\n",
-    "    _, init_params = init_random_params(rng, (-1, 784))\n",
+    "    # Define the neural network class\n",
+    "    class MLP(nn.Module):\n",
+    "        def __init__(self):\n",
+    "            super().__init__()\n",
+    "            self.fc1 = nn.Linear(784, 1024)\n",
+    "            self.fc2 = nn.Linear(1024, 1024)\n",
+    "            self.fc3 = nn.Linear(1024, 10)\n",
     "\n",
-    "    # the hyper parameters\n",
-    "    num_epochs = 10\n",
-    "    batch_size = 4\n",
-    "    num_train = mnist_images.shape[0]\n",
-    "    num_complete_batches, leftover = divmod(num_train, batch_size)\n",
-    "    num_batches = num_complete_batches + bool(leftover)\n",
-    "    step_size = 0.001\n",
-    "    momentum_mass = 0.9\n",
+    "        def forward(self, x):\n",
+    "            x = x.view(-1, 784)\n",
+    "            x = torch.relu(self.fc1(x))\n",
+    "            x = torch.relu(self.fc2(x))\n",
+    "            x = torch.log_softmax(self.fc3(x), dim=1)\n",
+    "            return x\n",
     "\n",
-    "    # initialize the optimizer\n",
-    "    opt_init, opt_update, get_params = optimizers.momentum(\n",
-    "        step_size, mass=momentum_mass\n",
-    "    )\n",
-    "    opt_state = opt_init(init_params)\n",
-    "    itercount = itertools.count()\n",
+    "    # Define the model, optimizer, and loss function\n",
+    "    model = MLP()\n",
+    "    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n",
+    "    criterion = nn.CrossEntropyLoss()\n",
     "\n",
-    "    @jit\n",
-    "    def update(i, opt_state, batch):\n",
-    "        params = get_params(opt_state)\n",
-    "        return opt_update(i, grad(loss)(params, batch), opt_state)\n",
+    "    # Function to calculate accuracy\n",
+    "    def accuracy(model, data_loader):\n",
+    "        correct = 0\n",
+    "        total = 0\n",
+    "        with torch.no_grad():\n",
+    "            for data in data_loader:\n",
+    "                inputs, labels = data\n",
+    "                outputs = model(inputs)\n",
+    "                _, predicted = torch.max(outputs.data, 1)\n",
+    "                total += labels.size(0)\n",
+    "                correct += (predicted == torch.argmax(labels, dim=1)).sum().item()\n",
+    "        return correct / total\n",
     "\n",
-    "    def data_stream():\n",
-    "        \"\"\"\n",
-    "        Create a batch of data picked randomly\n",
-    "        \"\"\"\n",
-    "        rng = npr.RandomState(0)\n",
-    "        while True:\n",
-    "            perm = rng.permutation(num_train)\n",
-    "            for i in range(num_batches):\n",
-    "                batch_idx = perm[i * batch_size : (i + 1) * batch_size]\n",
-    "                yield mnist_images[batch_idx], mnist_labels[batch_idx]\n",
-    "\n",
-    "    def loss(params, batch):\n",
-    "        inputs, targets = batch\n",
-    "        preds = predict(params, inputs)\n",
-    "        return -jnp.mean(jnp.sum(preds * targets, axis=1))\n",
-    "\n",
-    "    def accuracy(params, batch):\n",
-    "        inputs, targets = batch\n",
-    "        target_class = jnp.argmax(targets, axis=1)\n",
-    "        predicted_class = jnp.argmax(predict(params, inputs), axis=1)\n",
-    "        return jnp.mean(predicted_class == target_class)\n",
-    "\n",
-    "    batches = data_stream()\n",
+    "    # Train the model\n",
+    "    num_epochs = 20\n",
     "    train_accs = []\n",
-    "    print(\"\\nStarting training...\")\n",
     "    for epoch in range(num_epochs):\n",
-    "        start_time = time.time()\n",
-    "        for _ in range(num_batches):\n",
-    "            opt_state = update(next(itercount), opt_state, next(batches))\n",
-    "        epoch_time = time.time() - start_time\n",
-    "        params = get_params(opt_state)\n",
-    "        train_acc = accuracy(params, (mnist_images, mnist_labels))\n",
-    "        print(f\"Epoch {epoch} in {epoch_time:0.2f} sec\")\n",
-    "        print(f\"Training set accuracy {train_acc}\")\n",
-    "        train_accs.append(train_acc)\n",
+    "        running_loss = 0.0\n",
+    "        for _, data in enumerate(train_loader, 0):\n",
+    "            inputs, labels = data\n",
+    "            optimizer.zero_grad()\n",
+    "            outputs = model(inputs)\n",
+    "            loss = criterion(outputs, labels)\n",
+    "            loss.backward()\n",
+    "            optimizer.step()\n",
+    "            running_loss += loss.item()\n",
+    "        print(\n",
+    "            f\"Epoch {epoch + 1}, Loss: {(running_loss / len(train_loader)):.4f}\",\n",
+    "            end=\". \",\n",
+    "        )\n",
+    "        # Calculate accuracy on the training set\n",
+    "        train_accuracy = accuracy(model, train_loader)\n",
+    "        train_accs.append(train_accuracy)\n",
+    "        print(f\"Training set accuracy: {train_accuracy}%\")\n",
+    "\n",
+    "    # Get model parameters\n",
+    "    params = model.state_dict()\n",
     "\n",
+    "    # Return training accuracy and model parameters\n",
     "    return train_accs, params"
    ]
   },
@@ -441,7 +431,7 @@
    "outputs": [],
    "source": [
     "new_project = sy.Project(\n",
-    "    name=\"Training a 3-layer jax neural network on MNIST data\",\n",
+    "    name=\"Training a 3-layer torch neural network on MNIST data\",\n",
     "    description=\"\"\"Hi, I would like to train my neural network on your MNIST data \n",
     "                (I can download it online too but I just want to use Syft coz it's cool)\"\"\",\n",
     "    members=[ds_client],\n",
@@ -464,7 +454,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "new_project.create_code_request(obj=mnist_3_linear_layers, client=ds_client)"
+    "new_project.create_code_request(obj=mnist_3_linear_layers_torch, client=ds_client)"
    ]
   },
   {
@@ -492,7 +482,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "project = new_project.start()"
+    "project = new_project.send()"
    ]
   },
   {
@@ -502,7 +492,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "project.events"
+    "assert isinstance(project, sy.service.project.project.Project)"
    ]
   },
   {
@@ -512,7 +502,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "project.requests"
+    "project.events"
    ]
   },
   {
@@ -521,13 +511,23 @@
    "id": "34",
    "metadata": {},
    "outputs": [],
+   "source": [
+    "project.requests"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "35",
+   "metadata": {},
+   "outputs": [],
    "source": [
     "project.requests[0]"
    ]
   },
   {
    "cell_type": "markdown",
-   "id": "35",
+   "id": "36",
    "metadata": {},
    "source": [
     "### 📓 Now switch to the [second DO's notebook](./02-data-owner-review-approve-code.ipynb)"
@@ -536,18 +536,13 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "36",
+   "id": "37",
    "metadata": {},
    "outputs": [],
    "source": []
   }
  ],
  "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
@@ -558,7 +553,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.12.5"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/model-training/02-data-owner-review-approve-code.ipynb b/notebooks/tutorials/model-training/02-data-owner-review-approve-code.ipynb
index fd381b26733..d68790e723a 100644
--- a/notebooks/tutorials/model-training/02-data-owner-review-approve-code.ipynb
+++ b/notebooks/tutorials/model-training/02-data-owner-review-approve-code.ipynb
@@ -21,8 +21,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"mnist-domain\", dev_mode=True)\n",
-    "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "server = sy.orchestra.launch(name=\"mnist-torch-datasite\", dev_mode=True)\n",
+    "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -156,7 +156,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "users_function = user_code.unsafe_function\n",
+    "users_function = user_code.run\n",
     "users_function"
    ]
   },
@@ -190,7 +190,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "assert isinstance(mock_params, list)\n",
+    "assert isinstance(mock_params, dict)\n",
     "mock_params"
    ]
   },
@@ -199,7 +199,7 @@
    "id": "18",
    "metadata": {},
    "source": [
-    "## 2. DO runs the submitted code on private data, then deposits the results to the domain so the DS can retrieve them"
+    "## 2. DO runs the submitted code on private data, then approves the request so the DS can execute the function"
    ]
   },
   {
@@ -246,7 +246,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "assert isinstance(params, list)\n",
+    "assert isinstance(params, dict)\n",
     "params"
    ]
   },
@@ -257,7 +257,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "res = request.accept_by_depositing_result((train_accs, params))"
+    "res = request.approve()"
    ]
   },
   {
@@ -278,22 +278,9 @@
    "source": [
     "### 📓 Now switch to the [second DS's notebook](./03-data-scientist-download-results.ipynb)"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "26",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
@@ -304,7 +291,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.12.5"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/model-training/03-data-scientist-download-results.ipynb b/notebooks/tutorials/model-training/03-data-scientist-download-results.ipynb
index 0fbc19747a9..2277e6ad2f2 100644
--- a/notebooks/tutorials/model-training/03-data-scientist-download-results.ipynb
+++ b/notebooks/tutorials/model-training/03-data-scientist-download-results.ipynb
@@ -8,14 +8,8 @@
    "outputs": [],
    "source": [
     "# third party\n",
-    "import jax\n",
-    "from jax import random\n",
-    "from jax.example_libraries import stax\n",
-    "from jax.example_libraries.stax import Dense\n",
-    "from jax.example_libraries.stax import LogSoftmax\n",
-    "from jax.example_libraries.stax import Relu\n",
-    "import jax.numpy as jnp\n",
     "from mnist_dataset import mnist\n",
+    "import torch\n",
     "\n",
     "# syft absolute\n",
     "import syft as sy"
@@ -28,8 +22,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"mnist-domain\", dev_mode=True)\n",
-    "ds_client = node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")"
+    "server = sy.orchestra.launch(name=\"mnist-torch-datasite\", dev_mode=True)\n",
+    "ds_client = server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")"
    ]
   },
   {
@@ -80,7 +74,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "result = ds_client.code.mnist_3_linear_layers(\n",
+    "result = ds_client.code.mnist_3_linear_layers_torch(\n",
     "    mnist_images=training_images, mnist_labels=training_labels\n",
     ")"
    ]
@@ -113,8 +107,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "assert isinstance(params, list)\n",
-    "jax.tree_map(lambda x: x.shape, params)"
+    "assert isinstance(params, dict)\n",
+    "params"
    ]
   },
   {
@@ -161,9 +155,29 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "init_random_params, predict = stax.serial(\n",
-    "    Dense(1024), Relu, Dense(1024), Relu, Dense(10), LogSoftmax\n",
-    ")"
+    "# third party\n",
+    "import torch.nn as nn\n",
+    "\n",
+    "\n",
+    "class MLP(nn.Module):\n",
+    "    def __init__(self):\n",
+    "        super().__init__()\n",
+    "        self.fc1 = nn.Linear(784, 1024)\n",
+    "        self.fc2 = nn.Linear(1024, 1024)\n",
+    "        self.fc3 = nn.Linear(1024, 10)\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        x = x.view(-1, 784)\n",
+    "        x = torch.relu(self.fc1(x))\n",
+    "        x = torch.relu(self.fc2(x))\n",
+    "        x = torch.log_softmax(self.fc3(x), dim=1)\n",
+    "        return x\n",
+    "\n",
+    "\n",
+    "# Print the model to see the architecture\n",
+    "model = MLP()\n",
+    "\n",
+    "model"
    ]
   },
   {
@@ -173,11 +187,26 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def accuracy(params, batch):\n",
+    "def accuracy(model, batch, params=None):\n",
+    "    if params is not None:\n",
+    "        model.load_state_dict(params)\n",
+    "\n",
+    "    # Convert inputs and targets to PyTorch tensor\n",
     "    inputs, targets = batch\n",
-    "    target_class = jnp.argmax(targets, axis=1)\n",
-    "    predicted_class = jnp.argmax(predict(params, inputs), axis=1)\n",
-    "    return jnp.mean(predicted_class == target_class)"
+    "    inputs = torch.tensor(inputs)\n",
+    "    targets = torch.tensor(targets)\n",
+    "\n",
+    "    # Get model predictions\n",
+    "    with torch.no_grad():\n",
+    "        outputs = model(inputs)\n",
+    "        print(outputs.shape)\n",
+    "    # Get predicted class\n",
+    "    _, predicted_class = torch.max(outputs, dim=1)\n",
+    "    print(predicted_class.shape)\n",
+    "\n",
+    "    # Calculate accuracy\n",
+    "    accuracy = torch.mean((predicted_class == torch.argmax(targets, dim=1)).float())\n",
+    "    return accuracy.item()  # Convert accuracy to a Python scalar"
    ]
   },
   {
@@ -195,10 +224,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "rng = random.PRNGKey(0)\n",
-    "_, random_params = init_random_params(rng, (-1, 28 * 28))\n",
-    "\n",
-    "test_acc = accuracy(random_params, (test_images, test_labels))\n",
+    "test_acc = accuracy(model, (test_images, test_labels))\n",
     "print(f\"Test set accuracy with random weights = {test_acc * 100 : .2f}%\")"
    ]
   },
@@ -217,7 +243,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "test_acc = accuracy(params, (test_images, test_labels))\n",
+    "test_acc = accuracy(model, (test_images, test_labels), params)\n",
     "print(f\"Test set accuracy with trained weights = {test_acc * 100 : .2f}%\")"
    ]
   },
@@ -227,6 +253,16 @@
    "id": "20",
    "metadata": {},
    "outputs": [],
+   "source": [
+    "assert test_acc * 100 > 70"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "21",
+   "metadata": {},
+   "outputs": [],
    "source": []
   }
  ],
@@ -246,7 +282,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.10.9"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/model-training/mnist_dataset.py b/notebooks/tutorials/model-training/mnist_dataset.py
index 8e93b5b9364..77b7c2c7afe 100644
--- a/notebooks/tutorials/model-training/mnist_dataset.py
+++ b/notebooks/tutorials/model-training/mnist_dataset.py
@@ -1,6 +1,6 @@
 """
-Source: https://github.com/google/jax/blob/main/examples/datasets.py
 Code for the MNIST dataset
+Source: https://github.com/google/jax/blob/main/examples/datasets.py
 """
 
 # stdlib
@@ -13,11 +13,12 @@
 
 # third party
 import numpy as np
+from numpy import ndarray
 
-_DATA = "/tmp/jax_example_data/"
+_DATA = "/tmp/mnist_data/"
 
 
-def _download(url, filename):
+def _download(url: str, filename: str) -> None:
     """Download a url to a file in the JAX data temp directory."""
     if not path.exists(_DATA):
         os.makedirs(_DATA)
@@ -27,17 +28,17 @@ def _download(url, filename):
         print(f"downloaded {url} to {_DATA}")
 
 
-def _partial_flatten(x):
+def _partial_flatten(x) -> ndarray:
     """Flatten all but the first dimension of an ndarray."""
     return np.reshape(x, (x.shape[0], -1))
 
 
-def _one_hot(x, k, dtype=np.float32):
+def _one_hot(x: ndarray, k: int, dtype: type = np.float32) -> ndarray:
     """Create a one-hot encoding of x of size k."""
     return np.array(x[:, None] == np.arange(k), dtype)
 
 
-def mnist_raw():
+def mnist_raw() -> tuple[ndarray, ndarray, ndarray, ndarray]:
     """Download and parse the raw MNIST dataset."""
     # CVDF mirror of http://yann.lecun.com/exdb/mnist/
     base_url = "https://storage.googleapis.com/cvdf-datasets/mnist/"
@@ -70,7 +71,7 @@ def parse_images(filename):
     return train_images, train_labels, test_images, test_labels
 
 
-def mnist(permute_train=False):
+def mnist(permute_train: bool = False) -> tuple[ndarray, ndarray, ndarray, ndarray]:
     """Download, parse and process MNIST data to unit scale and one-hot labels."""
     train_images, train_labels, test_images, test_labels = mnist_raw()
 
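
The `_one_hot` helper that gains type hints above is easy to check in isolation; a quick self-contained example of its behavior:

```python
import numpy as np

def _one_hot(x: np.ndarray, k: int, dtype: type = np.float32) -> np.ndarray:
    """Create a one-hot encoding of x of size k."""
    return np.array(x[:, None] == np.arange(k), dtype)

labels = np.array([0, 2, 1])
print(_one_hot(labels, 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
```
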
diff --git a/notebooks/tutorials/pandas-cookbook/01-reading-from-a-csv.ipynb b/notebooks/tutorials/pandas-cookbook/01-reading-from-a-csv.ipynb
index 730391a5881..c0e76617809 100644
--- a/notebooks/tutorials/pandas-cookbook/01-reading-from-a-csv.ipynb
+++ b/notebooks/tutorials/pandas-cookbook/01-reading-from-a-csv.ipynb
@@ -25,7 +25,7 @@
    },
    "outputs": [],
    "source": [
-    "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n",
+    "SYFT_VERSION = \">=0.9,<1.0.0\"\n",
     "package_string = f'\"syft{SYFT_VERSION}\"'\n",
     "# %pip install {package_string} -q"
    ]
@@ -54,7 +54,7 @@
    },
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"pandas-test-domain-1\", port=7081, reset=True)"
+    "server = sy.orchestra.launch(name=\"pandas-test-datasite-1\", port=7081, reset=True)"
    ]
   },
   {
@@ -74,7 +74,7 @@
    },
    "outputs": [],
    "source": [
-    "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -256,7 +256,7 @@
     "    name=\"test\",\n",
     "    asset_list=[sy.Asset(name=\"bikes\", data=fixed_df, mock=mock, mock_is_real=False)],\n",
     ")\n",
-    "root_domain_client.upload_dataset(dataset)"
+    "root_datasite_client.upload_dataset(dataset)"
    ]
   },
   {
@@ -276,7 +276,7 @@
    },
    "outputs": [],
    "source": [
-    "user = root_domain_client.register(\n",
+    "user = root_datasite_client.register(\n",
     "    name=\"Jane Doe\",\n",
     "    email=\"jane@caltech.edu\",\n",
     "    password=\"abc123\",\n",
@@ -307,7 +307,7 @@
    },
    "outputs": [],
    "source": [
-    "guest_domain_client = node.client"
+    "guest_datasite_client = server.client"
    ]
   },
   {
@@ -319,7 +319,7 @@
    },
    "outputs": [],
    "source": [
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -355,8 +355,8 @@
    },
    "outputs": [],
    "source": [
-    "guest_domain_client = node.client\n",
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_datasite_client = server.client\n",
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -368,7 +368,7 @@
    },
    "outputs": [],
    "source": [
-    "ds = guest_domain_client.datasets[0]"
+    "ds = guest_datasite_client.datasets[0]"
    ]
   },
   {
@@ -554,7 +554,7 @@
    },
    "outputs": [],
    "source": [
-    "project = new_project.start()\n",
+    "project = new_project.send()\n",
     "assert isinstance(project, sy.service.project.project.Project)\n",
     "project"
    ]
@@ -612,7 +612,7 @@
    "id": "50",
    "metadata": {},
    "source": [
-    "# Data owner: execute function"
+    "# Data owner: approve request"
    ]
   },
   {
@@ -632,7 +632,7 @@
    },
    "outputs": [],
    "source": [
-    "domain_client = node.client.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "datasite_client = server.client.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -644,7 +644,7 @@
    },
    "outputs": [],
    "source": [
-    "notifications = domain_client.notifications.get_all_unread()"
+    "notifications = datasite_client.notifications.get_all_unread()"
    ]
   },
   {
@@ -688,38 +688,33 @@
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
+   "cell_type": "markdown",
    "id": "57",
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
+   "metadata": {},
    "source": [
-    "request = project_notification.link.events[0].request"
+    "### Review and approve request"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "58",
-   "metadata": {},
+   "metadata": {
+    "tags": []
+   },
    "outputs": [],
    "source": [
-    "func = request.code"
+    "request = project_notification.link.events[0].request"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "59",
-   "metadata": {
-    "tags": []
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "# func = request.code\n",
-    "#"
+    "func = request.code"
    ]
   },
   {
@@ -743,7 +738,7 @@
    },
    "outputs": [],
    "source": [
-    "get_col_user_function = func.unsafe_function"
+    "get_col_user_function = func.run"
    ]
   },
   {
@@ -755,7 +750,7 @@
    },
    "outputs": [],
    "source": [
-    "real_data = domain_client.datasets[0].assets[0].data"
+    "real_data = datasite_client.datasets[0].assets[0].data"
    ]
   },
   {
@@ -791,7 +786,7 @@
    },
    "outputs": [],
    "source": [
-    "result = request.accept_by_depositing_result(real_result)\n",
+    "result = request.approve()\n",
     "assert isinstance(result, sy.SyftSuccess)"
    ]
   },
@@ -800,7 +795,7 @@
    "id": "66",
    "metadata": {},
    "source": [
-    "# Data scientist: fetch result"
+    "# Data scientist: compute result"
    ]
   },
   {
@@ -851,26 +846,6 @@
     "real_result = result_ptr.get()\n",
     "real_result.plot()"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "71",
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "node.land()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "72",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
@@ -889,7 +864,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.10.13"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/pandas-cookbook/02-selecting-data-finding-common-complain.ipynb b/notebooks/tutorials/pandas-cookbook/02-selecting-data-finding-common-complain.ipynb
index 28587a7e3d4..434741c19fc 100644
--- a/notebooks/tutorials/pandas-cookbook/02-selecting-data-finding-common-complain.ipynb
+++ b/notebooks/tutorials/pandas-cookbook/02-selecting-data-finding-common-complain.ipynb
@@ -25,7 +25,7 @@
    },
    "outputs": [],
    "source": [
-    "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n",
+    "SYFT_VERSION = \">=0.9,<1.0.0\"\n",
     "package_string = f'\"syft{SYFT_VERSION}\"'\n",
     "# %pip install {package_string} -q"
    ]
@@ -54,7 +54,7 @@
    },
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"pandas-test-domain-2\", port=9082, reset=True)"
+    "server = sy.orchestra.launch(name=\"pandas-test-datasite-2\", port=9082, reset=True)"
    ]
   },
   {
@@ -74,7 +74,7 @@
    },
    "outputs": [],
    "source": [
-    "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -340,7 +340,7 @@
     "        sy.Asset(name=\"complaints\", data=complaints, mock=mock, mock_is_real=False)\n",
     "    ],\n",
     ")\n",
-    "domain_client.upload_dataset(dataset)"
+    "datasite_client.upload_dataset(dataset)"
    ]
   },
   {
@@ -360,7 +360,7 @@
    },
    "outputs": [],
    "source": [
-    "user = domain_client.register(\n",
+    "user = datasite_client.register(\n",
     "    name=\"Jane Doe\",\n",
     "    email=\"jane@caltech.edu\",\n",
     "    password=\"abc123\",\n",
@@ -371,9 +371,9 @@
     "\n",
     "# todo: give user data scientist role\n",
     "\n",
-    "guest_domain_client = node.client\n",
+    "guest_datasite_client = server.client\n",
     "\n",
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -409,7 +409,7 @@
    },
    "outputs": [],
    "source": [
-    "guest_domain_client = node.client"
+    "guest_datasite_client = server.client"
    ]
   },
   {
@@ -421,8 +421,8 @@
    },
    "outputs": [],
    "source": [
-    "# guest_domain_client = worker.guest_client\n",
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "# guest_datasite_client = worker.guest_client\n",
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -434,7 +434,7 @@
    },
    "outputs": [],
    "source": [
-    "ds = guest_domain_client.datasets[0]"
+    "ds = guest_datasite_client.datasets[0]"
    ]
   },
   {
@@ -760,7 +760,7 @@
    },
    "outputs": [],
    "source": [
-    "project = new_project.start()\n",
+    "project = new_project.send()\n",
     "assert isinstance(project, sy.service.project.project.Project)\n",
     "project"
    ]
@@ -818,7 +818,7 @@
    "id": "66",
    "metadata": {},
    "source": [
-    "# Data owner: execute function"
+    "# Data owner: approve request"
    ]
   },
   {
@@ -830,7 +830,7 @@
    },
    "outputs": [],
    "source": [
-    "domain_client = node.client.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "datasite_client = server.client.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -850,7 +850,7 @@
    },
    "outputs": [],
    "source": [
-    "notifications = domain_client.notifications.get_all_unread()"
+    "notifications = datasite_client.notifications.get_all_unread()"
    ]
   },
   {
@@ -913,7 +913,7 @@
    },
    "outputs": [],
    "source": [
-    "get_counts_user_func = func.unsafe_function"
+    "get_counts_user_func = func.run"
    ]
   },
   {
@@ -925,7 +925,7 @@
    },
    "outputs": [],
    "source": [
-    "real_data = domain_client.datasets[0].assets[0].data"
+    "real_data = datasite_client.datasets[0].assets[0].data"
    ]
   },
   {
@@ -961,7 +961,7 @@
    },
    "outputs": [],
    "source": [
-    "result = request.accept_by_depositing_result(real_result)\n",
+    "result = request.approve()\n",
     "assert isinstance(result, sy.SyftSuccess)"
    ]
   },
@@ -970,7 +970,7 @@
    "id": "79",
    "metadata": {},
    "source": [
-    "# Data scientist: fetch result"
+    "# Data scientist: compute result"
    ]
   },
   {
@@ -1020,7 +1020,7 @@
    },
    "outputs": [],
    "source": [
-    "node.land()"
+    "server.land()"
    ]
   }
  ],
@@ -1040,7 +1040,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.10.13"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/pandas-cookbook/03-which-borough-has-the-most-noise-complaints.ipynb b/notebooks/tutorials/pandas-cookbook/03-which-borough-has-the-most-noise-complaints.ipynb
index 747f7c0f792..c81eb08e469 100644
--- a/notebooks/tutorials/pandas-cookbook/03-which-borough-has-the-most-noise-complaints.ipynb
+++ b/notebooks/tutorials/pandas-cookbook/03-which-borough-has-the-most-noise-complaints.ipynb
@@ -21,11 +21,14 @@
    "execution_count": null,
    "id": "2",
    "metadata": {
+    "jupyter": {
+     "source_hidden": true
+    },
     "tags": []
    },
    "outputs": [],
    "source": [
-    "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n",
+    "SYFT_VERSION = \">=0.9,<1.0.0\"\n",
     "package_string = f'\"syft{SYFT_VERSION}\"'\n",
     "# %pip install {package_string} -q"
    ]
@@ -54,7 +57,7 @@
    },
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"pandas-test-domain-3\", port=7083, reset=True)"
+    "server = sy.orchestra.launch(name=\"pandas-test-datasite-3\", port=7083, reset=True)"
    ]
   },
   {
@@ -74,7 +77,7 @@
    },
    "outputs": [],
    "source": [
-    "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -352,7 +355,7 @@
     "        sy.Asset(name=\"complaints\", data=complaints, mock=mock, mock_is_real=False)\n",
     "    ],\n",
     ")\n",
-    "domain_client.upload_dataset(dataset)"
+    "datasite_client.upload_dataset(dataset)"
    ]
   },
   {
@@ -372,7 +375,7 @@
    },
    "outputs": [],
    "source": [
-    "user = domain_client.register(\n",
+    "user = datasite_client.register(\n",
     "    name=\"Jane Doe\",\n",
     "    email=\"jane@caltech.edu\",\n",
     "    password=\"abc123\",\n",
@@ -383,9 +386,9 @@
     "\n",
     "# todo: give user data scientist role\n",
     "\n",
-    "guest_domain_client = node.client\n",
+    "guest_datasite_client = server.client\n",
     "\n",
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -421,8 +424,8 @@
    },
    "outputs": [],
    "source": [
-    "guest_domain_client = node.client\n",
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_datasite_client = server.client\n",
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -434,7 +437,7 @@
    },
    "outputs": [],
    "source": [
-    "ds = guest_domain_client.datasets[0]"
+    "ds = guest_datasite_client.datasets[0]"
    ]
   },
   {
@@ -874,7 +877,7 @@
    },
    "outputs": [],
    "source": [
-    "project = new_project.start()\n",
+    "project = new_project.send()\n",
     "assert isinstance(project, sy.service.project.project.Project)\n",
     "project"
    ]
@@ -932,7 +935,7 @@
    "id": "75",
    "metadata": {},
    "source": [
-    "# Data owner: execute function"
+    "# Data owner: approve request"
    ]
   },
   {
@@ -952,7 +955,7 @@
    },
    "outputs": [],
    "source": [
-    "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -964,7 +967,7 @@
    },
    "outputs": [],
    "source": [
-    "notifications = domain_client.notifications.get_all_unread()"
+    "notifications = datasite_client.notifications.get_all_unread()"
    ]
   },
   {
@@ -1027,7 +1030,7 @@
    },
    "outputs": [],
    "source": [
-    "get_counts_user_func = func.unsafe_function"
+    "get_counts_user_func = func.run"
    ]
   },
   {
@@ -1039,7 +1042,7 @@
    },
    "outputs": [],
    "source": [
-    "real_data = domain_client.datasets[0].assets[0].data"
+    "real_data = datasite_client.datasets[0].assets[0].data"
    ]
   },
   {
@@ -1075,7 +1078,7 @@
    },
    "outputs": [],
    "source": [
-    "result = request.accept_by_depositing_result(real_result)\n",
+    "result = request.approve()\n",
     "assert isinstance(result, sy.SyftSuccess)"
    ]
   },
@@ -1084,7 +1087,7 @@
    "id": "88",
    "metadata": {},
    "source": [
-    "# Data scientist: fetch result"
+    "# Data scientist: compute result"
    ]
   },
   {
@@ -1134,7 +1137,7 @@
    },
    "outputs": [],
    "source": [
-    "node.land()"
+    "server.land()"
    ]
   }
  ],
@@ -1154,7 +1157,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.5"
+   "version": "3.10.13"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/pandas-cookbook/04-weekday-bike-most-groupby-aggregate.ipynb b/notebooks/tutorials/pandas-cookbook/04-weekday-bike-most-groupby-aggregate.ipynb
index 278363f5e6d..3f99c0b2cf9 100644
--- a/notebooks/tutorials/pandas-cookbook/04-weekday-bike-most-groupby-aggregate.ipynb
+++ b/notebooks/tutorials/pandas-cookbook/04-weekday-bike-most-groupby-aggregate.ipynb
@@ -17,7 +17,7 @@
    },
    "outputs": [],
    "source": [
-    "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n",
+    "SYFT_VERSION = \">=0.9,<1.0.0\"\n",
     "package_string = f'\"syft{SYFT_VERSION}\"'\n",
     "# %pip install {package_string} -q"
    ]
@@ -46,7 +46,7 @@
    },
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"pandas-test-domain-4\", port=9084, reset=True)"
+    "server = sy.orchestra.launch(name=\"pandas-test-datasite-4\", port=9084, reset=True)"
    ]
   },
   {
@@ -66,7 +66,7 @@
    },
    "outputs": [],
    "source": [
-    "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -211,7 +211,7 @@
     "    name=\"bikes2\",\n",
     "    asset_list=[sy.Asset(name=\"bikes\", data=df, mock=mock, mock_is_real=False)],\n",
     ")\n",
-    "root_domain_client.upload_dataset(dataset)"
+    "root_datasite_client.upload_dataset(dataset)"
    ]
   },
   {
@@ -231,7 +231,7 @@
    },
    "outputs": [],
    "source": [
-    "user = root_domain_client.register(\n",
+    "user = root_datasite_client.register(\n",
     "    name=\"Jane Doe\",\n",
     "    email=\"jane@caltech.edu\",\n",
     "    password=\"abc123\",\n",
@@ -242,9 +242,9 @@
     "\n",
     "# todo: give user data scientist role\n",
     "\n",
-    "guest_domain_client = node.client\n",
+    "guest_datasite_client = server.client\n",
     "\n",
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -288,7 +288,7 @@
    },
    "outputs": [],
    "source": [
-    "ds = guest_domain_client.datasets[0]"
+    "ds = guest_datasite_client.datasets[0]"
    ]
   },
   {
@@ -634,7 +634,7 @@
    },
    "outputs": [],
    "source": [
-    "project = new_project.start()\n",
+    "project = new_project.send()\n",
     "assert isinstance(project, sy.service.project.project.Project)\n",
     "project"
    ]
@@ -692,7 +692,7 @@
    "id": "57",
    "metadata": {},
    "source": [
-    "# Data owner: execute syft_function"
+    "# Data owner: approve request"
    ]
   },
   {
@@ -716,7 +716,7 @@
    },
    "outputs": [],
    "source": [
-    "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -736,7 +736,7 @@
    },
    "outputs": [],
    "source": [
-    "notifications = root_domain_client.notifications.get_all_unread()"
+    "notifications = root_datasite_client.notifications.get_all_unread()"
    ]
   },
   {
@@ -799,7 +799,7 @@
    },
    "outputs": [],
    "source": [
-    "get_col_user_function = func.unsafe_function"
+    "get_col_user_function = func.run"
    ]
   },
   {
@@ -811,7 +811,7 @@
    },
    "outputs": [],
    "source": [
-    "real_data = root_domain_client.datasets[0].assets[0].data"
+    "real_data = root_datasite_client.datasets[0].assets[0].data"
    ]
   },
   {
@@ -847,7 +847,7 @@
    },
    "outputs": [],
    "source": [
-    "result = request.accept_by_depositing_result(real_result)\n",
+    "result = request.approve()\n",
     "assert isinstance(result, sy.SyftSuccess)"
    ]
   },
@@ -856,7 +856,7 @@
    "id": "71",
    "metadata": {},
    "source": [
-    "# Data scientist: fetch result"
+    "# Data scientist: compute result"
    ]
   },
   {
@@ -906,7 +906,7 @@
    },
    "outputs": [],
    "source": [
-    "node.land()"
+    "server.land()"
    ]
   }
  ],
@@ -926,7 +926,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.5"
+   "version": "3.10.13"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/pandas-cookbook/05-combining-dataframes-scraping-weather-data.ipynb b/notebooks/tutorials/pandas-cookbook/05-combining-dataframes-scraping-weather-data.ipynb
index 384b8e10701..0878c0a9cfe 100644
--- a/notebooks/tutorials/pandas-cookbook/05-combining-dataframes-scraping-weather-data.ipynb
+++ b/notebooks/tutorials/pandas-cookbook/05-combining-dataframes-scraping-weather-data.ipynb
@@ -25,7 +25,7 @@
    },
    "outputs": [],
    "source": [
-    "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n",
+    "SYFT_VERSION = \">=0.9,<1.0.0\"\n",
     "package_string = f'\"syft{SYFT_VERSION}\"'\n",
     "# %pip install {package_string} -q"
    ]
@@ -54,7 +54,7 @@
    },
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"pandas-test-domain-5\", port=9085, reset=True)"
+    "server = sy.orchestra.launch(name=\"pandas-test-datasite-5\", port=9085, reset=True)"
    ]
   },
   {
@@ -74,7 +74,7 @@
    },
    "outputs": [],
    "source": [
-    "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -298,7 +298,7 @@
    "outputs": [],
    "source": [
     "dataset = sy.Dataset(name=\"test\", asset_list=assets)\n",
-    "root_domain_client.upload_dataset(dataset)"
+    "root_datasite_client.upload_dataset(dataset)"
    ]
   },
   {
@@ -330,7 +330,7 @@
    },
    "outputs": [],
    "source": [
-    "user = root_domain_client.register(\n",
+    "user = root_datasite_client.register(\n",
     "    name=\"Jane Doe\",\n",
     "    email=\"jane@caltech.edu\",\n",
     "    password=\"abc123\",\n",
@@ -341,9 +341,9 @@
     "\n",
     "# todo: give user data scientist role\n",
     "\n",
-    "guest_domain_client = node.client\n",
+    "guest_datasite_client = server.client\n",
     "\n",
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -382,7 +382,7 @@
    },
    "outputs": [],
    "source": [
-    "ds = guest_domain_client.datasets[-1]"
+    "ds = guest_datasite_client.datasets[-1]"
    ]
   },
   {
@@ -821,7 +821,7 @@
    },
    "outputs": [],
    "source": [
-    "project = new_project.start()\n",
+    "project = new_project.send()\n",
     "assert isinstance(project, sy.service.project.project.Project)\n",
     "project"
    ]
@@ -879,7 +879,7 @@
    "id": "72",
    "metadata": {},
    "source": [
-    "# Data owner: execute syft function"
+    "# Data owner: approve request"
    ]
   },
   {
@@ -903,7 +903,7 @@
    },
    "outputs": [],
    "source": [
-    "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -923,7 +923,7 @@
    },
    "outputs": [],
    "source": [
-    "notifications = root_domain_client.notifications.get_all_unread()"
+    "notifications = root_datasite_client.notifications.get_all_unread()"
    ]
   },
   {
@@ -986,7 +986,7 @@
    },
    "outputs": [],
    "source": [
-    "get_col_user_function = func.unsafe_function"
+    "get_col_user_function = func.run"
    ]
   },
   {
@@ -999,8 +999,8 @@
    "outputs": [],
    "source": [
     "real_data1, real_data2 = (\n",
-    "    root_domain_client.datasets[-1].assets[\"weather1\"].data,\n",
-    "    root_domain_client.datasets[-1].assets[\"weather2\"].data,\n",
+    "    root_datasite_client.datasets[-1].assets[\"weather1\"].data,\n",
+    "    root_datasite_client.datasets[-1].assets[\"weather2\"].data,\n",
     ")"
    ]
   },
@@ -1049,7 +1049,7 @@
    },
    "outputs": [],
    "source": [
-    "result = request.accept_by_depositing_result(real_result)\n",
+    "result = request.approve()\n",
     "assert isinstance(result, sy.SyftSuccess)"
    ]
   },
@@ -1058,7 +1058,7 @@
    "id": "87",
    "metadata": {},
    "source": [
-    "# Data scientist: fetch result"
+    "# Data scientist: compute result"
    ]
   },
   {
@@ -1070,7 +1070,7 @@
    },
    "outputs": [],
    "source": [
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -1175,7 +1175,7 @@
    },
    "outputs": [],
    "source": [
-    "node.land()"
+    "server.land()"
    ]
   }
  ],
@@ -1195,7 +1195,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.5"
+   "version": "3.10.13"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/pandas-cookbook/06-string-operations-which-month-was-the-snowiest.ipynb b/notebooks/tutorials/pandas-cookbook/06-string-operations-which-month-was-the-snowiest.ipynb
index 404bdc30026..d7c4cafd3d0 100644
--- a/notebooks/tutorials/pandas-cookbook/06-string-operations-which-month-was-the-snowiest.ipynb
+++ b/notebooks/tutorials/pandas-cookbook/06-string-operations-which-month-was-the-snowiest.ipynb
@@ -25,7 +25,7 @@
    },
    "outputs": [],
    "source": [
-    "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n",
+    "SYFT_VERSION = \">=0.9,<1.0.0\"\n",
     "package_string = f'\"syft{SYFT_VERSION}\"'\n",
     "# %pip install {package_string} -q"
    ]
@@ -54,7 +54,7 @@
    },
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"pandas-test-domain-6\", port=9086, reset=True)"
+    "server = sy.orchestra.launch(name=\"pandas-test-datasite-6\", port=9086, reset=True)"
    ]
   },
   {
@@ -74,7 +74,7 @@
    },
    "outputs": [],
    "source": [
-    "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -287,7 +287,7 @@
     "        sy.Asset(name=\"weather\", data=weather_2012_final, mock=mock, mock_is_real=False)\n",
     "    ],\n",
     ")\n",
-    "root_domain_client.upload_dataset(dataset)"
+    "root_datasite_client.upload_dataset(dataset)"
    ]
   },
   {
@@ -319,7 +319,7 @@
    },
    "outputs": [],
    "source": [
-    "user = root_domain_client.register(\n",
+    "user = root_datasite_client.register(\n",
     "    name=\"Jane Doe\",\n",
     "    email=\"jane@caltech.edu\",\n",
     "    password=\"abc123\",\n",
@@ -328,8 +328,8 @@
     "    website=\"https://www.caltech.edu/\",\n",
     ")\n",
     "# todo: give user data scientist role\n",
-    "guest_domain_client = node.client\n",
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_datasite_client = server.client\n",
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -368,7 +368,7 @@
    },
    "outputs": [],
    "source": [
-    "ds = guest_domain_client.datasets[0]"
+    "ds = guest_datasite_client.datasets[0]"
    ]
   },
   {
@@ -723,7 +723,7 @@
    },
    "outputs": [],
    "source": [
-    "project = new_project.start()\n",
+    "project = new_project.send()\n",
     "assert isinstance(project, sy.service.project.project.Project)\n",
     "project"
    ]
@@ -781,7 +781,7 @@
    "id": "63",
    "metadata": {},
    "source": [
-    "# Data owner: execute syft_function"
+    "# Data owner: approve request"
    ]
   },
   {
@@ -805,7 +805,7 @@
    },
    "outputs": [],
    "source": [
-    "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -826,7 +826,7 @@
    },
    "outputs": [],
    "source": [
-    "notifications = domain_client.notifications.get_all_unread()"
+    "notifications = datasite_client.notifications.get_all_unread()"
    ]
   },
   {
@@ -889,7 +889,7 @@
    },
    "outputs": [],
    "source": [
-    "get_col_user_function = func.unsafe_function"
+    "get_col_user_function = func.run"
    ]
   },
   {
@@ -901,7 +901,7 @@
    },
    "outputs": [],
    "source": [
-    "real_data = domain_client.datasets[0].assets[0].data"
+    "real_data = datasite_client.datasets[0].assets[0].data"
    ]
   },
   {
@@ -937,7 +937,7 @@
    },
    "outputs": [],
    "source": [
-    "result = request.accept_by_depositing_result(real_result)\n",
+    "result = request.approve()\n",
     "assert isinstance(result, sy.SyftSuccess)"
    ]
   },
@@ -946,7 +946,7 @@
    "id": "77",
    "metadata": {},
    "source": [
-    "# Data scientist: fetch result"
+    "# Data scientist: compute result"
    ]
   },
   {
@@ -958,7 +958,7 @@
    },
    "outputs": [],
    "source": [
-    "guest_client = node.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_client = server.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -1072,7 +1072,7 @@
    },
    "outputs": [],
    "source": [
-    "node.land()"
+    "server.land()"
    ]
   }
  ],
@@ -1092,7 +1092,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.8"
+   "version": "3.10.13"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/pandas-cookbook/07-cleaning-up-messy-data.ipynb b/notebooks/tutorials/pandas-cookbook/07-cleaning-up-messy-data.ipynb
index c5a1887d04e..303e0a808d4 100644
--- a/notebooks/tutorials/pandas-cookbook/07-cleaning-up-messy-data.ipynb
+++ b/notebooks/tutorials/pandas-cookbook/07-cleaning-up-messy-data.ipynb
@@ -25,7 +25,7 @@
    },
    "outputs": [],
    "source": [
-    "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n",
+    "SYFT_VERSION = \">=0.9,<1.0.0\"\n",
     "package_string = f'\"syft{SYFT_VERSION}\"'\n",
     "# %pip install {package_string} -q"
    ]
@@ -54,7 +54,7 @@
    },
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"pandas-test-domain-7\", port=9087, reset=True)"
+    "server = sy.orchestra.launch(name=\"pandas-test-datasite-7\", port=9087, reset=True)"
    ]
   },
   {
@@ -74,7 +74,7 @@
    },
    "outputs": [],
    "source": [
-    "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -307,7 +307,7 @@
     "        )\n",
     "    ],\n",
     ")\n",
-    "root_domain_client.upload_dataset(dataset)"
+    "root_datasite_client.upload_dataset(dataset)"
    ]
   },
   {
@@ -327,7 +327,7 @@
    },
    "outputs": [],
    "source": [
-    "user = root_domain_client.register(\n",
+    "user = root_datasite_client.register(\n",
     "    name=\"Jane Doe\",\n",
     "    email=\"jane@caltech.edu\",\n",
     "    password=\"abc123\",\n",
@@ -336,8 +336,8 @@
     "    website=\"https://www.caltech.edu/\",\n",
     ")\n",
     "# todo: give user data scientist role\n",
-    "guest_domain_client = node.client\n",
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_datasite_client = server.client\n",
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -778,7 +778,7 @@
    },
    "outputs": [],
    "source": [
-    "project = new_project.start()\n",
+    "project = new_project.send()\n",
     "assert isinstance(project, sy.service.project.project.Project)\n",
     "project"
    ]
@@ -836,7 +836,7 @@
    "id": "64",
    "metadata": {},
    "source": [
-    "# Data owner: execute syft_function"
+    "# Data owner: approve request"
    ]
   },
   {
@@ -860,7 +860,7 @@
    },
    "outputs": [],
    "source": [
-    "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -881,7 +881,7 @@
    },
    "outputs": [],
    "source": [
-    "notifications = domain_client.notifications.get_all_unread()"
+    "notifications = datasite_client.notifications.get_all_unread()"
    ]
   },
   {
@@ -944,7 +944,7 @@
    },
    "outputs": [],
    "source": [
-    "zip_codes = func.unsafe_function"
+    "zip_codes = func.run"
    ]
   },
   {
@@ -956,7 +956,7 @@
    },
    "outputs": [],
    "source": [
-    "real_data = domain_client.datasets[0].assets[0].data"
+    "real_data = datasite_client.datasets[0].assets[0].data"
    ]
   },
   {
@@ -1004,7 +1004,7 @@
    },
    "outputs": [],
    "source": [
-    "result = request.accept_by_depositing_result(real_result)\n",
+    "result = request.approve()\n",
     "assert isinstance(result, sy.SyftSuccess)"
    ]
   },
@@ -1025,7 +1025,7 @@
    "id": "80",
    "metadata": {},
    "source": [
-    "# Data scientist: fetch result"
+    "# Data scientist: compute result"
    ]
   },
   {
@@ -1086,7 +1086,7 @@
    },
    "outputs": [],
    "source": [
-    "node.land()"
+    "server.land()"
    ]
   }
  ],
@@ -1106,7 +1106,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.5"
+   "version": "3.10.13"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/pandas-cookbook/08-how-to-deal-with-timestamps.ipynb b/notebooks/tutorials/pandas-cookbook/08-how-to-deal-with-timestamps.ipynb
index 5bb016f1cae..2e40e221bfb 100644
--- a/notebooks/tutorials/pandas-cookbook/08-how-to-deal-with-timestamps.ipynb
+++ b/notebooks/tutorials/pandas-cookbook/08-how-to-deal-with-timestamps.ipynb
@@ -27,7 +27,7 @@
    },
    "outputs": [],
    "source": [
-    "SYFT_VERSION = \">=0.8.2.b0,<0.9\"\n",
+    "SYFT_VERSION = \">=0.9,<1.0.0\"\n",
     "package_string = f'\"syft{SYFT_VERSION}\"'\n",
     "# %pip install {package_string} -q"
    ]
@@ -56,7 +56,7 @@
    },
    "outputs": [],
    "source": [
-    "node = sy.orchestra.launch(name=\"pandas-test-domain-8\", port=9088, reset=True)"
+    "server = sy.orchestra.launch(name=\"pandas-test-datasite-8\", port=9088, reset=True)"
    ]
   },
   {
@@ -77,7 +77,7 @@
    },
    "outputs": [],
    "source": [
-    "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -328,7 +328,7 @@
     "        )\n",
     "    ],\n",
     ")\n",
-    "root_domain_client.upload_dataset(dataset)"
+    "root_datasite_client.upload_dataset(dataset)"
    ]
   },
   {
@@ -361,7 +361,7 @@
    },
    "outputs": [],
    "source": [
-    "user = root_domain_client.register(\n",
+    "user = root_datasite_client.register(\n",
     "    name=\"Jane Doe\",\n",
     "    email=\"jane@caltech.edu\",\n",
     "    password=\"abc123\",\n",
@@ -370,8 +370,8 @@
     "    website=\"https://www.caltech.edu/\",\n",
     ")\n",
     "# todo: give user data scientist role\n",
-    "guest_domain_client = node.client\n",
-    "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
+    "guest_datasite_client = server.client\n",
+    "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")"
    ]
   },
   {
@@ -425,7 +425,7 @@
    },
    "outputs": [],
    "source": [
-    "ds = guest_domain_client.datasets[0]"
+    "ds = guest_datasite_client.datasets[0]"
    ]
   },
   {
@@ -728,7 +728,7 @@
    },
    "outputs": [],
    "source": [
-    "project = new_project.start()\n",
+    "project = new_project.send()\n",
     "assert isinstance(project, sy.service.project.project.Project)\n",
     "project"
    ]
@@ -787,7 +787,7 @@
    "id": "62",
    "metadata": {},
    "source": [
-    "# Data owner: execute syft_function"
+    "# Data owner: approve request"
    ]
   },
   {
@@ -811,7 +811,7 @@
    },
    "outputs": [],
    "source": [
-    "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")"
+    "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
    ]
   },
   {
@@ -832,7 +832,7 @@
    },
    "outputs": [],
    "source": [
-    "notifications = domain_client.notifications.get_all_unread()"
+    "notifications = datasite_client.notifications.get_all_unread()"
    ]
   },
   {
@@ -895,7 +895,7 @@
    },
    "outputs": [],
    "source": [
-    "find_recently_installed = func.unsafe_function"
+    "find_recently_installed = func.run"
    ]
   },
   {
@@ -907,7 +907,7 @@
    },
    "outputs": [],
    "source": [
-    "real_data = domain_client.datasets[0].assets[0].data"
+    "real_data = datasite_client.datasets[0].assets[0].data"
    ]
   },
   {
@@ -943,7 +943,7 @@
    },
    "outputs": [],
    "source": [
-    "result = request.accept_by_depositing_result(real_result)\n",
+    "result = request.approve()\n",
     "assert isinstance(result, sy.SyftSuccess)"
    ]
   },
@@ -953,7 +953,7 @@
    "id": "76",
    "metadata": {},
    "source": [
-    "# Data Owner: fetch result"
+    "# Data Owner: compute result"
    ]
   },
   {
@@ -1014,7 +1014,7 @@
    },
    "outputs": [],
    "source": [
-    "node.land()"
+    "server.land()"
    ]
   }
  ],
@@ -1034,7 +1034,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.5"
+   "version": "3.10.13"
   },
   "toc": {
    "base_numbering": 1,
diff --git a/notebooks/tutorials/version-upgrades/0-prepare-migration-data.ipynb b/notebooks/tutorials/version-upgrades/0-prepare-migration-data.ipynb
new file mode 100644
index 00000000000..9c070a9734b
--- /dev/null
+++ b/notebooks/tutorials/version-upgrades/0-prepare-migration-data.ipynb
@@ -0,0 +1,259 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# third party\n",
+    "import numpy as np\n",
+    "\n",
+    "# syft absolute\n",
+    "import syft as sy"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "1",
+   "metadata": {},
+   "source": [
+    "# Verify Version"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pip_info = !pip index versions syft\n",
+    "latest_deployed_version = pip_info[-1].split(\"LATEST: \")[-1].strip()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# this notebook should only be used to run the latest deployed version of syft\n",
+    "# the notebooks after this (1a/1b and 2), will test migrating from that latest version\n",
+    "print(\n",
+    "    f\"latest deployed version: {latest_deployed_version}, installed version: {sy.__version__}\"\n",
+    ")\n",
+    "# assert (\n",
+    "#     latest_deployed_version == sy.__version__\n",
+    "# ), f\"{latest_deployed_version} does not match installed version {sy.__version__}\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4",
+   "metadata": {},
+   "source": [
+    "# Launch Server"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "server = sy.orchestra.launch(\n",
+    "    name=\"test_upgradability\",\n",
+    "    dev_mode=True,\n",
+    "    reset=True,\n",
+    "    n_consumers=2,\n",
+    "    create_producer=True,\n",
+    "    port=\"auto\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client.register(\n",
+    "    email=\"ds@openmined.org\", name=\"John Doe\", password=\"pw\", password_verify=\"pw\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "8",
+   "metadata": {},
+   "source": [
+    "# Prepare some data to be migrated"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client_ds = server.login(email=\"ds@openmined.org\", password=\"pw\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "10",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dataset = sy.Dataset(\n",
+    "    name=\"my-dataset\",\n",
+    "    description=\"abc\",\n",
+    "    asset_list=[\n",
+    "        sy.Asset(\n",
+    "            name=\"numpy-data\",\n",
+    "            mock=np.array([10, 11, 12, 13, 14]),\n",
+    "            data=np.array([[15, 16, 17, 18, 19] for _ in range(100_000)]),\n",
+    "            mock_is_real=True,\n",
+    "        )\n",
+    "    ],\n",
+    ")\n",
+    "\n",
+    "client.upload_dataset(dataset)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "11",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "data_low = client_ds.datasets[0].assets[0]\n",
+    "\n",
+    "\n",
+    "@sy.syft_function_single_use(data=data_low)\n",
+    "def compute_mean(datasite, data) -> float:\n",
+    "    # launch another job\n",
+    "    print(\"Computing mean...\")\n",
+    "    return data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "12",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "req = client_ds.code.request_code_execution(compute_mean)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "13",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client.requests[0].approve()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "14",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "job = client_ds.code.compute_mean(data=data_low, blocking=False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "15",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "res = job.wait()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "16",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "res.get().shape"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "17",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# todo: add more data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "18",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if server.server_type.value == \"python\":\n",
+    "    server.land()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "19",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/tutorials/version-upgrades/1-dump-database-to-file.ipynb b/notebooks/tutorials/version-upgrades/1-dump-database-to-file.ipynb
new file mode 100644
index 00000000000..f093b7b458e
--- /dev/null
+++ b/notebooks/tutorials/version-upgrades/1-dump-database-to-file.ipynb
@@ -0,0 +1,152 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# stdlib\n",
+    "import os\n",
+    "from pathlib import Path\n",
+    "\n",
+    "# syft absolute\n",
+    "import syft as sy"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "server = sy.orchestra.launch(\n",
+    "    name=\"test_upgradability\",\n",
+    "    dev_mode=True,\n",
+    "    reset=False,\n",
+    "    port=\"auto\",\n",
+    "    migrate=False,\n",
+    ")\n",
+    "\n",
+    "client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# client = sy.login(email=\"info@openmined.org\", password=\"changethis\", port=8080)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Check if this server has data on it\n",
+    "assert len(client.users.get_all()) == 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "migration_data = client.get_migration_data(include_blobs=True)\n",
+    "migration_data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "assert migration_data.includes_blobs\n",
+    "assert migration_data.num_action_objects > 0"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "migration_data_dir = Path(os.getenv(\"MIGRATION_DATA_DIR\", \".\"))\n",
+    "migration_data_dir.mkdir(exist_ok=True)\n",
+    "\n",
+    "blob_path = migration_data_dir / \"migration.blob\"\n",
+    "yaml_path = migration_data_dir / \"migration.yaml\"\n",
+    "\n",
+    "blob_path.unlink(missing_ok=True)\n",
+    "yaml_path.unlink(missing_ok=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "migration_data.save(blob_path, yaml_path=yaml_path)\n",
+    "\n",
+    "assert blob_path.exists()\n",
+    "assert yaml_path.exists()\n",
+    "\n",
+    "print(f\"Saved migration data to {str(blob_path.resolve())}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if server.server_type.value == \"python\":\n",
+    "    server.land()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/tutorials/version-upgrades/2-migrate-from-file.ipynb b/notebooks/tutorials/version-upgrades/2-migrate-from-file.ipynb
new file mode 100644
index 00000000000..96c6e432764
--- /dev/null
+++ b/notebooks/tutorials/version-upgrades/2-migrate-from-file.ipynb
@@ -0,0 +1,317 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# stdlib\n",
+    "import os\n",
+    "from pathlib import Path\n",
+    "\n",
+    "# syft absolute\n",
+    "import syft as sy\n",
+    "from syft.service.code.user_code import UserCode\n",
+    "from syft.service.user.user import User"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "1",
+   "metadata": {},
+   "source": [
+    "# Login"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "server = sy.orchestra.launch(\n",
+    "    name=\"test_upgradability\",\n",
+    "    dev_mode=True,\n",
+    "    reset=True,\n",
+    "    port=\"auto\",\n",
+    ")\n",
+    "\n",
+    "client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Check if this is a new server\n",
+    "migration_data = client.get_migration_data()\n",
+    "\n",
+    "assert len(migration_data.store_objects[User]) == 1\n",
+    "assert UserCode not in migration_data.store_objects"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4",
+   "metadata": {},
+   "source": [
+    "# Load migration data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "migration_data_dir = Path(os.getenv(\"MIGRATION_DATA_DIR\", \".\"))\n",
+    "blob_path = migration_data_dir / \"migration.blob\"\n",
+    "yaml_path = migration_data_dir / \"migration.yaml\"\n",
+    "\n",
+    "print(f\"Loading migration data from {str(blob_path.resolve())}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "res = client.load_migration_data(blob_path)\n",
+    "assert isinstance(res, sy.SyftSuccess), res.message"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "res"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "8",
+   "metadata": {},
+   "source": [
+    "# Post migration tests"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "assert len(client.users.get_all()) == 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "10",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client_ds = server.login(email=\"ds@openmined.org\", password=\"pw\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "11",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "12",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# syft absolute\n",
+    "from syft.client.api import APIRegistry"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "13",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "APIRegistry.__api_registry__.keys()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "14",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "code = client.code.get_all()[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "15",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "code.status"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "16",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "req1 = client.requests[0]\n",
+    "req2 = client_ds.requests[0]\n",
+    "assert req1.status.name == \"APPROVED\" and req2.status.name == \"APPROVED\"\n",
+    "assert isinstance(req1._repr_html_(), str)\n",
+    "assert isinstance(req2._repr_html_(), str)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "17",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "jobs = client_ds.jobs\n",
+    "assert isinstance(jobs[0]._repr_html_(), str)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "18",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ds = client_ds.datasets\n",
+    "asset = ds[0].assets[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "19",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "res = client_ds.code.compute_mean(data=asset)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "20",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "assert res.get().shape == (100_000, 5)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "21",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "jobs = client_ds.jobs.get_all()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "22",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "job = jobs[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "23",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "job.logs()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "24",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "logs = job.logs(_print=False)\n",
+    "assert isinstance(logs, str)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "25",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if server.server_type.value == \"python\":\n",
+    "    server.land()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "26",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/tutorials/version-upgrades/custom_workerpool/0-prepare-migration-data.ipynb b/notebooks/tutorials/version-upgrades/custom_workerpool/0-prepare-migration-data.ipynb
new file mode 100644
index 00000000000..a7fda0e7fdc
--- /dev/null
+++ b/notebooks/tutorials/version-upgrades/custom_workerpool/0-prepare-migration-data.ipynb
@@ -0,0 +1,605 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# stdlib\n",
+    "import os\n",
+    "\n",
+    "# third party\n",
+    "import numpy as np\n",
+    "import requests\n",
+    "\n",
+    "# syft absolute\n",
+    "import syft as sy\n",
+    "from syft import test_settings"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n",
+    "# os.environ[\"DEV_MODE\"] = \"True\"\n",
+    "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"\n",
+    "# os.environ[\"SERVER_PORT\"] = \"8081\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n",
+    "server_port = os.environ.get(\"SERVER_PORT\", \"auto\")\n",
+    "print(environment, server_port)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3",
+   "metadata": {},
+   "source": [
+    "# Verify Version"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "PROJECT_NAME = \"syft\"\n",
+    "PYPI_JSON_URL = f\"https://pypi.org/pypi/{PROJECT_NAME}/json\"\n",
+    "\n",
+    "\n",
+    "def get_latest_pypi_version():\n",
+    "    response = requests.get(PYPI_JSON_URL)\n",
+    "    data = response.json()\n",
+    "    return data[\"info\"][\"version\"]\n",
+    "\n",
+    "\n",
+    "latest_deployed_version = get_latest_pypi_version()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# this notebook should only be used to run the latest deployed version of syft\n",
+    "# the notebooks after this (1a/1b and 2), will test migrating from that latest version\n",
+    "print(\n",
+    "    f\"latest deployed version: {latest_deployed_version}, installed version: {sy.__version__}\"\n",
+    ")\n",
+    "# assert (\n",
+    "#     latest_deployed_version == sy.__version__\n",
+    "# ), f\"{latest_deployed_version} does not match installed version {sy.__version__}\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6",
+   "metadata": {},
+   "source": [
+    "# Launch Server"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "server = sy.orchestra.launch(\n",
+    "    name=\"test_upgradability\",\n",
+    "    dev_mode=True,\n",
+    "    reset=True,\n",
+    "    n_consumers=2,\n",
+    "    create_producer=True,\n",
+    "    port=server_port,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client.register(\n",
+    "    email=\"ds@openmined.org\", name=\"John Doe\", password=\"pw\", password_verify=\"pw\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "10",
+   "metadata": {},
+   "source": [
+    "# Prepare some data to be migrated"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "11",
+   "metadata": {},
+   "source": [
+    "## External registry"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "12",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "external_registry = test_settings.get(\"external_registry\", default=\"docker.io\")\n",
+    "external_registry"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "13",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "result = client.api.services.image_registry.add(external_registry)\n",
+    "result"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "14",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "image_registry_list = client.api.services.image_registry.get_all()\n",
+    "image_registry_list"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "15",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "local_registry = image_registry_list[0]\n",
+    "local_registry"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "16",
+   "metadata": {},
+   "source": [
+    "## custom workerpool"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "17",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Get base worker image\n",
+    "base_worker_image = client.images.get_all()[0]\n",
+    "base_worker_image"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "18",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "worker_dockerfile = f\"\"\"\n",
+    "FROM {str(base_worker_image.image_identifier)}\n",
+    "\n",
+    "RUN uv pip install db-dtypes google-cloud-bigquery\n",
+    "\"\"\".strip()\n",
+    "\n",
+    "docker_config = sy.DockerWorkerConfig(dockerfile=worker_dockerfile)\n",
+    "\n",
+    "print(docker_config)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "19",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "assert docker_config.dockerfile == worker_dockerfile"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "20",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "submit_result = client.api.services.worker_image.submit(worker_config=docker_config)\n",
+    "\n",
+    "assert isinstance(submit_result, sy.SyftSuccess)\n",
+    "custom_image = submit_result.value"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "21",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "docker_tag = str(base_worker_image.image_identifier).replace(\n",
+    "    \"backend\", \"worker-bigquery\"\n",
+    ")\n",
+    "docker_tag"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "22",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "build_result = client.api.services.worker_image.build(\n",
+    "    image_uid=custom_image.id,\n",
+    "    tag=docker_tag,\n",
+    "    registry_uid=local_registry.id,\n",
+    ")\n",
+    "\n",
+    "assert isinstance(build_result, sy.SyftSuccess)\n",
+    "build_result"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "23",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "push_result = client.api.services.worker_image.push(custom_image.id)\n",
+    "push_result"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "24",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "custom_pool_name = \"custom-bigquery\"\n",
+    "\n",
+    "worker_pool_res = client.api.services.worker_pool.launch(\n",
+    "    pool_name=custom_pool_name,\n",
+    "    image_uid=custom_image.id,\n",
+    "    num_workers=4,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "25",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "worker_pool_res"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "26",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "images = client.services.worker_image.get_all()\n",
+    "\n",
+    "assert len(images) == 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "27",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "worker_pools = client.worker_pools.get_all()\n",
+    "\n",
+    "assert len(worker_pools) == 2\n",
+    "\n",
+    "worker_pools"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "28",
+   "metadata": {},
+   "source": [
+    "## Prebuilt custom workerpool"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "29",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "docker_tag"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "30",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "docker_config = sy.PrebuiltWorkerConfig(tag=docker_tag)\n",
+    "docker_config"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "31",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "result = client.api.services.worker_image.submit(worker_config=docker_config)\n",
+    "assert isinstance(result, sy.SyftSuccess)\n",
+    "\n",
+    "prebuilt_image = result.value\n",
+    "prebuilt_image"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "32",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "prebuilt_pool_name = \"bigquery-prebuilt\"\n",
+    "result = client.api.services.worker_pool.launch(\n",
+    "    pool_name=prebuilt_pool_name,\n",
+    "    image_uid=prebuilt_image.id,\n",
+    "    num_workers=1,\n",
+    ")\n",
+    "result"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "33",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client.api.services.worker_image.get_all()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "34",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "images = client.api.services.worker_image.get_all()\n",
+    "\n",
+    "assert len(images) == 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "35",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "worker_pools = client.worker_pools.get_all()\n",
+    "\n",
+    "assert len(worker_pools) == 3"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "36",
+   "metadata": {},
+   "source": [
+    "## Dataset, Code, Job"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "37",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client_ds = server.login(email=\"ds@openmined.org\", password=\"pw\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "38",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dataset = sy.Dataset(\n",
+    "    name=\"my-dataset\",\n",
+    "    description=\"abc\",\n",
+    "    asset_list=[\n",
+    "        sy.Asset(\n",
+    "            name=\"numpy-data\",\n",
+    "            mock=np.array([10, 11, 12, 13, 14]),\n",
+    "            data=np.array([[15, 16, 17, 18, 19] for _ in range(100_000)]),\n",
+    "            mock_is_real=True,\n",
+    "        )\n",
+    "    ],\n",
+    ")\n",
+    "\n",
+    "client.upload_dataset(dataset)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "39",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "data_low = client_ds.datasets[0].assets[0]\n",
+    "\n",
+    "\n",
+    "@sy.syft_function_single_use(data=data_low, worker_pool_name=\"bigquery-prebuilt\")\n",
+    "def compute_mean(datasite, data) -> float:\n",
+    "    # launch job on prebuilt workerpool\n",
+    "    print(\"Computing mean...\")\n",
+    "    return data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "40",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "req = client_ds.code.request_code_execution(compute_mean)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "41",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client.requests[0].approve()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "42",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "job = client_ds.code.compute_mean(data=data_low, blocking=False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "43",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "res = job.wait()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "44",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "assert isinstance(res.get(), np.ndarray)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "45",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# todo: add more data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "46",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if server.server_type.value == \"python\":\n",
+    "    server.land()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "47",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/tutorials/version-upgrades/custom_workerpool/1-dump-database-to-file.ipynb b/notebooks/tutorials/version-upgrades/custom_workerpool/1-dump-database-to-file.ipynb
new file mode 100644
index 00000000000..adca6f09a05
--- /dev/null
+++ b/notebooks/tutorials/version-upgrades/custom_workerpool/1-dump-database-to-file.ipynb
@@ -0,0 +1,169 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# stdlib\n",
+    "import os\n",
+    "from pathlib import Path\n",
+    "\n",
+    "# syft absolute\n",
+    "import syft as sy"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n",
+    "# os.environ[\"DEV_MODE\"] = \"True\"\n",
+    "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"\n",
+    "# os.environ[\"SERVER_PORT\"] = \"8081\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n",
+    "server_port = os.environ.get(\"SERVER_PORT\", \"auto\")\n",
+    "print(environment, server_port)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "server = sy.orchestra.launch(\n",
+    "    name=\"test_upgradability\",\n",
+    "    dev_mode=True,\n",
+    "    reset=False,\n",
+    "    port=server_port,\n",
+    "    migrate=False,\n",
+    "    n_consumers=2,\n",
+    "    create_producer=True,\n",
+    ")\n",
+    "\n",
+    "client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Check if this server has data on it\n",
+    "assert len(client.users.get_all()) == 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "migration_data = client.get_migration_data(include_blobs=True)\n",
+    "migration_data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "assert migration_data.includes_blobs\n",
+    "assert migration_data.num_action_objects > 0"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "migration_data_dir = Path(os.getenv(\"MIGRATION_DATA_DIR\", \".\"))\n",
+    "migration_data_dir.mkdir(exist_ok=True)\n",
+    "\n",
+    "blob_path = migration_data_dir / \"migration.blob\"\n",
+    "yaml_path = migration_data_dir / \"migration.yaml\"\n",
+    "\n",
+    "blob_path.unlink(missing_ok=True)\n",
+    "yaml_path.unlink(missing_ok=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "migration_data.save(blob_path, yaml_path=yaml_path)\n",
+    "\n",
+    "assert blob_path.exists()\n",
+    "assert yaml_path.exists()\n",
+    "\n",
+    "print(f\"Saved migration data to {str(blob_path.resolve())}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if server.server_type.value == \"python\":\n",
+    "    server.land()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "10",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/tutorials/version-upgrades/custom_workerpool/2-migrate-from-file.ipynb b/notebooks/tutorials/version-upgrades/custom_workerpool/2-migrate-from-file.ipynb
new file mode 100644
index 00000000000..fffab8d8667
--- /dev/null
+++ b/notebooks/tutorials/version-upgrades/custom_workerpool/2-migrate-from-file.ipynb
@@ -0,0 +1,323 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# stdlib\n",
+    "import os\n",
+    "from pathlib import Path\n",
+    "\n",
+    "# syft absolute\n",
+    "import syft as sy\n",
+    "from syft.service.code.user_code import UserCode\n",
+    "from syft.service.user.user import User\n",
+    "\n",
+    "print(\"upgrading to\", sy.__version__)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n",
+    "# os.environ[\"DEV_MODE\"] = \"True\"\n",
+    "# os.environ[\"TEST_EXTERNAL_REGISTRY\"] = \"k3d-registry.localhost:5800\"\n",
+    "# os.environ[\"SERVER_PORT\"] = \"8081\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "environment = os.environ.get(\"ORCHESTRA_DEPLOYMENT_TYPE\", \"python\")\n",
+    "server_port = os.environ.get(\"SERVER_PORT\", \"auto\")\n",
+    "print(environment, server_port)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3",
+   "metadata": {},
+   "source": [
+    "# Login"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "server = sy.orchestra.launch(\n",
+    "    name=\"test_upgradability\",\n",
+    "    dev_mode=True,\n",
+    "    reset=True,\n",
+    "    port=server_port,\n",
+    "    n_consumers=1,\n",
+    "    create_producer=True,\n",
+    ")\n",
+    "\n",
+    "client = server.login(email=\"info@openmined.org\", password=\"changethis\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Check if this is a new server\n",
+    "migration_data = client.get_migration_data()\n",
+    "\n",
+    "assert len(migration_data.store_objects[User]) == 1\n",
+    "assert UserCode not in migration_data.store_objects\n",
+    "assert not migration_data.includes_custom_workerpools"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6",
+   "metadata": {},
+   "source": [
+    "# Load migration data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "migration_data_dir = Path(os.getenv(\"MIGRATION_DATA_DIR\", \".\"))\n",
+    "blob_path = migration_data_dir / \"migration.blob\"\n",
+    "yaml_path = migration_data_dir / \"migration.yaml\"\n",
+    "\n",
+    "print(f\"Loading migration data from {str(blob_path.resolve())}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "res = client.load_migration_data(blob_path)\n",
+    "assert isinstance(res, sy.SyftSuccess), res.message"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9",
+   "metadata": {},
+   "source": [
+    "## Notes on workerpool upgrades\n",
+    "- We always skip default pool, since it is hardcoded as backend baseimage and cannot be changed by the user.\n",
+    "- Upgrade prebuilt pool: just enter your new tag.\n",
+    "- Upgrade syft-built pool: edit dockerfile string, rebuild.\n",
+    "- new pool is launched with same name, num_workers, upgraded image.\n",
+    "- mode=\"auto\" should only be used in testing scenarios, as it assumes all custom image tags use syft versioning"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "10",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# NOTE do not use mode=\"auto\" if you use custom image versioning.\n",
+    "\n",
+    "sy.upgrade_custom_workerpools(client, blob_path, mode=\"auto\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "11",
+   "metadata": {},
+   "source": [
+    "# Post migration tests"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "12",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "assert len(client.worker_pools.get_all()) == 3"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "13",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "assert len(client.users.get_all()) == 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "14",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client_ds = server.login(email=\"ds@openmined.org\", password=\"pw\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "15",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "req1 = client.requests[0]\n",
+    "req2 = client_ds.requests[0]\n",
+    "assert req1.status.name == \"APPROVED\" and req2.status.name == \"APPROVED\"\n",
+    "assert isinstance(req1._repr_html_(), str)\n",
+    "assert isinstance(req2._repr_html_(), str)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "16",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "jobs = client_ds.jobs\n",
+    "assert isinstance(jobs[0]._repr_html_(), str)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "17",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ds = client_ds.datasets\n",
+    "asset = ds[0].assets[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "18",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "res = client_ds.code.compute_mean(data=asset, blocking=False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "19",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "assert res.wait().get().shape == (100_000, 5)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "20",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "jobs = client_ds.jobs.get_all()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "21",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "job = jobs[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "22",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "job.logs()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "23",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "logs = job.logs(_print=False)\n",
+    "assert isinstance(logs, str)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "24",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if server.server_type.value == \"python\":\n",
+    "    server.land()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "25",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/packages/.dockerignore b/packages/.dockerignore
deleted file mode 100644
index ba9aa4b6829..00000000000
--- a/packages/.dockerignore
+++ /dev/null
@@ -1,11 +0,0 @@
-**/*.pyc
-
-grid/*
-!grid/backend
-
-syftcli
-
-syft/tests
-syft/README.md
-
-hagrid
\ No newline at end of file
diff --git a/packages/grid/README.md b/packages/grid/README.md
deleted file mode 100644
index c9dd9508514..00000000000
--- a/packages/grid/README.md
+++ /dev/null
@@ -1,791 +0,0 @@
-# grid
-
-## Backend Requirements
-
-- [Docker](https://www.docker.com/).
-- [Docker Compose](https://docs.docker.com/compose/install/).
-- [Poetry](https://python-poetry.org/) for Python package and environment management.
-
-## Frontend Requirements
-
-- Node.js (with `npm`).
-
-## Backend local development
-
-- Start the stack with Docker Compose:
-
-```bash
-docker-compose up -d
-```
-
-- Now you can open your browser and interact with these URLs:
-
-Frontend, built with Docker, with routes handled based on the path: http://localhost
-
-Backend, JSON based web API based on OpenAPI: http://localhost/api/
-
-Automatic interactive documentation with Swagger UI (from the OpenAPI backend): http://localhost/docs
-
-Alternative automatic documentation with ReDoc (from the OpenAPI backend): http://localhost/redoc
-
-PGAdmin, PostgreSQL web administration: http://localhost:5050
-
-Flower, administration of Celery tasks: http://localhost:5555
-
-Traefik UI, to see how the routes are being handled by the proxy: http://localhost:8090
-
-**Note**: The first time you start your stack, it might take a minute for it to be ready, while the backend waits for the database to be ready and configures everything. You can check the logs to monitor it.
-
-To check the logs, run:
-
-```bash
-docker-compose logs
-```
-
-To check the logs of a specific service, add the name of the service, e.g.:
-
-```bash
-docker-compose logs backend
-```
-
-If your Docker is not running in `localhost` (in which case the URLs above wouldn't work), check the sections below on **Development with Docker Toolbox** and **Development with a custom IP**.
-
-## Backend local development, additional details
-
-### General workflow
-
-By default, the dependencies are managed with [Poetry](https://python-poetry.org/); go there and install it.
-
-From `./backend/app/` you can install all the dependencies with:
-
-```console
-$ poetry install
-```
-
-Then you can start a shell session with the new environment with:
-
-```console
-$ poetry shell
-```
-
-Next, open your editor at `./backend/app/` (instead of the project root: `./`), so that you see an `./app/` directory with your code inside. That way, your editor will be able to find all the imports, etc. Make sure your editor uses the environment you just created with Poetry.
-
-Add and modify tasks to the Celery worker in `./backend/app/app/worker.py`.
-
-If you need to install any additional package to the worker, add it to the file `./backend/app/celeryworker.dockerfile`.
-
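-As a minimal sketch of what such a task can look like (the task name, payload, and the `app.core.celery_app` import path are assumptions based on the cookiecutter template, not verified against this project's code):
-
-```python
-# illustrative sketch only: the import path and task name are assumptions
-from app.core.celery_app import celery_app
-
-
-@celery_app.task(acks_late=True)
-def word_count(text: str) -> int:
-    # count whitespace-separated words in the submitted text
-    return len(text.split())
-```
-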
-### Docker Compose Override
-
-During development, you can change Docker Compose settings that will only affect the local development environment, in the file `docker-compose.override.yml`.
-
-The changes to that file only affect the local development environment, not the production environment. So, you can add "temporary" changes that help the development workflow.
-
-For example, the directory with the backend code is mounted as a Docker "host volume", mapping the code you change live to the directory inside the container. That allows you to test your changes right away, without having to build the Docker image again. It should only be done during development; for production, you should build the Docker image with a recent version of the backend code. But during development, it allows you to iterate very fast.
-
-There is also a command override that runs `/start-reload.sh` (included in the base image) instead of the default `/start.sh` (also included in the base image). It starts a single server process (instead of multiple, as it would be for production) and reloads the process whenever the code changes. Keep in mind that if you have a syntax error and save the Python file, it will break and exit, and the container will stop. After that, you can restart the container by fixing the error and running again:
-
-```console
-$ docker-compose up -d
-```
-
-There is also a commented-out `command` override; you can uncomment it and comment out the default one. It makes the backend container run a process that does "nothing", but keeps the container alive. That allows you to get inside your running container and execute commands inside, for example a Python interpreter to test installed dependencies, or start the development server that reloads when it detects changes, or start a Jupyter Notebook session.
-
-To get inside the container with a `bash` session you can start the stack with:
-
-```console
-$ docker-compose up -d
-```
-
-and then `exec` inside the running container:
-
-```console
-$ docker-compose exec backend bash
-```
-
-You should see an output like:
-
-```console
-root@7f2607af31c3:/app#
-```
-
-that means that you are in a `bash` session inside your container, as a `root` user, under the `/app` directory.
-
-There you can use the script `/start-reload.sh` to run the debug live reloading server. You can run that script from inside the container with:
-
-```console
-$ bash /start-reload.sh
-```
-
-...it will look like:
-
-```console
-root@7f2607af31c3:/app# bash /start-reload.sh
-```
-
-and then hit enter. That runs the live reloading server that auto reloads when it detects code changes.
-
-Nevertheless, if it doesn't detect a change but a syntax error, it will just stop with an error. But as the container is still alive and you are in a Bash session, you can quickly restart it after fixing the error, running the same command ("up arrow" and "Enter").
-
-...this previous detail is what makes it useful to have the container alive doing nothing and then, in a Bash session, make it run the live reload server.
-
-### Backend tests
-
-To test the backend run:
-
-```console
-$ DOMAIN=backend sh ./scripts/test.sh
-```
-
-The file `./scripts/test.sh` has the commands to generate a testing `docker-stack.yml` file, start the stack and test it.
-
-The tests run with Pytest, modify and add tests to `./backend/app/app/tests/`.
-
-If you use GitLab CI the tests will run automatically.
-
-#### Local tests
-
-Start the stack with this command:
-
-```Bash
-DOMAIN=backend sh ./scripts/test-local.sh
-```
-
-The `./backend/app` directory is mounted as a "host volume" inside the docker container (set in the file `docker-compose.dev.volumes.yml`).
-You can rerun the test on live code:
-
-```Bash
-docker-compose exec backend /app/tests-start.sh
-```
-
-#### Test running stack
-
-If your stack is already up and you just want to run the tests, you can use:
-
-```bash
-docker-compose exec backend /app/tests-start.sh
-```
-
-That `/app/tests-start.sh` script just calls `pytest` after making sure that the rest of the stack is running. If you need to pass extra arguments to `pytest`, you can pass them to that command and they will be forwarded.
-
-For example, to stop on first error:
-
-```bash
-docker-compose exec backend bash /app/tests-start.sh -x
-```
-
-#### Test Coverage
-
-Because the test scripts forward arguments to `pytest`, you can enable test coverage HTML report generation by passing `--cov-report=html`.
-
-To run the local tests with coverage HTML reports:
-
-```Bash
-DOMAIN=backend sh ./scripts/test-local.sh --cov-report=html
-```
-
-To run the tests in a running stack with coverage HTML reports:
-
-```bash
-docker-compose exec backend bash /app/tests-start.sh --cov-report=html
-```
-
-### Live development with Python Jupyter Notebooks
-
-If you know about Python [Jupyter Notebooks](http://jupyter.org/), you can take advantage of them during local development.
-
-The `docker-compose.override.yml` file sends a variable `env` with a value `dev` to the build process of the Docker image (during local development) and the `Dockerfile` has steps to then install and configure Jupyter inside your Docker container.
-
-So, you can enter into the running Docker container:
-
-```bash
-docker-compose exec backend bash
-```
-
-And use the environment variable `$JUPYTER` to run a Jupyter Notebook with everything configured to listen on the public port (so that you can use it from your browser).
-
-It will output something like:
-
-```console
-root@73e0ec1f1ae6:/app# $JUPYTER
-[I 12:02:09.975 NotebookApp] Writing notebook server cookie secret to /root/.local/share/jupyter/runtime/notebook_cookie_secret
-[I 12:02:10.317 NotebookApp] Serving notebooks from local directory: /app
-[I 12:02:10.317 NotebookApp] The Jupyter Notebook is running at:
-[I 12:02:10.317 NotebookApp] http://(73e0ec1f1ae6 or 127.0.0.1):8888/?token=f20939a41524d021fbfc62b31be8ea4dd9232913476f4397
-[I 12:02:10.317 NotebookApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
-[W 12:02:10.317 NotebookApp] No web browser found: could not locate runnable browser.
-[C 12:02:10.317 NotebookApp]
-
-    Copy/paste this URL into your browser when you connect for the first time,
-    to login with a token:
-        http://(73e0ec1f1ae6 or 127.0.0.1):8888/?token=f20939a41524d021fbfc62b31be8ea4dd9232913476f4397
-```
-
-You can copy that URL and modify the "host" to be `localhost` or the domain you are using for development (e.g. `local.dockertoolbox.tiangolo.com`). In the case above, it would be, e.g.:
-
-```
-http://localhost:8888/?token=f20939a41524d021fbfc62b31be8ea4dd9232913476f4397
-```
-
-and then open it in your browser.
-
-You will have a full Jupyter Notebook running inside your container that has direct access to your database by the container name (`db`), etc. So, you can just run sections of your backend code directly, for example with [VS Code Python Jupyter Interactive Window](https://code.visualstudio.com/docs/python/jupyter-support-py) or [Hydrogen](https://github.com/nteract/hydrogen).
-
-### Development with Docker Toolbox
-
-If you are using **Docker Toolbox** in Windows or macOS instead of **Docker for Windows** or **Docker for Mac**, Docker will be running in a VirtualBox Virtual Machine, and it will have a local IP different than `127.0.0.1`, which is the IP address for `localhost` in your machine.
-
-The address of your Docker Toolbox virtual machine would probably be `192.168.99.100` (that is the default).
-
-As this is a common case, the domain `local.dockertoolbox.tiangolo.com` points to that (private) IP, just to help with development (actually `dockertoolbox.tiangolo.com` and all its subdomains point to that IP). That way, you can start the stack in Docker Toolbox, and use that domain for development. You will be able to open that URL in Chrome and it will communicate with your local Docker Toolbox directly as if it was a cloud server, including CORS (Cross Origin Resource Sharing).
-
-If you used the default CORS enabled domains while generating the project, `local.dockertoolbox.tiangolo.com` was configured to be allowed. If you didn't, you will need to add it to the list in the variable `BACKEND_CORS_ORIGINS` in the `.env` file.
-
-To configure it in your stack, follow the section **Change the development "domain"** below, using the domain `local.dockertoolbox.tiangolo.com`.
-
-After performing those steps you should be able to open: http://local.dockertoolbox.tiangolo.com and it will be served by your stack in your Docker Toolbox virtual machine.
-
-Check all the corresponding available URLs in the section at the end.
-
-### Development in `localhost` with a custom domain
-
-You might want to use something different than `localhost` as the domain. For example, if you are having problems with cookies that need a subdomain, and Chrome is not allowing you to use `localhost`.
-
-In that case, you have two options: you can modify your system `hosts` file following the instructions below in **Development with a custom IP**, or you can just use `localhost.tiangolo.com`, which is set up to point to `localhost` (to the IP `127.0.0.1`) and all its subdomains too. And as it is an actual domain, the browsers will store the cookies you set during development, etc.
-
-If you used the default CORS enabled domains while generating the project, `localhost.tiangolo.com` was configured to be allowed. If you didn't, you will need to add it to the list in the variable `BACKEND_CORS_ORIGINS` in the `.env` file.
-
-To configure it in your stack, follow the section **Change the development "domain"** below, using the domain `localhost.tiangolo.com`.
-
-After performing those steps you should be able to open: http://localhost.tiangolo.com and it will be served by your stack in `localhost`.
-
-Check all the corresponding available URLs in the section at the end.
-
-### Development with a custom IP
-
-If you are running Docker in an IP address different than `127.0.0.1` (`localhost`) and `192.168.99.100` (the default of Docker Toolbox), you will need to perform some additional steps. That will be the case if you are running a custom Virtual Machine, a secondary Docker Toolbox or your Docker is located in a different machine in your network.
-
-In that case, you will need to use a fake local domain (`dev.grid.openmined.org`) and make your computer think that the domain is served by the custom IP (e.g. `192.168.99.150`).
-
-If you used the default CORS enabled domains, `dev.grid.openmined.org` was configured to be allowed. If you want a custom one, you need to add it to the list in the variable `BACKEND_CORS_ORIGINS` in the `.env` file.
-
-- Open your `hosts` file with administrative privileges using a text editor:
-
-  - **Note for Windows**: If you are in Windows, open the main Windows menu, search for "notepad", right click on it, and select the option "open as Administrator" or similar. Then click the "File" menu, "Open file", go to the directory `c:\Windows\System32\Drivers\etc\`, select the option to show "All files" instead of only "Text (.txt) files", and open the `hosts` file.
-  - **Note for Mac and Linux**: Your `hosts` file is probably located at `/etc/hosts`, you can edit it in a terminal running `sudo nano /etc/hosts`.
-
-- In addition to the contents it might have, add a new line with the custom IP (e.g. `192.168.99.150`), a space character, and your fake local domain: `dev.grid.openmined.org`.
-
-The new line might look like:
-
-```
-192.168.99.150    dev.grid.openmined.org
-```
-
-- Save the file.
-  - **Note for Windows**: Make sure you save the file as "All files", without an extension of `.txt`. By default, Windows tries to add the extension. Make sure the file is saved as is, without extension.
-
-...that will make your computer think that the fake local domain is served by that custom IP, so when you open that URL in your browser and it is asked to go to `dev.grid.openmined.org`, it will talk directly to your locally running server as if it were a remote server, while it is actually running on your computer.
-
-To configure it in your stack, follow the section **Change the development "domain"** below, using the domain `dev.grid.openmined.org`.
-
-After performing those steps you should be able to open: http://dev.grid.openmined.org and it will be served by your stack in `localhost`.
-
-Check all the corresponding available URLs in the section at the end.
-
-### Change the development "domain"
-
-If you need to use your local stack with a different domain than `localhost`, you need to make sure the domain you use points to the IP where your stack is set up. See the different ways to achieve that in the sections above (i.e. using Docker Toolbox with `local.dockertoolbox.tiangolo.com`, using `localhost.tiangolo.com` or using `dev.grid.openmined.org`).
-
-To simplify your Docker Compose setup, for example, so that the API docs (Swagger UI) knows where your API is, you should let it know you are using that domain for development. You will need to edit one line in two files.
-
-- Open the file located at `./.env`. It would have a line like:
-
-```
-DOMAIN=localhost
-```
-
-- Change it to the domain you are going to use, e.g.:
-
-```
-DOMAIN=localhost.tiangolo.com
-```
-
-That variable will be used by the Docker Compose files.
-
-- Now open the file located at `./frontend/.env`. It would have a line like:
-
-```
-VUE_APP_DOMAIN_DEV=localhost
-```
-
-- Change that line to the domain you are going to use, e.g.:
-
-```
-VUE_APP_DOMAIN_DEV=localhost.tiangolo.com
-```
-
-That variable will make your frontend communicate with that domain when interacting with your backend API, when the other variable `VUE_APP_ENV` is set to `development`.
-
-After changing the two lines, you can re-start your stack with:
-
-```bash
-docker-compose up -d
-```
-
-and check all the corresponding available URLs in the section at the end.
-
-## Frontend development
-
-- Enter the `frontend` directory, install the NPM packages and start the live server using the `npm` scripts:
-
-```bash
-cd frontend
-npm install
-npm run serve
-```
-
-Then open your browser at http://localhost:8080
-
-Notice that this live server is not running inside Docker, it is for local development, and that is the recommended workflow. Once you are happy with your frontend, you can build the frontend Docker image and start it, to test it in a production-like environment. But compiling the image at every change will not be as productive as running the local development server with live reload.
-
-Check the file `package.json` to see other available options.
-
-If you have Vue CLI installed, you can also run `vue ui` to control, configure, serve, and analyze your application using a nice local web user interface.
-
-If you are only developing the frontend (e.g. other team members are developing the backend) and there is a staging environment already deployed, you can make your local development code use that staging API instead of a full local Docker Compose stack.
-
-To do that, modify the file `./frontend/.env`, there's a section with:
-
-```
-VUE_APP_ENV=development
-# VUE_APP_ENV=staging
-```
-
-- Switch the comment, to:
-
-```
-# VUE_APP_ENV=development
-VUE_APP_ENV=staging
-```
-
-### Removing the frontend
-
-If you are developing an API-only app and want to remove the frontend, you can do it easily:
-
-- Remove the `./frontend` directory.
-- In the `docker-compose.yml` file, remove the whole service / section `frontend`.
-- In the `docker-compose.override.yml` file, remove the whole service / section `frontend`.
-
-Done, you have a frontend-less (api-only) app. 🔥 🚀
-
----
-
-If you want, you can also remove the `FRONTEND` environment variables from:
-
-- `.env`
-- `.gitlab-ci.yml`
-- `./scripts/*.sh`
-
-But that would only be to clean them up; leaving them won't really have any effect either way.
-
-## Deployment
-
-You can deploy the stack to a Docker Swarm mode cluster with a main Traefik proxy, set up using the ideas from DockerSwarm.rocks, to get automatic HTTPS certificates, etc.
-
-And you can use CI (continuous integration) systems to do it automatically.
-
-But you have to configure a couple things first.
-
-### Traefik network
-
-This stack expects the public Traefik network to be named `traefik-public`, just as in the tutorials in DockerSwarm.rocks.
-
-If you need to use a different Traefik public network name, update it in the `docker-compose.yml` files, in the section:
-
-```YAML
-networks:
-  traefik-public:
-    external: true
-```
-
-Change `traefik-public` to the name of the used Traefik network. And then update it in the file `.env`:
-
-```bash
-TRAEFIK_PUBLIC_NETWORK=traefik-public
-```
-
-### Persisting Docker named volumes
-
-You need to make sure that each service (Docker container) that uses a volume is always deployed to the same Docker "node" in the cluster, that way it will preserve the data. Otherwise, it could be deployed to a different node each time, and each time the volume would be created in that new node before starting the service. As a result, it would look like your service was starting from scratch every time, losing all the previous data.
-
-That's especially important for a service running a database. But the same problem would apply if you were saving files in your main backend service (for example, if those files were uploaded by your users, or if they were created by your system).
-
-To solve that, you can put constraints in the services that use one or more data volumes (like databases) to make them be deployed to a Docker node with a specific label. And of course, you need to have that label assigned to one (only one) of your nodes.
-
-#### Adding services with volumes
-
-For each service that uses a volume (databases, services with uploaded files, etc) you should have a label constraint in your `docker-compose.yml` file.
-
-To make sure that your labels are unique per volume per stack (for example, that they are not the same for `prod` and `stag`) you should prefix them with the name of your stack and then use the same name of the volume.
-
-Then you need to have those constraints in your `docker-compose.yml` file for the services that need to be fixed with each volume.
-
-To be able to use different environments, like `prod` and `stag`, you should pass the name of the stack as an environment variable. Like:
-
-```bash
-STACK_NAME=stag-grid-openmined-org sh ./scripts/deploy.sh
-```
-
-To use and expand that environment variable inside the `docker-compose.yml` files you can add the constraints to the services like:
-
-```yaml
-version: "3"
-services:
-  db:
-    volumes:
-      - "app-db-data:/var/lib/postgresql/data/pgdata"
-    deploy:
-      placement:
-        constraints:
-          - node.labels.${STACK_NAME?Variable not set}.app-db-data == true
-```
-
-Note the `${STACK_NAME?Variable not set}`. In the script `./scripts/deploy.sh`, the `docker-compose.yml` would be converted, and saved to a file `docker-stack.yml` containing:
-
-```yaml
-version: "3"
-services:
-  db:
-    volumes:
-      - "app-db-data:/var/lib/postgresql/data/pgdata"
-    deploy:
-      placement:
-        constraints:
-          - node.labels.grid-openmined-org.app-db-data == true
-```
-
-**Note**: The `${STACK_NAME?Variable not set}` means "use the environment variable `STACK_NAME`, but if it is not set, show an error `Variable not set`".
-
-If you add more volumes to your stack, you need to make sure you add the corresponding constraints to the services that use that named volume.
-
-Then you have to create those labels in some nodes in your Docker Swarm mode cluster. You can use `docker-auto-labels` to do it automatically.
-
-#### `docker-auto-labels`
-
-You can use [`docker-auto-labels`](https://github.com/tiangolo/docker-auto-labels) to automatically read the placement constraint labels in your Docker stack (Docker Compose file) and assign them to a random Docker node in your Swarm mode cluster if those labels don't exist yet.
-
-To do that, you can install `docker-auto-labels`:
-
-```bash
-pip install docker-auto-labels
-```
-
-And then run it passing your `docker-stack.yml` file as a parameter:
-
-```bash
-docker-auto-labels docker-stack.yml
-```
-
-You can run that command every time you deploy, right before deploying, as it doesn't modify anything if the required labels already exist.
-
-#### (Optionally) adding labels manually
-
-If you don't want to use `docker-auto-labels` or for any reason you want to manually assign the constraint labels to specific nodes in your Docker Swarm mode cluster, you can do the following:
-
-- First, connect via SSH to your Docker Swarm mode cluster.
-
-- Then check the available nodes with:
-
-```console
-$ docker node ls
-
-
-// you would see an output like:
-
-ID                            HOSTNAME               STATUS              AVAILABILITY        MANAGER STATUS
-nfa3d4df2df34as2fd34230rm *   dog.example.com        Ready               Active              Reachable
-2c2sd2342asdfasd42342304e     cat.example.com        Ready               Active              Leader
-c4sdf2342asdfasd4234234ii     snake.example.com      Ready               Active              Reachable
-```
-
-Then choose a node from the list. For example, `dog.example.com`.
-
-- Add the label to that node. Use as label the name of the stack you are deploying followed by a dot (`.`) followed by the named volume, and as value, just `true`, e.g.:
-
-```bash
-docker node update --label-add grid-openmined-org.app-db-data=true dog.example.com
-```
-
-- Then you need to do the same for each stack version you have. For example, for staging you could do:
-
-```bash
-docker node update --label-add stag-grid-openmined-org.app-db-data=true cat.example.com
-```
-
-### Deploy to a Docker Swarm mode cluster
-
-There are 3 steps:
-
-1. **Build** your app images
-2. Optionally, **push** your custom images to a Docker Registry
-3. **Deploy** your stack
-
----
-
-Here are the steps in detail:
-
-1. **Build your app images**
-
-- Set these environment variables, right before the next command:
-  - `TAG=prod`
-  - `FRONTEND_ENV=production`
-- Use the provided `scripts/build.sh` file with those environment variables:
-
-```bash
-TAG=prod FRONTEND_ENV=production bash ./scripts/build.sh
-```
-
-2. **Optionally, push your images to a Docker Registry**
-
-**Note**: if the deployment Docker Swarm mode "cluster" has more than one server, you will have to push the images to a registry or build the images in each server, so that when each of the servers in your cluster tries to start the containers it can get the Docker images for them, pulling them from a Docker Registry or because it has them already built locally.
-
-If you are using a registry and pushing your images, you can omit running the previous script and instead use this one, in a single shot.
-
-- Set these environment variables:
-  - `TAG=prod`
-  - `FRONTEND_ENV=production`
-- Use the provided `scripts/build-push.sh` file with those environment variables:
-
-```bash
-TAG=prod FRONTEND_ENV=production bash ./scripts/build-push.sh
-```
-
-3. **Deploy your stack**
-
-- Set these environment variables:
-  - `DOMAIN=grid.openmined.org`
-  - `TRAEFIK_TAG=grid.openmined.org`
-  - `STACK_NAME=grid-openmined-org`
-  - `TAG=prod`
-- Use the provided `scripts/deploy.sh` file with those environment variables:
-
-```bash
-DOMAIN=grid.openmined.org \
-TRAEFIK_TAG=grid.openmined.org \
-STACK_NAME=grid-openmined-org \
-TAG=prod \
-bash ./scripts/deploy.sh
-```
-
----
-
-If you change your mind and, for example, want to deploy everything to a different domain, you only have to change the `DOMAIN` environment variable in the previous commands. If you wanted to add a different version / environment of your stack, like "`preproduction`", you would only have to set `TAG=preproduction` in your command and update these other environment variables accordingly. And it would all work, that way you could have different environments and deployments of the same app in the same cluster.
-
-#### Deployment Technical Details
-
-Building and pushing is done with the `docker-compose.yml` file, using the `docker-compose` command. The file `docker-compose.yml` uses the file `.env` with default environment variables. And the scripts set some additional environment variables as well.
-
-The deployment requires using `docker stack` instead of `docker-swarm`, and it can't read environment variables or `.env` files. Because of that, the `deploy.sh` script generates a file `docker-stack.yml` with the configurations from `docker-compose.yml` and injecting the environment variables in it. And then uses it to deploy the stack.
-
-You can do the process by hand based on those same scripts if you wanted. The general structure is like this:
-
-```bash
-# Use the environment variables passed to this script, as TAG and FRONTEND_ENV
-# And re-create those variables as environment variables for the next command
-TAG=${TAG?Variable not set} \
-# Set the environment variable FRONTEND_ENV to the same value passed to this script with
-# a default value of "production" if nothing else was passed
-FRONTEND_ENV=${FRONTEND_ENV-production?Variable not set} \
-# The actual command that does the work: docker-compose
-docker-compose \
-# Pass the file that should be used, setting explicitly docker-compose.yml avoids the
-# default of also using docker-compose.override.yml
--f docker-compose.yml \
-# Use the docker-compose sub command named "config", it just uses the docker-compose.yml
-# file passed to it and prints their combined contents
-# Put those contents in a file "docker-stack.yml", with ">"
-config > docker-stack.yml
-
-# The previous only generated a docker-stack.yml file,
-# but didn't do anything with it yet
-
-# docker-auto-labels makes sure the labels used for constraints exist in the cluster
-docker-auto-labels docker-stack.yml
-
-# Now this command uses that same file to deploy it
-docker stack deploy -c docker-stack.yml --with-registry-auth "${STACK_NAME?Variable not set}"
-```
-
-### Continuous Integration / Continuous Delivery
-
-If you use GitLab CI, the included `.gitlab-ci.yml` can automatically deploy it. You may need to update it according to your GitLab configurations.
-
-If you use any other CI / CD provider, you can base your deployment from that `.gitlab-ci.yml` file, as all the actual script steps are performed in `bash` scripts that you can easily re-use.
-
-GitLab CI is configured assuming 2 environments following GitLab flow:
-
-- `prod` (production) from the `production` branch.
-- `stag` (staging) from the `master` branch.
-
-If you need to add more environments, for example, you could imagine using a client-approved `preprod` branch, you can just copy the configurations in `.gitlab-ci.yml` for `stag` and rename the corresponding variables. The Docker Compose file and environment variables are configured to support as many environments as you need, so that you only need to modify `.gitlab-ci.yml` (or whichever CI system configuration you are using).
-
-## Docker Compose files and env vars
-
-There is a main `docker-compose.yml` file with all the configurations that apply to the whole stack, it is used automatically by `docker-compose`.
-
-And there's also a `docker-compose.override.yml` with overrides for development, for example to mount the source code as a volume. It is used automatically by `docker-compose` to apply overrides on top of `docker-compose.yml`.
-
-These Docker Compose files use the `.env` file containing configurations to be injected as environment variables in the containers.
-
-They also use some additional configurations taken from environment variables set in the scripts before calling the `docker-compose` command.
-
-It is all designed to support several "stages", like development, building, testing, and deployment. Also, allowing the deployment to different environments like staging and production (and you can add more environments very easily).
-
-They are designed to have the minimum repetition of code and configurations, so that if you need to change something, you have to change it in the minimum amount of places. That's why files use environment variables that get auto-expanded. That way, if for example, you want to use a different domain, you can call the `docker-compose` command with a different `DOMAIN` environment variable instead of having to change the domain in several places inside the Docker Compose files.
-
-Also, if you want to have another deployment environment, say `preprod`, you just have to change environment variables, but you can keep using the same Docker Compose files.
-
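-For example, a sketch of deploying the same stack as a `preprod` environment could look like the following (the domain and stack name below are illustrative values, not existing deployments):
-
-```bash
-# illustrative values only
-DOMAIN=preprod.grid.openmined.org \
-TRAEFIK_TAG=preprod.grid.openmined.org \
-STACK_NAME=preprod-grid-openmined-org \
-TAG=preprod \
-bash ./scripts/deploy.sh
-```
-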
-### The .env file
-
-The `.env` file is the one that contains all your configurations, generated keys and passwords, etc.
-
-Depending on your workflow, you could want to exclude it from Git, for example if your project is public. In that case, you would have to make sure to set up a way for your CI tools to obtain it while building or deploying your project.
-
-One way to do it could be to add each environment variable to your CI/CD system, and update the `docker-compose.yml` file to read that specific env var instead of reading the `.env` file.
-
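-A minimal sketch of that approach (the service and variable names are assumptions, not taken from this project's Compose files):
-
-```yaml
-# illustrative sketch: read SECRET_KEY from the CI/CD environment
-# instead of the .env file
-services:
-  backend:
-    environment:
-      - SECRET_KEY=${SECRET_KEY?Variable not set}
-```
-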
-## URLs
-
-These are the URLs that will be used and generated by the project.
-
-### Production URLs
-
-Production URLs, from the branch `production`.
-
-Frontend: https://grid.openmined.org
-
-Backend: https://grid.openmined.org/api/
-
-Automatic Interactive Docs (Swagger UI): https://grid.openmined.org/docs
-
-Automatic Alternative Docs (ReDoc): https://grid.openmined.org/redoc
-
-PGAdmin: https://pgadmin.grid.openmined.org
-
-Flower: https://flower.grid.openmined.org
-
-### Staging URLs
-
-Staging URLs, from the branch `master`.
-
-Frontend: https://stag.grid.openmined.org
-
-Backend: https://stag.grid.openmined.org/api/
-
-Automatic Interactive Docs (Swagger UI): https://stag.grid.openmined.org/docs
-
-Automatic Alternative Docs (ReDoc): https://stag.grid.openmined.org/redoc
-
-PGAdmin: https://pgadmin.stag.grid.openmined.org
-
-Flower: https://flower.stag.grid.openmined.org
-
-### Development URLs
-
-Development URLs, for local development.
-
-Frontend: http://localhost
-
-Backend: http://localhost/api/
-
-Automatic Interactive Docs (Swagger UI): https://localhost/docs
-
-Automatic Alternative Docs (ReDoc): https://localhost/redoc
-
-PGAdmin: http://localhost:5050
-
-Flower: http://localhost:5555
-
-Traefik UI: http://localhost:8090
-
-### Development with Docker Toolbox URLs
-
-Development URLs, for local development.
-
-Frontend: http://local.dockertoolbox.tiangolo.com
-
-Backend: http://local.dockertoolbox.tiangolo.com/api/
-
-Automatic Interactive Docs (Swagger UI): https://local.dockertoolbox.tiangolo.com/docs
-
-Automatic Alternative Docs (ReDoc): https://local.dockertoolbox.tiangolo.com/redoc
-
-PGAdmin: http://local.dockertoolbox.tiangolo.com:5050
-
-Flower: http://local.dockertoolbox.tiangolo.com:5555
-
-Traefik UI: http://local.dockertoolbox.tiangolo.com:8090
-
-### Development with a custom IP URLs
-
-Development URLs, for local development.
-
-Frontend: http://dev.grid.openmined.org
-
-Backend: http://dev.grid.openmined.org/api/
-
-Automatic Interactive Docs (Swagger UI): https://dev.grid.openmined.org/docs
-
-Automatic Alternative Docs (ReDoc): https://dev.grid.openmined.org/redoc
-
-PGAdmin: http://dev.grid.openmined.org:5050
-
-Flower: http://dev.grid.openmined.org:5555
-
-Traefik UI: http://dev.grid.openmined.org:8090
-
-### Development in localhost with a custom domain URLs
-
-Development URLs, for local development.
-
-Frontend: http://localhost.tiangolo.com
-
-Backend: http://localhost.tiangolo.com/api/
-
-Automatic Interactive Docs (Swagger UI): https://localhost.tiangolo.com/docs
-
-Automatic Alternative Docs (ReDoc): https://localhost.tiangolo.com/redoc
-
-PGAdmin: http://localhost.tiangolo.com:5050
-
-Flower: http://localhost.tiangolo.com:5555
-
-Traefik UI: http://localhost.tiangolo.com:8090
-
-## Project generation and updating, or re-generating
-
-This project was generated using https://github.com/tiangolo/full-stack-fastapi-postgresql with:
-
-```bash
-pip install cookiecutter
-cookiecutter https://github.com/tiangolo/full-stack-fastapi-postgresql
-```
-
-You can check the variables used during generation in the file `cookiecutter-config-file.yml`.
-
-You can generate the project again with the same configurations used the first time.
-
-That would be useful if, for example, the project generator (`tiangolo/full-stack-fastapi-postgresql`) was updated and you wanted to integrate or review the changes.
-
-You could generate a new project with the same configurations as this one in a parallel directory. And compare the differences between the two, without having to overwrite your current code but being able to use the same variables used for your current project.
-
-To achieve that, the generated project includes the file `cookiecutter-config-file.yml` with the current variables used.
-
-You can use that file while generating a new project to reuse all those variables.
-
-For example, run:
-
-```console
-$ cookiecutter --config-file ./cookiecutter-config-file.yml --output-dir ../project-copy https://github.com/tiangolo/full-stack-fastapi-postgresql
-```
-
-That will use the file `cookiecutter-config-file.yml` in the current directory (in this project) to generate a new project inside a sibling directory `project-copy`.
diff --git a/packages/grid/VERSION b/packages/grid/VERSION
index 7e08b6f2c0c..368606c3066 100644
--- a/packages/grid/VERSION
+++ b/packages/grid/VERSION
@@ -1,5 +1,5 @@
 # Mono Repo Global Version
-__version__ = "0.8.6-beta.1"
+__version__ = "0.9.6-beta.6"
 # elsewhere we can call this file: `python VERSION` and simply take the stdout
 
 # stdlib
diff --git a/packages/grid/Vagrantfile b/packages/grid/Vagrantfile
deleted file mode 100644
index 04ab6f7a212..00000000000
--- a/packages/grid/Vagrantfile
+++ /dev/null
@@ -1,57 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-  config.ssh.insert_key = true
-  config.vm.provision :ansible do |ansible|
-    ansible.extra_vars = { vagrant: true}
-    ansible.raw_arguments = Shellwords.shellsplit(ENV['ANSIBLE_ARGS']) if ENV['ANSIBLE_ARGS']
-    ansible.compatibility_mode = "auto"
-    ansible.playbook = "./ansible/site.yml"
-    ansible.groups = {
-      "domain" => [
-        "ubuntu",
-      ],
-    }
-  end
-
-  config.vm.define "ubuntu-22-04-arm64" do |node|
-    node.vm.box = "bento/ubuntu-22.04-arm64"
-
-    node.vm.box_check_update = false
-
-    node.vm.hostname = "ubuntu-22-04-arm64.openmined.grid"
-    node.vm.network :private_network, ip: "192.168.56.2"
-
-    node.vm.synced_folder "../../", "/home/om/PySyft",
-      mount_options: ["dmode=775,fmode=774"]
-
-    node.vm.provider "parallels" do |vb, override|
-      vb.memory = "8096"
-      vb.cpus = "4"
-      vb.name = "ubuntu-22-04-arm64"
-      override.vm.synced_folder "../../", "/home/om/PySyft", owner: "vagrant", group: "vagrant", create: true, mount_options: [ "share" ]
-    end
-  end
-
-  config.vm.define "ubuntu-22-04-x86" do |node|
-    node.vm.box = "bento/ubuntu-22.04"
-
-    node.vm.box_check_update = false
-
-    node.vm.hostname = "ubuntu-22-04-x86.openmined.grid"
-    node.vm.network :private_network, ip: "192.168.56.2"
-
-    node.vm.synced_folder "../../", "/home/om/PySyft",
-      mount_options: ["dmode=775,fmode=774"]
-
-    node.vm.provider "virtualbox" do |vb|
-      vb.memory = "4096"
-      vb.cpus = "2"
-      vb.name = "ubuntu-22-04-x86"
-    end
-  end
-
-end
diff --git a/packages/grid/ansible.cfg b/packages/grid/ansible.cfg
deleted file mode 100644
index 007cefdba32..00000000000
--- a/packages/grid/ansible.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-[defaults]
-# allow_world_readable_tmpfiles=true
-pipelining = True
-host_key_checking = false
-interpreter_python = auto
diff --git a/packages/grid/ansible/group_vars/all/vars.yml b/packages/grid/ansible/group_vars/all/vars.yml
deleted file mode 100755
index bebf2f16bb9..00000000000
--- a/packages/grid/ansible/group_vars/all/vars.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-om_user: om
-om_group: om
-om_homedir: "/home/{{ om_user }}"
-syft_dir: "/home/om/PySyft"
-build_dir: "/home/om/build"
-github_repo: OpenMined/PySyft.git
-repo_branch: "dev"
-docker_compose_plugin_dir: ".docker/cli-plugins"
-node_name: node
-node_type: domain
-root_user: root
-root_homedir: "/{{ root_user }}"
-docker_compose_url_x86: https://github.com/docker/compose/releases/download/v2.17.1/docker-compose-linux-x86_64
-docker_compose_url_arm64: https://github.com/docker/compose/releases/download/v2.17.1/docker-compose-linux-aarch64
-tls: "false"
-release: "production"
-cert_store_path: "{{ om_homedir }}/certs"
-upload_tls_key: ""
-upload_tls_cert: ""
-install: "true"
-jupyter: "false"
-docker_tag: "local"
-node_side_type: "high"
-root_user_email: "info@openmined.org"
-root_user_password: "changethis"
diff --git a/packages/grid/ansible/roles/aa_demo/tasks/main.yml b/packages/grid/ansible/roles/aa_demo/tasks/main.yml
deleted file mode 100644
index 81b62321fe8..00000000000
--- a/packages/grid/ansible/roles/aa_demo/tasks/main.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: Install System Packages
-  package:
-    name: "{{ item }}"
-    state: present
-    autoclean: yes
-    update_cache: yes
-  loop:
-    - python3-pip
-  when: aa_demo is defined and aa_demo == "true"
-
-- name: Install HAGrid
-  shell: "runuser -l {{ om_user }} -c 'pip install -U hagrid'"
-  become: yes
-  when: aa_demo is defined and aa_demo == "true"
-
-- name: Install Syft
-  shell: "runuser -l {{ om_user }} -c 'pip install -U syft'"
-  become: yes
-  when: aa_demo is defined and aa_demo == "true"
-
-- name: Create HAGrid PySyft src
-  file:
-    path: "{{ syft_dir }}/.tox/syft.jupyter/lib/python3.8/site-packages/hagrid"
-    state: directory
-    mode: "0775"
-    owner: "{{ om_user }}"
-    group: "{{ om_user }}"
-  when: aa_demo is defined and aa_demo == "true"
-
-- name: Clone PySyft Repo
-  git:
-    repo: "https://github.com/{{ github_repo }}"
-    dest: "{{ syft_dir }}/.tox/syft.jupyter/lib/python3.8/site-packages/hagrid/PySyft"
-    version: "{{ repo_branch }}"
-    force: yes
-  become_user: "{{ om_user }}"
-  ignore_errors: yes
-  when: aa_demo is defined and aa_demo == "true"
-
-- name: Kill Docker Containers
-  shell: "docker rm $(docker ps -qa) --force || true"
-  become: yes
-  ignore_errors: yes
-  when: aa_demo is defined and aa_demo == "true"
-
-- name: Start Docker Containers
-  shell:
-    cmd: runuser -l {{ om_user }} -c 'hagrid launch domain to docker:80 --tag=latest'
-  become: yes
-  when: aa_demo is defined and aa_demo == "true"
-
-- name: Kill Docker Containers
-  shell: "docker rm $(docker ps -qa) --force || true"
-  become: yes
-  ignore_errors: yes
-  when: aa_demo is defined and aa_demo == "true"
diff --git a/packages/grid/ansible/roles/containers/handlers/main.yml b/packages/grid/ansible/roles/containers/handlers/main.yml
deleted file mode 100755
index 2965b21f99b..00000000000
--- a/packages/grid/ansible/roles/containers/handlers/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: restart docker
-  service:
-    name: docker
-    state: restarted
-    daemon_reload: yes
diff --git a/packages/grid/ansible/roles/containers/tasks/containers.yml b/packages/grid/ansible/roles/containers/tasks/containers.yml
deleted file mode 100755
index 123caea95b5..00000000000
--- a/packages/grid/ansible/roles/containers/tasks/containers.yml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-# - name: Build Docker Images
-#   shell: "docker compose build"
-#   args:
-#     chdir: "{{ build_dir }}/packages/grid"
-#   become_user: "{{ om_user }}"
-#   when: vagrant is not defined and install == "true" and docker_tag == "local"
-
-# - name: Build Docker Images
-#   shell: "docker compose build"
-#   args:
-#     chdir: "{{ syft_dir }}/packages/grid"
-#   become_user: "{{ om_user }}"
-#   when: vagrant is defined and install == "true" and docker_tag == "local"
-
-- name: Remove old Docker Images
-  shell: "docker rmi $(docker images -qa -f 'dangling=true') || true"
-  become_user: "{{ om_user }}"
-  ignore_errors: yes
-  when: install == "true" and docker_tag == "local"
-
-# - name: Remove Docker Volumes
-#   shell: "docker volume prune -f"
-#   become_user: "{{ om_user }}"
-#   ignore_errors: yes
-#   when: docker_volume_destroy is defined
-
-- name: Restart Docker Service
-  service:
-    name: docker
-    state: restarted
-    daemon_reload: yes
-
-- name: Start Docker Containers
-  shell:
-    cmd: runuser -l {{ om_user }} -c 'hagrid launch {{ node_name }} {{ node_type }} to docker:80 --release={{ release }} --tag={{ docker_tag }} --set-root-email={{ root_user_email }} --set-root-password={{ root_user_password }}'
-  become: yes
-  when: tls == "false" and install == "true" and node_side_type == "high"
-
-- name: Start Low Side Docker Containers
-  shell:
-    cmd: runuser -l {{ om_user }} -c 'hagrid launch {{ node_name }} {{ node_type }} to docker:80 --release={{ release }} --tag={{ docker_tag }} --low-side --set-root-email={{ root_user_email }} --set-root-password={{ root_user_password }}'
-  become: yes
-  when: tls == "false" and install == "true" and node_side_type == "low"
-
-- name: Start Docker Containers with TLS
-  shell:
-    cmd: runuser -l {{ om_user }} -c 'hagrid launch {{ node_name }} {{ node_type }} to docker:80 --release={{ release }} --tag={{ docker_tag }} --set-root-email={{ root_user_email }} --set-root-password={{ root_user_password }} --tls --cert-store-path={{ cert_store_path }}'
-  become: yes
-  when: tls == "true" and install == "true" and node_side_type == "high"
-
-- name: Start Low Docker Containers with TLS
-  shell:
-    cmd: runuser -l {{ om_user }} -c 'hagrid launch {{ node_name }} {{ node_type }} to docker:80 --release={{ release }} --tag={{ docker_tag }} --low-side --set-root-email={{ root_user_email }} --set-root-password={{ root_user_password }} --tls --cert-store-path={{ cert_store_path }}'
-  become: yes
-  when: tls == "true" and install == "true" and node_side_type == "low"
diff --git a/packages/grid/ansible/roles/containers/tasks/hagrid.yml b/packages/grid/ansible/roles/containers/tasks/hagrid.yml
deleted file mode 100755
index b715e9e6d96..00000000000
--- a/packages/grid/ansible/roles/containers/tasks/hagrid.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Add /home/{{ om_user }}/.local/bin to PATH in .profile
-  ansible.builtin.lineinfile:
-    create: true
-    path: "/home/{{ om_user }}/.profile"
-    line: 'export PATH="$HOME/.local/bin:$PATH"'
-    insertafter: EOF
-    state: present
-  become: yes
-  become_user: om
-  become_method: sudo
-
-- name: Install HAGrid
-  shell: "runuser -l {{ om_user }} -c 'pip install -e {{ build_dir }}/packages/hagrid'"
-  become: yes
-  when: vagrant is not defined and install == "true"
-
-- name: Install HAGrid
-  shell: "runuser -l {{ om_user }} -c 'pip install -e {{ syft_dir }}/packages/hagrid'"
-  become: yes
-  when: vagrant is defined and install == "true"
diff --git a/packages/grid/ansible/roles/containers/tasks/main.yml b/packages/grid/ansible/roles/containers/tasks/main.yml
deleted file mode 100755
index 154d4933d37..00000000000
--- a/packages/grid/ansible/roles/containers/tasks/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- import_tasks: src.yml
-- import_tasks: hagrid.yml
-- import_tasks: tls.yml
-- import_tasks: containers.yml
diff --git a/packages/grid/ansible/roles/containers/tasks/src.yml b/packages/grid/ansible/roles/containers/tasks/src.yml
deleted file mode 100644
index 1096fb4eced..00000000000
--- a/packages/grid/ansible/roles/containers/tasks/src.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: Kill Docker Containers
-  shell: "docker rm $(docker ps -qa) --force || true"
-  become_user: "{{ om_user }}"
-  ignore_errors: yes
-
-- name: Check Build Directory
-  stat:
-    path: "{{ build_dir }}"
-    get_checksum: no
-    get_md5: no
-    mime: no
-  register: build_dir_exists
-
-- name: Delete build directory
-  shell: "rm -rf {{ build_dir }} || true"
-  become: yes
-  ignore_errors: yes
-  when: build_dir_exists.stat.exists == True and vagrant is not defined and install == "true"
-
-- name: Copy code checkout to build
-  ansible.builtin.copy:
-    src: "{{ syft_dir }}/"
-    dest: "{{ build_dir }}"
-    force: yes
-    remote_src: yes
-    owner: "{{ om_user }}"
-    group: "{{ om_user }}"
-  become_user: "{{ om_user }}"
-  ignore_errors: yes
-  when: vagrant is not defined and install == "true"
diff --git a/packages/grid/ansible/roles/containers/tasks/tls.yml b/packages/grid/ansible/roles/containers/tasks/tls.yml
deleted file mode 100644
index 10910c9cce9..00000000000
--- a/packages/grid/ansible/roles/containers/tasks/tls.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# these should only be run from your host to the target machine not during localhost
-- name: Create cert_store_path
-  file:
-    path: "{{ cert_store_path }}"
-    state: directory
-    mode: 0600
-    owner: "{{ om_user }}"
-    group: "{{ om_group }}"
-  when: install == "true"
-
-- name: Install key.pem
-  copy:
-    src: "{{ upload_tls_key }}"
-    dest: "{{ cert_store_path }}/key.pem"
-    owner: "{{ om_user }}"
-    group: "{{ om_user }}"
-    mode: 0600
-  when: upload_tls_key != "" and install == "true"
-
-- name: Install cert.pem
-  copy:
-    src: "{{ upload_tls_cert }}"
-    dest: "{{ cert_store_path }}/cert.pem"
-    owner: "{{ om_user }}"
-    group: "{{ om_user }}"
-    mode: 0600
-  when: upload_tls_cert != "" and install == "true"
diff --git a/packages/grid/ansible/roles/jupyter/tasks/main.yml b/packages/grid/ansible/roles/jupyter/tasks/main.yml
deleted file mode 100644
index 929f975b678..00000000000
--- a/packages/grid/ansible/roles/jupyter/tasks/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Install Tox
-  shell: "runuser -l {{ om_user }} -c 'pip install -U tox'"
-  become: yes
-  when: vagrant is not defined and jupyter == "true"
-
-- name: Keep Jupyter Notebooks server running
-  ansible.builtin.cron:
-    disabled: "{{ (jupyter == 'true') | ternary('false', 'true') }}"
-    name: "Jupyter Notebooks server"
-    job: "{{ syft_dir }}/packages/grid/scripts/jupyter.sh {{ syft_dir }} {{ om_user }} {{ jupyter_token }}"
-  become: yes
-  when: vagrant is not defined and jupyter == "true"
diff --git a/packages/grid/ansible/roles/network/tasks/main.yml b/packages/grid/ansible/roles/network/tasks/main.yml
deleted file mode 100644
index 81ac22ceff4..00000000000
--- a/packages/grid/ansible/roles/network/tasks/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# ---
-# - name: Display all variables/facts known for a host
-#   ansible.builtin.debug:
-#     var: hostvars[inventory_hostname]
-#     verbosity: 1
-
-# - name: Network Stuff
-#   shell: "echo network_stuff"
-#   become_user: "{{ om_user }}"
-#   ignore_errors: yes
-#   when: "'network' in group_names"
-
-# - name: Domain Stuff
-#   shell: "echo domain_stuff"
-#   become_user: "{{ om_user }}"
-#   ignore_errors: yes
-#   when: "'domain' in group_names"
-# ## todo
-# # apt install wireguard
diff --git a/packages/grid/ansible/roles/node/handlers/main.yml b/packages/grid/ansible/roles/node/handlers/main.yml
deleted file mode 100755
index 2965b21f99b..00000000000
--- a/packages/grid/ansible/roles/node/handlers/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: restart docker
-  service:
-    name: docker
-    state: restarted
-    daemon_reload: yes
diff --git a/packages/grid/ansible/roles/node/tasks/docker.yml b/packages/grid/ansible/roles/node/tasks/docker.yml
deleted file mode 100755
index 49aa2c335b4..00000000000
--- a/packages/grid/ansible/roles/node/tasks/docker.yml
+++ /dev/null
@@ -1,105 +0,0 @@
----
-- name: Install Docker GPG Key
-  apt_key:
-    id: 7EA0A9C3F273FCD8
-    url: https://download.docker.com/linux/ubuntu/gpg
-    state: present
-
-- name: Install Docker Repo
-  apt_repository:
-    repo: deb https://download.docker.com/linux/ubuntu focal stable
-    state: present
-
-- name: Install Docker Packages
-  package:
-    name: "{{ item }}"
-    state: present
-    update_cache: yes
-    autoclean: yes
-  loop:
-    - apt-transport-https
-    - ca-certificates
-    - curl
-    - gnupg
-    - lsb-release
-    - docker-ce
-    - docker-ce-cli
-    - containerd.io
-
-- name: Get the system architecture
-  ansible.builtin.setup:
-    gather_subset: hardware
-  register: system_info
-
-- name: Set docker compose arm64 binary URL
-  set_fact:
-    docker_compose_binary_url: "{{ docker_compose_url_arm64 }}"
-  when: "'aarch64' in system_info['ansible_facts']['ansible_architecture']"
-
-- name: Set docker compose x86 binary URL
-  set_fact:
-    docker_compose_binary_url: "{{ docker_compose_url_x86 }}"
-  when: "'x86' in system_info['ansible_facts']['ansible_architecture']"
-
-- name: Install Docker Compose
-  stat:
-    path: "{{ om_homedir }}/{{ docker_compose_plugin_dir }}"
-    get_checksum: no
-    get_md5: no
-    mime: no
-  register: docker_cli_dir_present
-
-- name: Create Docker Compose Plugin Dir
-  file:
-    path: "{{ om_homedir }}/{{ docker_compose_plugin_dir }}"
-    state: directory
-    mode: "0755"
-    owner: "{{ om_user }}"
-    group: "{{ om_user }}"
-  become: yes
-
-- name: Create Docker Compose Plugin Dir root
-  file:
-    path: "{{ root_homedir }}/{{ docker_compose_plugin_dir }}"
-    state: directory
-    mode: "0770"
-    owner: "{{ root_user }}"
-    group: "{{ root_user }}"
-  become: yes
-
-- name: Download Docker CLI
-  get_url:
-    url: "{{ docker_compose_binary_url }}"
-    dest: "{{ om_homedir }}/{{ docker_compose_plugin_dir }}/docker-compose"
-  when: docker_cli_dir_present.stat.exists == False
-
-- name: Download Docker CLI root
-  get_url:
-    url: "{{ docker_compose_binary_url }}"
-    dest: "{{ root_homedir }}/{{ docker_compose_plugin_dir }}/docker-compose"
-  become: yes
-  when: docker_cli_dir_present.stat.exists == False
-
-- name: Change permissions
-  file:
-    path: "{{ om_homedir }}/{{ docker_compose_plugin_dir }}/docker-compose"
-    state: touch
-    mode: a+x
-    owner: "{{ om_user }}"
-    group: "{{ om_user }}"
-  become: yes
-
-- name: Change permissions for root
-  file:
-    path: "{{ root_homedir }}/{{ docker_compose_plugin_dir }}/docker-compose"
-    state: touch
-    mode: a+x
-    owner: "{{ root_user }}"
-    group: "{{ root_user }}"
-  become: yes
-
-- name: Ensuring docker service is always running
-  systemd:
-    enabled: yes
-    state: started
-    name: docker
diff --git a/packages/grid/ansible/roles/node/tasks/main.yml b/packages/grid/ansible/roles/node/tasks/main.yml
deleted file mode 100755
index 929d975537c..00000000000
--- a/packages/grid/ansible/roles/node/tasks/main.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Display all variables/facts known for a host
-  ansible.builtin.debug:
-    var: hostvars[inventory_hostname]
-    verbosity: 0
-
-- name: Stop existing cronjobs if provisioning from outside
-  shell: (crontab -r  || true) && sudo kill -9 $(pgrep cron)
-  become: yes
-  when: inventory_hostname != "localhost"
-
-- name: Stop existing ansible provisioning running against localhost
-  shell: pgrep ansible | xargs -I {} bash -c "ps -o cmd fp {} | grep 'connection=local' | kill {}"
-  become: yes
-  when: inventory_hostname != "localhost"
-
-- name: Apply system changes
-  import_tasks: system.yml
-  when: deploy_only is not defined and install == "true"
-
-- name: Apply security updates
-  import_tasks: security.yml
-  when: deploy_only is not defined and install == "true"
-
-- name: Install docker
-  import_tasks: docker.yml
-  when: deploy_only is not defined and install == "true"
-
-- name: Archive Logs
-  ansible.builtin.cron:
-    disabled: "{{ not install | bool }}"
-    minute: "0"
-    hour: "0"
-    name: "Archive Logs"
-    job: "{{ syft_dir }}/packages/grid/scripts/rotate_logs.sh 2>&1 | logger -t cron"
-  become: yes
-  when: vagrant is not defined
-
-- name: Clone PySyft Repo
-  git:
-    repo: "https://github.com/{{ github_repo }}"
-    dest: "{{ syft_dir }}"
-    version: "{{ repo_branch }}"
-    force: yes
-  become_user: "{{ om_user }}"
-  ignore_errors: yes
-  when: vagrant is not defined
diff --git a/packages/grid/ansible/roles/node/tasks/security.yml b/packages/grid/ansible/roles/node/tasks/security.yml
deleted file mode 100644
index 38fc1ccc655..00000000000
--- a/packages/grid/ansible/roles/node/tasks/security.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-- name: Install Security Updates
-  package:
-    name: "{{ item }}"
-    state: present
-    autoclean: yes
-    update_cache: yes
-  loop:
-    - policykit-1
diff --git a/packages/grid/ansible/roles/node/tasks/system.yml b/packages/grid/ansible/roles/node/tasks/system.yml
deleted file mode 100755
index 91ecbbf1a6b..00000000000
--- a/packages/grid/ansible/roles/node/tasks/system.yml
+++ /dev/null
@@ -1,133 +0,0 @@
----
-# - name: Install security updates
-#   shell: "sudo apt list --upgradable | grep security |cut -d\/ -f1|xargs sudo apt-get install -y --only-upgrade"
-#   become: yes
-#   args:
-#     executable: /bin/bash
-
-- name: Allow sudo without password
-  ansible.builtin.lineinfile:
-    path: /etc/sudoers
-    state: present
-    regexp: "^%sudo"
-    line: "%sudo ALL=(ALL) NOPASSWD: ALL"
-    validate: "visudo -cf %s"
-
-- name: Create docker group
-  group:
-    name: "docker"
-    state: present
-
-- name: Create om group
-  group:
-    name: "{{ om_group }}"
-    state: present
-
-- name: Create om user
-  user:
-    name: "{{ om_user }}"
-    append: yes
-    createhome: yes
-
-- name: Add user to om and docker
-  user:
-    name: "{{ om_user }}"
-    comment: "OpenMined user for running node"
-    groups:
-      - "{{ om_group }}"
-      - sudo
-      - docker
-    shell: /bin/bash
-
-- name: Give permission to home dir
-  file:
-    path: "{{ om_homedir }}"
-    state: directory
-    mode: "0775"
-    owner: "{{ om_user }}"
-    group: "{{ om_group }}"
-    # recurse: yes
-
-- name: Add user to vagrant group
-  user:
-    name: "{{ om_user }}"
-    groups:
-      - vagrant
-    append: yes
-  when: vagrant is defined
-
-- name: Add vagrant user to docker
-  user:
-    name: "vagrant"
-    groups:
-      - vagrant
-      - docker
-      - om
-    append: yes
-  when: vagrant is defined
-
-- name: Install System Packages
-  package:
-    name: "{{ item }}"
-    state: present
-    autoclean: yes
-    update_cache: yes
-  loop:
-    - acl
-    - python3-pip
-    - ntp
-    - tmux
-    - vim
-    - ufw
-    - git
-    - python-is-python3
-    - net-tools
-    - ifupdown
-    - python3-venv
-    - cron
-
-- name: Upgrade pip and some packages
-  pip:
-    name: pip
-    extra_args: --upgrade
-
-- name: Check ctop exists
-  stat:
-    path: /usr/local/bin/ctop
-  register: ctop_exists
-
-- name: Get the system architecture
-  ansible.builtin.setup:
-    gather_subset: hardware
-  register: system_info
-
-- name: Install ctop Docker CLI Utility - arm64
-  shell: sudo wget https://github.com/bcicen/ctop/releases/download/v0.7.7/ctop-0.7.7-linux-arm64 -O /usr/local/bin/ctop && sudo chmod +x /usr/local/bin/ctop
-  become: yes
-  when: "not ctop_exists.stat.exists and 'aarch64' in system_info['ansible_facts']['ansible_architecture']"
-
-- name: Install ctop Docker CLI Utility - x86
-  shell: sudo wget https://github.com/bcicen/ctop/releases/download/v0.7.7/ctop-0.7.7-linux-amd64 -O /usr/local/bin/ctop && sudo chmod +x /usr/local/bin/ctop
-  become: yes
-  when: "not ctop_exists.stat.exists and 'x86' in system_info['ansible_facts']['ansible_architecture']"
-
-- name: Ensuring ufw service is always running
-  systemd:
-    enabled: yes
-    state: started
-    name: ufw
-  when: wsl is not defined
-
-- name: Set timezone to UTC
-  timezone:
-    name: UTC
-
-- name: Ensuring ntp service is always running
-  systemd:
-    enabled: yes
-    state: started
-    name: ntp
-
-- name: Set git dir as safe
-  shell: git config --global --add safe.directory {{ syft_dir }}
-  become: yes
diff --git a/packages/grid/ansible/roles/update/tasks/main.yml b/packages/grid/ansible/roles/update/tasks/main.yml
deleted file mode 100644
index 170ca4f26ae..00000000000
--- a/packages/grid/ansible/roles/update/tasks/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-# cronjob logs: $ tail -f /var/log/syslog | grep -i cron
-# - name: PySyft Repo Update Cron Job
-#   ansible.builtin.cron:
-#     disabled: "{{ not install | bool }}"
-#     minute: "*/2"
-#     name: "Update PySyft Repo"
-#     job: "{{ syft_dir }}/packages/grid/scripts/cron.sh {{ syft_dir }} {{ github_repo }} {{ repo_branch }} {{ om_user }} {{ om_group }} {{ node_type }} {{ node_name }} {{ build_dir }} {{ tls }} {{ cert_store_path }} {{ release }} {{ docker_tag }} 2>&1 | logger -t cron"
-#   become: yes
-#   when: vagrant is not defined
-
-- name: Keep Containers Running
-  ansible.builtin.cron:
-    disabled: "{{ not install | bool }}"
-    name: "Restart Containers"
-    job: "{{ syft_dir }}/packages/grid/scripts/containers.sh"
-  become: yes
-  when: vagrant is not defined
-
-- name:
-  copy:
-    src: "../../../../scripts/"
-    dest: "{{ cert_store_path }}/cert.pem"
-    owner: "{{ om_user }}"
-    group: "{{ om_user }}"
-    mode: 0600
-  when: upload_tls_cert != "" and install == "true"
-
-- name: Run containers.sh on startup
-  ansible.builtin.template:
-    src: "../../../../scripts/rc.local.j2"
-    dest: /etc/rc.local
-    owner: root
-    group: root
-    mode: "0655"
-  become: yes
-  when: vagrant is not defined
diff --git a/packages/grid/ansible/site.yml b/packages/grid/ansible/site.yml
deleted file mode 100755
index f0c14773983..00000000000
--- a/packages/grid/ansible/site.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- hosts: all
-  gather_facts: False
-  environment:
-    LC_ALL: en_US.UTF-8
-  become: yes
-  roles:
-    - node
-    - jupyter
-    - containers
-    - update
-    - aa_demo
diff --git a/packages/grid/backend/backend.dockerfile b/packages/grid/backend/backend.dockerfile
index 1520190f0e1..f612d690958 100644
--- a/packages/grid/backend/backend.dockerfile
+++ b/packages/grid/backend/backend.dockerfile
@@ -1,119 +1,93 @@
 ARG PYTHON_VERSION="3.12"
-ARG TZ="Etc/UTC"
+ARG UV_VERSION="0.2.13-r0"
+ARG TORCH_VERSION="2.2.2"
 
-# change to USER="syftuser", UID=1000 and HOME="/home/$USER" for rootless
-ARG USER="root"
-ARG UID=0
-ARG USER_GRP=$USER:$USER
-ARG HOME="/root"
-ARG APPDIR="$HOME/app"
+# wolfi-os pkg definition links
+# https://github.com/wolfi-dev/os/blob/main/python-3.12.yaml
+# https://github.com/wolfi-dev/os/blob/main/py3-pip.yaml
+# https://github.com/wolfi-dev/os/blob/main/uv.yaml
 
 # ==================== [BUILD STEP] Python Dev Base ==================== #
 
-FROM cgr.dev/chainguard/wolfi-base as python_dev
+FROM cgr.dev/chainguard/wolfi-base AS syft_deps
 
 ARG PYTHON_VERSION
-ARG TZ
-ARG USER
-ARG UID
+ARG UV_VERSION
+ARG TORCH_VERSION
 
 # Setup Python DEV
-RUN --mount=type=cache,target=/var/cache/apk,sharing=locked \
-    apk update && \
-    apk upgrade && \
-    apk add build-base gcc tzdata python-$PYTHON_VERSION-dev-default py$PYTHON_VERSION-pip && \
-    ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-# uncomment for creating rootless user
-# && adduser -D -u $UID $USER
-
-# ==================== [BUILD STEP] Install Syft Dependency ==================== #
-
-FROM python_dev as syft_deps
-
-ARG APPDIR
-ARG HOME
-ARG UID
-ARG USER
-ARG USER_GRP
-
-USER $USER
-WORKDIR $APPDIR
-ENV PATH=$PATH:$HOME/.local/bin
-
-# copy skeleton to do package install
-COPY --chown=$USER_GRP \
-    syft/setup.py \
-    syft/setup.cfg \
-    syft/pyproject.toml \
-    syft/MANIFEST.in \
-    syft/
-
-COPY --chown=$USER_GRP \
-    syft/src/syft/VERSION \
-    syft/src/syft/capnp \
-    syft/src/syft/
-
-# Install all dependencies together here to avoid any version conflicts across pkgs
-RUN --mount=type=cache,id=pip-$UID,target=$HOME/.cache/pip,uid=$UID,gid=$UID,sharing=locked \
-    pip install --user --default-timeout=300 torch==2.2.1 -f https://download.pytorch.org/whl/cpu/torch_stable.html && \
-    pip install --user pip-autoremove jupyterlab -e ./syft[data_science] && \
-    pip-autoremove ansible ansible-core -y
+RUN apk update && apk upgrade && \
+    apk add build-base gcc python-$PYTHON_VERSION-dev uv=$UV_VERSION && \
+    # preemptive fix for wolfi-os breaking python entrypoint
+    (test -f /usr/bin/python || ln -s /usr/bin/python3.12 /usr/bin/python)
+
+WORKDIR /root/app
+
+ENV UV_HTTP_TIMEOUT=600
+
+# keep static deps separate to have each layer cached independently
+# if amd64 then we need to append +cpu to the torch version
+# uv issues: https://github.com/astral-sh/uv/issues/3437 & https://github.com/astral-sh/uv/issues/2541
+RUN --mount=type=cache,target=/root/.cache,sharing=locked \
+    uv venv && \
+    ARCH=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \
+    if [[ "$ARCH" = "amd64" ]]; then TORCH_VERSION="$TORCH_VERSION+cpu"; fi && \
+    uv pip install torch==$TORCH_VERSION --index-url https://download.pytorch.org/whl/cpu
+
+COPY syft/setup.py syft/setup.cfg syft/pyproject.toml ./syft/
+
+COPY syft/src/syft/VERSION ./syft/src/syft/
+
+RUN --mount=type=cache,target=/root/.cache,sharing=locked \
+    # remove torch because we already have the cpu version pre-installed
+    sed --in-place /torch==/d ./syft/setup.cfg && \
+    uv pip install -e ./syft[data_science,telemetry]
 
 # ==================== [Final] Setup Syft Server ==================== #
 
-FROM cgr.dev/chainguard/wolfi-base as backend
+FROM cgr.dev/chainguard/wolfi-base AS backend
 
-# inherit from global
-ARG APPDIR
-ARG HOME
 ARG PYTHON_VERSION
-ARG TZ
-ARG USER
-ARG USER_GRP
-
-# Setup Python
-RUN --mount=type=cache,target=/var/cache/apk,sharing=locked \
-    apk update && \
-    apk upgrade && \
-    apk add tzdata git bash python-$PYTHON_VERSION-default py$PYTHON_VERSION-pip && \
-    ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && \
-    # Uncomment for rootless user
-    # adduser -D -u 1000 $USER && \
-    mkdir -p /var/log/pygrid $HOME/data/creds $HOME/data/db $HOME/.cache $HOME/.local
-# chown -R $USER_GRP /var/log/pygrid $HOME/
-
-USER $USER
-WORKDIR $APPDIR
+ARG UV_VERSION
+
+RUN apk update && apk upgrade && \
+    apk add --no-cache git bash python-$PYTHON_VERSION py$PYTHON_VERSION-pip uv=$UV_VERSION && \
+    # preemptive fix for wolfi-os breaking python entrypoint
+    (test -f /usr/bin/python || ln -s /usr/bin/python3.12 /usr/bin/python)
+
+WORKDIR /root/app/
+
+# Copy pre-built syft dependencies
+COPY --from=syft_deps /root/app/.venv .venv
+
+# copy server
+COPY grid/backend/grid ./grid/
+
+# copy syft
+COPY syft ./syft/
 
 # Update environment variables
-ENV PATH=$PATH:$HOME/.local/bin \
-    PYTHONPATH=$APPDIR \
-    APPDIR=$APPDIR \
-    NODE_NAME="default_node_name" \
-    NODE_TYPE="domain" \
-    SERVICE_NAME="backend" \
+ENV \
+    # "activate" venv
+    PATH="/root/app/.venv/bin/:$PATH" \
+    VIRTUAL_ENV="/root/app/.venv" \
+    # Syft
+    APPDIR="/root/app" \
+    SERVER_NAME="default_server_name" \
+    SERVER_TYPE="datasite" \
+    SERVER_SIDE_TYPE="high" \
     RELEASE="production" \
     DEV_MODE="False" \
     DEBUGGER_ENABLED="False" \
+    TRACING="False" \
     CONTAINER_HOST="docker" \
-    OBLV_ENABLED="False" \
-    OBLV_LOCALHOST_PORT=3030 \
     DEFAULT_ROOT_EMAIL="info@openmined.org" \
     DEFAULT_ROOT_PASSWORD="changethis" \
     STACK_API_KEY="changeme" \
-    MONGO_HOST="localhost" \
-    MONGO_PORT="27017" \
-    MONGO_USERNAME="root" \
-    MONGO_PASSWORD="example" \
-    CREDENTIALS_PATH="$HOME/data/creds/credentials.json"
-
-# Copy pre-built jupyterlab, syft dependencies
-COPY --chown=$USER_GRP --from=syft_deps $HOME/.local $HOME/.local
-
-# copy grid
-COPY --chown=$USER_GRP grid/backend/grid grid/backend/worker_cpu.dockerfile ./grid/
-
-# copy syft
-COPY --chown=$USER_GRP syft/ ./syft/
+    POSTGRESQL_DBNAME="syftdb_postgres" \
+    POSTGRESQL_HOST="localhost" \
+    POSTGRESQL_PORT="5432" \
+    POSTGRESQL_USERNAME="syft_postgres" \
+    POSTGRESQL_PASSWORD="example"
 
 CMD ["bash", "./grid/start.sh"]
diff --git a/packages/grid/backend/backend.dockerfile.dockerignore b/packages/grid/backend/backend.dockerfile.dockerignore
new file mode 100644
index 00000000000..2c06567a214
--- /dev/null
+++ b/packages/grid/backend/backend.dockerfile.dockerignore
@@ -0,0 +1,63 @@
+# Paths should be against the docker root context dir i.e. /packages
+
+# Syft
+**/tests/
+**/*.md
+
+# Byte-compiled / optimized / DLL files
+**/__pycache__/
+**/*.py[cod]
+**/*$py.class
+
+# Distribution / packaging
+**/.Python
+**/build/
+**/develop-eggs/
+**/dist/
+**/downloads/
+**/eggs/
+**/.eggs/
+**/lib/
+**/lib64/
+**/parts/
+**/sdist/
+**/var/
+**/wheels/
+**/share/python-wheels/
+**/*.egg-info/
+**/.installed.cfg
+**/*.egg
+**/MANIFEST
+
+# Jupyter Notebook
+**/.ipynb_checkpoints
+
+# Environments
+**/.env
+**/.venv
+**/env/
+**/venv/
+**/ENV/
+**/env.bak/
+**/venv.bak/
+
+# Unit test / coverage reports
+**/htmlcov/
+**/.tox/
+**/.nox/
+**/.coverage
+**/.coverage.*
+**/.cache
+**/nosetests.xml
+**/coverage.xml
+**/*.cover
+**/*.py,cover
+**/.hypothesis/
+**/.pytest_cache/
+**/cover/
+
+# vim
+**/*.swp
+
+# macOS
+**/.DS_Store
diff --git a/packages/grid/backend/build_tensorstore.dockerfile b/packages/grid/backend/build_tensorstore.dockerfile
deleted file mode 100644
index f580916774d..00000000000
--- a/packages/grid/backend/build_tensorstore.dockerfile
+++ /dev/null
@@ -1,42 +0,0 @@
-FROM python:3.12-slim as build
-RUN apt-get -y update --allow-insecure-repositories
-RUN apt-get -y upgrade
-RUN apt-get -y dist-upgrade
-RUN apt-get -y install git wget gcc g++ curl make sudo
-RUN git clone https://github.com/google/tensorstore /tf_store
-WORKDIR /tf_store
-RUN git checkout tags/v0.1.25 -b v0.1.25
-RUN python -m pip install -U pip setuptools wheel
-RUN export BAZEL_VERSION=5.1.0 && wget -O bazel "https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-linux-arm64" && chmod +x bazel
-RUN mv bazelisk-linux-arm64 bazel
-RUN chmod +x bazel
-RUN export PATH=`pwd`:$PATH
-RUN pip install -r third_party/pypa/cibuildwheel_requirements_frozen.txt
-RUN cp .bazelrc /root/ci_bazelrc
-RUN apt-get install libavif-dev
-
-# use bazel 5.1
-
-# ignore bazelisk because 5.0 doesnt work on aarch64
-
-# replace perl third party rules with arm64 version
-# def repo():
-#     maybe(
-#         third_party_http_archive,
-#         name = "rules_perl",
-#         urls = [
-#             "https://github.com/bazelbuild/rules_perl/archive/022b8daf2bb4836ac7a50e4a1d8ea056a3e1e403.tar.gz",
-#         ],
-#         sha256 = "7d4e17a5850446388ab74a3d884d80731d45931aa6ac93edb9efbd500628fdcb",
-#         strip_prefix = "rules_perl-022b8daf2bb4836ac7a50e4a1d8ea056a3e1e403",
-#     )
-
-# change setup.py to use bazel (not bazelisk python)
-
-# manually patch version string in setup.py
-# version="0.1.25"
-# comment out use_scm_version
-
-# run
-# export TENSORSTORE_SYSTEM_LIBS=org_aomedia_avif
-# python -m pip wheel ./ --wheel-dir=./ --no-deps -v
diff --git a/packages/grid/backend/build_tf_compression.dockerfile b/packages/grid/backend/build_tf_compression.dockerfile
deleted file mode 100644
index 50d374a633c..00000000000
--- a/packages/grid/backend/build_tf_compression.dockerfile
+++ /dev/null
@@ -1,23 +0,0 @@
-FROM python:3.12-slim as build
-RUN apt-get -y update --allow-insecure-repositories
-RUN apt-get -y upgrade
-RUN apt-get -y dist-upgrade
-RUN apt-get -y install git
-RUN git clone https://github.com/tensorflow/compression.git /tensorflow_compression
-WORKDIR /tensorflow_compression
-RUN git checkout tags/v2.10.0 -b v2.10.0
-RUN apt-get -y install wget
-RUN wget https://raw.githubusercontent.com/OpenMined/PySyft/dev/packages/grid/backend/wheels/dm-tree-0.1.7.tar.gz
-RUN tar -xf dm-tree-0.1.7.tar.gz --strip-components=6
-RUN mv site-packages/* /usr/local/lib/python3.11/site-packages
-RUN python -m pip install -U pip setuptools wheel
-RUN python -m pip install scipy --only-binary=:all:
-RUN python -m pip install tensorflow-probability~=0.15
-RUN python -m pip install tensorflow~=2.10.0
-RUN wget https://github.com/bazelbuild/bazel/releases/download/5.3.1/bazel-5.3.1-linux-arm64
-RUN mv bazel-5.3.1-linux-arm64 bazel
-RUN chmod +x bazel
-RUN export PATH=`pwd`:$PATH
-RUN apt-get -y install gcc g++
-RUN ./bazel build -c opt :build_pip_pkg --verbose_failures
-RUN python build_pip_pkg.py bazel-bin/build_pip_pkg.runfiles/tensorflow_compression /tmp/tensorflow_compression 2.10.0
diff --git a/packages/grid/backend/grid/api/new/new.py b/packages/grid/backend/grid/api/new/new.py
index 2f0a6910bdf..3ff61bb041b 100644
--- a/packages/grid/backend/grid/api/new/new.py
+++ b/packages/grid/backend/grid/api/new/new.py
@@ -1,7 +1,7 @@
 # syft absolute
-from syft.node.routes import make_routes
+from syft.server.routes import make_routes
 
-# grid absolute
-from grid.core.node import worker
+# server absolute
+from grid.core.server import worker
 
 router = make_routes(worker=worker)
diff --git a/packages/grid/backend/grid/api/router.py b/packages/grid/backend/grid/api/router.py
index 020491323a6..8412869db53 100644
--- a/packages/grid/backend/grid/api/router.py
+++ b/packages/grid/backend/grid/api/router.py
@@ -7,7 +7,7 @@
 # third party
 from fastapi import APIRouter
 
-# grid absolute
+# server absolute
 from grid.api.new.new import router as new_router
 
 api_router = APIRouter()
diff --git a/packages/grid/backend/grid/bootstrap.py b/packages/grid/backend/grid/bootstrap.py
index 84fedc36fdf..3da7eb9b600 100644
--- a/packages/grid/backend/grid/bootstrap.py
+++ b/packages/grid/backend/grid/bootstrap.py
@@ -9,7 +9,7 @@
 from nacl.encoding import HexEncoder
 from nacl.signing import SigningKey
 
-# we want to bootstrap nodes with persistent uids and keys and allow a variety of ways
+# we want to bootstrap servers with persistent uids and keys and allow a variety of ways
 # to resolve these at startup
 
 # first we check the environment variables
@@ -26,10 +26,9 @@ def get_env(key: str, default: str = "") -> str | None:
     return None
 
 
-DEFAULT_CREDENTIALS_PATH = os.path.expandvars("$HOME/data/creds/credentials.json")
-CREDENTIALS_PATH = str(get_env("CREDENTIALS_PATH", DEFAULT_CREDENTIALS_PATH))
-NODE_PRIVATE_KEY = "NODE_PRIVATE_KEY"
-NODE_UID = "NODE_UID"
+CREDENTIALS_PATH = str(get_env("CREDENTIALS_PATH", "credentials.json"))
+SERVER_PRIVATE_KEY = "SERVER_PRIVATE_KEY"
+SERVER_UID = "SERVER_UID"
 
 
 def get_credentials_file() -> dict[str, str]:
@@ -65,7 +64,7 @@ def save_credential(key: str, value: str) -> str:
     return value
 
 
-def generate_node_uid() -> str:
+def generate_server_uid() -> str:
     return str(uuid.uuid4())
 
 
@@ -78,11 +77,11 @@ def generate_private_key() -> str:
 
 
 def get_private_key_env() -> str | None:
-    return get_env(NODE_PRIVATE_KEY)
+    return get_env(SERVER_PRIVATE_KEY)
 
 
-def get_node_uid_env() -> str | None:
-    return get_env(NODE_UID)
+def get_server_uid_env() -> str | None:
+    return get_env(SERVER_UID)
 
 
 def validate_private_key(private_key: str | bytes) -> str:
@@ -96,17 +95,17 @@ def validate_private_key(private_key: str | bytes) -> str:
             return str_key
     except Exception:
         pass
-    raise Exception(f"{NODE_PRIVATE_KEY} is invalid")
+    raise Exception(f"{SERVER_PRIVATE_KEY} is invalid")
 
 
-def validate_uid(node_uid: str) -> str:
+def validate_uid(server_uid: str) -> str:
     try:
-        uid = uuid.UUID(node_uid)
-        if node_uid == str(uid):
+        uid = uuid.UUID(server_uid)
+        if server_uid == uid.hex or server_uid == str(uid):
             return str(uid)
     except Exception:
         pass
-    raise Exception(f"{NODE_PRIVATE_KEY} is invalid")
+    raise Exception(f"{SERVER_UID} is invalid")
 
 
 def get_credential(
@@ -122,9 +121,9 @@ def get_credential(
 
     # supplying a different key means something has gone wrong so raise Exception
     if (
-        file_credential != env_credential
-        and file_credential is not None
+        file_credential is not None
         and env_credential is not None
+        and validation_func(file_credential) != validation_func(env_credential)
     ):
         raise Exception(f"{key} from ENV must match {key} in {CREDENTIALS_PATH}")
 
@@ -141,11 +140,13 @@ def get_credential(
 
 
 def get_private_key() -> str:
-    return get_credential(NODE_PRIVATE_KEY, validate_private_key, generate_private_key)
+    return get_credential(
+        SERVER_PRIVATE_KEY, validate_private_key, generate_private_key
+    )
 
 
-def get_node_uid() -> str:
-    return get_credential(NODE_UID, validate_uid, generate_node_uid)
+def get_server_uid() -> str:
+    return get_credential(SERVER_UID, validate_uid, generate_server_uid)
 
 
 def delete_credential_file() -> None:
@@ -169,15 +170,15 @@ def delete_credential_file() -> None:
         if args.private_key:
             print(get_private_key())
         elif args.uid:
-            print(get_node_uid())
+            print(get_server_uid())
     elif args.file:
         delete_credential_file()
         get_private_key()
-        get_node_uid()
+        get_server_uid()
         print(f"Generated credentials file at '{CREDENTIALS_PATH}'")
     elif args.debug:
         print("Credentials File", get_credentials_file())
-        print(NODE_PRIVATE_KEY, "=", get_private_key_env())
-        print(NODE_UID, "=", get_node_uid_env())
+        print(SERVER_PRIVATE_KEY, "=", get_private_key_env())
+        print(SERVER_UID, "=", get_server_uid_env())
     else:
         parser.print_help()
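For reference, the relaxed `validate_uid` above now accepts both the 32-character hex form and the canonical dashed form of a UUID, normalizing either to the canonical string. A minimal standalone sketch of that check, mirroring the hunk rather than importing `grid.bootstrap` (the asserts are illustrative):

```python
# Standalone sketch of the relaxed UID validation introduced above.
import uuid

SERVER_UID = "SERVER_UID"  # name of the env var checked by bootstrap.py


def validate_uid(server_uid: str) -> str:
    try:
        uid = uuid.UUID(server_uid)
        # accept both the hex form and the canonical dashed form
        if server_uid == uid.hex or server_uid == str(uid):
            return str(uid)
    except Exception:
        pass
    raise Exception(f"{SERVER_UID} is invalid")


canonical = str(uuid.uuid4())
assert validate_uid(canonical) == canonical
assert validate_uid(canonical.replace("-", "")) == canonical
```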
diff --git a/packages/grid/backend/grid/core/celery_config.py b/packages/grid/backend/grid/core/celery_config.py
deleted file mode 100644
index 20fa3f55544..00000000000
--- a/packages/grid/backend/grid/core/celery_config.py
+++ /dev/null
@@ -1,13 +0,0 @@
-worker_send_task_event = False
-task_ignore_result = True
-task_time_limit = 1500  # Rasswanth: should modify after optimizing PC
-task_acks_late = True
-broker_pool_limit = 500
-worker_prefetch_multiplier = 1
-task_routes = {
-    "grid.worker.msg_without_reply": "main-queue",
-    "delivery_mode": "transient",
-}
-accept_content = ["application/syft"]
-task_serializer = "syft"
-result_serializer = "syft"
diff --git a/packages/grid/backend/grid/core/celery_serde.py b/packages/grid/backend/grid/core/celery_serde.py
deleted file mode 100644
index 005e4a4f514..00000000000
--- a/packages/grid/backend/grid/core/celery_serde.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# stdlib
-from typing import Any
-
-# third party
-from kombu import serialization
-
-# syft absolute
-import syft as sy
-from syft.logger import error
-
-
-def loads(data: bytes) -> Any:
-    # original payload might have nested bytes in the args
-    org_payload = sy.deserialize(data, from_bytes=True)
-    # original payload is found at org_payload[0][0]
-    if (
-        len(org_payload) > 0
-        and len(org_payload[0]) > 0
-        and isinstance(org_payload[0][0], bytes)
-    ):
-        try:
-            nested_data = org_payload[0][0]
-            org_obj = sy.deserialize(nested_data, from_bytes=True)
-            org_payload[0][0] = org_obj
-        except Exception as e:
-            error(f"Unable to deserialize nested payload. {e}")
-            raise e
-
-    return org_payload
-
-
-def dumps(obj: Any) -> bytes:
-    # this is usually a Tuple of args where the first one is what we send to the task
-    # but it can also get other arbitrary data which we need to serde
-    # since we might get bytes directly from the web endpoint we can avoid double
-    # unserializing it by keeping it inside the nested args list org_payload[0][0]
-    return sy.serialize(obj, to_bytes=True)
-
-
-serialization.register(
-    "syft",
-    dumps,
-    loads,
-    content_type="application/syft",
-    content_encoding="binary",
-)
diff --git a/packages/grid/backend/grid/core/config.py b/packages/grid/backend/grid/core/config.py
index a4d6642ae38..e92d6783ae7 100644
--- a/packages/grid/backend/grid/core/config.py
+++ b/packages/grid/backend/grid/core/config.py
@@ -97,13 +97,13 @@ def get_emails_enabled(self) -> Self:
     DEFAULT_ROOT_PASSWORD: str = "changethis"
     USERS_OPEN_REGISTRATION: bool = False
 
-    NODE_NAME: str = "default_node_name"
+    SERVER_NAME: str = "default_server_name"
     STREAM_QUEUE: bool = False
-    NODE_TYPE: str = "domain"
+    SERVER_TYPE: str = "datasite"
 
     OPEN_REGISTRATION: bool = True
 
-    # DOMAIN_ASSOCIATION_REQUESTS_AUTOMATICALLY_ACCEPTED: bool = True
+    # DATASITE_ASSOCIATION_REQUESTS_AUTOMATICALLY_ACCEPTED: bool = True
     USE_BLOB_STORAGE: bool = (
         True if os.getenv("USE_BLOB_STORAGE", "false").lower() == "true" else False
     )
@@ -124,12 +124,13 @@ def get_emails_enabled(self) -> Self:
     # STORE_DB_ID: int = int(os.getenv("STORE_DB_ID", 0))
     # LEDGER_DB_ID: int = int(os.getenv("LEDGER_DB_ID", 1))
     # NETWORK_CHECK_INTERVAL: int = int(os.getenv("NETWORK_CHECK_INTERVAL", 60))
-    # DOMAIN_CHECK_INTERVAL: int = int(os.getenv("DOMAIN_CHECK_INTERVAL", 60))
+    # DATASITE_CHECK_INTERVAL: int = int(os.getenv("DATASITE_CHECK_INTERVAL", 60))
     CONTAINER_HOST: str = str(os.getenv("CONTAINER_HOST", "docker"))
-    MONGO_HOST: str = str(os.getenv("MONGO_HOST", ""))
-    MONGO_PORT: int = int(os.getenv("MONGO_PORT", 27017))
-    MONGO_USERNAME: str = str(os.getenv("MONGO_USERNAME", ""))
-    MONGO_PASSWORD: str = str(os.getenv("MONGO_PASSWORD", ""))
+    POSTGRESQL_DBNAME: str = str(os.getenv("POSTGRESQL_DBNAME", ""))
+    POSTGRESQL_HOST: str = str(os.getenv("POSTGRESQL_HOST", ""))
+    POSTGRESQL_PORT: int = int(os.getenv("POSTGRESQL_PORT", 5432))
+    POSTGRESQL_USERNAME: str = str(os.getenv("POSTGRESQL_USERNAME", ""))
+    POSTGRESQL_PASSWORD: str = str(os.getenv("POSTGRESQL_PASSWORD", ""))
     DEV_MODE: bool = True if os.getenv("DEV_MODE", "false").lower() == "true" else False
     # ZMQ stuff
     QUEUE_PORT: int = int(os.getenv("QUEUE_PORT", 5556))
@@ -137,7 +138,7 @@ def get_emails_enabled(self) -> Self:
         True if os.getenv("CREATE_PRODUCER", "false").lower() == "true" else False
     )
     N_CONSUMERS: int = int(os.getenv("N_CONSUMERS", 1))
-    SQLITE_PATH: str = os.path.expandvars("$HOME/data/db/")
+    SQLITE_PATH: str = os.path.expandvars("/tmp/data/db")
     SINGLE_CONTAINER_MODE: bool = str_to_bool(os.getenv("SINGLE_CONTAINER_MODE", False))
     CONSUMER_SERVICE_NAME: str | None = os.getenv("CONSUMER_SERVICE_NAME")
     INMEMORY_WORKERS: bool = str_to_bool(os.getenv("INMEMORY_WORKERS", True))
@@ -152,6 +153,15 @@ def get_emails_enabled(self) -> Self:
         True if os.getenv("TEST_MODE", "false").lower() == "true" else False
     )
     ASSOCIATION_TIMEOUT: int = 10
+    ASSOCIATION_REQUEST_AUTO_APPROVAL: bool = str_to_bool(
+        os.getenv("ASSOCIATION_REQUEST_AUTO_APPROVAL", "False")
+    )
+    MIN_SIZE_BLOB_STORAGE_MB: int = int(os.getenv("MIN_SIZE_BLOB_STORAGE_MB", 1))
+    REVERSE_TUNNEL_ENABLED: bool = str_to_bool(
+        os.getenv("REVERSE_TUNNEL_ENABLED", "false")
+    )
+    TRACING_ENABLED: bool = str_to_bool(os.getenv("TRACING", "False"))
+
     model_config = SettingsConfigDict(case_sensitive=True)
 
 
diff --git a/packages/grid/backend/grid/core/node.py b/packages/grid/backend/grid/core/node.py
deleted file mode 100644
index cad81336407..00000000000
--- a/packages/grid/backend/grid/core/node.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# syft absolute
-from syft.abstract_node import NodeType
-from syft.node.domain import Domain
-from syft.node.domain import Node
-from syft.node.enclave import Enclave
-from syft.node.gateway import Gateway
-from syft.node.node import get_enable_warnings
-from syft.node.node import get_node_name
-from syft.node.node import get_node_side_type
-from syft.node.node import get_node_type
-from syft.node.node import get_node_uid_env
-from syft.service.queue.zmq_queue import ZMQClientConfig
-from syft.service.queue.zmq_queue import ZMQQueueConfig
-from syft.store.blob_storage.seaweedfs import SeaweedFSClientConfig
-from syft.store.blob_storage.seaweedfs import SeaweedFSConfig
-from syft.store.mongo_client import MongoStoreClientConfig
-from syft.store.mongo_document_store import MongoStoreConfig
-from syft.store.sqlite_document_store import SQLiteStoreClientConfig
-from syft.store.sqlite_document_store import SQLiteStoreConfig
-from syft.types.uid import UID
-
-# grid absolute
-from grid.core.config import settings
-
-
-def queue_config() -> ZMQQueueConfig:
-    queue_config = ZMQQueueConfig(
-        client_config=ZMQClientConfig(
-            create_producer=settings.CREATE_PRODUCER,
-            queue_port=settings.QUEUE_PORT,
-            n_consumers=settings.N_CONSUMERS,
-            consumer_service=settings.CONSUMER_SERVICE_NAME,
-        )
-    )
-    return queue_config
-
-
-def mongo_store_config() -> MongoStoreConfig:
-    mongo_client_config = MongoStoreClientConfig(
-        hostname=settings.MONGO_HOST,
-        port=settings.MONGO_PORT,
-        username=settings.MONGO_USERNAME,
-        password=settings.MONGO_PASSWORD,
-    )
-
-    return MongoStoreConfig(client_config=mongo_client_config)
-
-
-def sql_store_config() -> SQLiteStoreConfig:
-    client_config = SQLiteStoreClientConfig(
-        filename=f"{UID.from_string(get_node_uid_env())}.sqlite",
-        path=settings.SQLITE_PATH,
-    )
-    return SQLiteStoreConfig(client_config=client_config)
-
-
-def seaweedfs_config() -> SeaweedFSConfig:
-    seaweed_client_config = SeaweedFSClientConfig(
-        host=settings.S3_ENDPOINT,
-        port=settings.S3_PORT,
-        access_key=settings.S3_ROOT_USER,
-        secret_key=settings.S3_ROOT_PWD,
-        region=settings.S3_REGION,
-        default_bucket_name=get_node_uid_env(),
-        mount_port=settings.SEAWEED_MOUNT_PORT,
-    )
-
-    return SeaweedFSConfig(client_config=seaweed_client_config)
-
-
-node_type = NodeType(get_node_type())
-node_name = get_node_name()
-
-node_side_type = get_node_side_type()
-enable_warnings = get_enable_warnings()
-
-worker_classes = {
-    NodeType.DOMAIN: Domain,
-    NodeType.GATEWAY: Gateway,
-    NodeType.ENCLAVE: Enclave,
-}
-
-worker_class = worker_classes[node_type]
-
-single_container_mode = settings.SINGLE_CONTAINER_MODE
-store_config = sql_store_config() if single_container_mode else mongo_store_config()
-blob_storage_config = None if single_container_mode else seaweedfs_config()
-queue_config = queue_config()
-
-worker: Node = worker_class(
-    name=node_name,
-    node_side_type=node_side_type,
-    action_store_config=store_config,
-    document_store_config=store_config,
-    enable_warnings=enable_warnings,
-    blob_storage_config=blob_storage_config,
-    local_db=single_container_mode,
-    queue_config=queue_config,
-    migrate=True,
-    in_memory_workers=settings.INMEMORY_WORKERS,
-    smtp_username=settings.SMTP_USERNAME,
-    smtp_password=settings.SMTP_PASSWORD,
-    email_sender=settings.EMAIL_SENDER,
-    smtp_port=settings.SMTP_PORT,
-    smtp_host=settings.SMTP_HOST,
-)
diff --git a/packages/grid/backend/grid/core/server.py b/packages/grid/backend/grid/core/server.py
new file mode 100644
index 00000000000..fa674d94dcb
--- /dev/null
+++ b/packages/grid/backend/grid/core/server.py
@@ -0,0 +1,115 @@
+# stdlib
+from pathlib import Path
+
+# syft absolute
+from syft.abstract_server import ServerType
+from syft.server.datasite import Datasite
+from syft.server.datasite import Server
+from syft.server.enclave import Enclave
+from syft.server.env import get_default_bucket_name
+from syft.server.env import get_enable_warnings
+from syft.server.env import get_server_name
+from syft.server.env import get_server_side_type
+from syft.server.env import get_server_type
+from syft.server.env import get_server_uid_env
+from syft.server.gateway import Gateway
+from syft.service.queue.zmq_client import ZMQClientConfig
+from syft.service.queue.zmq_client import ZMQQueueConfig
+from syft.store.blob_storage.seaweedfs import SeaweedFSClientConfig
+from syft.store.blob_storage.seaweedfs import SeaweedFSConfig
+from syft.store.db.postgres import PostgresDBConfig
+from syft.store.db.sqlite import SQLiteDBConfig
+from syft.types.uid import UID
+
+# server absolute
+from grid.core.config import settings
+
+
+def queue_config() -> ZMQQueueConfig:
+    queue_config = ZMQQueueConfig(
+        client_config=ZMQClientConfig(
+            create_producer=settings.CREATE_PRODUCER,
+            queue_port=settings.QUEUE_PORT,
+            n_consumers=settings.N_CONSUMERS,
+            consumer_service=settings.CONSUMER_SERVICE_NAME,
+        )
+    )
+    return queue_config
+
+
+def sql_store_config() -> SQLiteDBConfig:
+    # Check if the directory exists, and create it if it doesn't
+    sqlite_path = Path(settings.SQLITE_PATH)
+    if not sqlite_path.exists():
+        sqlite_path.mkdir(parents=True, exist_ok=True)
+
+    return SQLiteDBConfig(
+        filename=f"{UID.from_string(get_server_uid_env())}.sqlite",
+        path=settings.SQLITE_PATH,
+    )
+
+
+def postgresql_store_config() -> PostgresDBConfig:
+    return PostgresDBConfig(
+        host=settings.POSTGRESQL_HOST,
+        port=settings.POSTGRESQL_PORT,
+        user=settings.POSTGRESQL_USERNAME,
+        password=settings.POSTGRESQL_PASSWORD,
+        database=settings.POSTGRESQL_DBNAME,
+    )
+
+
+def seaweedfs_config() -> SeaweedFSConfig:
+    seaweed_client_config = SeaweedFSClientConfig(
+        host=settings.S3_ENDPOINT,
+        port=settings.S3_PORT,
+        access_key=settings.S3_ROOT_USER,
+        secret_key=settings.S3_ROOT_PWD,
+        region=settings.S3_REGION,
+        default_bucket_name=get_default_bucket_name(),
+        mount_port=settings.SEAWEED_MOUNT_PORT,
+    )
+
+    return SeaweedFSConfig(
+        client_config=seaweed_client_config,
+        min_blob_size=settings.MIN_SIZE_BLOB_STORAGE_MB,
+    )
+
+
+server_type = ServerType(get_server_type())
+server_name = get_server_name()
+
+server_side_type = get_server_side_type()
+enable_warnings = get_enable_warnings()
+
+worker_classes = {
+    ServerType.DATASITE: Datasite,
+    ServerType.GATEWAY: Gateway,
+    ServerType.ENCLAVE: Enclave,
+}
+
+worker_class = worker_classes[server_type]
+
+single_container_mode = settings.SINGLE_CONTAINER_MODE
+db_config = sql_store_config() if single_container_mode else postgresql_store_config()
+
+blob_storage_config = None if single_container_mode else seaweedfs_config()
+queue_config = queue_config()
+
+worker: Server = worker_class(
+    name=server_name,
+    server_side_type=server_side_type,
+    enable_warnings=enable_warnings,
+    blob_storage_config=blob_storage_config,
+    queue_config=queue_config,
+    migrate=False,
+    in_memory_workers=settings.INMEMORY_WORKERS,
+    smtp_username=settings.SMTP_USERNAME,
+    smtp_password=settings.SMTP_PASSWORD,
+    email_sender=settings.EMAIL_SENDER,
+    smtp_port=settings.SMTP_PORT,
+    smtp_host=settings.SMTP_HOST,
+    association_request_auto_approval=settings.ASSOCIATION_REQUEST_AUTO_APPROVAL,
+    background_tasks=True,
+    db_config=db_config,
+)
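The new `server.py` picks SQLite when `SINGLE_CONTAINER_MODE` is set and PostgreSQL otherwise. A rough sketch of that selection under stated assumptions: the dataclasses and the simplified `str_to_bool` below are placeholders, not the real `syft.store.db` config classes or the syft helper.

```python
# Sketch of the store-config selection above. SQLiteDBConfig / PostgresDBConfig
# are placeholder dataclasses standing in for the syft.store.db classes.
import os
from dataclasses import dataclass
from pathlib import Path


@dataclass
class SQLiteDBConfig:
    filename: str
    path: str


@dataclass
class PostgresDBConfig:
    host: str
    port: int
    user: str
    password: str
    database: str


def str_to_bool(value: str) -> bool:
    # simplified stand-in for syft's str_to_bool helper
    return value.lower() in ("true", "1", "yes")


if str_to_bool(os.getenv("SINGLE_CONTAINER_MODE", "False")):
    sqlite_path = Path(os.getenv("SQLITE_PATH", "/tmp/data/db"))
    sqlite_path.mkdir(parents=True, exist_ok=True)  # created on demand, as above
    db_config = SQLiteDBConfig(filename="server.sqlite", path=str(sqlite_path))
else:
    db_config = PostgresDBConfig(
        host=os.getenv("POSTGRESQL_HOST", "localhost"),
        port=int(os.getenv("POSTGRESQL_PORT", "5432")),
        user=os.getenv("POSTGRESQL_USERNAME", "syft_postgres"),
        password=os.getenv("POSTGRESQL_PASSWORD", "example"),
        database=os.getenv("POSTGRESQL_DBNAME", "syftdb_postgres"),
    )

print(db_config)
```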
diff --git a/packages/grid/backend/grid/images/worker_cpu.dockerfile b/packages/grid/backend/grid/images/worker_cpu.dockerfile
new file mode 100644
index 00000000000..2b35beba58a
--- /dev/null
+++ b/packages/grid/backend/grid/images/worker_cpu.dockerfile
@@ -0,0 +1,28 @@
+# Syft Worker
+# Build as-is to create a base worker image
+# Build with args to create a custom worker image
+
+# NOTE: This dockerfile will be built inside a syft-backend container in PROD
+# Hence COPY will not work the same way in DEV vs. PROD
+
+ARG SYFT_VERSION_TAG="0.9.6-beta.6"
+FROM openmined/syft-backend:${SYFT_VERSION_TAG}
+
+# should match base image python version
+ARG PYTHON_VERSION="3.12"
+ARG SYSTEM_PACKAGES=""
+ARG PIP_PACKAGES="pip --dry-run"
+ARG CUSTOM_CMD='echo "No custom commands passed"'
+
+# Worker specific environment variables go here
+ENV SYFT_WORKER="true" \
+    SYFT_VERSION_TAG=${SYFT_VERSION_TAG} \
+    UV_HTTP_TIMEOUT=600
+
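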
+# don't run `apk upgrade` here, as it upgrades packages in the base image
+# which may break syft or pull in breaking changes from wolfi-os
+RUN apk update && \
+    apk add --no-cache ${SYSTEM_PACKAGES} && \
+    # if uv is present then run uv pip install else simple pip install
+    if [ -x "$(command -v uv)" ]; then uv pip install --no-cache ${PIP_PACKAGES}; else pip install --user ${PIP_PACKAGES}; fi && \
+    bash -c "$CUSTOM_CMD"
diff --git a/packages/grid/backend/grid/logger/config.py b/packages/grid/backend/grid/logger/config.py
deleted file mode 100644
index 000a9c9c713..00000000000
--- a/packages/grid/backend/grid/logger/config.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""This file defines the configuration for `loguru` which is used as the python logging client.
-For more information refer to `loguru` documentation: https://loguru.readthedocs.io/en/stable/overview.html
-"""
-
-# stdlib
-from datetime import time
-from datetime import timedelta
-from enum import Enum
-from functools import lru_cache
-
-# third party
-from pydantic_settings import BaseSettings
-
-
-# LOGURU_LEVEL type for version>3.8
-class LogLevel(Enum):
-    """Types of logging levels."""
-
-    TRACE = "TRACE"
-    DEBUG = "DEBUG"
-    INFO = "INFO"
-    SUCCESS = "SUCCESS"
-    WARNING = "WARNING"
-    ERROR = "ERROR"
-    CRITICAL = "CRITICAL"
-
-
-class LogConfig(BaseSettings):
-    """Configuration for the logging client."""
-
-    # Logging format
-    LOGURU_FORMAT: str = (
-        "{time:YYYY-MM-DD HH:mm:ss} | "
-        "{level: <8} | "
-        "{name}:{function}:{line}: "
-        "{message}"
-    )
-
-    LOGURU_LEVEL: str = LogLevel.INFO.value
-    LOGURU_SINK: str | None = "/var/log/pygrid/grid.log"
-    LOGURU_COMPRESSION: str | None = None
-    LOGURU_ROTATION: str | int | time | timedelta | None = None
-    LOGURU_RETENTION: str | int | timedelta | None = None
-    LOGURU_COLORIZE: bool | None = True
-    LOGURU_SERIALIZE: bool | None = False
-    LOGURU_BACKTRACE: bool | None = True
-    LOGURU_DIAGNOSE: bool | None = False
-    LOGURU_ENQUEUE: bool | None = True
-    LOGURU_AUTOINIT: bool | None = False
-
-
-@lru_cache
-def get_log_config() -> LogConfig:
-    """Returns the configuration for the logging client.
-
-    Returns:
-        LogConfig: configuration for the logging client.
-    """
-    return LogConfig()
diff --git a/packages/grid/backend/grid/logger/handler.py b/packages/grid/backend/grid/logger/handler.py
deleted file mode 100644
index 7f198bbcece..00000000000
--- a/packages/grid/backend/grid/logger/handler.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# future
-from __future__ import annotations
-
-# stdlib
-from functools import lru_cache
-import logging
-from pprint import pformat
-import sys
-
-# third party
-import loguru
-from loguru import logger
-
-# relative
-from .config import get_log_config
-
-
-class LogHandler:
-    def __init__(self) -> None:
-        self.config = get_log_config()
-
-    def format_record(self, record: loguru.Record) -> str:
-        """
-        Custom loguru log message format for handling JSON (in record['extra'])
-        """
-        format_string: str = self.config.LOGURU_FORMAT
-
-        if record["extra"] is not None:
-            for key in record["extra"].keys():
-                record["extra"][key] = pformat(
-                    record["extra"][key], indent=2, compact=False, width=88
-                )
-                format_string += "\n{extra[" + key + "]}"
-
-        format_string += "{exception}\n"
-
-        return format_string
-
-    def init_logger(self) -> None:
-        """
-        Redirects all registered std logging handlers to a loguru sink.
-        Call init_logger() on fastapi startup.
-        """
-        intercept_handler = InterceptHandler()
-
-        # Generalizes log level for all root loggers, including third party
-        logging.root.setLevel(self.config.LOGURU_LEVEL)
-        logging.root.handlers = [intercept_handler]
-
-        for log in logging.root.manager.loggerDict.keys():
-            log_instance = logging.getLogger(log)
-            log_instance.handlers = []
-            log_instance.propagate = True
-
-        logger.configure(
-            handlers=[
-                {
-                    "sink": sys.stdout,
-                    "level": self.config.LOGURU_LEVEL,
-                    "serialize": self.config.LOGURU_SERIALIZE,
-                    "format": self.format_record,
-                }
-            ],
-        )
-
-        try:
-            if (
-                self.config.LOGURU_SINK is not ("sys.stdout" or "sys.stderr")
-                and self.config.LOGURU_SINK is not None
-            ):
-                logger.add(
-                    self.config.LOGURU_SINK,
-                    retention=self.config.LOGURU_RETENTION,
-                    rotation=self.config.LOGURU_ROTATION,
-                    compression=self.config.LOGURU_COMPRESSION,
-                )
-                logger.debug(f"Logging to {self.config.LOGURU_SINK}")
-
-        except Exception as err:
-            logger.debug(
-                f"Failed creating a new sink. Check your log config. error: {err}"
-            )
-
-
-class InterceptHandler(logging.Handler):
-    """
-    Check https://loguru.readthedocs.io/en/stable/overview.html#entirely-compatible-with-standard-logging
-    """
-
-    def emit(self, record: logging.LogRecord) -> None:
-        try:
-            level = logger.level(record.levelname).name
-        except ValueError:
-            level = record.levelno
-
-        frame, depth = logging.currentframe(), 2
-        while frame.f_code.co_filename == logging.__file__:
-            frame = frame.f_back  # type: ignore
-            depth += 1
-
-        logger.opt(depth=depth, exception=record.exc_info).log(
-            level, record.getMessage()
-        )
-
-
-@lru_cache
-def get_log_handler() -> LogHandler:
-    return LogHandler()
diff --git a/packages/grid/backend/grid/logging.yaml b/packages/grid/backend/grid/logging.yaml
new file mode 100644
index 00000000000..9d5d3954f55
--- /dev/null
+++ b/packages/grid/backend/grid/logging.yaml
@@ -0,0 +1,55 @@
+version: 1
+disable_existing_loggers: True
+
+formatters:
+  default:
+    format: "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
+  uvicorn.default:
+    "()": uvicorn.logging.DefaultFormatter
+    format: "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
+  uvicorn.access:
+    "()": "uvicorn.logging.AccessFormatter"
+    format: "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
+
+handlers:
+  default:
+    formatter: default
+    class: logging.StreamHandler
+    stream: ext://sys.stdout
+  uvicorn.default:
+    formatter: uvicorn.default
+    class: logging.StreamHandler
+    stream: ext://sys.stdout
+  uvicorn.access:
+    formatter: uvicorn.access
+    class: logging.StreamHandler
+    stream: ext://sys.stdout
+
+loggers:
+  # uvicorn loggers
+  uvicorn.error:
+    level: INFO
+    handlers:
+      - uvicorn.default
+    propagate: false
+  uvicorn.access:
+    level: INFO
+    handlers:
+      - uvicorn.access
+    propagate: false
+  # syft & grid loggers
+  syft:
+    level: INFO
+    handlers:
+      - default
+    propagate: false
+  grid:
+    level: INFO
+    handlers:
+      - default
+    propagate: false
+  # root logger
+  # do not set a level here, otherwise logging from third-party (pip-installed) packages is affected as well
+  "":
+    handlers:
+      - default
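This YAML is consumed by uvicorn via `--log-config` (see the `start.sh` change below). A trimmed `logging.config.dictConfig` equivalent of the `default` handler and the `syft`/`grid` loggers, assuming only the standard library (the uvicorn-specific formatters are omitted):

```python
# Trimmed dictConfig equivalent of the "default" handler and the syft/grid
# loggers from the YAML above.
import logging
import logging.config

LOGGING = {
    "version": 1,
    "disable_existing_loggers": True,
    "formatters": {
        "default": {"format": "%(asctime)s - %(levelname)s - %(name)s - %(message)s"},
    },
    "handlers": {
        "default": {
            "formatter": "default",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stdout",
        },
    },
    "loggers": {
        "syft": {"level": "INFO", "handlers": ["default"], "propagate": False},
        "grid": {"level": "INFO", "handlers": ["default"], "propagate": False},
        # root logger: handlers only, no level, so third-party packages keep theirs
        "": {"handlers": ["default"]},
    },
}

logging.config.dictConfig(LOGGING)
logging.getLogger("grid.main").info("logging configured")
```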
diff --git a/packages/grid/backend/grid/main.py b/packages/grid/backend/grid/main.py
index 2974ea29b61..659e27bd9ed 100644
--- a/packages/grid/backend/grid/main.py
+++ b/packages/grid/backend/grid/main.py
@@ -1,53 +1,100 @@
 # stdlib
+from contextlib import asynccontextmanager
+import logging
+from typing import Any
 
 # third party
 from fastapi import FastAPI
 from fastapi.responses import JSONResponse
+from starlette.middleware import Middleware
 from starlette.middleware.cors import CORSMiddleware
 
 # syft absolute
-from syft.protocol.data_protocol import stage_protocol_changes
+from syft.util.telemetry import instrument_fastapi
 
-# grid absolute
+# server absolute
 from grid.api.router import api_router
 from grid.core.config import settings
-from grid.core.node import worker
-from grid.logger.handler import get_log_handler
+from grid.core.server import worker
 
-app = FastAPI(
-    title=settings.PROJECT_NAME,
-    openapi_url=f"{settings.API_V2_STR}/openapi.json",
-)
+# logger => grid.main
+logger = logging.getLogger(__name__)
 
-app.add_event_handler("startup", get_log_handler().init_logger)
 
-# Set all CORS enabled origins
-if settings.BACKEND_CORS_ORIGINS:
-    app.add_middleware(
-        CORSMiddleware,
-        allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
-        allow_credentials=True,
-        allow_methods=["*"],
-        allow_headers=["*"],
-    )
+class FastAPILogFilter(logging.Filter):
+    HEALTHCHECK_ENDPOINT = f"{settings.API_V2_STR}/?probe="
+
+    def filter(self, record: logging.LogRecord) -> bool:
+        return record.getMessage().find(self.HEALTHCHECK_ENDPOINT) == -1
 
-app.include_router(api_router, prefix=settings.API_V2_STR)
-print("Included routes, app should now be reachable")
 
+def on_app_startup(app: FastAPI) -> None:
+    if settings.DEV_MODE:
+        # syft absolute
+        from syft.protocol.data_protocol import stage_protocol_changes
 
-if settings.DEV_MODE:
-    print("Staging protocol changes...")
-    status = stage_protocol_changes()
-    print(status)
+        logger.info("Staging protocol changes...")
+        status = stage_protocol_changes()
+        logger.info(f"Staging protocol result: {status}")
 
 
-@app.on_event("shutdown")
-def shutdown() -> None:
+def on_app_shutdown(app: FastAPI) -> None:
     worker.stop()
-    print("Worker Stop !!!")
+    logger.info("Worker Stopped")
+
+
+def get_middlewares() -> list[Middleware]:
+    middlewares = []
+
+    # Set all CORS enabled origins
+    if settings.BACKEND_CORS_ORIGINS:
+        middlewares.append(
+            Middleware(
+                CORSMiddleware,
+                allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
+                allow_credentials=True,
+                allow_methods=["*"],
+                allow_headers=["*"],
+            )
+        )
+
+    return middlewares
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI) -> Any:
+    try:
+        on_app_startup(app)
+        yield
+    finally:
+        on_app_shutdown(app)
+
+
+def create_app() -> FastAPI:
+    app = FastAPI(
+        title=settings.PROJECT_NAME,
+        openapi_url=f"{settings.API_V2_STR}/openapi.json",
+        lifespan=lifespan,
+        middleware=get_middlewares(),
+        docs_url=None,
+        redoc_url=None,
+    )
+
+    # instrument app
+    instrument_fastapi(app)
+
+    # patch logger to ignore healthcheck logs
+    logging.getLogger("uvicorn.access").addFilter(FastAPILogFilter())
+
+    # add Syft API routes
+    app.include_router(api_router, prefix=settings.API_V2_STR)
+
+    return app
+
+
+app = create_app()
 
 
-# needed for Google Kubernetes Engine LoadBalancer Healthcheck
 @app.get(
     "/",
     name="healthcheck",
@@ -55,10 +102,4 @@ def shutdown() -> None:
     response_class=JSONResponse,
 )
 def healthcheck() -> dict[str, str]:
-    """
-    Currently, all service backends must satisfy either of the following requirements to
-    pass the HTTP health checks sent to it from the GCE loadbalancer: 1. Respond with a
-    200 on '/'. The content does not matter. 2. Expose an arbitrary url as a readiness
-    probe on the pods backing the Service.
-    """
     return {"status": "ok"}
diff --git a/packages/grid/backend/grid/start.sh b/packages/grid/backend/grid/start.sh
index 2880800eee4..2831efa5b6f 100755
--- a/packages/grid/backend/grid/start.sh
+++ b/packages/grid/backend/grid/start.sh
@@ -1,45 +1,61 @@
 #! /usr/bin/env bash
 set -e
 
-echo "Running start.sh with RELEASE=${RELEASE} and $(id)"
-export GEVENT_MONKEYPATCH="False"
+echo "Running Syft with RELEASE=${RELEASE}"
 
 APP_MODULE=grid.main:app
 LOG_LEVEL=${LOG_LEVEL:-info}
 HOST=${HOST:-0.0.0.0}
 PORT=${PORT:-80}
-NODE_TYPE=${NODE_TYPE:-domain}
 APPDIR=${APPDIR:-$HOME/app}
-
 RELOAD=""
-DEBUG_CMD=""
+ROOT_PROC=""
 
-# For debugging permissions
-ls -lisa $HOME/data
-ls -lisa $APPDIR/syft/
-ls -lisa $APPDIR/grid/
+export CREDENTIALS_PATH=${CREDENTIALS_PATH:-$HOME/data/creds/credentials.json}
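+# derive the server identity (private key + UID) via bootstrap.py; CREDENTIALS_PATH controls where the credentials file is stored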
+export SERVER_PRIVATE_KEY=$(python $APPDIR/grid/bootstrap.py --private_key)
+export SERVER_UID=$(python $APPDIR/grid/bootstrap.py --uid)
 
 if [[ ${DEV_MODE} == "True" ]];
 then
-    echo "DEV_MODE Enabled"
+    echo "Hot-reload Enabled"
     RELOAD="--reload"
-    pip install --user -e "$APPDIR/syft[telemetry,data_science]"
 fi
 
 # only set by kubernetes to avoid conflict with docker tests
 if [[ ${DEBUGGER_ENABLED} == "True" ]];
 then
-    pip install --user debugpy
-    DEBUG_CMD="python -m debugpy --listen 0.0.0.0:5678 -m"
+    echo "Debugger Enabled"
+    uv pip install debugpy
+    ROOT_PROC="python -m debugpy --listen 0.0.0.0:5678 -m"
 fi
 
-set +e
-export NODE_PRIVATE_KEY=$(python $APPDIR/grid/bootstrap.py --private_key)
-export NODE_UID=$(python $APPDIR/grid/bootstrap.py --uid)
-export NODE_TYPE=$NODE_TYPE
-set -e
+if [[ ${TRACING} == "true" ]];
+then
+    # TODO: Polish these values up
+    DEPLOYMENT_ENV="$SERVER_TYPE-$SERVER_SIDE_TYPE"
+    RESOURCE_ATTRS=(
+        "deployment.environment=$DEPLOYMENT_ENV"
+        "service.namespace=$DEPLOYMENT_ENV"
+        "service.instance.id=$SERVER_UID"
+        "k8s.pod.name=${K8S_POD_NAME:-"none"}"
+        "k8s.namespace.name=${K8S_NAMESPACE:"none"}"
+        "syft.server.uid=$SERVER_UID"
+        "syft.server.type=$SERVER_TYPE"
+        "syft.server.side.type=$SERVER_SIDE_TYPE"
+    )
+
+    # environ is always prefixed with the server type
+    export OTEL_SERVICE_NAME="${DEPLOYMENT_ENV}-${OTEL_SERVICE_NAME:-"backend"}"
+    export OTEL_RESOURCE_ATTRIBUTES=$(IFS=, ; echo "${RESOURCE_ATTRS[*]}")
+
+    echo "OpenTelemetry Enabled"
+    env | grep OTEL_
+else
+    echo "OpenTelemetry Disabled"
+fi
 
-echo "NODE_UID=$NODE_UID"
-echo "NODE_TYPE=$NODE_TYPE"
+echo "SERVER_UID=$SERVER_UID"
+echo "SERVER_TYPE=$SERVER_TYPE"
+echo "SERVER_SIDE_TYPE=$SERVER_SIDE_TYPE"
 
-exec $DEBUG_CMD uvicorn $RELOAD --host $HOST --port $PORT --log-level $LOG_LEVEL "$APP_MODULE"
+exec $ROOT_PROC uvicorn $RELOAD --host $HOST --port $PORT --log-config=$APPDIR/grid/logging.yaml "$APP_MODULE"
diff --git a/packages/grid/backend/install_oblivious.sh b/packages/grid/backend/install_oblivious.sh
deleted file mode 100755
index 486b812ac9e..00000000000
--- a/packages/grid/backend/install_oblivious.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#! /usr/bin/env bash
-
-echo "Running install_oblivious.sh with RELEASE=${RELEASE}"
-
-if [[ ("${OBLV_ENABLED}" == "true")  &&  ("${SERVICE_NAME}" == "backend"  ||  "${SERVICE_NAME}" == "celeryworker" ) ]]; then
-    echo "Allowed to install Oblv CLI"
-    # Oblivious Proxy Client Installation
-    mkdir -p oblv-ccli-0.4.0-x86_64-unknown-linux-musl
-    tar -xf /app/wheels/oblv-ccli-0.4.0-x86_64-unknown-linux-musl.tar.gz -C oblv-ccli-0.4.0-x86_64-unknown-linux-musl
-    chmod +x $(pwd)/oblv-ccli-0.4.0-x86_64-unknown-linux-musl/oblv
-    ln -sf $(pwd)/oblv-ccli-0.4.0-x86_64-unknown-linux-musl/oblv /usr/local/bin/oblv  #-f is for force
-    echo "Installed Oblivious CLI: $(/usr/local/bin/oblv --version)"
-else
-    echo "Oblivious CLI not installed OBLV_ENABLED:${OBLV_ENABLED} , SERVICE_NAME:${SERVICE_NAME} "
-fi
diff --git a/packages/grid/backend/wheels/oblv-ccli-0.4.0-x86_64-unknown-linux-musl.tar.gz b/packages/grid/backend/wheels/oblv-ccli-0.4.0-x86_64-unknown-linux-musl.tar.gz
deleted file mode 100644
index 622ee32898c..00000000000
Binary files a/packages/grid/backend/wheels/oblv-ccli-0.4.0-x86_64-unknown-linux-musl.tar.gz and /dev/null differ
diff --git a/packages/grid/backend/worker_cpu.dockerfile b/packages/grid/backend/worker_cpu.dockerfile
deleted file mode 100644
index 31383a7a016..00000000000
--- a/packages/grid/backend/worker_cpu.dockerfile
+++ /dev/null
@@ -1,30 +0,0 @@
-# Syft Worker
-# Build as-is to create a base worker image
-# Build with args to create a custom worker image
-
-# NOTE: This dockerfile will be built inside a grid-backend container in PROD
-# Hence COPY will not work the same way in DEV vs. PROD
-
-# FIXME: Due to dependency on grid-backend base, python can only be changed from 3.11 to 3.11-dev
-# Later we'd want to uninstall old python, and then install a new python runtime...
-# ... but pre-built syft deps may break!
-
-ARG SYFT_VERSION_TAG="0.8.6-beta.1"
-FROM openmined/grid-backend:${SYFT_VERSION_TAG}
-
-ARG PYTHON_VERSION="3.12"
-ARG SYSTEM_PACKAGES=""
-ARG PIP_PACKAGES="pip --dry-run"
-ARG CUSTOM_CMD='echo "No custom commands passed"'
-
-# Worker specific environment variables go here
-ENV SYFT_WORKER="true"
-ENV SYFT_VERSION_TAG=${SYFT_VERSION_TAG}
-
-# Commenting this until we support built using python docker sdk or find any other alternative.
-# RUN --mount=type=cache,target=/var/cache/apk,sharing=locked \
-#     --mount=type=cache,target=$HOME/.cache/pip,sharing=locked \
-RUN apk update && \
-    apk add ${SYSTEM_PACKAGES} && \
-    pip install --user ${PIP_PACKAGES} && \
-    bash -c "$CUSTOM_CMD"
diff --git a/packages/grid/default.env b/packages/grid/default.env
index f5b42ff6323..49697538cd8 100644
--- a/packages/grid/default.env
+++ b/packages/grid/default.env
@@ -1,8 +1,8 @@
 #!/bin/bash
-DOMAIN=localhost
-NODE_NAME=default_node_name
-NODE_TYPE=domain
-FRONTEND_TARGET=grid-ui-development
+DATASITE=localhost
+SERVER_NAME=default_server_name
+SERVER_TYPE=datasite
+FRONTEND_TARGET=syft-ui-development
 PORT=80
 HTTP_PORT=80
 HTTPS_PORT=443
@@ -15,28 +15,26 @@ IGNORE_TLS_ERRORS=False
 TRAEFIK_TLS_CONF=./traefik/dynamic-configurations
 TRAEFIK_TLS_CERTS=./traefik/certs
 TRAEFIK_PUBLIC_NETWORK=traefik-public
-TRAEFIK_TAG=grid.openmined.org
+TRAEFIK_TAG=syft.openmined.org
 TRAEFIK_PUBLIC_TAG=traefik-public
 
-STACK_NAME=grid-openmined-org
-DOCKER_IMAGE_BACKEND=openmined/grid-backend
-DOCKER_IMAGE_FRONTEND=openmined/grid-frontend
-DOCKER_IMAGE_SVELTE=openmined/grid-svelte
+STACK_NAME=syft-openmined-org
+DOCKER_IMAGE_BACKEND=openmined/syft-backend
+DOCKER_IMAGE_FRONTEND=openmined/syft-frontend
+DOCKER_IMAGE_RATHOLE=openmined/syft-rathole
 DOCKER_IMAGE_TRAEFIK=traefik
-TRAEFIK_VERSION=v2.10
+TRAEFIK_VERSION=v2.11.0
 REDIS_VERSION=6.2
 RABBITMQ_VERSION=3
-SEAWEEDFS_VERSION=3.62
-DOCKER_IMAGE_SEAWEEDFS=openmined/grid-seaweedfs
+DOCKER_IMAGE_SEAWEEDFS=openmined/syft-seaweedfs
 VERSION=latest
 VERSION_HASH=unknown
 STACK_API_KEY=""
 
 # Backend
-BACKEND_CORS_ORIGINS='["http://localhost","http://localhost:4200","http://localhost:3000","http://localhost:8080","https://localhost","https://localhost:4200","https://localhost:3000","https://localhost:8080","http://dev.grid.openmined.org","https://stag.grid.openmined.org","https://grid.openmined.org"]'
-BACKEND_STORAGE_PATH=credentials-data
+BACKEND_CORS_ORIGINS='["http://localhost","http://localhost:4200","http://localhost:3000","http://localhost:8080","https://localhost","https://localhost:4200","https://localhost:3000","https://localhost:8080","http://dev.syft.openmined.org","https://stag.syft.openmined.org","https://syft.openmined.org"]'
 SEAWEED_MOUNT_PORT=4001
-PROJECT_NAME=grid
+PROJECT_NAME=syft
 SECRET_KEY=changethis
 DEFAULT_ROOT_EMAIL=info@openmined.org
 DEFAULT_ROOT_PASSWORD=changethis
@@ -46,9 +44,9 @@ SMTP_HOST=
 SMTP_USERNAME=
 SMTP_PASSWORD=
 EMAIL_SENDER=
-SERVER_HOST="https://${DOMAIN}"
+SERVER_HOST="https://${DATASITE}"
 NETWORK_CHECK_INTERVAL=60
-DOMAIN_CHECK_INTERVAL=60
+DATASITE_CHECK_INTERVAL=60
 ASSOCIATION_TIMEOUT=10
 USERS_OPEN_REGISTRATION=False
 DEV_MODE=False
@@ -56,6 +54,8 @@ QUEUE_PORT=5556
 CREATE_PRODUCER=False
 N_CONSUMERS=1
 INMEMORY_WORKERS=True
+ASSOCIATION_REQUEST_AUTO_APPROVAL=False
+MIN_SIZE_BLOB_STORAGE_MB=16
 
 # New Service Flag
 USE_NEW_SERVICE=False
@@ -65,26 +65,18 @@ BACKEND_API_BASE_URL="/api/v2"
 
 # SeaweedFS
 S3_ENDPOINT="seaweedfs"
-S3_PORT=8333
 S3_ROOT_USER="admin"
 S3_ROOT_PWD="admin" # needs randomizing
 S3_REGION="us-east-1"
  #not-using
 S3_PRESIGNED_TIMEOUT_SECS=1800
-S3_VOLUME_SIZE_MB=1024
 
+# Kaniko
+KANIKO_VERSION="v1.23.2"
 
 # Jax
 JAX_ENABLE_X64=True
 
-# Mongo
-MONGO_IMAGE=mongo
-MONGO_VERSION="7.0.4"
-MONGO_HOST=mongo
-MONGO_PORT=27017
-MONGO_USERNAME=root
-MONGO_PASSWORD=example
-
 # Redis
 REDIS_PORT=6379
 REDIS_STORE_DB_ID=0
@@ -95,26 +87,28 @@ REDIS_HOST=redis
 CONTAINER_HOST=docker
 RELATIVE_PATH=""
 
-# Jaeger
-TRACE=False
-JAEGER_HOST=localhost
-JAEGER_PORT=14268
-
 # Syft
 SYFT_TUTORIAL_MODE=False
 ENABLE_WARNINGS=True
-NODE_SIDE_TYPE=high
+SERVER_SIDE_TYPE=high
 
 # Worker
 USE_BLOB_STORAGE=False
 
-#Oblivious
-OBLV_ENABLED=false
-OBLV_KEY_PATH="~/.oblv"
-OBLV_LOCALHOST_PORT=3030
-
 # Registation
 ENABLE_SIGNUP=False
 
-# Veilid
-DOCKER_IMAGE_VEILID=openmined/grid-veilid
+# Enclave Attestation
+DOCKER_IMAGE_ENCLAVE_ATTESTATION=openmined/syft-enclave-attestation
+
+# Rathole Config
+RATHOLE_PORT=2333
+
+# PostgreSQL Config
+# POSTGRESQL_IMAGE=postgres
+# export POSTGRESQL_VERSION="15"
+POSTGRESQL_DBNAME=syftdb_postgres
+POSTGRESQL_HOST=postgres
+POSTGRESQL_PORT=5432
+POSTGRESQL_USERNAME=syft_postgres
+POSTGRESQL_PASSWORD=example
diff --git a/packages/grid/devspace.yaml b/packages/grid/devspace.yaml
index db991251c05..54a390b56e3 100644
--- a/packages/grid/devspace.yaml
+++ b/packages/grid/devspace.yaml
@@ -22,46 +22,47 @@ pipelines:
       create_deployments --all
 
 vars:
-  DEVSPACE_ENV_FILE: "default.env"
+  DOCKER_IMAGE_BACKEND: openmined/syft-backend
+  DOCKER_IMAGE_FRONTEND: openmined/syft-frontend
+  DOCKER_IMAGE_SEAWEEDFS: openmined/syft-seaweedfs
+  DOCKER_IMAGE_RATHOLE: openmined/syft-rathole
+  DOCKER_IMAGE_ENCLAVE_ATTESTATION: openmined/syft-enclave-attestation
   CONTAINER_REGISTRY: "docker.io"
-  NODE_NAME: "mynode"
-  VERSION: "0.8.6-beta.1"
+  VERSION: "0.9.6-beta.6"
+  PLATFORM: $(uname -m | grep -q 'arm64' && echo "arm64" || echo "amd64")
 
 # This is a list of `images` that DevSpace can build for this project
 # We recommend to skip image building during development (devspace dev) as much as possible
 images:
   backend:
     image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_BACKEND}"
-    buildKit: {}
+    buildKit:
+      args: ["--platform", "linux/${PLATFORM}"]
     dockerfile: ./backend/backend.dockerfile
+    target: "backend"
     context: ../
     tags:
+      - dev-latest
       - dev-${DEVSPACE_TIMESTAMP}
   frontend:
     image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_FRONTEND}"
     buildKit:
-      args: ["--target", "grid-ui-production"]
+      args: ["--platform", "linux/${PLATFORM}"]
     dockerfile: ./frontend/frontend.dockerfile
-    target: "grid-ui-production"
+    target: "syft-ui-production"
     context: ./frontend
     tags:
       - dev-${DEVSPACE_TIMESTAMP}
+      - dev-latest
   seaweedfs:
     image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_SEAWEEDFS}"
-    buildKit: {}
-    buildArgs:
-      SEAWEEDFS_VERSION: ${SEAWEEDFS_VERSION}
+    buildKit:
+      args: ["--platform", "linux/${PLATFORM}"]
     dockerfile: ./seaweedfs/seaweedfs.dockerfile
     context: ./seaweedfs
     tags:
       - dev-${DEVSPACE_TIMESTAMP}
-  veilid:
-    image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_VEILID}"
-    buildKit: {}
-    dockerfile: ./veilid/veilid.dockerfile
-    context: ./veilid
-    tags:
-      - dev-${DEVSPACE_TIMESTAMP}
+      - dev-latest
 
 # This is a list of `deployments` that DevSpace can create for this project
 deployments:
@@ -70,23 +71,24 @@ deployments:
       releaseName: syft-dev
       chart:
         name: ./helm/syft
+      # values that need to be templated go here
       values:
         global:
           registry: ${CONTAINER_REGISTRY}
           version: dev-${DEVSPACE_TIMESTAMP}
-        node:
-          name: ${NODE_NAME}
-      # anything that does not need devspace $env vars should go in values.dev.yaml
+          workerVersion: dev-latest
+      # anything that does not need templating should go in helm/examples/dev/base.yaml
+      # or profile specific values files
       valuesFiles:
-        - ./helm/values.dev.yaml
+        - ./helm/examples/dev/base.yaml
 
 dev:
-  mongo:
+  postgres:
     labelSelector:
       app.kubernetes.io/name: syft
-      app.kubernetes.io/component: mongo
+      app.kubernetes.io/component: postgres
     ports:
-      - port: "27017"
+      - port: "5432"
   seaweedfs:
     labelSelector:
       app.kubernetes.io/name: syft
@@ -96,41 +98,196 @@ dev:
       - port: "8888" # filer
       - port: "8333" # S3
       - port: "4001" # mount azure
+      - port: "5432" # mount postgres
   backend:
     labelSelector:
       app.kubernetes.io/name: syft
       app.kubernetes.io/component: backend
-    env:
-      - name: RELEASE
-        value: development
-      - name: DEV_MODE
-        value: "True"
-      - name: DEBUGGER_ENABLED
-        value: "True"
     ports:
       - port: "5678" # debugpy
-    sync:
-      - path: ./backend/grid:/root/app/grid
-      - path: ../syft:/root/app/syft
-    ssh: {}
-  veilid:
-    labelSelector:
-      app.kubernetes.io/name: syft
-      app.kubernetes.io/component: veilid
-    env:
-      - name: DEV_MODE
-        value: "True"
-    logs: {}
-    sync:
-      - path: ./veilid/server:/app/server
+    containers:
+      backend-container:
+        env:
+          - name: RELEASE
+            value: development
+          - name: DEV_MODE
+            value: "True"
+          - name: DEBUGGER_ENABLED
+            value: "True"
+        sync:
+          - path: ./backend/grid:/root/app/grid
+          - path: ../syft:/root/app/syft
+        ssh:
+          localPort: 3480
 
 profiles:
+  - name: datasite-low
+    description: "Deploy a low-side datasite"
+    patches:
+      - op: add
+        path: deployments.syft.helm.values.server
+        value:
+          side: low
+
+  - name: tracing
+    description: "Enable Tracing"
+    patches:
+      - op: add
+        path: deployments.syft.helm.values.server
+        value:
+          tracing:
+            enabled: true
+            otlpEndpoint: "http://host.k3d.internal:4317"
+            otelProtocol: "grpc"
+
+  - name: bigquery-scenario-tests
+    description: "Deploy a datasite for bigquery scenario testing"
+    patches:
+      - op: add
+        path: deployments.syft.helm.valuesFiles
+        value: ./helm/examples/dev/bigquery.scenario.yaml
+
+  - name: migrated-datasite
+    description: "Deploy a migrated datasite"
+    patches:
+      - op: add
+        path: deployments.syft.helm.valuesFiles
+        value: ./helm/examples/dev/migration.yaml
+
+  - name: datasite-tunnel
+    description: "Deploy a datasite with tunneling enabled"
+    patches:
+      # enable rathole image
+      - op: add
+        path: images
+        value:
+          rathole:
+            image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_RATHOLE}"
+            buildKit:
+              args: ["--platform", "linux/${PLATFORM}"]
+            dockerfile: ./rathole/rathole.dockerfile
+            context: ./rathole
+            tags:
+              - dev-${DEVSPACE_TIMESTAMP}
+              - dev-latest
+      # use rathole client-specific chart values
+      - op: add
+        path: deployments.syft.helm.valuesFiles
+        value: ./helm/examples/dev/datasite.tunnel.yaml
+
   - name: gateway
+    description: "Deploy a Gateway Server with tunnel enabled"
     patches:
-      - op: replace
-        path: deployments.syft.helm.values.node.type
-        value: "gateway"
+      # enable rathole image
+      - op: add
+        path: images
+        value:
+          rathole:
+            image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_RATHOLE}"
+            buildKit:
+              args: ["--platform", "linux/${PLATFORM}"]
+            dockerfile: ./rathole/rathole.dockerfile
+            context: ./rathole
+            tags:
+              - dev-${DEVSPACE_TIMESTAMP}
+              - dev-latest
+      # enable rathole `devspace dev` config
+      - op: add
+        path: dev
+        value:
+          rathole:
+            labelSelector:
+              app.kubernetes.io/name: syft
+              app.kubernetes.io/component: rathole
+            ports:
+              - port: "2333"
+      # use gateway-specific chart values
+      - op: add
+        path: deployments.syft.helm.valuesFiles
+        value: ./helm/examples/dev/gateway.yaml
+      # remove unused images
       - op: remove
         path: images.seaweedfs
       - op: remove
         path: dev.seaweedfs
+      # Port Re-Mapping
+      - op: replace
+        path: dev.postgres.ports[0].port
+        value: 5433:5432
+      - op: replace
+        path: dev.backend.ports[0].port
+        value: 5679:5678
+      - op: replace
+        path: dev.backend.containers.backend-container.ssh.localPort
+        value: 3481
+      - op: replace
+        path: dev.rathole.ports[0].port
+        value: 2334:2333
+
+  - name: gcp
+    description: "Deploy a high-side datasite on GCP"
+    patches:
+      - op: replace
+        path: deployments.syft.helm.valuesFiles
+        value:
+          - ./helm/examples/gcp/gcp.high.yaml
+
+  - name: gcp-low
+    description: "Deploy a low-side datasite on GCP"
+    patches:
+      - op: replace
+        path: deployments.syft.helm.valuesFiles
+        value:
+          - ./helm/examples/gcp/gcp.low.yaml
+
+  - name: azure
+    description: "Deploy a high-side datasite on AKS"
+    patches:
+      - op: replace
+        path: deployments.syft.helm.valuesFiles
+        value:
+          - ./helm/examples/azure/azure.high.yaml
+
+  - name: enclave
+    description: "Deploy an enclave server"
+    patches:
+      # enable image build for enclave-attestation
+      - op: add
+        path: images
+        value:
+          enclave-attestation:
+            image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_ENCLAVE_ATTESTATION}"
+            buildKit:
+              args: ["--platform", "linux/amd64"]
+            dockerfile: ./enclave/attestation/attestation.dockerfile
+            context: ./enclave/attestation
+            tags:
+              - dev-latest
+              - dev-${DEVSPACE_TIMESTAMP}
+      - op: add
+        path: dev.backend.containers
+        value:
+          enclave-attestation:
+            sync:
+              - path: ./enclave/attestation/server:/app/server
+      # use gateway-specific chart values
+      - op: add
+        path: deployments.syft.helm.valuesFiles
+        value: ./helm/examples/dev/enclave.yaml
+      # Port Re-Mapping
+      - op: replace
+        path: dev.postgres.ports[0].port
+        value: 5434:5432
+      - op: replace
+        path: dev.backend.ports[0].port
+        value: 5680:5678
+      - op: replace
+        path: dev.backend.containers.backend-container.ssh.localPort
+        value: 3482
+      - op: replace
+        path: dev.seaweedfs.ports
+        value:
+          - port: "9334:9333" # admin
+          - port: "8889:8888" # filer
+          - port: "8334:8333" # S3
+          - port: "4002:4001" # mount api
diff --git a/packages/grid/docker-compose.build.yml b/packages/grid/docker-compose.build.yml
deleted file mode 100644
index 5e87adfd1f4..00000000000
--- a/packages/grid/docker-compose.build.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-version: "3.8"
-services:
-  frontend:
-    build:
-      context: ${RELATIVE_PATH}./frontend
-      dockerfile: frontend.dockerfile
-      target: "${FRONTEND_TARGET:-grid-ui-development}"
-
-  backend:
-    build:
-      context: ${RELATIVE_PATH}../
-      dockerfile: ./grid/backend/backend.dockerfile
-      target: "backend"
-
-  seaweedfs:
-    build:
-      context: ${RELATIVE_PATH}./seaweedfs
-      dockerfile: seaweedfs.dockerfile
-      args:
-        - SEAWEEDFS_VERSION=${SEAWEEDFS_VERSION}
-
-  worker:
-    build:
-      context: ${RELATIVE_PATH}../
-      dockerfile: ./grid/backend/backend.dockerfile
-      target: "backend"
diff --git a/packages/grid/docker-compose.dev.yml b/packages/grid/docker-compose.dev.yml
deleted file mode 100644
index d2b1f142053..00000000000
--- a/packages/grid/docker-compose.dev.yml
+++ /dev/null
@@ -1,77 +0,0 @@
-version: "3.8"
-services:
-  proxy:
-    ports:
-      - "8080"
-    command:
-      - "--api" # admin panel
-      - "--api.insecure=true" # admin panel no password
-
-  frontend:
-    volumes:
-      - ${RELATIVE_PATH}./frontend/src:/app/src
-      - ${RELATIVE_PATH}./frontend/static:/app/static
-      - ${RELATIVE_PATH}./frontend/svelte.config.js:/app/svelte.config.js
-      - ${RELATIVE_PATH}./frontend/tsconfig.json:/app/tsconfig.json
-      - ${RELATIVE_PATH}./frontend/vite.config.ts:/app/vite.config.ts
-    environment:
-      - FRONTEND_TARGET=grid-ui-development
-
-  # redis:
-  #   ports:
-  #     - "6379"
-
-  # queue:
-  #   image: rabbitmq:3-management
-  #   ports:
-  #     - "15672" # admin web port
-  #     # - "5672" # AMQP port
-
-  mongo:
-    ports:
-      - "27017"
-
-  backend:
-    volumes:
-      - ${RELATIVE_PATH}./backend/grid:/root/app/grid
-      - ${RELATIVE_PATH}../syft:/root/app/syft
-      - ${RELATIVE_PATH}./data/package-cache:/root/.cache
-    environment:
-      - DEV_MODE=True
-    stdin_open: true
-    tty: true
-
-  worker:
-    volumes:
-      - ${RELATIVE_PATH}./backend/grid:/root/app/grid
-      - ${RELATIVE_PATH}../syft:/root/app/syft
-      - ${RELATIVE_PATH}./data/package-cache:/root/.cache
-    environment:
-      - DEV_MODE=True
-      - WATCHFILES_FORCE_POLLING=true
-    stdin_open: true
-    tty: true
-
-  # backend_stream:
-  #   volumes:
-  #     - ${RELATIVE_PATH}./backend/grid:/root/app/grid
-  #     - ${RELATIVE_PATH}../syft:/root/app/syft
-  #     - ${RELATIVE_PATH}./data/package-cache:/root/.cache
-  #   environment:
-  #     - DEV_MODE=True
-
-  # celeryworker:
-  #   volumes:
-  #     - ${RELATIVE_PATH}./backend/grid:/root/app/grid
-  #     - ${RELATIVE_PATH}../syft/:/root/app/syft
-  #     - ${RELATIVE_PATH}./data/package-cache:/root/.cache
-  #   environment:
-  #     - DEV_MODE=True
-
-  seaweedfs:
-    volumes:
-      - ./data/seaweedfs:/data
-    ports:
-      - "9333" # admin web port
-      - "8888" # filer web port
-      - "8333" # S3 API port
diff --git a/packages/grid/docker-compose.pull.yml b/packages/grid/docker-compose.pull.yml
deleted file mode 100644
index db2329b04df..00000000000
--- a/packages/grid/docker-compose.pull.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-version: "3.8"
-services:
-  # redis:
-  #   image: redis:${REDIS_VERSION?Variable not set}
-
-  # queue:
-  #   image: rabbitmq:${RABBITMQ_VERSION?Variable not Set}${RABBITMQ_MANAGEMENT:-}
-
-  seaweedfs:
-    image: "${DOCKER_IMAGE_SEAWEEDFS?Variable not set}:${VERSION-latest}"
-
-  # docker-host:
-  #   image: qoomon/docker-host
-
-  proxy:
-    image: ${DOCKER_IMAGE_TRAEFIK?Variable not set}:${TRAEFIK_VERSION?Variable not set}
-
-  mongo:
-    image: "${MONGO_IMAGE}:${MONGO_VERSION}"
-
-  jaeger:
-    image: jaegertracing/all-in-one:1.37
-
-  # Temporary fix until we refactor pull, build, launch UI step during hagrid launch
-  worker:
-    image: "${DOCKER_IMAGE_BACKEND?Variable not set}:${VERSION-latest}"
diff --git a/packages/grid/docker-compose.test.yml b/packages/grid/docker-compose.test.yml
deleted file mode 100644
index a9e323831bb..00000000000
--- a/packages/grid/docker-compose.test.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-version: "3.8"
-services:
-  proxy:
-    ports:
-      - "8080"
-
-  # redis:
-  #   ports:
-  #     - "6379"
-
-  # queue:
-  #   image: rabbitmq:3-management
-  #   ports:
-  #     - "5672"
-  #     - "15672"
-
-  seaweedfs:
-    ports:
-      - "9333" # admin
-      - "8888" # filer
-      - "8333" # S3
-
-  backend:
-    environment:
-      - TEST_MODE=1
-
-  worker:
-    environment:
-      - TEST_MODE=1
-
-  # backend_stream:
-  #   environment:
-  #     - TEST_MODE=1
-
-  # celeryworker:
-  #   environment:
-  #     - TEST_MODE=1
diff --git a/packages/grid/docker-compose.tls.yml b/packages/grid/docker-compose.tls.yml
deleted file mode 100644
index 6bfa5d7d1d7..00000000000
--- a/packages/grid/docker-compose.tls.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-version: "3.8"
-services:
-  proxy:
-    ports:
-      - "${HTTPS_PORT}:${HTTPS_PORT}"
-    environment:
-      - TRAEFIK_TLS_CONF=${TRAEFIK_TLS_CONF}
-      - TRAEFIK_TLS_CERTS=${TRAEFIK_TLS_CERTS}
-    volumes:
-      - "${TRAEFIK_TLS_CONF}:/etc/traefik/conf/certs.yaml"
-      - "${TRAEFIK_TLS_CERTS}:/etc/traefik/certs"
-      - "./traefik/docker/traefik-tls.template.yml:/etc/traefik/traefik-tls.template.yml"
-      - "./traefik/docker/dynamic-tls.yml:/etc/traefik/conf/dynamic.yml"
-    command: /bin/ash -c "apk add gettext && envsubst < /etc/traefik/traefik-tls.template.yml > /etc/traefik/traefik-tls.yml && traefik --configFile=/etc/traefik/traefik-tls.yml"
diff --git a/packages/grid/docker-compose.yml b/packages/grid/docker-compose.yml
deleted file mode 100644
index 4108d23f634..00000000000
--- a/packages/grid/docker-compose.yml
+++ /dev/null
@@ -1,320 +0,0 @@
-version: "3.8"
-services:
-  # docker-host:
-  #   image: qoomon/docker-host
-  #   cap_add:
-  #     - net_admin
-  #     - net_raw
-
-  proxy:
-    restart: always
-    hostname: ${NODE_NAME?Variable not set}
-    image: ${DOCKER_IMAGE_TRAEFIK?Variable not set}:${TRAEFIK_VERSION?Variable not set}
-    profiles:
-      - proxy
-    networks:
-      - "${TRAEFIK_PUBLIC_NETWORK?Variable not set}"
-      - default
-    volumes:
-      - "./traefik/docker/traefik.yml:/etc/traefik/traefik.yml"
-      - "./traefik/docker/dynamic.yml:/etc/traefik/conf/dynamic.yml"
-    environment:
-      - SERVICE_NAME=proxy
-      - RELEASE=${RELEASE:-production}
-      - HOSTNAME=${NODE_NAME?Variable not set}
-      - HTTP_PORT=${HTTP_PORT}
-      - HTTPS_PORT=${HTTPS_PORT}
-    ports:
-      - "${HTTP_PORT}:81"
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-    labels:
-      - "orgs.openmined.syft=this is a syft proxy container"
-
-    # depends_on:
-    #   - "docker-host"
-
-  frontend:
-    restart: always
-    image: "${DOCKER_IMAGE_FRONTEND?Variable not set}:${VERSION-latest}"
-    profiles:
-      - frontend
-    depends_on:
-      - proxy
-    environment:
-      - SERVICE_NAME=frontend
-      - RELEASE=${RELEASE:-production}
-      - NODE_TYPE=${NODE_TYPE?Variable not set}
-      - FRONTEND_TARGET=${FRONTEND_TARGET}
-      - VERSION=${VERSION}
-      - VERSION_HASH=${VERSION_HASH}
-      - PORT=80
-      - HTTP_PORT=${HTTP_PORT}
-      - HTTPS_PORT=${HTTPS_PORT}
-      - BACKEND_API_BASE_URL=${BACKEND_API_BASE_URL}
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-    labels:
-      - "orgs.openmined.syft=this is a syft frontend container"
-
-  # redis:
-  #   restart: always
-  #   image: redis:${REDIS_VERSION?Variable not set}
-  #   volumes:
-  #     - app-redis-data:/data
-  #     - ./redis/redis.conf:/usr/local/etc/redis/redis.conf
-  #   environment:
-  #     - SERVICE_NAME=redis
-  #     - RELEASE=${RELEASE:-production}
-  #   env_file:
-  #     - .env
-
-  # queue:
-  #   restart: always
-  #   image: rabbitmq:3
-  #   environment:
-  #     - SERVICE_NAME=queue
-  #     - RELEASE=${RELEASE:-production}
-  #   volumes:
-  #     - ./rabbitmq/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf
-
-  worker:
-    restart: always
-    image: "${DOCKER_IMAGE_BACKEND?Variable not set}:${VERSION-latest}"
-    hostname: ${NODE_NAME?Variable not set}
-    profiles:
-      - worker
-    env_file:
-      - .env
-    environment:
-      - SERVICE_NAME=worker
-      - RELEASE=${RELEASE:-production}
-      - VERSION=${VERSION}
-      - VERSION_HASH=${VERSION_HASH}
-      - NODE_TYPE=${NODE_TYPE?Variable not set}
-      - NODE_NAME=${NODE_NAME?Variable not set}
-      - STACK_API_KEY=${STACK_API_KEY}
-      - PORT=${HTTP_PORT}
-      - IGNORE_TLS_ERRORS=${IGNORE_TLS_ERRORS?False}
-      - HTTP_PORT=${HTTP_PORT}
-      - HTTPS_PORT=${HTTPS_PORT}
-      - USE_BLOB_STORAGE=${USE_BLOB_STORAGE}
-      - CONTAINER_HOST=${CONTAINER_HOST}
-      - TRACE=False # TODO: Trace Mode is set to False, until jaegar is integrated
-      - JAEGER_HOST=${JAEGER_HOST}
-      - JAEGER_PORT=${JAEGER_PORT}
-      - ASSOCIATION_TIMEOUT=${ASSOCIATION_TIMEOUT}
-      - DEV_MODE=${DEV_MODE}
-      - QUEUE_PORT=${QUEUE_PORT}
-      - CREATE_PRODUCER=true
-      - NODE_SIDE_TYPE=${NODE_SIDE_TYPE}
-      - ENABLE_WARNINGS=${ENABLE_WARNINGS}
-      - INMEMORY_WORKERS=True # hardcoding is intentional, since single_container don't share databases
-    ports:
-      - "${HTTP_PORT}:${HTTP_PORT}"
-    volumes:
-      - credentials-data:/root/data/creds/
-      - /var/run/docker.sock:/var/run/docker.sock
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-    labels:
-      - "orgs.openmined.syft=this is a syft worker container"
-
-  backend:
-    restart: always
-    image: "${DOCKER_IMAGE_BACKEND?Variable not set}:${VERSION-latest}"
-    profiles:
-      - backend
-    depends_on:
-      - proxy
-      - mongo
-    env_file:
-      - .env
-    environment:
-      - SERVICE_NAME=backend
-      - RELEASE=${RELEASE:-production}
-      - VERSION=${VERSION}
-      - VERSION_HASH=${VERSION_HASH}
-      - NODE_TYPE=${NODE_TYPE?Variable not set}
-      - NODE_NAME=${NODE_NAME?Variable not set}
-      - STACK_API_KEY=${STACK_API_KEY}
-      - PORT=8001
-      - IGNORE_TLS_ERRORS=${IGNORE_TLS_ERRORS?False}
-      - HTTP_PORT=${HTTP_PORT}
-      - HTTPS_PORT=${HTTPS_PORT}
-      - USE_BLOB_STORAGE=${USE_BLOB_STORAGE}
-      - CONTAINER_HOST=${CONTAINER_HOST}
-      - TRACE=${TRACE}
-      - JAEGER_HOST=${JAEGER_HOST}
-      - JAEGER_PORT=${JAEGER_PORT}
-      - ASSOCIATION_TIMEOUT=${ASSOCIATION_TIMEOUT}
-      - DEV_MODE=${DEV_MODE}
-      - OBLV_LOCALHOST_PORT=${OBLV_LOCALHOST_PORT}
-      - OBLV_ENABLED=${OBLV_ENABLED}
-      - DEFAULT_ROOT_EMAIL=${DEFAULT_ROOT_EMAIL}
-      - DEFAULT_ROOT_PASSWORD=${DEFAULT_ROOT_PASSWORD}
-      - BACKEND_STORAGE_PATH=${BACKEND_STORAGE_PATH}
-      - QUEUE_PORT=${QUEUE_PORT}
-      - CREATE_PRODUCER=true
-      - N_CONSUMERS=1
-      - INMEMORY_WORKERS=${INMEMORY_WORKERS}
-      - HOST_GRID_PATH=${PWD}
-    command: "./grid/start.sh"
-    network_mode: service:proxy
-    volumes:
-      - ${BACKEND_STORAGE_PATH}:/storage
-      - ${CREDENTIALS_VOLUME}:/root/data/creds/
-      - /var/run/docker.sock:/var/run/docker.sock
-    stdin_open: true
-    tty: true
-    labels:
-      - "orgs.openmined.syft=this is a syft backend container"
-
-  # backend_stream:
-  #   restart: always
-  #   image: "${DOCKER_IMAGE_BACKEND?Variable not set}:${VERSION-latest}"
-  #   depends_on:
-  #     - proxy
-  #   env_file:
-  #     - .env
-  #   environment:
-  #     - SERVICE_NAME=backend_stream
-  #     - RELEASE=${RELEASE:-production}
-  #     - VERSION=${VERSION}
-  #     - VERSION_HASH=${VERSION_HASH}
-  #     - NODE_TYPE=${NODE_TYPE?Variable not set}
-  #     - DOMAIN_NAME=${DOMAIN_NAME?Variable not set}
-  #     - STACK_API_KEY=${STACK_API_KEY}
-  #     - PORT=8011
-  #     - STREAM_QUEUE=1
-  #     - IGNORE_TLS_ERRORS=${IGNORE_TLS_ERRORS?False}
-  #     - HTTP_PORT=${HTTP_PORT}
-  #     - HTTPS_PORT=${HTTPS_PORT}
-  #     - USE_BLOB_STORAGE=${USE_BLOB_STORAGE}
-  #     - CONTAINER_HOST=${CONTAINER_HOST}
-  #     - TRACE=${TRACE}
-  #     - JAEGER_HOST=${JAEGER_HOST}
-  #     - JAEGER_PORT=${JAEGER_PORT}
-  #     - DEV_MODE=${DEV_MODE}
-  #     - OBLV_LOCALHOST_PORT=${OBLV_LOCALHOST_PORT}
-  #     - OBLV_ENABLED=${OBLV_ENABLED}
-  #   network_mode: service:proxy
-  #   volumes:
-  #     - credentials-data:/root/data/creds/
-
-  # celeryworker:
-  #   restart: always
-  #   image: "${DOCKER_IMAGE_BACKEND?Variable not set}:${VERSION-latest}"
-  #   depends_on:
-  #     - proxy
-  #     - queue
-  #   env_file:
-  #     - .env
-  #   environment:
-  #     - SERVICE_NAME=celeryworker
-  #     - RELEASE=${RELEASE:-production}
-  #     - VERSION=${VERSION}
-  #     - VERSION_HASH=${VERSION_HASH}
-  #     - NODE_TYPE=${NODE_TYPE?Variable not set}
-  #     - DOMAIN_NAME=${DOMAIN_NAME?Variable not set}
-  #     - C_FORCE_ROOT=1
-  #     - STACK_API_KEY=${STACK_API_KEY}
-  #     - IGNORE_TLS_ERRORS=${IGNORE_TLS_ERRORS?False}
-  #     - HTTP_PORT=${HTTP_PORT}
-  #     - HTTPS_PORT=${HTTPS_PORT}
-  #     - USE_BLOB_STORAGE=${USE_BLOB_STORAGE}
-  #     - CONTAINER_HOST=${CONTAINER_HOST}
-  #     - NETWORK_CHECK_INTERVAL=${NETWORK_CHECK_INTERVAL}
-  #     - DOMAIN_CHECK_INTERVAL=${DOMAIN_CHECK_INTERVAL}
-  #     - TRACE=${TRACE}
-  #     - JAEGER_HOST=${JAEGER_HOST}
-  #     - JAEGER_PORT=${JAEGER_PORT}
-  #     - DEV_MODE=${DEV_MODE}
-  #     - OBLV_LOCALHOST_PORT=${OBLV_LOCALHOST_PORT}
-  #     - OBLV_ENABLED=${OBLV_ENABLED}
-  #   command: "/app/grid/worker-start.sh"
-  #   network_mode: service:proxy
-  #   volumes:
-  #     - credentials-data:/storage
-
-  seaweedfs:
-    profiles:
-      - blob-storage
-    depends_on:
-      - proxy
-    env_file:
-      - .env
-    image: "${DOCKER_IMAGE_SEAWEEDFS?Variable not set}:${VERSION-latest}"
-    environment:
-      - S3_VOLUME_SIZE_MB=${S3_VOLUME_SIZE_MB:-1024}
-      - S3_ROOT_USER=${S3_ROOT_USER:-admin}
-      - S3_ROOT_PWD=${S3_ROOT_PWD:-admin}
-      - S3_PORT=${S3_PORT:-8888}
-      - SEAWEED_MOUNT_PORT=${SEAWEED_MOUNT_PORT:-4001}
-    volumes:
-      - seaweedfs-data:/data
-      - ./seaweedfs/filer.toml:/etc/seaweedfs/filer.toml
-      - ./seaweedfs/start.sh:/etc/seaweedfs/start.sh
-    labels:
-      - "orgs.openmined.syft=this is a syft seaweedfs container"
-
-  mongo:
-    image: "${MONGO_IMAGE}:${MONGO_VERSION}"
-    profiles:
-      - mongo
-    restart: always
-    environment:
-      - MONGO_INITDB_ROOT_USERNAME=${MONGO_USERNAME}
-      - MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD}
-    volumes:
-      - mongo-data:/data/db
-      - mongo-config-data:/data/configdb
-    labels:
-      - "orgs.openmined.syft=this is a syft mongo container"
-
-  jaeger:
-    profiles:
-      - telemetry
-    image: jaegertracing/all-in-one:1.37
-    environment:
-      - COLLECTOR_ZIPKIN_HOST_PORT=9411
-      - COLLECTOR_OTLP_ENABLED=true
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-    ports:
-      - "${JAEGER_PORT}:14268" # http collector
-      - "16686" # ui
-      # - "6831:6831/udp"
-      # - "6832:6832/udp"
-      # - "5778:5778"
-      # - "4317:4317"
-      # - "4318:4318"
-      # - "14250:14250"
-      # - "14269:14269"
-      # - "9411:9411"
-    volumes:
-      - jaeger-data:/tmp
-    labels:
-      - "orgs.openmined.syft=this is a syft jaeger container"
-
-volumes:
-  credentials-data:
-    labels:
-      orgs.openmined.syft: "this is a syft credentials volume"
-  seaweedfs-data:
-    labels:
-      orgs.openmined.syft: "this is a syft seaweedfs volume"
-  mongo-data:
-    labels:
-      orgs.openmined.syft: "this is a syft mongo volume"
-  mongo-config-data:
-    labels:
-      orgs.openmined.syft: "this is a syft mongo volume"
-  jaeger-data:
-    labels:
-      orgs.openmined.syft: "this is a syft jaeger volume"
-
-networks:
-  traefik-public:
-    # Allow setting it to false for testing
-    external: ${TRAEFIK_PUBLIC_NETWORK_IS_EXTERNAL-true}
diff --git a/packages/grid/enclave/attestation/attestation.dockerfile b/packages/grid/enclave/attestation/attestation.dockerfile
new file mode 100644
index 00000000000..3ddb0377ca0
--- /dev/null
+++ b/packages/grid/enclave/attestation/attestation.dockerfile
@@ -0,0 +1,85 @@
+ARG AZ_GUEST_LIB_VERSION="1.0.5"
+ARG AZ_CLIENT_COMMIT="b613bcd"
+ARG PYTHON_VERSION="3.10"
+ARG NVTRUST_VERSION="1.3.0"
+
+
+FROM ubuntu:22.04 as builder
+ARG AZ_GUEST_LIB_VERSION
+ARG AZ_CLIENT_COMMIT
+
+# ======== [Stage 1] Install Dependencies ========== #
+
+ENV DEBIAN_FRONTEND=noninteractive
+RUN --mount=type=cache,target=/var/cache/apt/archives \
+    apt update && apt upgrade -y && \
+    apt-get install -y \
+    build-essential \
+    libcurl4-openssl-dev \
+    libjsoncpp-dev \
+    libboost-all-dev \
+    nlohmann-json3-dev \
+    cmake \
+    wget \
+    git
+
+RUN wget https://packages.microsoft.com/repos/azurecore/pool/main/a/azguestattestation1/azguestattestation1_${AZ_GUEST_LIB_VERSION}_amd64.deb && \
+    dpkg -i azguestattestation1_${AZ_GUEST_LIB_VERSION}_amd64.deb
+
+# ======== [Stage 2] Build Attestation Client ========== #
+
+RUN git clone https://github.com/Azure/confidential-computing-cvm-guest-attestation.git && \
+    cd confidential-computing-cvm-guest-attestation && \
+    git checkout ${AZ_CLIENT_COMMIT} && \
+    cd cvm-attestation-sample-app && \
+    cmake . && make && cp ./AttestationClient /
+
+
+# ======== [Step 3] Build Final Image ========== #
+FROM python:${PYTHON_VERSION}-slim
+ARG AZ_GUEST_LIB_VERSION
+ARG NVTRUST_VERSION
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && apt-get install -y \
+    wget \
+    git
+
+WORKDIR /app
+
+RUN wget https://packages.microsoft.com/repos/azurecore/pool/main/a/azguestattestation1/azguestattestation1_${AZ_GUEST_LIB_VERSION}_amd64.deb && \
+    dpkg -i azguestattestation1_${AZ_GUEST_LIB_VERSION}_amd64.deb
+
+COPY --from=builder /AttestationClient /app
+
+# Clone Nvidia nvtrust Repo
+RUN git clone -b v${NVTRUST_VERSION} https://github.com/NVIDIA/nvtrust.git
+
+
+# Install Nvidia Local Verifier
+RUN --mount=type=cache,target=/root/.cache \
+    cd nvtrust/guest_tools/gpu_verifiers/local_gpu_verifier && \
+    pip install .
+
+# Install Nvidia Attestation SDK
+RUN --mount=type=cache,target=/root/.cache \
+    cd nvtrust/guest_tools/attestation_sdk/dist && \
+    pip install ./nv_attestation_sdk-${NVTRUST_VERSION}-py3-none-any.whl
+
+
+COPY ./requirements.txt /app/requirements.txt
+RUN --mount=type=cache,target=/root/.cache \
+    pip install --user -r requirements.txt
+
+COPY ./start.sh /app/start.sh
+RUN chmod +x /app/start.sh
+COPY ./server /app/server
+
+# ========== [Step 4] Start  Python Web Server ========== #
+
+CMD ["sh", "-c", "/app/start.sh"]
+EXPOSE 4455
+
+# Cleanup
+RUN rm -rf /var/lib/apt/lists/* && \
+    rm -rf /app/nvtrust
\ No newline at end of file
diff --git a/packages/grid/enclave/attestation/enclave-development.md b/packages/grid/enclave/attestation/enclave-development.md
new file mode 100644
index 00000000000..9e1563504b6
--- /dev/null
+++ b/packages/grid/enclave/attestation/enclave-development.md
@@ -0,0 +1,209 @@
+# Enclave Development
+
+## Building Attestation Containers
+
+NOTE: We build x64 images even on Arm machines, because some packages used in the
+dockerfile do not have arm64 equivalents. The first emulated build on an Arm machine
+takes around 10 minutes; subsequent builds are nearly instant.
+
+```sh
+cd packages/grid/enclave/attestation && \
+docker build -f attestation.dockerfile  . -t attestation:0.1 --platform linux/amd64
+```
+
+## Running the container in development mode
+
+```sh
+cd packages/grid/enclave/attestation && \
+docker run -it --rm -e DEV_MODE=True -p 4455:4455 -v $(pwd)/server:/app/server attestation:0.1
+```
+
+## Fetching the attestation report via FastAPI
+
+### CPU Attestation
+
+```sh
+docker run -it --rm --privileged \
+  -p 4455:4455 \
+  -v /sys/kernel/security:/sys/kernel/security \
+  -v /dev/tpmrm0:/dev/tpmrm0 attestation:0.1
+```
+
+```sh
+curl localhost:4455/attest/cpu
+```
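+
+The JSON response follows `CPUAttestationResponseModel` in `server/attestation_models.py`: a `result` string, the attestation `token`, and an optional `vendor` field.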
+
+### GPU Attestation
+
+#### Nvidia GPU Requirements
+
+Install the NVIDIA Container Toolkit on the host system and make sure CUDA drivers are installed.
+Link: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/index.html
+
+```sh
+docker run -it --rm --privileged --gpus all --runtime=nvidia \
+  -p 4455:4455 \
+  -v /sys/kernel/security:/sys/kernel/security \
+  -v /dev/tpmrm0:/dev/tpmrm0 attestation:0.1
+```
+
+```sh
+curl localhost:4455/attest/gpu
+```
+
+## Fetching the attestation report directly via Docker
+
+### CPU Attestation
+
+```sh
+docker run -it --rm --privileged \
+  -v /sys/kernel/security:/sys/kernel/security \
+  -v /dev/tpmrm0:/dev/tpmrm0 attestation:0.1 /bin/bash
+```
+
+In the shell, run:
+
+```sh
+./AttestationClient
+```
+
+This returns either True or False, indicating the attestation status.
+
+The check can also be customized with an Appraisal Policy.
+
+To retrieve a JWT from Microsoft Azure Attestation (MAA), run:
+
+```sh
+./AttestationClient -o token
+```
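+
+The printed token can be verified locally with the `verify_token` helper at the end of this document.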
+
+### GPU Attestation
+
+```sh
+docker run -it --rm --privileged --gpus all --runtime=nvidia \
+  -v /sys/kernel/security:/sys/kernel/security \
+  -v /dev/tpmrm0:/dev/tpmrm0 attestation:0.1 /bin/bash
+```
+
+Start a Python shell inside the container and run:
+
+```python3
+from nv_attestation_sdk import attestation
+
+
+NRAS_URL="https://nras.attestation.nvidia.com/v1/attest/gpu"
+client = attestation.Attestation()
+client.set_name("thisServer1")
+client.set_nonce("931d8dd0add203ac3d8b4fbde75e115278eefcdceac5b87671a748f32364dfcb")
+print ("[RemoteGPUTest] server name :", client.get_name())
+
+client.add_verifier(attestation.Devices.GPU, attestation.Environment.REMOTE, NRAS_URL, "")
+client.attest()
+```
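+
+`client.attest()` prints a line of the form `Entity Attestation Token is <token>`; the attestation server's `gpu_attestation.py` captures that output and extracts the token with `extract_token`.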
+
+### Instructions for Development (Devspace)
+
+We can launch an enclave stack with the following command:
+
+```sh
+tox -e dev.k8s.launch.enclave
+```
+
+### Local Client-side Verification
+
+Use the following function to perform local, client-side verification of tokens. Note that tokens expire quickly.
+
+```python3
+def verify_token(token: str, token_type: str):
+    """
+    Verifies a JSON Web Token (JWT) using a public key obtained from a JWKS (JSON Web Key Set) endpoint,
+    based on the specified type of token ('cpu' or 'gpu'). The function handles two distinct processes
+    for token verification depending on the type specified:
+
+    - 'cpu': Fetches the JWKS from the 'jku' URL specified in the JWT's unverified header,
+             finds the key by 'kid', and converts the JWK to a PEM format public key for verification.
+
+    - 'gpu': Directly uses a fixed JWKS URL to retrieve the keys, finds the key by 'kid', and uses the
+             'x5c' field to extract a certificate which is then used to verify the token.
+
+    Parameters:
+        token (str): The JWT that needs to be verified.
+        token_type (str): Type of the token, which dictates the verification process; expected values are 'cpu' or 'gpu'.
+
+    Returns:
+        bool: True if the JWT is successfully verified, False otherwise.
+
+    Raises:
+        Exception: Raises various exceptions internally but catches them to return False, except for
+                   printing error messages related to the specific failures (e.g., key not found, invalid certificate).
+
+    Example usage:
+        verify_token('your.jwt.token', 'cpu')
+        verify_token('your.jwt.token', 'gpu')
+
+    Note:
+        - The function prints out details about the verification process and errors, if any.
+        - Ensure that the cryptography and PyJWT libraries are properly installed and updated in your environment.
+    """
+    import jwt
+    import json
+    import base64
+    import requests
+    from jwt.algorithms import RSAAlgorithm
+    from cryptography.x509 import load_der_x509_certificate
+    from cryptography.hazmat.primitives import serialization
+
+
+    # Determine JWKS URL based on the token type
+    if token_type.lower() == "gpu":
+        jwks_url = 'https://nras.attestation.nvidia.com/.well-known/jwks.json'
+    else:
+        unverified_header = jwt.get_unverified_header(token)
+        jwks_url = unverified_header['jku']
+
+    # Fetch the JWKS from the endpoint
+    jwks = requests.get(jwks_url).json()
+
+    # Get the key ID from the JWT header
+    header = jwt.get_unverified_header(token)
+    kid = header['kid']
+
+    # Find the key with the matching kid in the JWKS
+    key = next((item for item in jwks["keys"] if item["kid"] == kid), None)
+    if not key:
+        print("Public key not found in JWKS list.")
+        return False
+
+    # Convert the key based on the token type
+    if token_type.lower() == "gpu" and "x5c" in key:
+        try:
+            cert_bytes = base64.b64decode(key['x5c'][0])
+            cert = load_der_x509_certificate(cert_bytes)
+            public_key = cert.public_key()
+        except Exception as e:
+            print("Failed to process certificate:", str(e))
+            return False
+    elif token_type.lower() == "cpu":
+        try:
+            public_key = RSAAlgorithm.from_jwk(key)
+        except Exception as e:
+            print("Failed to convert JWK to PEM:", str(e))
+            return False
+    else:
+        print("Invalid token_type or key information.")
+        return False
+
+    # Verify the JWT using the public key
+    try:
+        payload = jwt.decode(token, public_key, algorithms=[header['alg']], options={"verify_exp": True})
+        print("JWT Payload:", json.dumps(payload, indent=2))
+        return True
+    except jwt.ExpiredSignatureError:
+        print("JWT token has expired.")
+    except jwt.InvalidTokenError as e:
+        print("JWT token signature is invalid:", str(e))
+
+    return False
+```
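+
+A minimal usage sketch pairing the FastAPI endpoints from `server/attestation_main.py` with the helper above. This is illustrative only; it assumes the attestation container from this document is running locally on port 4455 and that `verify_token` is already defined in the session.
+
+```python3
+# Hypothetical usage sketch: the URL and port are the development defaults used in this document.
+import requests
+
+response = requests.get("http://localhost:4455/attest/cpu").json()
+print("CPU attestation result:", response["result"])
+
+# Tokens expire quickly, so verify right after fetching.
+if response["token"]:
+    verify_token(response["token"], "cpu")
+```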
diff --git a/packages/grid/enclave/attestation/requirements.txt b/packages/grid/enclave/attestation/requirements.txt
new file mode 100644
index 00000000000..bd5059ad68d
--- /dev/null
+++ b/packages/grid/enclave/attestation/requirements.txt
@@ -0,0 +1,3 @@
+fastapi==0.110.0
+loguru==0.7.2
+uvicorn[standard]==0.27.1
diff --git a/packages/grid/enclave/attestation/server/attestation_constants.py b/packages/grid/enclave/attestation/server/attestation_constants.py
new file mode 100644
index 00000000000..721b435cecc
--- /dev/null
+++ b/packages/grid/enclave/attestation/server/attestation_constants.py
@@ -0,0 +1 @@
+NRAS_URL = "https://nras.attestation.nvidia.com/v1/attest/gpu"
diff --git a/packages/grid/enclave/attestation/server/attestation_main.py b/packages/grid/enclave/attestation/server/attestation_main.py
new file mode 100644
index 00000000000..fb0658d7151
--- /dev/null
+++ b/packages/grid/enclave/attestation/server/attestation_main.py
@@ -0,0 +1,38 @@
+# stdlib
+import os
+import sys
+
+# third party
+from fastapi import FastAPI
+from loguru import logger
+
+# relative
+from .attestation_models import CPUAttestationResponseModel
+from .attestation_models import GPUAttestationResponseModel
+from .attestation_models import ResponseModel
+from .cpu_attestation import attest_cpu
+from .gpu_attestation import attest_gpu
+
+# Logging Configuration
+log_level = os.getenv("APP_LOG_LEVEL", "INFO").upper()
+logger.remove()
+logger.add(sys.stderr, colorize=True, level=log_level)
+
+app = FastAPI(title="Attestation API")
+
+
+@app.get("/", response_model=ResponseModel)
+async def read_root() -> ResponseModel:
+    return ResponseModel(message="Server is running")
+
+
+@app.get("/attest/cpu", response_model=CPUAttestationResponseModel)
+async def attest_cpu_endpoint() -> CPUAttestationResponseModel:
+    cpu_attest_res, cpu_attest_token = attest_cpu()
+    return CPUAttestationResponseModel(result=cpu_attest_res, token=cpu_attest_token)
+
+
+@app.get("/attest/gpu", response_model=GPUAttestationResponseModel)
+async def attest_gpu_endpoint() -> GPUAttestationResponseModel:
+    gpu_attest_res, gpu_attest_token = attest_gpu()
+    return GPUAttestationResponseModel(result=gpu_attest_res, token=gpu_attest_token)
diff --git a/packages/grid/enclave/attestation/server/attestation_models.py b/packages/grid/enclave/attestation/server/attestation_models.py
new file mode 100644
index 00000000000..9ac9de21294
--- /dev/null
+++ b/packages/grid/enclave/attestation/server/attestation_models.py
@@ -0,0 +1,18 @@
+# third party
+from pydantic import BaseModel
+
+
+class ResponseModel(BaseModel):
+    message: str
+
+
+class CPUAttestationResponseModel(BaseModel):
+    result: str
+    token: str = ""
+    vendor: str | None = None  # Hardware Manufacturer
+
+
+class GPUAttestationResponseModel(BaseModel):
+    result: str
+    token: str = ""
+    vendor: str | None = None  # Hardware Manufacturer
diff --git a/packages/grid/enclave/attestation/server/cpu_attestation.py b/packages/grid/enclave/attestation/server/cpu_attestation.py
new file mode 100644
index 00000000000..356157a2e0f
--- /dev/null
+++ b/packages/grid/enclave/attestation/server/cpu_attestation.py
@@ -0,0 +1,29 @@
+# stdlib
+import subprocess
+
+# third party
+from loguru import logger
+
+
+def attest_cpu() -> tuple[str, str]:
+    # Fetch report from Microsoft Attestation library
+    cpu_report = subprocess.run(
+        ["/app/AttestationClient"], capture_output=True, text=True
+    )
+    logger.debug(f"Stdout: {cpu_report.stdout}")
+    logger.debug(f"Stderr: {cpu_report.stderr}")
+
+    logger.info("Attestation Return Code: {}", cpu_report.returncode)
+    res = "False"
+    if cpu_report.returncode == 0 and cpu_report.stdout == "true":
+        res = "True"
+
+    # Fetch token from Microsoft Attestation library
+    cpu_token = subprocess.run(
+        ["/app/AttestationClient", "-o", "token"], capture_output=True, text=True
+    )
+    logger.debug(f"Stdout: {cpu_token.stdout}")
+    logger.debug(f"Stderr: {cpu_token.stderr}")
+
+    logger.info("Attestation Token Return Code: {}", cpu_token.returncode)
+    return res, cpu_token.stdout
diff --git a/packages/grid/enclave/attestation/server/gpu_attestation.py b/packages/grid/enclave/attestation/server/gpu_attestation.py
new file mode 100644
index 00000000000..38eccc8a6df
--- /dev/null
+++ b/packages/grid/enclave/attestation/server/gpu_attestation.py
@@ -0,0 +1,51 @@
+# stdlib
+import io
+import re
+import sys
+
+# third party
+from loguru import logger
+from nv_attestation_sdk import attestation
+
+# relative
+from .attestation_constants import NRAS_URL
+
+
+# Function to process captured output to extract the token
+def extract_token(captured_value: str) -> str:
+    match = re.search(r"Entity Attestation Token is (\S+)", captured_value)
+    if match:
+        token = match.group(1)  # Extract the token, which is in group 1 of the match
+        return token
+    else:
+        return "Token not found"
+
+
+def attest_gpu() -> tuple[str, str]:
+    # Fetch report from Nvidia Attestation SDK
+    client = attestation.Attestation("Attestation Server")
+
+    # TODO: Add the ability to generate nonce later.
+    logger.info("[RemoteGPUTest] server name : {}", client.get_name())
+
+    client.add_verifier(
+        attestation.Devices.GPU, attestation.Environment.REMOTE, NRAS_URL, ""
+    )
+
+    # Step 1: Redirect stdout
+    original_stdout = sys.stdout  # Save a reference to the original standard output
+    captured_output = io.StringIO()  # Create a StringIO object to capture output
+    sys.stdout = captured_output  # Redirect stdout to the StringIO object
+
+    # Step 2: Call the function
+    gpu_report = client.attest()
+
+    # Step 3: Get the content of captured output and reset stdout
+    captured_value = captured_output.getvalue()
+    sys.stdout = original_stdout  # Reset stdout to its original state
+
+    # Step 4: Extract the token from the captured output
+    token = extract_token(captured_value)
+
+    logger.info("[RemoteGPUTest] report : {}, {}", gpu_report, type(gpu_report))
+    return str(gpu_report), token
diff --git a/packages/grid/enclave/attestation/start.sh b/packages/grid/enclave/attestation/start.sh
new file mode 100644
index 00000000000..54c025271b3
--- /dev/null
+++ b/packages/grid/enclave/attestation/start.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -e
+export PATH="/root/.local/bin:${PATH}"
+
+APP_MODULE=server.attestation_main:app
+APP_LOG_LEVEL=${APP_LOG_LEVEL:-info}
+UVICORN_LOG_LEVEL=${UVICORN_LOG_LEVEL:-info}
+HOST=${HOST:-0.0.0.0}
+PORT=${PORT:-4455}
+RELOAD=""
+
+if [[ ${DEV_MODE} == "True" ]];
+then
+    echo "DEV_MODE Enabled"
+    RELOAD="--reload"
+fi
+
+
+exec uvicorn $RELOAD --host $HOST --port $PORT --log-level $UVICORN_LOG_LEVEL "$APP_MODULE"
\ No newline at end of file
diff --git a/packages/grid/frontend/.dockerignore b/packages/grid/frontend/.dockerignore
deleted file mode 100644
index 00df28f40b9..00000000000
--- a/packages/grid/frontend/.dockerignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.DS_Store
-node_modules
-.svelte-kit
-.pnpm-store
\ No newline at end of file
diff --git a/packages/grid/frontend/README.md b/packages/grid/frontend/README.md
index e912f1dd172..6684d9fcba0 100644
--- a/packages/grid/frontend/README.md
+++ b/packages/grid/frontend/README.md
@@ -1,12 +1,12 @@
-# PyGrid UI
+# Syft UI
 
-The PyGrid UI is the user interface that allows data owners to manage their
-**deployed** PyGrid domains and networks.
+The Syft UI is the user interface that allows data owners to manage their
+**deployed** Syft datasites and gateways.
 
 ## Installation
 
 ```bash
-cd /packages/grid/frontend
+cd /packages/grid/frontend
 pnpm install
 ```
 
diff --git a/packages/grid/frontend/frontend.dockerfile b/packages/grid/frontend/frontend.dockerfile
index f05aae7e410..7dd46d4a888 100644
--- a/packages/grid/frontend/frontend.dockerfile
+++ b/packages/grid/frontend/frontend.dockerfile
@@ -4,12 +4,15 @@ ARG BACKEND_API_BASE_URL="/api/v2/"
 ENV BACKEND_API_BASE_URL ${BACKEND_API_BASE_URL}
 
 RUN apk update && \
-    apk upgrade && \
-    apk add --no-cache nodejs-20 pnpm corepack
+  apk upgrade && \
+  apk add --no-cache nodejs-20 pnpm corepack
+
+ENV PNPM_HOME="/pnpm"
+ENV PATH="$PNPM_HOME:$PATH"
 
 WORKDIR /app
 
-RUN corepack enable && corepack prepare pnpm@latest --activate
+RUN corepack enable
 
 COPY .npmrc ./
 COPY package.json ./
@@ -17,18 +20,18 @@ COPY pnpm-lock.yaml ./
 
 FROM base AS dependencies
 
-RUN pnpm i --frozen-lockfile
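+# cache the pnpm store between builds to speed up dependency installation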
+RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --frozen-lockfile
 
-FROM dependencies as grid-ui-tests
+FROM dependencies as syft-ui-tests
 COPY vite.config.ts ./
 COPY ./tests ./tests
 COPY ./src/ ./src
 
 CMD pnpm test:unit
 
-FROM dependencies as grid-ui-development
+FROM dependencies as syft-ui-development
 
-ENV NODE_ENV=development
+ENV SERVER_ENV=development
 
 COPY . .
 CMD pnpm dev
@@ -38,9 +41,9 @@ FROM dependencies AS builder
 COPY . .
 RUN pnpm build
 
-FROM base AS grid-ui-production
+FROM base AS syft-ui-production
 
-ENV NODE_ENV=production
+ENV SERVER_ENV=production
 
 COPY --from=dependencies /app/node_modules ./node_modules
 COPY --from=builder /app ./
diff --git a/packages/grid/frontend/frontend.dockerfile.dockerignore b/packages/grid/frontend/frontend.dockerfile.dockerignore
new file mode 100644
index 00000000000..449ac1c92ef
--- /dev/null
+++ b/packages/grid/frontend/frontend.dockerfile.dockerignore
@@ -0,0 +1,15 @@
+# Paths should be relative to the context dir of this image i.e. /packages/grid/frontend/
+
+# Frontend
+**/*.md
+
+# Dependency directories
+**/node_modules
+**/.svelte-kit
+**/.pnpm-store
+
+# vim
+**/*.swp
+
+# macOS
+**/.DS_Store
diff --git a/packages/grid/frontend/package.json b/packages/grid/frontend/package.json
index 7d726f1c6b3..9a68a1fceb6 100644
--- a/packages/grid/frontend/package.json
+++ b/packages/grid/frontend/package.json
@@ -1,6 +1,6 @@
 {
-  "name": "pygrid-ui",
-  "version": "0.8.6-beta.1",
+  "name": "syft-ui",
+  "version": "0.9.6-beta.6",
   "private": true,
   "scripts": {
     "dev": "pnpm i && vite dev --host --port 80",
@@ -36,7 +36,7 @@
     "tailwindcss": "3.3.1",
     "typescript": "^4.9.5",
     "typescript-svelte-plugin": "^0.3.34",
-    "vite": "^4.5.2",
+    "vite": "^4.5.3",
     "vitest": "^0.33.0"
   },
   "type": "module",
@@ -44,8 +44,8 @@
     "capnp-ts": "^0.7.0",
     "just-debounce-it": "^3.2.0",
     "ky": "^0.33.3",
-    "libsodium": "0.7.11",
-    "libsodium-wrappers": "0.7.11",
+    "libsodium": "0.7.14",
+    "libsodium-wrappers": "0.7.14",
     "prismjs": "^1.29.0",
     "semver": "^7.5.4",
     "uuid": "^9.0.0"
diff --git a/packages/grid/frontend/pnpm-lock.yaml b/packages/grid/frontend/pnpm-lock.yaml
index 087164903a7..be8c778678e 100644
--- a/packages/grid/frontend/pnpm-lock.yaml
+++ b/packages/grid/frontend/pnpm-lock.yaml
@@ -1,4 +1,4 @@
-lockfileVersion: '6.0'
+lockfileVersion: '9.0'
 
 settings:
   autoInstallPeers: true
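
> Note: the jump from `lockfileVersion: '6.0'` to `'9.0'` reflects pnpm 9's lockfile format: top-level `dependencies`/`devDependencies` move under an `importers` section, per-package resolution details are split out of the `packages` entries, and the `dev: true` markers disappear, which is what most of the churn below amounts to. A sketch of regenerating the lockfile in that format, assuming pnpm 9 is activated through corepack:

```bash
# Activate pnpm via corepack (pnpm@latest resolved to a 9.x release when this lockfile was regenerated -- an assumption)
corepack enable
corepack prepare pnpm@latest --activate

# Rewrites pnpm-lock.yaml as lockfileVersion '9.0' while keeping resolved versions where possible
pnpm install
```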
@@ -7,450 +7,321 @@ settings:
 overrides:
   undici@<5.26.2: '>=5.26.2'
 
-dependencies:
-  capnp-ts:
-    specifier: ^0.7.0
-    version: 0.7.0
-  just-debounce-it:
-    specifier: ^3.2.0
-    version: 3.2.0
-  ky:
-    specifier: ^0.33.3
-    version: 0.33.3
-  libsodium:
-    specifier: 0.7.11
-    version: 0.7.11
-  libsodium-wrappers:
-    specifier: 0.7.11
-    version: 0.7.11
-  prismjs:
-    specifier: ^1.29.0
-    version: 1.29.0
-  semver:
-    specifier: ^7.5.4
-    version: 7.5.4
-  uuid:
-    specifier: ^9.0.0
-    version: 9.0.0
-
-devDependencies:
-  '@playwright/test':
-    specifier: ^1.36.1
-    version: 1.36.1
-  '@sveltejs/adapter-node':
-    specifier: ^1.3.1
-    version: 1.3.1(@sveltejs/kit@1.25.2)
-  '@sveltejs/kit':
-    specifier: 1.25.2
-    version: 1.25.2(svelte@3.59.2)(vite@4.5.2)
-  '@types/cookie':
-    specifier: ^0.5.3
-    version: 0.5.3
-  '@types/libsodium-wrappers':
-    specifier: 0.7.11
-    version: 0.7.11
-  '@types/prismjs':
-    specifier: ^1.26.0
-    version: 1.26.0
-  '@typescript-eslint/eslint-plugin':
-    specifier: ^5.62.0
-    version: 5.62.0(@typescript-eslint/parser@5.62.0)(eslint@8.44.0)(typescript@4.9.5)
-  '@typescript-eslint/parser':
-    specifier: ^5.62.0
-    version: 5.62.0(eslint@8.44.0)(typescript@4.9.5)
-  autoprefixer:
-    specifier: ^10.4.14
-    version: 10.4.14(postcss@8.4.31)
-  dotenv:
-    specifier: ^16.3.1
-    version: 16.3.1
-  esbuild:
-    specifier: ^0.18.13
-    version: 0.18.13
-  eslint:
-    specifier: ^8.44.0
-    version: 8.44.0
-  eslint-config-prettier:
-    specifier: ^8.8.0
-    version: 8.8.0(eslint@8.44.0)
-  eslint-plugin-svelte3:
-    specifier: ^4.0.0
-    version: 4.0.0(eslint@8.44.0)(svelte@3.59.2)
-  postcss:
-    specifier: ^8.4.31
-    version: 8.4.31
-  prettier:
-    specifier: ^2.8.8
-    version: 2.8.8
-  prettier-plugin-svelte:
-    specifier: ^2.10.1
-    version: 2.10.1(prettier@2.8.8)(svelte@3.59.2)
-  svelte:
-    specifier: ^3.59.2
-    version: 3.59.2
-  svelte-preprocess:
-    specifier: ^4.10.7
-    version: 4.10.7(postcss@8.4.31)(svelte@3.59.2)(typescript@4.9.5)
-  svelte-search:
-    specifier: ^2.0.1
-    version: 2.0.1
-  tailwindcss:
-    specifier: 3.3.1
-    version: 3.3.1(postcss@8.4.31)
-  typescript:
-    specifier: ^4.9.5
-    version: 4.9.5
-  typescript-svelte-plugin:
-    specifier: ^0.3.34
-    version: 0.3.34(svelte@3.59.2)(typescript@4.9.5)
-  vite:
-    specifier: ^4.5.2
-    version: 4.5.2(@types/node@20.8.2)
-  vitest:
-    specifier: ^0.33.0
-    version: 0.33.0
+importers:
+
+  .:
+    dependencies:
+      capnp-ts:
+        specifier: ^0.7.0
+        version: 0.7.0
+      just-debounce-it:
+        specifier: ^3.2.0
+        version: 3.2.0
+      ky:
+        specifier: ^0.33.3
+        version: 0.33.3
+      libsodium:
+        specifier: 0.7.14
+        version: 0.7.14
+      libsodium-wrappers:
+        specifier: 0.7.14
+        version: 0.7.14
+      prismjs:
+        specifier: ^1.29.0
+        version: 1.29.0
+      semver:
+        specifier: ^7.5.4
+        version: 7.5.4
+      uuid:
+        specifier: ^9.0.0
+        version: 9.0.0
+    devDependencies:
+      '@playwright/test':
+        specifier: ^1.36.1
+        version: 1.36.1
+      '@sveltejs/adapter-node':
+        specifier: ^1.3.1
+        version: 1.3.1(@sveltejs/kit@1.25.2(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0)))
+      '@sveltejs/kit':
+        specifier: 1.25.2
+        version: 1.25.2(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0))
+      '@types/cookie':
+        specifier: ^0.5.3
+        version: 0.5.3
+      '@types/libsodium-wrappers':
+        specifier: 0.7.11
+        version: 0.7.11
+      '@types/prismjs':
+        specifier: ^1.26.0
+        version: 1.26.0
+      '@typescript-eslint/eslint-plugin':
+        specifier: ^5.62.0
+        version: 5.62.0(@typescript-eslint/parser@5.62.0(eslint@8.44.0)(typescript@4.9.5))(eslint@8.44.0)(typescript@4.9.5)
+      '@typescript-eslint/parser':
+        specifier: ^5.62.0
+        version: 5.62.0(eslint@8.44.0)(typescript@4.9.5)
+      autoprefixer:
+        specifier: ^10.4.14
+        version: 10.4.14(postcss@8.4.31)
+      dotenv:
+        specifier: ^16.3.1
+        version: 16.3.1
+      esbuild:
+        specifier: ^0.18.13
+        version: 0.18.13
+      eslint:
+        specifier: ^8.44.0
+        version: 8.44.0
+      eslint-config-prettier:
+        specifier: ^8.8.0
+        version: 8.8.0(eslint@8.44.0)
+      eslint-plugin-svelte3:
+        specifier: ^4.0.0
+        version: 4.0.0(eslint@8.44.0)(svelte@3.59.2)
+      postcss:
+        specifier: ^8.4.31
+        version: 8.4.31
+      prettier:
+        specifier: ^2.8.8
+        version: 2.8.8
+      prettier-plugin-svelte:
+        specifier: ^2.10.1
+        version: 2.10.1(prettier@2.8.8)(svelte@3.59.2)
+      svelte:
+        specifier: ^3.59.2
+        version: 3.59.2
+      svelte-preprocess:
+        specifier: ^4.10.7
+        version: 4.10.7(postcss-load-config@3.1.4(postcss@8.4.31))(postcss@8.4.31)(sass@1.68.0)(svelte@3.59.2)(typescript@4.9.5)
+      svelte-search:
+        specifier: ^2.0.1
+        version: 2.0.1
+      tailwindcss:
+        specifier: 3.3.1
+        version: 3.3.1(postcss@8.4.31)
+      typescript:
+        specifier: ^4.9.5
+        version: 4.9.5
+      typescript-svelte-plugin:
+        specifier: ^0.3.34
+        version: 0.3.34(svelte@3.59.2)(typescript@4.9.5)
+      vite:
+        specifier: ^4.5.3
+        version: 4.5.3(@types/node@20.8.2)(sass@1.68.0)
+      vitest:
+        specifier: ^0.33.0
+        version: 0.33.0(sass@1.68.0)
 
 packages:
 
-  /@aashutoshrathi/word-wrap@1.2.6:
+  '@aashutoshrathi/word-wrap@1.2.6':
     resolution: {integrity: sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==}
     engines: {node: '>=0.10.0'}
-    dev: true
 
-  /@esbuild/android-arm64@0.18.13:
+  '@esbuild/android-arm64@0.18.13':
     resolution: {integrity: sha512-j7NhycJUoUAG5kAzGf4fPWfd17N6SM3o1X6MlXVqfHvs2buFraCJzos9vbeWjLxOyBKHyPOnuCuipbhvbYtTAg==}
     engines: {node: '>=12'}
     cpu: [arm64]
     os: [android]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/android-arm@0.18.13:
+  '@esbuild/android-arm@0.18.13':
     resolution: {integrity: sha512-KwqFhxRFMKZINHzCqf8eKxE0XqWlAVPRxwy6rc7CbVFxzUWB2sA/s3hbMZeemPdhN3fKBkqOaFhTbS8xJXYIWQ==}
     engines: {node: '>=12'}
     cpu: [arm]
     os: [android]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/android-x64@0.18.13:
+  '@esbuild/android-x64@0.18.13':
     resolution: {integrity: sha512-M2eZkRxR6WnWfVELHmv6MUoHbOqnzoTVSIxgtsyhm/NsgmL+uTmag/VVzdXvmahak1I6sOb1K/2movco5ikDJg==}
     engines: {node: '>=12'}
     cpu: [x64]
     os: [android]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/darwin-arm64@0.18.13:
+  '@esbuild/darwin-arm64@0.18.13':
     resolution: {integrity: sha512-f5goG30YgR1GU+fxtaBRdSW3SBG9pZW834Mmhxa6terzcboz7P2R0k4lDxlkP7NYRIIdBbWp+VgwQbmMH4yV7w==}
     engines: {node: '>=12'}
     cpu: [arm64]
     os: [darwin]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/darwin-x64@0.18.13:
+  '@esbuild/darwin-x64@0.18.13':
     resolution: {integrity: sha512-RIrxoKH5Eo+yE5BtaAIMZaiKutPhZjw+j0OCh8WdvKEKJQteacq0myZvBDLU+hOzQOZWJeDnuQ2xgSScKf1Ovw==}
     engines: {node: '>=12'}
     cpu: [x64]
     os: [darwin]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/freebsd-arm64@0.18.13:
+  '@esbuild/freebsd-arm64@0.18.13':
     resolution: {integrity: sha512-AfRPhHWmj9jGyLgW/2FkYERKmYR+IjYxf2rtSLmhOrPGFh0KCETFzSjx/JX/HJnvIqHt/DRQD/KAaVsUKoI3Xg==}
     engines: {node: '>=12'}
     cpu: [arm64]
     os: [freebsd]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/freebsd-x64@0.18.13:
+  '@esbuild/freebsd-x64@0.18.13':
     resolution: {integrity: sha512-pGzWWZJBInhIgdEwzn8VHUBang8UvFKsvjDkeJ2oyY5gZtAM6BaxK0QLCuZY+qoj/nx/lIaItH425rm/hloETA==}
     engines: {node: '>=12'}
     cpu: [x64]
     os: [freebsd]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/linux-arm64@0.18.13:
+  '@esbuild/linux-arm64@0.18.13':
     resolution: {integrity: sha512-hCzZbVJEHV7QM77fHPv2qgBcWxgglGFGCxk6KfQx6PsVIdi1u09X7IvgE9QKqm38OpkzaAkPnnPqwRsltvLkIQ==}
     engines: {node: '>=12'}
     cpu: [arm64]
     os: [linux]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/linux-arm@0.18.13:
+  '@esbuild/linux-arm@0.18.13':
     resolution: {integrity: sha512-4iMxLRMCxGyk7lEvkkvrxw4aJeC93YIIrfbBlUJ062kilUUnAiMb81eEkVvCVoh3ON283ans7+OQkuy1uHW+Hw==}
     engines: {node: '>=12'}
     cpu: [arm]
     os: [linux]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/linux-ia32@0.18.13:
+  '@esbuild/linux-ia32@0.18.13':
     resolution: {integrity: sha512-I3OKGbynl3AAIO6onXNrup/ttToE6Rv2XYfFgLK/wnr2J+1g+7k4asLrE+n7VMhaqX+BUnyWkCu27rl+62Adug==}
     engines: {node: '>=12'}
     cpu: [ia32]
     os: [linux]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/linux-loong64@0.18.13:
+  '@esbuild/linux-loong64@0.18.13':
     resolution: {integrity: sha512-8pcKDApAsKc6WW51ZEVidSGwGbebYw2qKnO1VyD8xd6JN0RN6EUXfhXmDk9Vc4/U3Y4AoFTexQewQDJGsBXBpg==}
     engines: {node: '>=12'}
     cpu: [loong64]
     os: [linux]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/linux-mips64el@0.18.13:
+  '@esbuild/linux-mips64el@0.18.13':
     resolution: {integrity: sha512-6GU+J1PLiVqWx8yoCK4Z0GnfKyCGIH5L2KQipxOtbNPBs+qNDcMJr9euxnyJ6FkRPyMwaSkjejzPSISD9hb+gg==}
     engines: {node: '>=12'}
     cpu: [mips64el]
     os: [linux]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/linux-ppc64@0.18.13:
+  '@esbuild/linux-ppc64@0.18.13':
     resolution: {integrity: sha512-pfn/OGZ8tyR8YCV7MlLl5hAit2cmS+j/ZZg9DdH0uxdCoJpV7+5DbuXrR+es4ayRVKIcfS9TTMCs60vqQDmh+w==}
     engines: {node: '>=12'}
     cpu: [ppc64]
     os: [linux]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/linux-riscv64@0.18.13:
+  '@esbuild/linux-riscv64@0.18.13':
     resolution: {integrity: sha512-aIbhU3LPg0lOSCfVeGHbmGYIqOtW6+yzO+Nfv57YblEK01oj0mFMtvDJlOaeAZ6z0FZ9D13oahi5aIl9JFphGg==}
     engines: {node: '>=12'}
     cpu: [riscv64]
     os: [linux]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/linux-s390x@0.18.13:
+  '@esbuild/linux-s390x@0.18.13':
     resolution: {integrity: sha512-Pct1QwF2sp+5LVi4Iu5Y+6JsGaV2Z2vm4O9Dd7XZ5tKYxEHjFtb140fiMcl5HM1iuv6xXO8O1Vrb1iJxHlv8UA==}
     engines: {node: '>=12'}
     cpu: [s390x]
     os: [linux]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/linux-x64@0.18.13:
+  '@esbuild/linux-x64@0.18.13':
     resolution: {integrity: sha512-zTrIP0KzYP7O0+3ZnmzvUKgGtUvf4+piY8PIO3V8/GfmVd3ZyHJGz7Ht0np3P1wz+I8qJ4rjwJKqqEAbIEPngA==}
     engines: {node: '>=12'}
     cpu: [x64]
     os: [linux]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/netbsd-x64@0.18.13:
+  '@esbuild/netbsd-x64@0.18.13':
     resolution: {integrity: sha512-I6zs10TZeaHDYoGxENuksxE1sxqZpCp+agYeW039yqFwh3MgVvdmXL5NMveImOC6AtpLvE4xG5ujVic4NWFIDQ==}
     engines: {node: '>=12'}
     cpu: [x64]
     os: [netbsd]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/openbsd-x64@0.18.13:
+  '@esbuild/openbsd-x64@0.18.13':
     resolution: {integrity: sha512-W5C5nczhrt1y1xPG5bV+0M12p2vetOGlvs43LH8SopQ3z2AseIROu09VgRqydx5qFN7y9qCbpgHLx0kb0TcW7g==}
     engines: {node: '>=12'}
     cpu: [x64]
     os: [openbsd]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/sunos-x64@0.18.13:
+  '@esbuild/sunos-x64@0.18.13':
     resolution: {integrity: sha512-X/xzuw4Hzpo/yq3YsfBbIsipNgmsm8mE/QeWbdGdTTeZ77fjxI2K0KP3AlhZ6gU3zKTw1bKoZTuKLnqcJ537qw==}
     engines: {node: '>=12'}
     cpu: [x64]
     os: [sunos]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/win32-arm64@0.18.13:
+  '@esbuild/win32-arm64@0.18.13':
     resolution: {integrity: sha512-4CGYdRQT/ILd+yLLE5i4VApMPfGE0RPc/wFQhlluDQCK09+b4JDbxzzjpgQqTPrdnP7r5KUtGVGZYclYiPuHrw==}
     engines: {node: '>=12'}
     cpu: [arm64]
     os: [win32]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/win32-ia32@0.18.13:
+  '@esbuild/win32-ia32@0.18.13':
     resolution: {integrity: sha512-D+wKZaRhQI+MUGMH+DbEr4owC2D7XnF+uyGiZk38QbgzLcofFqIOwFs7ELmIeU45CQgfHNy9Q+LKW3cE8g37Kg==}
     engines: {node: '>=12'}
     cpu: [ia32]
     os: [win32]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@esbuild/win32-x64@0.18.13:
+  '@esbuild/win32-x64@0.18.13':
     resolution: {integrity: sha512-iVl6lehAfJS+VmpF3exKpNQ8b0eucf5VWfzR8S7xFve64NBNz2jPUgx1X93/kfnkfgP737O+i1k54SVQS7uVZA==}
     engines: {node: '>=12'}
     cpu: [x64]
     os: [win32]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /@eslint-community/eslint-utils@4.4.0(eslint@8.44.0):
+  '@eslint-community/eslint-utils@4.4.0':
     resolution: {integrity: sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
     peerDependencies:
       eslint: ^6.0.0 || ^7.0.0 || >=8.0.0
-    dependencies:
-      eslint: 8.44.0
-      eslint-visitor-keys: 3.4.3
-    dev: true
 
-  /@eslint-community/regexpp@4.9.1:
+  '@eslint-community/regexpp@4.9.1':
     resolution: {integrity: sha512-Y27x+MBLjXa+0JWDhykM3+JE+il3kHKAEqabfEWq3SDhZjLYb6/BHL/JKFnH3fe207JaXkyDo685Oc2Glt6ifA==}
     engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0}
-    dev: true
 
-  /@eslint/eslintrc@2.1.2:
+  '@eslint/eslintrc@2.1.2':
     resolution: {integrity: sha512-+wvgpDsrB1YqAMdEUCcnTlpfVBH7Vqn6A/NT3D8WVXFIaKMlErPIZT3oCIAVCOtarRpMtelZLqJeU3t7WY6X6g==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
-    dependencies:
-      ajv: 6.12.6
-      debug: 4.3.4
-      espree: 9.6.1
-      globals: 13.23.0
-      ignore: 5.2.4
-      import-fresh: 3.3.0
-      js-yaml: 4.1.0
-      minimatch: 3.1.2
-      strip-json-comments: 3.1.1
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
 
-  /@eslint/js@8.44.0:
+  '@eslint/js@8.44.0':
     resolution: {integrity: sha512-Ag+9YM4ocKQx9AarydN0KY2j0ErMHNIocPDrVo8zAE44xLTjEtz81OdR68/cydGtk6m6jDb5Za3r2useMzYmSw==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
-    dev: true
 
-  /@fastify/busboy@2.0.0:
-    resolution: {integrity: sha512-JUFJad5lv7jxj926GPgymrWQxxjPYuJNiNjNMzqT+HiuP6Vl3dk5xzG+8sTX96np0ZAluvaMzPsjhHZ5rNuNQQ==}
-    engines: {node: '>=14'}
-    dev: true
-
-  /@humanwhocodes/config-array@0.11.11:
+  '@humanwhocodes/config-array@0.11.11':
     resolution: {integrity: sha512-N2brEuAadi0CcdeMXUkhbZB84eskAc8MEX1By6qEchoVywSgXPIjou4rYsl0V3Hj0ZnuGycGCjdNgockbzeWNA==}
     engines: {node: '>=10.10.0'}
-    dependencies:
-      '@humanwhocodes/object-schema': 1.2.1
-      debug: 4.3.4
-      minimatch: 3.1.2
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
 
-  /@humanwhocodes/module-importer@1.0.1:
+  '@humanwhocodes/module-importer@1.0.1':
     resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==}
     engines: {node: '>=12.22'}
-    dev: true
 
-  /@humanwhocodes/object-schema@1.2.1:
+  '@humanwhocodes/object-schema@1.2.1':
     resolution: {integrity: sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==}
-    dev: true
 
-  /@jest/schemas@29.6.3:
+  '@jest/schemas@29.6.3':
     resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==}
     engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0}
-    dependencies:
-      '@sinclair/typebox': 0.27.8
-    dev: true
 
-  /@jridgewell/gen-mapping@0.3.3:
+  '@jridgewell/gen-mapping@0.3.3':
     resolution: {integrity: sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==}
     engines: {node: '>=6.0.0'}
-    dependencies:
-      '@jridgewell/set-array': 1.1.2
-      '@jridgewell/sourcemap-codec': 1.4.15
-      '@jridgewell/trace-mapping': 0.3.19
-    dev: true
 
-  /@jridgewell/resolve-uri@3.1.1:
+  '@jridgewell/resolve-uri@3.1.1':
     resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==}
     engines: {node: '>=6.0.0'}
-    dev: true
 
-  /@jridgewell/set-array@1.1.2:
+  '@jridgewell/set-array@1.1.2':
     resolution: {integrity: sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==}
     engines: {node: '>=6.0.0'}
-    dev: true
 
-  /@jridgewell/sourcemap-codec@1.4.15:
+  '@jridgewell/sourcemap-codec@1.4.15':
     resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==}
-    dev: true
 
-  /@jridgewell/trace-mapping@0.3.19:
+  '@jridgewell/trace-mapping@0.3.19':
     resolution: {integrity: sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==}
-    dependencies:
-      '@jridgewell/resolve-uri': 3.1.1
-      '@jridgewell/sourcemap-codec': 1.4.15
-    dev: true
 
-  /@nodelib/fs.scandir@2.1.5:
+  '@nodelib/fs.scandir@2.1.5':
     resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==}
     engines: {node: '>= 8'}
-    dependencies:
-      '@nodelib/fs.stat': 2.0.5
-      run-parallel: 1.2.0
-    dev: true
 
-  /@nodelib/fs.stat@2.0.5:
+  '@nodelib/fs.stat@2.0.5':
     resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==}
     engines: {node: '>= 8'}
-    dev: true
 
-  /@nodelib/fs.walk@1.2.8:
+  '@nodelib/fs.walk@1.2.8':
     resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==}
     engines: {node: '>= 8'}
-    dependencies:
-      '@nodelib/fs.scandir': 2.1.5
-      fastq: 1.15.0
-    dev: true
 
-  /@playwright/test@1.36.1:
+  '@playwright/test@1.36.1':
     resolution: {integrity: sha512-YK7yGWK0N3C2QInPU6iaf/L3N95dlGdbsezLya4n0ZCh3IL7VgPGxC6Gnznh9ApWdOmkJeleT2kMTcWPRZvzqg==}
     engines: {node: '>=16'}
     hasBin: true
-    dependencies:
-      '@types/node': 20.8.2
-      playwright-core: 1.36.1
-    optionalDependencies:
-      fsevents: 2.3.2
-    dev: true
 
-  /@polka/url@1.0.0-next.23:
+  '@polka/url@1.0.0-next.23':
     resolution: {integrity: sha512-C16M+IYz0rgRhWZdCmK+h58JMv8vijAA61gmz2rspCSwKwzBebpdcsiUmwrtJRdphuY30i6BSLEOP8ppbNLyLg==}
-    dev: true
 
-  /@rollup/plugin-commonjs@25.0.5(rollup@3.29.4):
+  '@rollup/plugin-commonjs@25.0.5':
     resolution: {integrity: sha512-xY8r/A9oisSeSuLCTfhssyDjo9Vp/eDiRLXkg1MXCcEEgEjPmLU+ZyDB20OOD0NlyDa/8SGbK5uIggF5XTx77w==}
     engines: {node: '>=14.0.0'}
     peerDependencies:
@@ -458,17 +329,8 @@ packages:
     peerDependenciesMeta:
       rollup:
         optional: true
-    dependencies:
-      '@rollup/pluginutils': 5.0.5(rollup@3.29.4)
-      commondir: 1.0.1
-      estree-walker: 2.0.2
-      glob: 8.1.0
-      is-reference: 1.2.1
-      magic-string: 0.27.0
-      rollup: 3.29.4
-    dev: true
 
-  /@rollup/plugin-json@6.0.1(rollup@3.29.4):
+  '@rollup/plugin-json@6.0.1':
     resolution: {integrity: sha512-RgVfl5hWMkxN1h/uZj8FVESvPuBJ/uf6ly6GTj0GONnkfoBN5KC0MSz+PN2OLDgYXMhtG0mWpTrkiOjoxAIevw==}
     engines: {node: '>=14.0.0'}
     peerDependencies:
@@ -476,12 +338,8 @@ packages:
     peerDependenciesMeta:
       rollup:
         optional: true
-    dependencies:
-      '@rollup/pluginutils': 5.0.5(rollup@3.29.4)
-      rollup: 3.29.4
-    dev: true
 
-  /@rollup/plugin-node-resolve@15.2.2(rollup@3.29.4):
+  '@rollup/plugin-node-resolve@15.2.2':
     resolution: {integrity: sha512-f64bU4OKqV0yihtxFemmuf0oj37pToCFMISCA+sJbbIAl5wcpbRO9XgWNWb1tDiWQJUcPxo6V0l59hcuZOQ3kw==}
     engines: {node: '>=14.0.0'}
     peerDependencies:
@@ -489,17 +347,8 @@ packages:
     peerDependenciesMeta:
       rollup:
         optional: true
-    dependencies:
-      '@rollup/pluginutils': 5.0.5(rollup@3.29.4)
-      '@types/resolve': 1.20.2
-      deepmerge: 4.3.1
-      is-builtin-module: 3.2.1
-      is-module: 1.0.0
-      resolve: 1.22.6
-      rollup: 3.29.4
-    dev: true
 
-  /@rollup/pluginutils@5.0.5(rollup@3.29.4):
+  '@rollup/pluginutils@5.0.5':
     resolution: {integrity: sha512-6aEYR910NyP73oHiJglti74iRyOwgFU4x3meH/H8OJx6Ry0j6cOVZ5X/wTvub7G7Ao6qaHBEaNsV3GLJkSsF+Q==}
     engines: {node: '>=14.0.0'}
     peerDependencies:
@@ -507,147 +356,76 @@ packages:
     peerDependenciesMeta:
       rollup:
         optional: true
-    dependencies:
-      '@types/estree': 1.0.2
-      estree-walker: 2.0.2
-      picomatch: 2.3.1
-      rollup: 3.29.4
-    dev: true
 
-  /@sinclair/typebox@0.27.8:
+  '@sinclair/typebox@0.27.8':
     resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==}
-    dev: true
 
-  /@sveltejs/adapter-node@1.3.1(@sveltejs/kit@1.25.2):
+  '@sveltejs/adapter-node@1.3.1':
     resolution: {integrity: sha512-A0VgRQDCDPzdLNoiAbcOxGw4zT1Mc+n1LwT1OmO350R7WxrEqdMUChPPOd1iMfIDWlP4ie6E2d/WQf5es2d4Zw==}
     peerDependencies:
       '@sveltejs/kit': ^1.0.0
-    dependencies:
-      '@rollup/plugin-commonjs': 25.0.5(rollup@3.29.4)
-      '@rollup/plugin-json': 6.0.1(rollup@3.29.4)
-      '@rollup/plugin-node-resolve': 15.2.2(rollup@3.29.4)
-      '@sveltejs/kit': 1.25.2(svelte@3.59.2)(vite@4.5.2)
-      rollup: 3.29.4
-    dev: true
 
-  /@sveltejs/kit@1.25.2(svelte@3.59.2)(vite@4.5.2):
+  '@sveltejs/kit@1.25.2':
     resolution: {integrity: sha512-USuuSpdAPFDiLi58N2Pwd/TG9bcUSPAlzE5iaAXaLyCTWa3l36HDKH6nV5NqBybwfeux1ZwgtIeITLZJDJ6HDg==}
     engines: {node: ^16.14 || >=18}
     hasBin: true
-    requiresBuild: true
     peerDependencies:
       svelte: ^3.54.0 || ^4.0.0-next.0
       vite: ^4.0.0
-    dependencies:
-      '@sveltejs/vite-plugin-svelte': 2.4.6(svelte@3.59.2)(vite@4.5.2)
-      '@types/cookie': 0.5.3
-      cookie: 0.5.0
-      devalue: 4.3.2
-      esm-env: 1.0.0
-      kleur: 4.1.5
-      magic-string: 0.30.4
-      mime: 3.0.0
-      sade: 1.8.1
-      set-cookie-parser: 2.6.0
-      sirv: 2.0.3
-      svelte: 3.59.2
-      tiny-glob: 0.2.9
-      undici: 6.6.2
-      vite: 4.5.2(@types/node@20.8.2)
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
 
-  /@sveltejs/vite-plugin-svelte-inspector@1.0.4(@sveltejs/vite-plugin-svelte@2.4.6)(svelte@3.59.2)(vite@4.5.2):
+  '@sveltejs/vite-plugin-svelte-inspector@1.0.4':
     resolution: {integrity: sha512-zjiuZ3yydBtwpF3bj0kQNV0YXe+iKE545QGZVTaylW3eAzFr+pJ/cwK8lZEaRp4JtaJXhD5DyWAV4AxLh6DgaQ==}
     engines: {node: ^14.18.0 || >= 16}
     peerDependencies:
       '@sveltejs/vite-plugin-svelte': ^2.2.0
       svelte: ^3.54.0 || ^4.0.0
       vite: ^4.0.0
-    dependencies:
-      '@sveltejs/vite-plugin-svelte': 2.4.6(svelte@3.59.2)(vite@4.5.2)
-      debug: 4.3.4
-      svelte: 3.59.2
-      vite: 4.5.2(@types/node@20.8.2)
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
 
-  /@sveltejs/vite-plugin-svelte@2.4.6(svelte@3.59.2)(vite@4.5.2):
+  '@sveltejs/vite-plugin-svelte@2.4.6':
     resolution: {integrity: sha512-zO79p0+DZnXPnF0ltIigWDx/ux7Ni+HRaFOw720Qeivc1azFUrJxTl0OryXVibYNx1hCboGia1NRV3x8RNv4cA==}
     engines: {node: ^14.18.0 || >= 16}
     peerDependencies:
       svelte: ^3.54.0 || ^4.0.0
       vite: ^4.0.0
-    dependencies:
-      '@sveltejs/vite-plugin-svelte-inspector': 1.0.4(@sveltejs/vite-plugin-svelte@2.4.6)(svelte@3.59.2)(vite@4.5.2)
-      debug: 4.3.4
-      deepmerge: 4.3.1
-      kleur: 4.1.5
-      magic-string: 0.30.4
-      svelte: 3.59.2
-      svelte-hmr: 0.15.3(svelte@3.59.2)
-      vite: 4.5.2(@types/node@20.8.2)
-      vitefu: 0.2.4(vite@4.5.2)
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
 
-  /@types/chai-subset@1.3.3:
+  '@types/chai-subset@1.3.3':
     resolution: {integrity: sha512-frBecisrNGz+F4T6bcc+NLeolfiojh5FxW2klu669+8BARtyQv2C/GkNW6FUodVe4BroGMP/wER/YDGc7rEllw==}
-    dependencies:
-      '@types/chai': 4.3.6
-    dev: true
 
-  /@types/chai@4.3.6:
+  '@types/chai@4.3.6':
     resolution: {integrity: sha512-VOVRLM1mBxIRxydiViqPcKn6MIxZytrbMpd6RJLIWKxUNr3zux8no0Oc7kJx0WAPIitgZ0gkrDS+btlqQpubpw==}
-    dev: true
 
-  /@types/cookie@0.5.3:
+  '@types/cookie@0.5.3':
     resolution: {integrity: sha512-SLg07AS9z1Ab2LU+QxzU8RCmzsja80ywjf/t5oqw+4NSH20gIGlhLOrBDm1L3PBWzPa4+wkgFQVZAjE6Ioj2ug==}
-    dev: true
 
-  /@types/estree@1.0.2:
+  '@types/estree@1.0.2':
     resolution: {integrity: sha512-VeiPZ9MMwXjO32/Xu7+OwflfmeoRwkE/qzndw42gGtgJwZopBnzy2gD//NN1+go1mADzkDcqf/KnFRSjTJ8xJA==}
-    dev: true
 
-  /@types/json-schema@7.0.13:
+  '@types/json-schema@7.0.13':
     resolution: {integrity: sha512-RbSSoHliUbnXj3ny0CNFOoxrIDV6SUGyStHsvDqosw6CkdPV8TtWGlfecuK4ToyMEAql6pzNxgCFKanovUzlgQ==}
-    dev: true
 
-  /@types/libsodium-wrappers@0.7.11:
+  '@types/libsodium-wrappers@0.7.11':
     resolution: {integrity: sha512-8avZYJny690B6lFZQEDz4PEdCgC8D8qmGE/mhJBzCwzZvsqne61tCRbtJOhxsjYMItEZd3k4SoR4xKKLnI9Ztg==}
-    dev: true
 
-  /@types/node@20.8.2:
+  '@types/node@20.8.2':
     resolution: {integrity: sha512-Vvycsc9FQdwhxE3y3DzeIxuEJbWGDsnrxvMADzTDF/lcdR9/K+AQIeAghTQsHtotg/q0j3WEOYS/jQgSdWue3w==}
-    dev: true
 
-  /@types/prismjs@1.26.0:
+  '@types/prismjs@1.26.0':
     resolution: {integrity: sha512-ZTaqn/qSqUuAq1YwvOFQfVW1AR/oQJlLSZVustdjwI+GZ8kr0MSHBj0tsXPW1EqHubx50gtBEjbPGsdZwQwCjQ==}
-    dev: true
 
-  /@types/pug@2.0.7:
+  '@types/pug@2.0.7':
     resolution: {integrity: sha512-I469DU0UXNC1aHepwirWhu9YKg5fkxohZD95Ey/5A7lovC+Siu+MCLffva87lnfThaOrw9Vb1DUN5t55oULAAw==}
-    dev: true
 
-  /@types/resolve@1.20.2:
+  '@types/resolve@1.20.2':
     resolution: {integrity: sha512-60BCwRFOZCQhDncwQdxxeOEEkbc5dIMccYLwbxsS4TUNeVECQ/pBJ0j09mrHOl/JJvpRPGwO9SvE4nR2Nb/a4Q==}
-    dev: true
 
-  /@types/sass@1.45.0:
+  '@types/sass@1.45.0':
     resolution: {integrity: sha512-jn7qwGFmJHwUSphV8zZneO3GmtlgLsmhs/LQyVvQbIIa+fzGMUiHI4HXJZL3FT8MJmgXWbLGiVVY7ElvHq6vDA==}
     deprecated: This is a stub types definition. sass provides its own type definitions, so you do not need this installed.
-    dependencies:
-      sass: 1.68.0
-    dev: true
 
-  /@types/semver@7.5.3:
+  '@types/semver@7.5.3':
     resolution: {integrity: sha512-OxepLK9EuNEIPxWNME+C6WwbRAOOI2o2BaQEGzz5Lu2e4Z5eDnEo+/aVEDMIXywoJitJ7xWd641wrGLZdtwRyw==}
-    dev: true
 
-  /@typescript-eslint/eslint-plugin@5.62.0(@typescript-eslint/parser@5.62.0)(eslint@8.44.0)(typescript@4.9.5):
+  '@typescript-eslint/eslint-plugin@5.62.0':
     resolution: {integrity: sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
     peerDependencies:
@@ -657,25 +435,8 @@ packages:
     peerDependenciesMeta:
       typescript:
         optional: true
-    dependencies:
-      '@eslint-community/regexpp': 4.9.1
-      '@typescript-eslint/parser': 5.62.0(eslint@8.44.0)(typescript@4.9.5)
-      '@typescript-eslint/scope-manager': 5.62.0
-      '@typescript-eslint/type-utils': 5.62.0(eslint@8.44.0)(typescript@4.9.5)
-      '@typescript-eslint/utils': 5.62.0(eslint@8.44.0)(typescript@4.9.5)
-      debug: 4.3.4
-      eslint: 8.44.0
-      graphemer: 1.4.0
-      ignore: 5.2.4
-      natural-compare-lite: 1.4.0
-      semver: 7.5.4
-      tsutils: 3.21.0(typescript@4.9.5)
-      typescript: 4.9.5
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
 
-  /@typescript-eslint/parser@5.62.0(eslint@8.44.0)(typescript@4.9.5):
+  '@typescript-eslint/parser@5.62.0':
     resolution: {integrity: sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
     peerDependencies:
@@ -684,26 +445,12 @@ packages:
     peerDependenciesMeta:
       typescript:
         optional: true
-    dependencies:
-      '@typescript-eslint/scope-manager': 5.62.0
-      '@typescript-eslint/types': 5.62.0
-      '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5)
-      debug: 4.3.4
-      eslint: 8.44.0
-      typescript: 4.9.5
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
 
-  /@typescript-eslint/scope-manager@5.62.0:
+  '@typescript-eslint/scope-manager@5.62.0':
     resolution: {integrity: sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
-    dependencies:
-      '@typescript-eslint/types': 5.62.0
-      '@typescript-eslint/visitor-keys': 5.62.0
-    dev: true
 
-  /@typescript-eslint/type-utils@5.62.0(eslint@8.44.0)(typescript@4.9.5):
+  '@typescript-eslint/type-utils@5.62.0':
     resolution: {integrity: sha512-xsSQreu+VnfbqQpW5vnCJdq1Z3Q0U31qiWmRhr98ONQmcp/yhiPJFPq8MXiJVLiksmOKSjIldZzkebzHuCGzew==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
     peerDependencies:
@@ -712,23 +459,12 @@ packages:
     peerDependenciesMeta:
       typescript:
         optional: true
-    dependencies:
-      '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5)
-      '@typescript-eslint/utils': 5.62.0(eslint@8.44.0)(typescript@4.9.5)
-      debug: 4.3.4
-      eslint: 8.44.0
-      tsutils: 3.21.0(typescript@4.9.5)
-      typescript: 4.9.5
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
 
-  /@typescript-eslint/types@5.62.0:
+  '@typescript-eslint/types@5.62.0':
     resolution: {integrity: sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
-    dev: true
 
-  /@typescript-eslint/typescript-estree@5.62.0(typescript@4.9.5):
+  '@typescript-eslint/typescript-estree@5.62.0':
     resolution: {integrity: sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
     peerDependencies:
@@ -736,339 +472,181 @@ packages:
     peerDependenciesMeta:
       typescript:
         optional: true
-    dependencies:
-      '@typescript-eslint/types': 5.62.0
-      '@typescript-eslint/visitor-keys': 5.62.0
-      debug: 4.3.4
-      globby: 11.1.0
-      is-glob: 4.0.3
-      semver: 7.5.4
-      tsutils: 3.21.0(typescript@4.9.5)
-      typescript: 4.9.5
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
 
-  /@typescript-eslint/utils@5.62.0(eslint@8.44.0)(typescript@4.9.5):
+  '@typescript-eslint/utils@5.62.0':
     resolution: {integrity: sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
     peerDependencies:
       eslint: ^6.0.0 || ^7.0.0 || ^8.0.0
-    dependencies:
-      '@eslint-community/eslint-utils': 4.4.0(eslint@8.44.0)
-      '@types/json-schema': 7.0.13
-      '@types/semver': 7.5.3
-      '@typescript-eslint/scope-manager': 5.62.0
-      '@typescript-eslint/types': 5.62.0
-      '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5)
-      eslint: 8.44.0
-      eslint-scope: 5.1.1
-      semver: 7.5.4
-    transitivePeerDependencies:
-      - supports-color
-      - typescript
-    dev: true
 
-  /@typescript-eslint/visitor-keys@5.62.0:
+  '@typescript-eslint/visitor-keys@5.62.0':
     resolution: {integrity: sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
-    dependencies:
-      '@typescript-eslint/types': 5.62.0
-      eslint-visitor-keys: 3.4.3
-    dev: true
 
-  /@vitest/expect@0.33.0:
+  '@vitest/expect@0.33.0':
     resolution: {integrity: sha512-sVNf+Gla3mhTCxNJx+wJLDPp/WcstOe0Ksqz4Vec51MmgMth/ia0MGFEkIZmVGeTL5HtjYR4Wl/ZxBxBXZJTzQ==}
-    dependencies:
-      '@vitest/spy': 0.33.0
-      '@vitest/utils': 0.33.0
-      chai: 4.3.10
-    dev: true
 
-  /@vitest/runner@0.33.0:
+  '@vitest/runner@0.33.0':
     resolution: {integrity: sha512-UPfACnmCB6HKRHTlcgCoBh6ppl6fDn+J/xR8dTufWiKt/74Y9bHci5CKB8tESSV82zKYtkBJo9whU3mNvfaisg==}
-    dependencies:
-      '@vitest/utils': 0.33.0
-      p-limit: 4.0.0
-      pathe: 1.1.1
-    dev: true
 
-  /@vitest/snapshot@0.33.0:
+  '@vitest/snapshot@0.33.0':
     resolution: {integrity: sha512-tJjrl//qAHbyHajpFvr8Wsk8DIOODEebTu7pgBrP07iOepR5jYkLFiqLq2Ltxv+r0uptUb4izv1J8XBOwKkVYA==}
-    dependencies:
-      magic-string: 0.30.4
-      pathe: 1.1.1
-      pretty-format: 29.7.0
-    dev: true
 
-  /@vitest/spy@0.33.0:
+  '@vitest/spy@0.33.0':
     resolution: {integrity: sha512-Kv+yZ4hnH1WdiAkPUQTpRxW8kGtH8VRTnus7ZTGovFYM1ZezJpvGtb9nPIjPnptHbsyIAxYZsEpVPYgtpjGnrg==}
-    dependencies:
-      tinyspy: 2.2.0
-    dev: true
 
-  /@vitest/utils@0.33.0:
+  '@vitest/utils@0.33.0':
     resolution: {integrity: sha512-pF1w22ic965sv+EN6uoePkAOTkAPWM03Ri/jXNyMIKBb/XHLDPfhLvf/Fa9g0YECevAIz56oVYXhodLvLQ/awA==}
-    dependencies:
-      diff-sequences: 29.6.3
-      loupe: 2.3.6
-      pretty-format: 29.7.0
-    dev: true
 
-  /acorn-jsx@5.3.2(acorn@8.10.0):
+  acorn-jsx@5.3.2:
     resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==}
     peerDependencies:
       acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
-    dependencies:
-      acorn: 8.10.0
-    dev: true
 
-  /acorn-walk@8.2.0:
+  acorn-walk@8.2.0:
     resolution: {integrity: sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==}
     engines: {node: '>=0.4.0'}
-    dev: true
 
-  /acorn@8.10.0:
+  acorn@8.10.0:
     resolution: {integrity: sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==}
     engines: {node: '>=0.4.0'}
     hasBin: true
-    dev: true
 
-  /ajv@6.12.6:
+  ajv@6.12.6:
     resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==}
-    dependencies:
-      fast-deep-equal: 3.1.3
-      fast-json-stable-stringify: 2.1.0
-      json-schema-traverse: 0.4.1
-      uri-js: 4.4.1
-    dev: true
 
-  /ansi-regex@5.0.1:
+  ansi-regex@5.0.1:
     resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
     engines: {node: '>=8'}
-    dev: true
 
-  /ansi-styles@4.3.0:
+  ansi-styles@4.3.0:
     resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==}
     engines: {node: '>=8'}
-    dependencies:
-      color-convert: 2.0.1
-    dev: true
 
-  /ansi-styles@5.2.0:
+  ansi-styles@5.2.0:
     resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==}
     engines: {node: '>=10'}
-    dev: true
 
-  /any-promise@1.3.0:
+  any-promise@1.3.0:
     resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==}
-    dev: true
 
-  /anymatch@3.1.3:
+  anymatch@3.1.3:
     resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==}
     engines: {node: '>= 8'}
-    dependencies:
-      normalize-path: 3.0.0
-      picomatch: 2.3.1
-    dev: true
 
-  /arg@5.0.2:
+  arg@5.0.2:
     resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==}
-    dev: true
 
-  /argparse@2.0.1:
+  argparse@2.0.1:
     resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
-    dev: true
 
-  /array-union@2.1.0:
+  array-union@2.1.0:
     resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==}
     engines: {node: '>=8'}
-    dev: true
 
-  /assertion-error@1.1.0:
+  assertion-error@1.1.0:
     resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==}
-    dev: true
 
-  /autoprefixer@10.4.14(postcss@8.4.31):
+  autoprefixer@10.4.14:
     resolution: {integrity: sha512-FQzyfOsTlwVzjHxKEqRIAdJx9niO6VCBCoEwax/VLSoQF29ggECcPuBqUMZ+u8jCZOPSy8b8/8KnuFbp0SaFZQ==}
     engines: {node: ^10 || ^12 || >=14}
     hasBin: true
     peerDependencies:
       postcss: ^8.1.0
-    dependencies:
-      browserslist: 4.22.1
-      caniuse-lite: 1.0.30001546
-      fraction.js: 4.3.6
-      normalize-range: 0.1.2
-      picocolors: 1.0.0
-      postcss: 8.4.31
-      postcss-value-parser: 4.2.0
-    dev: true
 
-  /balanced-match@1.0.2:
+  balanced-match@1.0.2:
     resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
-    dev: true
 
-  /binary-extensions@2.2.0:
+  binary-extensions@2.2.0:
     resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==}
     engines: {node: '>=8'}
-    dev: true
 
-  /brace-expansion@1.1.11:
+  brace-expansion@1.1.11:
     resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==}
-    dependencies:
-      balanced-match: 1.0.2
-      concat-map: 0.0.1
-    dev: true
 
-  /brace-expansion@2.0.1:
+  brace-expansion@2.0.1:
     resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==}
-    dependencies:
-      balanced-match: 1.0.2
-    dev: true
 
-  /braces@3.0.2:
+  braces@3.0.2:
     resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==}
     engines: {node: '>=8'}
-    dependencies:
-      fill-range: 7.0.1
-    dev: true
 
-  /browserslist@4.22.1:
+  browserslist@4.22.1:
     resolution: {integrity: sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==}
     engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
     hasBin: true
-    dependencies:
-      caniuse-lite: 1.0.30001546
-      electron-to-chromium: 1.4.543
-      node-releases: 2.0.13
-      update-browserslist-db: 1.0.13(browserslist@4.22.1)
-    dev: true
 
-  /buffer-crc32@0.2.13:
+  buffer-crc32@0.2.13:
     resolution: {integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==}
-    dev: true
 
-  /builtin-modules@3.3.0:
+  builtin-modules@3.3.0:
     resolution: {integrity: sha512-zhaCDicdLuWN5UbN5IMnFqNMhNfo919sH85y2/ea+5Yg9TsTkeZxpL+JLbp6cgYFS4sRLp3YV4S6yDuqVWHYOw==}
     engines: {node: '>=6'}
-    dev: true
 
-  /cac@6.7.14:
+  cac@6.7.14:
     resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==}
     engines: {node: '>=8'}
-    dev: true
 
-  /callsites@3.1.0:
+  callsites@3.1.0:
     resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==}
     engines: {node: '>=6'}
-    dev: true
 
-  /camelcase-css@2.0.1:
+  camelcase-css@2.0.1:
     resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==}
     engines: {node: '>= 6'}
-    dev: true
 
-  /caniuse-lite@1.0.30001546:
+  caniuse-lite@1.0.30001546:
     resolution: {integrity: sha512-zvtSJwuQFpewSyRrI3AsftF6rM0X80mZkChIt1spBGEvRglCrjTniXvinc8JKRoqTwXAgvqTImaN9igfSMtUBw==}
-    dev: true
 
-  /capnp-ts@0.7.0:
+  capnp-ts@0.7.0:
     resolution: {integrity: sha512-XKxXAC3HVPv7r674zP0VC3RTXz+/JKhfyw94ljvF80yynK6VkTnqE3jMuN8b3dUVmmc43TjyxjW4KTsmB3c86g==}
-    dependencies:
-      debug: 4.3.4
-      tslib: 2.6.2
-    transitivePeerDependencies:
-      - supports-color
-    dev: false
 
-  /chai@4.3.10:
+  chai@4.3.10:
     resolution: {integrity: sha512-0UXG04VuVbruMUYbJ6JctvH0YnC/4q3/AkT18q4NaITo91CUm0liMS9VqzT9vZhVQ/1eqPanMWjBM+Juhfb/9g==}
     engines: {node: '>=4'}
-    dependencies:
-      assertion-error: 1.1.0
-      check-error: 1.0.3
-      deep-eql: 4.1.3
-      get-func-name: 2.0.2
-      loupe: 2.3.6
-      pathval: 1.1.1
-      type-detect: 4.0.8
-    dev: true
 
-  /chalk@4.1.2:
+  chalk@4.1.2:
     resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
     engines: {node: '>=10'}
-    dependencies:
-      ansi-styles: 4.3.0
-      supports-color: 7.2.0
-    dev: true
 
-  /check-error@1.0.3:
+  check-error@1.0.3:
     resolution: {integrity: sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==}
-    dependencies:
-      get-func-name: 2.0.2
-    dev: true
 
-  /chokidar@3.5.3:
+  chokidar@3.5.3:
     resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==}
     engines: {node: '>= 8.10.0'}
-    dependencies:
-      anymatch: 3.1.3
-      braces: 3.0.2
-      glob-parent: 5.1.2
-      is-binary-path: 2.1.0
-      is-glob: 4.0.3
-      normalize-path: 3.0.0
-      readdirp: 3.6.0
-    optionalDependencies:
-      fsevents: 2.3.3
-    dev: true
 
-  /color-convert@2.0.1:
+  color-convert@2.0.1:
     resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==}
     engines: {node: '>=7.0.0'}
-    dependencies:
-      color-name: 1.1.4
-    dev: true
 
-  /color-name@1.1.4:
+  color-name@1.1.4:
     resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==}
-    dev: true
 
-  /commander@4.1.1:
+  commander@4.1.1:
     resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==}
     engines: {node: '>= 6'}
-    dev: true
 
-  /commondir@1.0.1:
+  commondir@1.0.1:
     resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==}
-    dev: true
 
-  /concat-map@0.0.1:
+  concat-map@0.0.1:
     resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
-    dev: true
 
-  /cookie@0.5.0:
+  cookie@0.5.0:
     resolution: {integrity: sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==}
     engines: {node: '>= 0.6'}
-    dev: true
 
-  /cross-spawn@7.0.3:
+  cross-spawn@7.0.3:
     resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==}
     engines: {node: '>= 8'}
-    dependencies:
-      path-key: 3.1.1
-      shebang-command: 2.0.0
-      which: 2.0.2
-    dev: true
 
-  /cssesc@3.0.0:
+  cssesc@3.0.0:
     resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==}
     engines: {node: '>=4'}
     hasBin: true
-    dev: true
 
-  /debug@4.3.4:
+  debug@4.3.4:
     resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==}
     engines: {node: '>=6.0'}
     peerDependencies:
@@ -1076,918 +654,536 @@ packages:
     peerDependenciesMeta:
       supports-color:
         optional: true
-    dependencies:
-      ms: 2.1.2
 
-  /dedent-js@1.0.1:
+  dedent-js@1.0.1:
     resolution: {integrity: sha512-OUepMozQULMLUmhxS95Vudo0jb0UchLimi3+pQ2plj61Fcy8axbP9hbiD4Sz6DPqn6XG3kfmziVfQ1rSys5AJQ==}
-    dev: true
 
-  /deep-eql@4.1.3:
+  deep-eql@4.1.3:
     resolution: {integrity: sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==}
     engines: {node: '>=6'}
-    dependencies:
-      type-detect: 4.0.8
-    dev: true
 
-  /deep-is@0.1.4:
+  deep-is@0.1.4:
     resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==}
-    dev: true
 
-  /deepmerge@4.3.1:
+  deepmerge@4.3.1:
     resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==}
     engines: {node: '>=0.10.0'}
-    dev: true
 
-  /detect-indent@6.1.0:
+  detect-indent@6.1.0:
     resolution: {integrity: sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==}
     engines: {node: '>=8'}
-    dev: true
 
-  /devalue@4.3.2:
+  devalue@4.3.2:
     resolution: {integrity: sha512-KqFl6pOgOW+Y6wJgu80rHpo2/3H07vr8ntR9rkkFIRETewbf5GaYYcakYfiKz89K+sLsuPkQIZaXDMjUObZwWg==}
-    dev: true
 
-  /didyoumean@1.2.2:
+  didyoumean@1.2.2:
     resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==}
-    dev: true
 
-  /diff-sequences@29.6.3:
+  diff-sequences@29.6.3:
     resolution: {integrity: sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==}
     engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0}
-    dev: true
 
-  /dir-glob@3.0.1:
+  dir-glob@3.0.1:
     resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==}
     engines: {node: '>=8'}
-    dependencies:
-      path-type: 4.0.0
-    dev: true
 
-  /dlv@1.1.3:
+  dlv@1.1.3:
     resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==}
-    dev: true
 
-  /doctrine@3.0.0:
+  doctrine@3.0.0:
     resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==}
     engines: {node: '>=6.0.0'}
-    dependencies:
-      esutils: 2.0.3
-    dev: true
 
-  /dotenv@16.3.1:
+  dotenv@16.3.1:
     resolution: {integrity: sha512-IPzF4w4/Rd94bA9imS68tZBaYyBWSCE47V1RGuMrB94iyTOIEwRmVL2x/4An+6mETpLrKJ5hQkB8W4kFAadeIQ==}
     engines: {node: '>=12'}
-    dev: true
 
-  /electron-to-chromium@1.4.543:
+  electron-to-chromium@1.4.543:
     resolution: {integrity: sha512-t2ZP4AcGE0iKCCQCBx/K2426crYdxD3YU6l0uK2EO3FZH0pbC4pFz/sZm2ruZsND6hQBTcDWWlo/MLpiOdif5g==}
-    dev: true
 
-  /es6-promise@3.3.1:
+  es6-promise@3.3.1:
     resolution: {integrity: sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==}
-    dev: true
 
-  /esbuild@0.18.13:
+  esbuild@0.18.13:
     resolution: {integrity: sha512-vhg/WR/Oiu4oUIkVhmfcc23G6/zWuEQKFS+yiosSHe4aN6+DQRXIfeloYGibIfVhkr4wyfuVsGNLr+sQU1rWWw==}
     engines: {node: '>=12'}
     hasBin: true
-    requiresBuild: true
-    optionalDependencies:
-      '@esbuild/android-arm': 0.18.13
-      '@esbuild/android-arm64': 0.18.13
-      '@esbuild/android-x64': 0.18.13
-      '@esbuild/darwin-arm64': 0.18.13
-      '@esbuild/darwin-x64': 0.18.13
-      '@esbuild/freebsd-arm64': 0.18.13
-      '@esbuild/freebsd-x64': 0.18.13
-      '@esbuild/linux-arm': 0.18.13
-      '@esbuild/linux-arm64': 0.18.13
-      '@esbuild/linux-ia32': 0.18.13
-      '@esbuild/linux-loong64': 0.18.13
-      '@esbuild/linux-mips64el': 0.18.13
-      '@esbuild/linux-ppc64': 0.18.13
-      '@esbuild/linux-riscv64': 0.18.13
-      '@esbuild/linux-s390x': 0.18.13
-      '@esbuild/linux-x64': 0.18.13
-      '@esbuild/netbsd-x64': 0.18.13
-      '@esbuild/openbsd-x64': 0.18.13
-      '@esbuild/sunos-x64': 0.18.13
-      '@esbuild/win32-arm64': 0.18.13
-      '@esbuild/win32-ia32': 0.18.13
-      '@esbuild/win32-x64': 0.18.13
-    dev: true
 
-  /escalade@3.1.1:
+  escalade@3.1.1:
     resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==}
     engines: {node: '>=6'}
-    dev: true
 
-  /escape-string-regexp@4.0.0:
+  escape-string-regexp@4.0.0:
     resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==}
     engines: {node: '>=10'}
-    dev: true
 
-  /eslint-config-prettier@8.8.0(eslint@8.44.0):
+  eslint-config-prettier@8.8.0:
     resolution: {integrity: sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA==}
     hasBin: true
     peerDependencies:
       eslint: '>=7.0.0'
-    dependencies:
-      eslint: 8.44.0
-    dev: true
 
-  /eslint-plugin-svelte3@4.0.0(eslint@8.44.0)(svelte@3.59.2):
+  eslint-plugin-svelte3@4.0.0:
     resolution: {integrity: sha512-OIx9lgaNzD02+MDFNLw0GEUbuovNcglg+wnd/UY0fbZmlQSz7GlQiQ1f+yX0XvC07XPcDOnFcichqI3xCwp71g==}
     peerDependencies:
       eslint: '>=8.0.0'
       svelte: ^3.2.0
-    dependencies:
-      eslint: 8.44.0
-      svelte: 3.59.2
-    dev: true
 
-  /eslint-scope@5.1.1:
+  eslint-scope@5.1.1:
     resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==}
     engines: {node: '>=8.0.0'}
-    dependencies:
-      esrecurse: 4.3.0
-      estraverse: 4.3.0
-    dev: true
 
-  /eslint-scope@7.2.2:
+  eslint-scope@7.2.2:
     resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
-    dependencies:
-      esrecurse: 4.3.0
-      estraverse: 5.3.0
-    dev: true
 
-  /eslint-visitor-keys@3.4.3:
+  eslint-visitor-keys@3.4.3:
     resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
-    dev: true
 
-  /eslint@8.44.0:
+  eslint@8.44.0:
     resolution: {integrity: sha512-0wpHoUbDUHgNCyvFB5aXLiQVfK9B0at6gUvzy83k4kAsQ/u769TQDX6iKC+aO4upIHO9WSaA3QoXYQDHbNwf1A==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
     hasBin: true
-    dependencies:
-      '@eslint-community/eslint-utils': 4.4.0(eslint@8.44.0)
-      '@eslint-community/regexpp': 4.9.1
-      '@eslint/eslintrc': 2.1.2
-      '@eslint/js': 8.44.0
-      '@humanwhocodes/config-array': 0.11.11
-      '@humanwhocodes/module-importer': 1.0.1
-      '@nodelib/fs.walk': 1.2.8
-      ajv: 6.12.6
-      chalk: 4.1.2
-      cross-spawn: 7.0.3
-      debug: 4.3.4
-      doctrine: 3.0.0
-      escape-string-regexp: 4.0.0
-      eslint-scope: 7.2.2
-      eslint-visitor-keys: 3.4.3
-      espree: 9.6.1
-      esquery: 1.5.0
-      esutils: 2.0.3
-      fast-deep-equal: 3.1.3
-      file-entry-cache: 6.0.1
-      find-up: 5.0.0
-      glob-parent: 6.0.2
-      globals: 13.23.0
-      graphemer: 1.4.0
-      ignore: 5.2.4
-      import-fresh: 3.3.0
-      imurmurhash: 0.1.4
-      is-glob: 4.0.3
-      is-path-inside: 3.0.3
-      js-yaml: 4.1.0
-      json-stable-stringify-without-jsonify: 1.0.1
-      levn: 0.4.1
-      lodash.merge: 4.6.2
-      minimatch: 3.1.2
-      natural-compare: 1.4.0
-      optionator: 0.9.3
-      strip-ansi: 6.0.1
-      strip-json-comments: 3.1.1
-      text-table: 0.2.0
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
 
-  /esm-env@1.0.0:
+  esm-env@1.0.0:
     resolution: {integrity: sha512-Cf6VksWPsTuW01vU9Mk/3vRue91Zevka5SjyNf3nEpokFRuqt/KjUQoGAwq9qMmhpLTHmXzSIrFRw8zxWzmFBA==}
-    dev: true
 
-  /espree@9.6.1:
+  espree@9.6.1:
     resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
-    dependencies:
-      acorn: 8.10.0
-      acorn-jsx: 5.3.2(acorn@8.10.0)
-      eslint-visitor-keys: 3.4.3
-    dev: true
 
-  /esquery@1.5.0:
+  esquery@1.5.0:
     resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==}
     engines: {node: '>=0.10'}
-    dependencies:
-      estraverse: 5.3.0
-    dev: true
 
-  /esrecurse@4.3.0:
+  esrecurse@4.3.0:
     resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==}
     engines: {node: '>=4.0'}
-    dependencies:
-      estraverse: 5.3.0
-    dev: true
 
-  /estraverse@4.3.0:
+  estraverse@4.3.0:
     resolution: {integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==}
     engines: {node: '>=4.0'}
-    dev: true
 
-  /estraverse@5.3.0:
+  estraverse@5.3.0:
     resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==}
     engines: {node: '>=4.0'}
-    dev: true
 
-  /estree-walker@2.0.2:
+  estree-walker@2.0.2:
     resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==}
-    dev: true
 
-  /esutils@2.0.3:
+  esutils@2.0.3:
     resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==}
     engines: {node: '>=0.10.0'}
-    dev: true
 
-  /fast-deep-equal@3.1.3:
+  fast-deep-equal@3.1.3:
     resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
-    dev: true
 
-  /fast-glob@3.3.1:
+  fast-glob@3.3.1:
     resolution: {integrity: sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==}
     engines: {node: '>=8.6.0'}
-    dependencies:
-      '@nodelib/fs.stat': 2.0.5
-      '@nodelib/fs.walk': 1.2.8
-      glob-parent: 5.1.2
-      merge2: 1.4.1
-      micromatch: 4.0.5
-    dev: true
 
-  /fast-json-stable-stringify@2.1.0:
+  fast-json-stable-stringify@2.1.0:
     resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==}
-    dev: true
 
-  /fast-levenshtein@2.0.6:
+  fast-levenshtein@2.0.6:
     resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==}
-    dev: true
 
-  /fastq@1.15.0:
+  fastq@1.15.0:
     resolution: {integrity: sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==}
-    dependencies:
-      reusify: 1.0.4
-    dev: true
 
-  /file-entry-cache@6.0.1:
+  file-entry-cache@6.0.1:
     resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==}
     engines: {node: ^10.12.0 || >=12.0.0}
-    dependencies:
-      flat-cache: 3.1.0
-    dev: true
 
-  /fill-range@7.0.1:
+  fill-range@7.0.1:
     resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==}
     engines: {node: '>=8'}
-    dependencies:
-      to-regex-range: 5.0.1
-    dev: true
 
-  /find-up@5.0.0:
+  find-up@5.0.0:
     resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==}
     engines: {node: '>=10'}
-    dependencies:
-      locate-path: 6.0.0
-      path-exists: 4.0.0
-    dev: true
 
-  /flat-cache@3.1.0:
+  flat-cache@3.1.0:
     resolution: {integrity: sha512-OHx4Qwrrt0E4jEIcI5/Xb+f+QmJYNj2rrK8wiIdQOIrB9WrrJL8cjZvXdXuBTkkEwEqLycb5BeZDV1o2i9bTew==}
     engines: {node: '>=12.0.0'}
-    dependencies:
-      flatted: 3.2.9
-      keyv: 4.5.3
-      rimraf: 3.0.2
-    dev: true
 
-  /flatted@3.2.9:
+  flatted@3.2.9:
     resolution: {integrity: sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==}
-    dev: true
 
-  /fraction.js@4.3.6:
+  fraction.js@4.3.6:
     resolution: {integrity: sha512-n2aZ9tNfYDwaHhvFTkhFErqOMIb8uyzSQ+vGJBjZyanAKZVbGUQ1sngfk9FdkBw7G26O7AgNjLcecLffD1c7eg==}
-    dev: true
 
-  /fs.realpath@1.0.0:
+  fs.realpath@1.0.0:
     resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
-    dev: true
 
-  /fsevents@2.3.2:
+  fsevents@2.3.2:
     resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==}
     engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
     os: [darwin]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /fsevents@2.3.3:
+  fsevents@2.3.3:
     resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
     engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
     os: [darwin]
-    requiresBuild: true
-    dev: true
-    optional: true
 
-  /get-func-name@2.0.2:
+  get-func-name@2.0.2:
     resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==}
-    dev: true
 
-  /glob-parent@5.1.2:
+  glob-parent@5.1.2:
     resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==}
     engines: {node: '>= 6'}
-    dependencies:
-      is-glob: 4.0.3
-    dev: true
 
-  /glob-parent@6.0.2:
+  glob-parent@6.0.2:
     resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==}
     engines: {node: '>=10.13.0'}
-    dependencies:
-      is-glob: 4.0.3
-    dev: true
 
-  /glob@7.1.6:
+  glob@7.1.6:
     resolution: {integrity: sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==}
-    dependencies:
-      fs.realpath: 1.0.0
-      inflight: 1.0.6
-      inherits: 2.0.4
-      minimatch: 3.1.2
-      once: 1.4.0
-      path-is-absolute: 1.0.1
-    dev: true
 
-  /glob@7.2.3:
+  glob@7.2.3:
     resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
-    dependencies:
-      fs.realpath: 1.0.0
-      inflight: 1.0.6
-      inherits: 2.0.4
-      minimatch: 3.1.2
-      once: 1.4.0
-      path-is-absolute: 1.0.1
-    dev: true
 
-  /glob@8.1.0:
+  glob@8.1.0:
     resolution: {integrity: sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==}
     engines: {node: '>=12'}
-    dependencies:
-      fs.realpath: 1.0.0
-      inflight: 1.0.6
-      inherits: 2.0.4
-      minimatch: 5.1.6
-      once: 1.4.0
-    dev: true
 
-  /globals@13.23.0:
+  globals@13.23.0:
     resolution: {integrity: sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==}
     engines: {node: '>=8'}
-    dependencies:
-      type-fest: 0.20.2
-    dev: true
 
-  /globalyzer@0.1.0:
+  globalyzer@0.1.0:
     resolution: {integrity: sha512-40oNTM9UfG6aBmuKxk/giHn5nQ8RVz/SS4Ir6zgzOv9/qC3kKZ9v4etGTcJbEl/NyVQH7FGU7d+X1egr57Md2Q==}
-    dev: true
 
-  /globby@11.1.0:
+  globby@11.1.0:
     resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==}
     engines: {node: '>=10'}
-    dependencies:
-      array-union: 2.1.0
-      dir-glob: 3.0.1
-      fast-glob: 3.3.1
-      ignore: 5.2.4
-      merge2: 1.4.1
-      slash: 3.0.0
-    dev: true
 
-  /globrex@0.1.2:
+  globrex@0.1.2:
     resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==}
-    dev: true
 
-  /graceful-fs@4.2.11:
+  graceful-fs@4.2.11:
     resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==}
-    dev: true
 
-  /graphemer@1.4.0:
+  graphemer@1.4.0:
     resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==}
-    dev: true
 
-  /has-flag@4.0.0:
+  has-flag@4.0.0:
     resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==}
     engines: {node: '>=8'}
-    dev: true
 
-  /has@1.0.4:
+  has@1.0.4:
     resolution: {integrity: sha512-qdSAmqLF6209RFj4VVItywPMbm3vWylknmB3nvNiUIs72xAimcM8nVYxYr7ncvZq5qzk9MKIZR8ijqD/1QuYjQ==}
     engines: {node: '>= 0.4.0'}
-    dev: true
 
-  /ignore@5.2.4:
+  ignore@5.2.4:
     resolution: {integrity: sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==}
     engines: {node: '>= 4'}
-    dev: true
 
-  /immutable@4.3.4:
+  immutable@4.3.4:
     resolution: {integrity: sha512-fsXeu4J4i6WNWSikpI88v/PcVflZz+6kMhUfIwc5SY+poQRPnaf5V7qds6SUyUN3cVxEzuCab7QIoLOQ+DQ1wA==}
-    dev: true
 
-  /import-fresh@3.3.0:
+  import-fresh@3.3.0:
     resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==}
     engines: {node: '>=6'}
-    dependencies:
-      parent-module: 1.0.1
-      resolve-from: 4.0.0
-    dev: true
 
-  /imurmurhash@0.1.4:
+  imurmurhash@0.1.4:
     resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
     engines: {node: '>=0.8.19'}
-    dev: true
 
-  /inflight@1.0.6:
+  inflight@1.0.6:
     resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==}
-    dependencies:
-      once: 1.4.0
-      wrappy: 1.0.2
-    dev: true
 
-  /inherits@2.0.4:
+  inherits@2.0.4:
     resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
-    dev: true
 
-  /is-binary-path@2.1.0:
+  is-binary-path@2.1.0:
     resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==}
     engines: {node: '>=8'}
-    dependencies:
-      binary-extensions: 2.2.0
-    dev: true
 
-  /is-builtin-module@3.2.1:
+  is-builtin-module@3.2.1:
     resolution: {integrity: sha512-BSLE3HnV2syZ0FK0iMA/yUGplUeMmNz4AW5fnTunbCIqZi4vG3WjJT9FHMy5D69xmAYBHXQhJdALdpwVxV501A==}
     engines: {node: '>=6'}
-    dependencies:
-      builtin-modules: 3.3.0
-    dev: true
 
-  /is-core-module@2.13.0:
+  is-core-module@2.13.0:
     resolution: {integrity: sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==}
-    dependencies:
-      has: 1.0.4
-    dev: true
 
-  /is-extglob@2.1.1:
+  is-extglob@2.1.1:
     resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
     engines: {node: '>=0.10.0'}
-    dev: true
 
-  /is-glob@4.0.3:
+  is-glob@4.0.3:
     resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
     engines: {node: '>=0.10.0'}
-    dependencies:
-      is-extglob: 2.1.1
-    dev: true
 
-  /is-module@1.0.0:
+  is-module@1.0.0:
     resolution: {integrity: sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g==}
-    dev: true
 
-  /is-number@7.0.0:
+  is-number@7.0.0:
     resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
     engines: {node: '>=0.12.0'}
-    dev: true
 
-  /is-path-inside@3.0.3:
+  is-path-inside@3.0.3:
     resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==}
     engines: {node: '>=8'}
-    dev: true
 
-  /is-reference@1.2.1:
+  is-reference@1.2.1:
     resolution: {integrity: sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ==}
-    dependencies:
-      '@types/estree': 1.0.2
-    dev: true
 
-  /isexe@2.0.0:
+  isexe@2.0.0:
     resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==}
-    dev: true
 
-  /jiti@1.20.0:
+  jiti@1.20.0:
     resolution: {integrity: sha512-3TV69ZbrvV6U5DfQimop50jE9Dl6J8O1ja1dvBbMba/sZ3YBEQqJ2VZRoQPVnhlzjNtU1vaXRZVrVjU4qtm8yA==}
     hasBin: true
-    dev: true
 
-  /js-yaml@4.1.0:
+  js-yaml@4.1.0:
     resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==}
     hasBin: true
-    dependencies:
-      argparse: 2.0.1
-    dev: true
 
-  /json-buffer@3.0.1:
+  json-buffer@3.0.1:
     resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==}
-    dev: true
 
-  /json-schema-traverse@0.4.1:
+  json-schema-traverse@0.4.1:
     resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==}
-    dev: true
 
-  /json-stable-stringify-without-jsonify@1.0.1:
+  json-stable-stringify-without-jsonify@1.0.1:
     resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==}
-    dev: true
 
-  /jsonc-parser@3.2.0:
+  jsonc-parser@3.2.0:
     resolution: {integrity: sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==}
-    dev: true
 
-  /just-debounce-it@3.2.0:
+  just-debounce-it@3.2.0:
     resolution: {integrity: sha512-WXzwLL0745uNuedrCsCs3rpmfD6DBaf7uuVwaq98/8dafURfgQaBsSpjiPp5+CW6Vjltwy9cOGI6qE71b3T8iQ==}
-    dev: false
 
-  /keyv@4.5.3:
+  keyv@4.5.3:
     resolution: {integrity: sha512-QCiSav9WaX1PgETJ+SpNnx2PRRapJ/oRSXM4VO5OGYGSjrxbKPVFVhB3l2OCbLCk329N8qyAtsJjSjvVBWzEug==}
-    dependencies:
-      json-buffer: 3.0.1
-    dev: true
 
-  /kleur@4.1.5:
+  kleur@4.1.5:
     resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==}
     engines: {node: '>=6'}
-    dev: true
 
-  /ky@0.33.3:
+  ky@0.33.3:
     resolution: {integrity: sha512-CasD9OCEQSFIam2U8efFK81Yeg8vNMTBUqtMOHlrcWQHqUX3HeCl9Dr31u4toV7emlH8Mymk5+9p0lL6mKb/Xw==}
     engines: {node: '>=14.16'}
-    dev: false
 
-  /levn@0.4.1:
+  levn@0.4.1:
     resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==}
     engines: {node: '>= 0.8.0'}
-    dependencies:
-      prelude-ls: 1.2.1
-      type-check: 0.4.0
-    dev: true
 
-  /libsodium-wrappers@0.7.11:
-    resolution: {integrity: sha512-SrcLtXj7BM19vUKtQuyQKiQCRJPgbpauzl3s0rSwD+60wtHqSUuqcoawlMDheCJga85nKOQwxNYQxf/CKAvs6Q==}
-    dependencies:
-      libsodium: 0.7.11
-    dev: false
+  libsodium-wrappers@0.7.14:
+    resolution: {integrity: sha512-300TtsePizhJZ7HjLmWr6hLHAgJUxIGhapSw+EwfCtDuWaEmEdGXSQv6j6qFw0bs9l4vS2NH9BtOHfXAq6h5kQ==}
 
-  /libsodium@0.7.11:
-    resolution: {integrity: sha512-WPfJ7sS53I2s4iM58QxY3Inb83/6mjlYgcmZs7DJsvDlnmVUwNinBCi5vBT43P6bHRy01O4zsMU2CoVR6xJ40A==}
-    dev: false
+  libsodium@0.7.14:
+    resolution: {integrity: sha512-/pOd7eO6oZrfORquRTC4284OUJFcMi8F3Vnc9xtRBT0teLfOUxWIItaBFF3odYjZ7nlJNwnLdUVEUFHxVyX/Sw==}
 
-  /lilconfig@2.1.0:
+  lilconfig@2.1.0:
     resolution: {integrity: sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==}
     engines: {node: '>=10'}
-    dev: true
 
-  /lines-and-columns@1.2.4:
+  lines-and-columns@1.2.4:
     resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}
-    dev: true
 
-  /local-pkg@0.4.3:
+  local-pkg@0.4.3:
     resolution: {integrity: sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==}
     engines: {node: '>=14'}
-    dev: true
 
-  /locate-path@6.0.0:
+  locate-path@6.0.0:
     resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==}
     engines: {node: '>=10'}
-    dependencies:
-      p-locate: 5.0.0
-    dev: true
 
-  /lodash.merge@4.6.2:
+  lodash.merge@4.6.2:
     resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==}
-    dev: true
 
-  /loupe@2.3.6:
+  loupe@2.3.6:
     resolution: {integrity: sha512-RaPMZKiMy8/JruncMU5Bt6na1eftNoo++R4Y+N2FrxkDVTrGvcyzFTsaGif4QTeKESheMGegbhw6iUAq+5A8zA==}
-    dependencies:
-      get-func-name: 2.0.2
-    dev: true
 
-  /lower-case@2.0.2:
+  lower-case@2.0.2:
     resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==}
-    dependencies:
-      tslib: 2.6.2
-    dev: true
 
-  /lru-cache@6.0.0:
+  lru-cache@6.0.0:
     resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==}
     engines: {node: '>=10'}
-    dependencies:
-      yallist: 4.0.0
 
-  /magic-string@0.25.9:
+  magic-string@0.25.9:
     resolution: {integrity: sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==}
-    dependencies:
-      sourcemap-codec: 1.4.8
-    dev: true
 
-  /magic-string@0.27.0:
+  magic-string@0.27.0:
     resolution: {integrity: sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==}
     engines: {node: '>=12'}
-    dependencies:
-      '@jridgewell/sourcemap-codec': 1.4.15
-    dev: true
 
-  /magic-string@0.30.4:
+  magic-string@0.30.4:
     resolution: {integrity: sha512-Q/TKtsC5BPm0kGqgBIF9oXAs/xEf2vRKiIB4wCRQTJOQIByZ1d+NnUOotvJOvNpi5RNIgVOMC3pOuaP1ZTDlVg==}
     engines: {node: '>=12'}
-    dependencies:
-      '@jridgewell/sourcemap-codec': 1.4.15
-    dev: true
 
-  /merge2@1.4.1:
+  merge2@1.4.1:
     resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==}
     engines: {node: '>= 8'}
-    dev: true
 
-  /micromatch@4.0.5:
+  micromatch@4.0.5:
     resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==}
     engines: {node: '>=8.6'}
-    dependencies:
-      braces: 3.0.2
-      picomatch: 2.3.1
-    dev: true
 
-  /mime@3.0.0:
+  mime@3.0.0:
     resolution: {integrity: sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A==}
     engines: {node: '>=10.0.0'}
     hasBin: true
-    dev: true
 
-  /min-indent@1.0.1:
+  min-indent@1.0.1:
     resolution: {integrity: sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==}
     engines: {node: '>=4'}
-    dev: true
 
-  /minimatch@3.1.2:
+  minimatch@3.1.2:
     resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==}
-    dependencies:
-      brace-expansion: 1.1.11
-    dev: true
 
-  /minimatch@5.1.6:
+  minimatch@5.1.6:
     resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==}
     engines: {node: '>=10'}
-    dependencies:
-      brace-expansion: 2.0.1
-    dev: true
 
-  /minimist@1.2.8:
+  minimist@1.2.8:
     resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
-    dev: true
 
-  /mkdirp@0.5.6:
+  mkdirp@0.5.6:
     resolution: {integrity: sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==}
     hasBin: true
-    dependencies:
-      minimist: 1.2.8
-    dev: true
 
-  /mlly@1.4.2:
+  mlly@1.4.2:
     resolution: {integrity: sha512-i/Ykufi2t1EZ6NaPLdfnZk2AX8cs0d+mTzVKuPfqPKPatxLApaBoxJQ9x1/uckXtrS/U5oisPMDkNs0yQTaBRg==}
-    dependencies:
-      acorn: 8.10.0
-      pathe: 1.1.1
-      pkg-types: 1.0.3
-      ufo: 1.3.1
-    dev: true
 
-  /mri@1.2.0:
+  mri@1.2.0:
     resolution: {integrity: sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==}
     engines: {node: '>=4'}
-    dev: true
 
-  /mrmime@1.0.1:
+  mrmime@1.0.1:
     resolution: {integrity: sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==}
     engines: {node: '>=10'}
-    dev: true
 
-  /ms@2.1.2:
+  ms@2.1.2:
     resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==}
 
-  /mz@2.7.0:
+  mz@2.7.0:
     resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==}
-    dependencies:
-      any-promise: 1.3.0
-      object-assign: 4.1.1
-      thenify-all: 1.6.0
-    dev: true
 
-  /nanoid@3.3.6:
+  nanoid@3.3.6:
     resolution: {integrity: sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==}
     engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
     hasBin: true
-    dev: true
 
-  /natural-compare-lite@1.4.0:
+  natural-compare-lite@1.4.0:
     resolution: {integrity: sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==}
-    dev: true
 
-  /natural-compare@1.4.0:
+  natural-compare@1.4.0:
     resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==}
-    dev: true
 
-  /no-case@3.0.4:
+  no-case@3.0.4:
     resolution: {integrity: sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==}
-    dependencies:
-      lower-case: 2.0.2
-      tslib: 2.6.2
-    dev: true
 
-  /node-releases@2.0.13:
+  node-releases@2.0.13:
     resolution: {integrity: sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==}
-    dev: true
 
-  /normalize-path@3.0.0:
+  normalize-path@3.0.0:
     resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==}
     engines: {node: '>=0.10.0'}
-    dev: true
 
-  /normalize-range@0.1.2:
+  normalize-range@0.1.2:
     resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==}
     engines: {node: '>=0.10.0'}
-    dev: true
 
-  /object-assign@4.1.1:
+  object-assign@4.1.1:
     resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==}
     engines: {node: '>=0.10.0'}
-    dev: true
 
-  /object-hash@3.0.0:
+  object-hash@3.0.0:
     resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==}
     engines: {node: '>= 6'}
-    dev: true
 
-  /once@1.4.0:
+  once@1.4.0:
     resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
-    dependencies:
-      wrappy: 1.0.2
-    dev: true
 
-  /optionator@0.9.3:
+  optionator@0.9.3:
     resolution: {integrity: sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==}
     engines: {node: '>= 0.8.0'}
-    dependencies:
-      '@aashutoshrathi/word-wrap': 1.2.6
-      deep-is: 0.1.4
-      fast-levenshtein: 2.0.6
-      levn: 0.4.1
-      prelude-ls: 1.2.1
-      type-check: 0.4.0
-    dev: true
 
-  /p-limit@3.1.0:
+  p-limit@3.1.0:
     resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==}
     engines: {node: '>=10'}
-    dependencies:
-      yocto-queue: 0.1.0
-    dev: true
 
-  /p-limit@4.0.0:
+  p-limit@4.0.0:
     resolution: {integrity: sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==}
     engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
-    dependencies:
-      yocto-queue: 1.0.0
-    dev: true
 
-  /p-locate@5.0.0:
+  p-locate@5.0.0:
     resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==}
     engines: {node: '>=10'}
-    dependencies:
-      p-limit: 3.1.0
-    dev: true
 
-  /parent-module@1.0.1:
+  parent-module@1.0.1:
     resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
     engines: {node: '>=6'}
-    dependencies:
-      callsites: 3.1.0
-    dev: true
 
-  /pascal-case@3.1.2:
+  pascal-case@3.1.2:
     resolution: {integrity: sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==}
-    dependencies:
-      no-case: 3.0.4
-      tslib: 2.6.2
-    dev: true
 
-  /path-exists@4.0.0:
+  path-exists@4.0.0:
     resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
     engines: {node: '>=8'}
-    dev: true
 
-  /path-is-absolute@1.0.1:
+  path-is-absolute@1.0.1:
     resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==}
     engines: {node: '>=0.10.0'}
-    dev: true
 
-  /path-key@3.1.1:
+  path-key@3.1.1:
     resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==}
     engines: {node: '>=8'}
-    dev: true
 
-  /path-parse@1.0.7:
+  path-parse@1.0.7:
     resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
-    dev: true
 
-  /path-type@4.0.0:
+  path-type@4.0.0:
     resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==}
     engines: {node: '>=8'}
-    dev: true
 
-  /pathe@1.1.1:
+  pathe@1.1.1:
     resolution: {integrity: sha512-d+RQGp0MAYTIaDBIMmOfMwz3E+LOZnxx1HZd5R18mmCZY0QBlK0LDZfPc8FW8Ed2DlvsuE6PRjroDY+wg4+j/Q==}
-    dev: true
 
-  /pathval@1.1.1:
+  pathval@1.1.1:
     resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==}
-    dev: true
 
-  /picocolors@1.0.0:
+  picocolors@1.0.0:
     resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==}
-    dev: true
 
-  /picomatch@2.3.1:
+  picomatch@2.3.1:
     resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==}
     engines: {node: '>=8.6'}
-    dev: true
 
-  /pify@2.3.0:
+  pify@2.3.0:
     resolution: {integrity: sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==}
     engines: {node: '>=0.10.0'}
-    dev: true
 
-  /pirates@4.0.6:
+  pirates@4.0.6:
     resolution: {integrity: sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==}
     engines: {node: '>= 6'}
-    dev: true
 
-  /pkg-types@1.0.3:
+  pkg-types@1.0.3:
     resolution: {integrity: sha512-nN7pYi0AQqJnoLPC9eHFQ8AcyaixBUOwvqc5TDnIKCMEE6I0y8P7OKA7fPexsXGCGxQDl/cmrLAp26LhcwxZ4A==}
-    dependencies:
-      jsonc-parser: 3.2.0
-      mlly: 1.4.2
-      pathe: 1.1.1
-    dev: true
 
-  /playwright-core@1.36.1:
+  playwright-core@1.36.1:
     resolution: {integrity: sha512-7+tmPuMcEW4xeCL9cp9KxmYpQYHKkyjwoXRnoeTowaeNat8PoBMk/HwCYhqkH2fRkshfKEOiVus/IhID2Pg8kg==}
     engines: {node: '>=16'}
     hasBin: true
-    dev: true
 
-  /postcss-import@14.1.0(postcss@8.4.31):
+  postcss-import@14.1.0:
     resolution: {integrity: sha512-flwI+Vgm4SElObFVPpTIT7SU7R3qk2L7PyduMcokiaVKuWv9d/U+Gm/QAd8NDLuykTWTkcrjOeD2Pp1rMeBTGw==}
     engines: {node: '>=10.0.0'}
     peerDependencies:
       postcss: ^8.0.0
-    dependencies:
-      postcss: 8.4.31
-      postcss-value-parser: 4.2.0
-      read-cache: 1.0.0
-      resolve: 1.22.6
-    dev: true
 
-  /postcss-js@4.0.1(postcss@8.4.31):
+  postcss-js@4.0.1:
     resolution: {integrity: sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==}
     engines: {node: ^12 || ^14 || >= 16}
     peerDependencies:
       postcss: ^8.4.21
-    dependencies:
-      camelcase-css: 2.0.1
-      postcss: 8.4.31
-    dev: true
 
-  /postcss-load-config@3.1.4(postcss@8.4.31):
+  postcss-load-config@3.1.4:
     resolution: {integrity: sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg==}
     engines: {node: '>= 10'}
     peerDependencies:
@@ -1998,315 +1194,190 @@ packages:
         optional: true
       ts-node:
         optional: true
-    dependencies:
-      lilconfig: 2.1.0
-      postcss: 8.4.31
-      yaml: 1.10.2
-    dev: true
 
-  /postcss-nested@6.0.0(postcss@8.4.31):
+  postcss-nested@6.0.0:
     resolution: {integrity: sha512-0DkamqrPcmkBDsLn+vQDIrtkSbNkv5AD/M322ySo9kqFkCIYklym2xEmWkwo+Y3/qZo34tzEPNUw4y7yMCdv5w==}
     engines: {node: '>=12.0'}
     peerDependencies:
       postcss: ^8.2.14
-    dependencies:
-      postcss: 8.4.31
-      postcss-selector-parser: 6.0.13
-    dev: true
 
-  /postcss-selector-parser@6.0.13:
+  postcss-selector-parser@6.0.13:
     resolution: {integrity: sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==}
     engines: {node: '>=4'}
-    dependencies:
-      cssesc: 3.0.0
-      util-deprecate: 1.0.2
-    dev: true
 
-  /postcss-value-parser@4.2.0:
+  postcss-value-parser@4.2.0:
     resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==}
-    dev: true
 
-  /postcss@8.4.31:
+  postcss@8.4.31:
     resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==}
     engines: {node: ^10 || ^12 || >=14}
-    dependencies:
-      nanoid: 3.3.6
-      picocolors: 1.0.0
-      source-map-js: 1.0.2
-    dev: true
 
-  /prelude-ls@1.2.1:
+  prelude-ls@1.2.1:
     resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==}
     engines: {node: '>= 0.8.0'}
-    dev: true
 
-  /prettier-plugin-svelte@2.10.1(prettier@2.8.8)(svelte@3.59.2):
+  prettier-plugin-svelte@2.10.1:
     resolution: {integrity: sha512-Wlq7Z5v2ueCubWo0TZzKc9XHcm7TDxqcuzRuGd0gcENfzfT4JZ9yDlCbEgxWgiPmLHkBjfOtpAWkcT28MCDpUQ==}
     peerDependencies:
       prettier: ^1.16.4 || ^2.0.0
       svelte: ^3.2.0 || ^4.0.0-next.0
-    dependencies:
-      prettier: 2.8.8
-      svelte: 3.59.2
-    dev: true
 
-  /prettier@2.8.8:
+  prettier@2.8.8:
     resolution: {integrity: sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==}
     engines: {node: '>=10.13.0'}
     hasBin: true
-    dev: true
 
-  /pretty-format@29.7.0:
+  pretty-format@29.7.0:
     resolution: {integrity: sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==}
     engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0}
-    dependencies:
-      '@jest/schemas': 29.6.3
-      ansi-styles: 5.2.0
-      react-is: 18.2.0
-    dev: true
 
-  /prismjs@1.29.0:
+  prismjs@1.29.0:
     resolution: {integrity: sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==}
     engines: {node: '>=6'}
-    dev: false
 
-  /punycode@2.3.0:
+  punycode@2.3.0:
     resolution: {integrity: sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==}
     engines: {node: '>=6'}
-    dev: true
 
-  /queue-microtask@1.2.3:
+  queue-microtask@1.2.3:
     resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
-    dev: true
 
-  /quick-lru@5.1.1:
+  quick-lru@5.1.1:
     resolution: {integrity: sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==}
     engines: {node: '>=10'}
-    dev: true
 
-  /react-is@18.2.0:
+  react-is@18.2.0:
     resolution: {integrity: sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==}
-    dev: true
 
-  /read-cache@1.0.0:
+  read-cache@1.0.0:
     resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==}
-    dependencies:
-      pify: 2.3.0
-    dev: true
 
-  /readdirp@3.6.0:
+  readdirp@3.6.0:
     resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==}
     engines: {node: '>=8.10.0'}
-    dependencies:
-      picomatch: 2.3.1
-    dev: true
 
-  /resolve-from@4.0.0:
+  resolve-from@4.0.0:
     resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==}
     engines: {node: '>=4'}
-    dev: true
 
-  /resolve@1.22.6:
+  resolve@1.22.6:
     resolution: {integrity: sha512-njhxM7mV12JfufShqGy3Rz8j11RPdLy4xi15UurGJeoHLfJpVXKdh3ueuOqbYUcDZnffr6X739JBo5LzyahEsw==}
     hasBin: true
-    dependencies:
-      is-core-module: 2.13.0
-      path-parse: 1.0.7
-      supports-preserve-symlinks-flag: 1.0.0
-    dev: true
 
-  /reusify@1.0.4:
+  reusify@1.0.4:
     resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==}
     engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
-    dev: true
 
-  /rimraf@2.7.1:
+  rimraf@2.7.1:
     resolution: {integrity: sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==}
     hasBin: true
-    dependencies:
-      glob: 7.2.3
-    dev: true
 
-  /rimraf@3.0.2:
+  rimraf@3.0.2:
     resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==}
     hasBin: true
-    dependencies:
-      glob: 7.2.3
-    dev: true
 
-  /rollup@3.29.4:
+  rollup@3.29.4:
     resolution: {integrity: sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==}
     engines: {node: '>=14.18.0', npm: '>=8.0.0'}
     hasBin: true
-    optionalDependencies:
-      fsevents: 2.3.3
-    dev: true
 
-  /run-parallel@1.2.0:
+  run-parallel@1.2.0:
     resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==}
-    dependencies:
-      queue-microtask: 1.2.3
-    dev: true
 
-  /sade@1.8.1:
+  sade@1.8.1:
     resolution: {integrity: sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==}
     engines: {node: '>=6'}
-    dependencies:
-      mri: 1.2.0
-    dev: true
 
-  /sander@0.5.1:
+  sander@0.5.1:
     resolution: {integrity: sha512-3lVqBir7WuKDHGrKRDn/1Ye3kwpXaDOMsiRP1wd6wpZW56gJhsbp5RqQpA6JG/P+pkXizygnr1dKR8vzWaVsfA==}
-    dependencies:
-      es6-promise: 3.3.1
-      graceful-fs: 4.2.11
-      mkdirp: 0.5.6
-      rimraf: 2.7.1
-    dev: true
 
-  /sass@1.68.0:
+  sass@1.68.0:
     resolution: {integrity: sha512-Lmj9lM/fef0nQswm1J2HJcEsBUba4wgNx2fea6yJHODREoMFnwRpZydBnX/RjyXw2REIwdkbqE4hrTo4qfDBUA==}
     engines: {node: '>=14.0.0'}
     hasBin: true
-    dependencies:
-      chokidar: 3.5.3
-      immutable: 4.3.4
-      source-map-js: 1.0.2
-    dev: true
 
-  /semver@7.5.4:
+  semver@7.5.4:
     resolution: {integrity: sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==}
     engines: {node: '>=10'}
     hasBin: true
-    dependencies:
-      lru-cache: 6.0.0
 
-  /set-cookie-parser@2.6.0:
+  set-cookie-parser@2.6.0:
     resolution: {integrity: sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==}
-    dev: true
 
-  /shebang-command@2.0.0:
+  shebang-command@2.0.0:
     resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==}
     engines: {node: '>=8'}
-    dependencies:
-      shebang-regex: 3.0.0
-    dev: true
 
-  /shebang-regex@3.0.0:
+  shebang-regex@3.0.0:
     resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==}
     engines: {node: '>=8'}
-    dev: true
 
-  /siginfo@2.0.0:
+  siginfo@2.0.0:
     resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==}
-    dev: true
 
-  /sirv@2.0.3:
+  sirv@2.0.3:
     resolution: {integrity: sha512-O9jm9BsID1P+0HOi81VpXPoDxYP374pkOLzACAoyUQ/3OUVndNpsz6wMnY2z+yOxzbllCKZrM+9QrWsv4THnyA==}
     engines: {node: '>= 10'}
-    dependencies:
-      '@polka/url': 1.0.0-next.23
-      mrmime: 1.0.1
-      totalist: 3.0.1
-    dev: true
 
-  /slash@3.0.0:
+  slash@3.0.0:
     resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==}
     engines: {node: '>=8'}
-    dev: true
 
-  /sorcery@0.10.0:
+  sorcery@0.10.0:
     resolution: {integrity: sha512-R5ocFmKZQFfSTstfOtHjJuAwbpGyf9qjQa1egyhvXSbM7emjrtLXtGdZsDJDABC85YBfVvrOiGWKSYXPKdvP1g==}
     hasBin: true
-    dependencies:
-      buffer-crc32: 0.2.13
-      minimist: 1.2.8
-      sander: 0.5.1
-      sourcemap-codec: 1.4.8
-    dev: true
 
-  /source-map-js@1.0.2:
+  source-map-js@1.0.2:
     resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==}
     engines: {node: '>=0.10.0'}
-    dev: true
 
-  /sourcemap-codec@1.4.8:
+  sourcemap-codec@1.4.8:
     resolution: {integrity: sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==}
     deprecated: Please use @jridgewell/sourcemap-codec instead
-    dev: true
 
-  /stackback@0.0.2:
+  stackback@0.0.2:
     resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==}
-    dev: true
 
-  /std-env@3.4.3:
+  std-env@3.4.3:
     resolution: {integrity: sha512-f9aPhy8fYBuMN+sNfakZV18U39PbalgjXG3lLB9WkaYTxijru61wb57V9wxxNthXM5Sd88ETBWi29qLAsHO52Q==}
-    dev: true
 
-  /strip-ansi@6.0.1:
+  strip-ansi@6.0.1:
     resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==}
     engines: {node: '>=8'}
-    dependencies:
-      ansi-regex: 5.0.1
-    dev: true
 
-  /strip-indent@3.0.0:
+  strip-indent@3.0.0:
     resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==}
     engines: {node: '>=8'}
-    dependencies:
-      min-indent: 1.0.1
-    dev: true
 
-  /strip-json-comments@3.1.1:
+  strip-json-comments@3.1.1:
     resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==}
     engines: {node: '>=8'}
-    dev: true
 
-  /strip-literal@1.3.0:
+  strip-literal@1.3.0:
     resolution: {integrity: sha512-PugKzOsyXpArk0yWmUwqOZecSO0GH0bPoctLcqNDH9J04pVW3lflYE0ujElBGTloevcxF5MofAOZ7C5l2b+wLg==}
-    dependencies:
-      acorn: 8.10.0
-    dev: true
 
-  /sucrase@3.34.0:
+  sucrase@3.34.0:
     resolution: {integrity: sha512-70/LQEZ07TEcxiU2dz51FKaE6hCTWC6vr7FOk3Gr0U60C3shtAN+H+BFr9XlYe5xqf3RA8nrc+VIwzCfnxuXJw==}
     engines: {node: '>=8'}
     hasBin: true
-    dependencies:
-      '@jridgewell/gen-mapping': 0.3.3
-      commander: 4.1.1
-      glob: 7.1.6
-      lines-and-columns: 1.2.4
-      mz: 2.7.0
-      pirates: 4.0.6
-      ts-interface-checker: 0.1.13
-    dev: true
 
-  /supports-color@7.2.0:
+  supports-color@7.2.0:
     resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==}
     engines: {node: '>=8'}
-    dependencies:
-      has-flag: 4.0.0
-    dev: true
 
-  /supports-preserve-symlinks-flag@1.0.0:
+  supports-preserve-symlinks-flag@1.0.0:
     resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==}
     engines: {node: '>= 0.4'}
-    dev: true
 
-  /svelte-hmr@0.15.3(svelte@3.59.2):
+  svelte-hmr@0.15.3:
     resolution: {integrity: sha512-41snaPswvSf8TJUhlkoJBekRrABDXDMdpNpT2tfHIv4JuhgvHqLMhEPGtaQn0BmbNSTkuz2Ed20DF2eHw0SmBQ==}
     engines: {node: ^12.20 || ^14.13.1 || >= 16}
     peerDependencies:
       svelte: ^3.19.0 || ^4.0.0
-    dependencies:
-      svelte: 3.59.2
-    dev: true
 
-  /svelte-preprocess@4.10.7(postcss@8.4.31)(svelte@3.59.2)(typescript@4.9.5):
+  svelte-preprocess@4.10.7:
     resolution: {integrity: sha512-sNPBnqYD6FnmdBrUmBCaqS00RyCsCpj2BG58A1JBswNF7b0OKviwxqVrOL/CKyJrLSClrSeqQv5BXNg2RUbPOw==}
     engines: {node: '>= 9.11.2'}
-    requiresBuild: true
     peerDependencies:
       '@babel/core': ^7.10.2
       coffeescript: ^2.5.1
@@ -2343,239 +1414,124 @@ packages:
         optional: true
       typescript:
         optional: true
-    dependencies:
-      '@types/pug': 2.0.7
-      '@types/sass': 1.45.0
-      detect-indent: 6.1.0
-      magic-string: 0.25.9
-      postcss: 8.4.31
-      sorcery: 0.10.0
-      strip-indent: 3.0.0
-      svelte: 3.59.2
-      typescript: 4.9.5
-    dev: true
 
-  /svelte-search@2.0.1:
+  svelte-search@2.0.1:
     resolution: {integrity: sha512-JBoObru/BUk86EmuRtYBa99xnH1RB8jqDuYYJHH0PUzN9BINo1+1GZataC/m5368BG3kZRb3wZI5ztjoi1WWXg==}
-    dev: true
 
-  /svelte2tsx@0.6.23(svelte@3.59.2)(typescript@4.9.5):
+  svelte2tsx@0.6.23:
     resolution: {integrity: sha512-3bwd1PuWUA3oEXy8+85zrLDnmJOsVpShpKVAehGWeYsz/66zMihTpRpUN97VVAKTZbO5tP4wnchHUXYs0zOwdw==}
     peerDependencies:
       svelte: ^3.55 || ^4.0.0-next.0 || ^4.0
       typescript: ^4.9.4 || ^5.0.0
-    dependencies:
-      dedent-js: 1.0.1
-      pascal-case: 3.1.2
-      svelte: 3.59.2
-      typescript: 4.9.5
-    dev: true
 
-  /svelte@3.59.2:
+  svelte@3.59.2:
     resolution: {integrity: sha512-vzSyuGr3eEoAtT/A6bmajosJZIUWySzY2CzB3w2pgPvnkUjGqlDnsNnA0PMO+mMAhuyMul6C2uuZzY6ELSkzyA==}
     engines: {node: '>= 8'}
-    dev: true
 
-  /tailwindcss@3.3.1(postcss@8.4.31):
+  tailwindcss@3.3.1:
     resolution: {integrity: sha512-Vkiouc41d4CEq0ujXl6oiGFQ7bA3WEhUZdTgXAhtKxSy49OmKs8rEfQmupsfF0IGW8fv2iQkp1EVUuapCFrZ9g==}
     engines: {node: '>=12.13.0'}
     hasBin: true
     peerDependencies:
       postcss: ^8.0.9
-    dependencies:
-      arg: 5.0.2
-      chokidar: 3.5.3
-      color-name: 1.1.4
-      didyoumean: 1.2.2
-      dlv: 1.1.3
-      fast-glob: 3.3.1
-      glob-parent: 6.0.2
-      is-glob: 4.0.3
-      jiti: 1.20.0
-      lilconfig: 2.1.0
-      micromatch: 4.0.5
-      normalize-path: 3.0.0
-      object-hash: 3.0.0
-      picocolors: 1.0.0
-      postcss: 8.4.31
-      postcss-import: 14.1.0(postcss@8.4.31)
-      postcss-js: 4.0.1(postcss@8.4.31)
-      postcss-load-config: 3.1.4(postcss@8.4.31)
-      postcss-nested: 6.0.0(postcss@8.4.31)
-      postcss-selector-parser: 6.0.13
-      postcss-value-parser: 4.2.0
-      quick-lru: 5.1.1
-      resolve: 1.22.6
-      sucrase: 3.34.0
-    transitivePeerDependencies:
-      - ts-node
-    dev: true
 
-  /text-table@0.2.0:
+  text-table@0.2.0:
     resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==}
-    dev: true
 
-  /thenify-all@1.6.0:
+  thenify-all@1.6.0:
     resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==}
     engines: {node: '>=0.8'}
-    dependencies:
-      thenify: 3.3.1
-    dev: true
 
-  /thenify@3.3.1:
+  thenify@3.3.1:
     resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==}
-    dependencies:
-      any-promise: 1.3.0
-    dev: true
 
-  /tiny-glob@0.2.9:
+  tiny-glob@0.2.9:
     resolution: {integrity: sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==}
-    dependencies:
-      globalyzer: 0.1.0
-      globrex: 0.1.2
-    dev: true
 
-  /tinybench@2.5.1:
+  tinybench@2.5.1:
     resolution: {integrity: sha512-65NKvSuAVDP/n4CqH+a9w2kTlLReS9vhsAP06MWx+/89nMinJyB2icyl58RIcqCmIggpojIGeuJGhjU1aGMBSg==}
-    dev: true
 
-  /tinypool@0.6.0:
+  tinypool@0.6.0:
     resolution: {integrity: sha512-FdswUUo5SxRizcBc6b1GSuLpLjisa8N8qMyYoP3rl+bym+QauhtJP5bvZY1ytt8krKGmMLYIRl36HBZfeAoqhQ==}
     engines: {node: '>=14.0.0'}
-    dev: true
 
-  /tinyspy@2.2.0:
+  tinyspy@2.2.0:
     resolution: {integrity: sha512-d2eda04AN/cPOR89F7Xv5bK/jrQEhmcLFe6HFldoeO9AJtps+fqEnh486vnT/8y4bw38pSyxDcTCAq+Ks2aJTg==}
     engines: {node: '>=14.0.0'}
-    dev: true
 
-  /to-regex-range@5.0.1:
+  to-regex-range@5.0.1:
     resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
     engines: {node: '>=8.0'}
-    dependencies:
-      is-number: 7.0.0
-    dev: true
 
-  /totalist@3.0.1:
+  totalist@3.0.1:
     resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==}
     engines: {node: '>=6'}
-    dev: true
 
-  /ts-interface-checker@0.1.13:
+  ts-interface-checker@0.1.13:
     resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==}
-    dev: true
 
-  /tslib@1.14.1:
+  tslib@1.14.1:
     resolution: {integrity: sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==}
-    dev: true
 
-  /tslib@2.6.2:
+  tslib@2.6.2:
     resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==}
 
-  /tsutils@3.21.0(typescript@4.9.5):
+  tsutils@3.21.0:
     resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==}
     engines: {node: '>= 6'}
     peerDependencies:
       typescript: '>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta'
-    dependencies:
-      tslib: 1.14.1
-      typescript: 4.9.5
-    dev: true
 
-  /type-check@0.4.0:
+  type-check@0.4.0:
     resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==}
     engines: {node: '>= 0.8.0'}
-    dependencies:
-      prelude-ls: 1.2.1
-    dev: true
 
-  /type-detect@4.0.8:
+  type-detect@4.0.8:
     resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==}
     engines: {node: '>=4'}
-    dev: true
 
-  /type-fest@0.20.2:
+  type-fest@0.20.2:
     resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==}
     engines: {node: '>=10'}
-    dev: true
 
-  /typescript-svelte-plugin@0.3.34(svelte@3.59.2)(typescript@4.9.5):
+  typescript-svelte-plugin@0.3.34:
     resolution: {integrity: sha512-LOex9QLlct+oHww0HVmABvfuOSUqlGrDU6COtNez61bycpSdu2ZnrYsP0N05WHqbrUPZG2u2R/fpbW77nd7x2Q==}
-    dependencies:
-      '@jridgewell/sourcemap-codec': 1.4.15
-      svelte2tsx: 0.6.23(svelte@3.59.2)(typescript@4.9.5)
-    transitivePeerDependencies:
-      - svelte
-      - typescript
-    dev: true
 
-  /typescript@4.9.5:
+  typescript@4.9.5:
     resolution: {integrity: sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==}
     engines: {node: '>=4.2.0'}
     hasBin: true
-    dev: true
 
-  /ufo@1.3.1:
+  ufo@1.3.1:
     resolution: {integrity: sha512-uY/99gMLIOlJPwATcMVYfqDSxUR9//AUcgZMzwfSTJPDKzA1S8mX4VLqa+fiAtveraQUBCz4FFcwVZBGbwBXIw==}
-    dev: true
 
-  /undici@6.6.2:
-    resolution: {integrity: sha512-vSqvUE5skSxQJ5sztTZ/CdeJb1Wq0Hf44hlYMciqHghvz+K88U0l7D6u1VsndoFgskDcnU+nG3gYmMzJVzd9Qg==}
+  undici@6.11.1:
+    resolution: {integrity: sha512-KyhzaLJnV1qa3BSHdj4AZ2ndqI0QWPxYzaIOio0WzcEJB9gvuysprJSLtpvc2D9mhR9jPDUk7xlJlZbH2KR5iw==}
     engines: {node: '>=18.0'}
-    dependencies:
-      '@fastify/busboy': 2.0.0
-    dev: true
 
-  /update-browserslist-db@1.0.13(browserslist@4.22.1):
+  update-browserslist-db@1.0.13:
     resolution: {integrity: sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==}
     hasBin: true
     peerDependencies:
       browserslist: '>= 4.21.0'
-    dependencies:
-      browserslist: 4.22.1
-      escalade: 3.1.1
-      picocolors: 1.0.0
-    dev: true
 
-  /uri-js@4.4.1:
+  uri-js@4.4.1:
     resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==}
-    dependencies:
-      punycode: 2.3.0
-    dev: true
 
-  /util-deprecate@1.0.2:
+  util-deprecate@1.0.2:
     resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
-    dev: true
 
-  /uuid@9.0.0:
+  uuid@9.0.0:
     resolution: {integrity: sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==}
     hasBin: true
-    dev: false
 
-  /vite-node@0.33.0(@types/node@20.8.2):
+  vite-node@0.33.0:
     resolution: {integrity: sha512-19FpHYbwWWxDr73ruNahC+vtEdza52kA90Qb3La98yZ0xULqV8A5JLNPUff0f5zID4984tW7l3DH2przTJUZSw==}
     engines: {node: '>=v14.18.0'}
     hasBin: true
-    dependencies:
-      cac: 6.7.14
-      debug: 4.3.4
-      mlly: 1.4.2
-      pathe: 1.1.1
-      picocolors: 1.0.0
-      vite: 4.5.2(@types/node@20.8.2)
-    transitivePeerDependencies:
-      - '@types/node'
-      - less
-      - lightningcss
-      - sass
-      - stylus
-      - sugarss
-      - supports-color
-      - terser
-    dev: true
 
-  /vite@4.5.2(@types/node@20.8.2):
-    resolution: {integrity: sha512-tBCZBNSBbHQkaGyhGCDUGqeo2ph8Fstyp6FMSvTtsXeZSPpSMGlviAOav2hxVTqFcx8Hj/twtWKsMJXNY0xI8w==}
+  vite@4.5.3:
+    resolution: {integrity: sha512-kQL23kMeX92v3ph7IauVkXkikdDRsYMGTVl5KY2E9OY4ONLvkHf04MDTbnfo6NKxZiDLWzVpP5oTa8hQD8U3dg==}
     engines: {node: ^14.18.0 || >=16.0.0}
     hasBin: true
     peerDependencies:
@@ -2601,27 +1557,16 @@ packages:
         optional: true
       terser:
         optional: true
-    dependencies:
-      '@types/node': 20.8.2
-      esbuild: 0.18.13
-      postcss: 8.4.31
-      rollup: 3.29.4
-    optionalDependencies:
-      fsevents: 2.3.3
-    dev: true
 
-  /vitefu@0.2.4(vite@4.5.2):
+  vitefu@0.2.4:
     resolution: {integrity: sha512-fanAXjSaf9xXtOOeno8wZXIhgia+CZury481LsDaV++lSvcU2R9Ch2bPh3PYFyoHW+w9LqAeYRISVQjUIew14g==}
     peerDependencies:
       vite: ^3.0.0 || ^4.0.0
     peerDependenciesMeta:
       vite:
         optional: true
-    dependencies:
-      vite: 4.5.2(@types/node@20.8.2)
-    dev: true
 
-  /vitest@0.33.0:
+  vitest@0.33.0:
     resolution: {integrity: sha512-1CxaugJ50xskkQ0e969R/hW47za4YXDUfWJDxip1hwbnhUjYolpfUn2AMOulqG/Dtd9WYAtkHmM/m3yKVrEejQ==}
     engines: {node: '>=v14.18.0'}
     hasBin: true
@@ -2651,76 +1596,1464 @@ packages:
         optional: true
       webdriverio:
         optional: true
-    dependencies:
-      '@types/chai': 4.3.6
-      '@types/chai-subset': 1.3.3
-      '@types/node': 20.8.2
-      '@vitest/expect': 0.33.0
-      '@vitest/runner': 0.33.0
-      '@vitest/snapshot': 0.33.0
-      '@vitest/spy': 0.33.0
-      '@vitest/utils': 0.33.0
-      acorn: 8.10.0
-      acorn-walk: 8.2.0
-      cac: 6.7.14
-      chai: 4.3.10
-      debug: 4.3.4
-      local-pkg: 0.4.3
-      magic-string: 0.30.4
-      pathe: 1.1.1
-      picocolors: 1.0.0
-      std-env: 3.4.3
-      strip-literal: 1.3.0
-      tinybench: 2.5.1
-      tinypool: 0.6.0
-      vite: 4.5.2(@types/node@20.8.2)
-      vite-node: 0.33.0(@types/node@20.8.2)
-      why-is-node-running: 2.2.2
-    transitivePeerDependencies:
-      - less
-      - lightningcss
-      - sass
-      - stylus
-      - sugarss
-      - supports-color
-      - terser
-    dev: true
 
-  /which@2.0.2:
+  which@2.0.2:
     resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==}
     engines: {node: '>= 8'}
     hasBin: true
-    dependencies:
-      isexe: 2.0.0
-    dev: true
 
-  /why-is-node-running@2.2.2:
+  why-is-node-running@2.2.2:
     resolution: {integrity: sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==}
     engines: {node: '>=8'}
     hasBin: true
-    dependencies:
-      siginfo: 2.0.0
-      stackback: 0.0.2
-    dev: true
 
-  /wrappy@1.0.2:
+  wrappy@1.0.2:
     resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
-    dev: true
 
-  /yallist@4.0.0:
+  yallist@4.0.0:
     resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==}
 
-  /yaml@1.10.2:
+  yaml@1.10.2:
     resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==}
     engines: {node: '>= 6'}
-    dev: true
 
-  /yocto-queue@0.1.0:
+  yocto-queue@0.1.0:
     resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
     engines: {node: '>=10'}
-    dev: true
 
-  /yocto-queue@1.0.0:
+  yocto-queue@1.0.0:
     resolution: {integrity: sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==}
     engines: {node: '>=12.20'}
-    dev: true
+
+snapshots:
+
+  '@aashutoshrathi/word-wrap@1.2.6': {}
+
+  '@esbuild/android-arm64@0.18.13':
+    optional: true
+
+  '@esbuild/android-arm@0.18.13':
+    optional: true
+
+  '@esbuild/android-x64@0.18.13':
+    optional: true
+
+  '@esbuild/darwin-arm64@0.18.13':
+    optional: true
+
+  '@esbuild/darwin-x64@0.18.13':
+    optional: true
+
+  '@esbuild/freebsd-arm64@0.18.13':
+    optional: true
+
+  '@esbuild/freebsd-x64@0.18.13':
+    optional: true
+
+  '@esbuild/linux-arm64@0.18.13':
+    optional: true
+
+  '@esbuild/linux-arm@0.18.13':
+    optional: true
+
+  '@esbuild/linux-ia32@0.18.13':
+    optional: true
+
+  '@esbuild/linux-loong64@0.18.13':
+    optional: true
+
+  '@esbuild/linux-mips64el@0.18.13':
+    optional: true
+
+  '@esbuild/linux-ppc64@0.18.13':
+    optional: true
+
+  '@esbuild/linux-riscv64@0.18.13':
+    optional: true
+
+  '@esbuild/linux-s390x@0.18.13':
+    optional: true
+
+  '@esbuild/linux-x64@0.18.13':
+    optional: true
+
+  '@esbuild/netbsd-x64@0.18.13':
+    optional: true
+
+  '@esbuild/openbsd-x64@0.18.13':
+    optional: true
+
+  '@esbuild/sunos-x64@0.18.13':
+    optional: true
+
+  '@esbuild/win32-arm64@0.18.13':
+    optional: true
+
+  '@esbuild/win32-ia32@0.18.13':
+    optional: true
+
+  '@esbuild/win32-x64@0.18.13':
+    optional: true
+
+  '@eslint-community/eslint-utils@4.4.0(eslint@8.44.0)':
+    dependencies:
+      eslint: 8.44.0
+      eslint-visitor-keys: 3.4.3
+
+  '@eslint-community/regexpp@4.9.1': {}
+
+  '@eslint/eslintrc@2.1.2':
+    dependencies:
+      ajv: 6.12.6
+      debug: 4.3.4
+      espree: 9.6.1
+      globals: 13.23.0
+      ignore: 5.2.4
+      import-fresh: 3.3.0
+      js-yaml: 4.1.0
+      minimatch: 3.1.2
+      strip-json-comments: 3.1.1
+    transitivePeerDependencies:
+      - supports-color
+
+  '@eslint/js@8.44.0': {}
+
+  '@humanwhocodes/config-array@0.11.11':
+    dependencies:
+      '@humanwhocodes/object-schema': 1.2.1
+      debug: 4.3.4
+      minimatch: 3.1.2
+    transitivePeerDependencies:
+      - supports-color
+
+  '@humanwhocodes/module-importer@1.0.1': {}
+
+  '@humanwhocodes/object-schema@1.2.1': {}
+
+  '@jest/schemas@29.6.3':
+    dependencies:
+      '@sinclair/typebox': 0.27.8
+
+  '@jridgewell/gen-mapping@0.3.3':
+    dependencies:
+      '@jridgewell/set-array': 1.1.2
+      '@jridgewell/sourcemap-codec': 1.4.15
+      '@jridgewell/trace-mapping': 0.3.19
+
+  '@jridgewell/resolve-uri@3.1.1': {}
+
+  '@jridgewell/set-array@1.1.2': {}
+
+  '@jridgewell/sourcemap-codec@1.4.15': {}
+
+  '@jridgewell/trace-mapping@0.3.19':
+    dependencies:
+      '@jridgewell/resolve-uri': 3.1.1
+      '@jridgewell/sourcemap-codec': 1.4.15
+
+  '@nodelib/fs.scandir@2.1.5':
+    dependencies:
+      '@nodelib/fs.stat': 2.0.5
+      run-parallel: 1.2.0
+
+  '@nodelib/fs.stat@2.0.5': {}
+
+  '@nodelib/fs.walk@1.2.8':
+    dependencies:
+      '@nodelib/fs.scandir': 2.1.5
+      fastq: 1.15.0
+
+  '@playwright/test@1.36.1':
+    dependencies:
+      '@types/node': 20.8.2
+      playwright-core: 1.36.1
+    optionalDependencies:
+      fsevents: 2.3.2
+
+  '@polka/url@1.0.0-next.23': {}
+
+  '@rollup/plugin-commonjs@25.0.5(rollup@3.29.4)':
+    dependencies:
+      '@rollup/pluginutils': 5.0.5(rollup@3.29.4)
+      commondir: 1.0.1
+      estree-walker: 2.0.2
+      glob: 8.1.0
+      is-reference: 1.2.1
+      magic-string: 0.27.0
+    optionalDependencies:
+      rollup: 3.29.4
+
+  '@rollup/plugin-json@6.0.1(rollup@3.29.4)':
+    dependencies:
+      '@rollup/pluginutils': 5.0.5(rollup@3.29.4)
+    optionalDependencies:
+      rollup: 3.29.4
+
+  '@rollup/plugin-node-resolve@15.2.2(rollup@3.29.4)':
+    dependencies:
+      '@rollup/pluginutils': 5.0.5(rollup@3.29.4)
+      '@types/resolve': 1.20.2
+      deepmerge: 4.3.1
+      is-builtin-module: 3.2.1
+      is-module: 1.0.0
+      resolve: 1.22.6
+    optionalDependencies:
+      rollup: 3.29.4
+
+  '@rollup/pluginutils@5.0.5(rollup@3.29.4)':
+    dependencies:
+      '@types/estree': 1.0.2
+      estree-walker: 2.0.2
+      picomatch: 2.3.1
+    optionalDependencies:
+      rollup: 3.29.4
+
+  '@sinclair/typebox@0.27.8': {}
+
+  '@sveltejs/adapter-node@1.3.1(@sveltejs/kit@1.25.2(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0)))':
+    dependencies:
+      '@rollup/plugin-commonjs': 25.0.5(rollup@3.29.4)
+      '@rollup/plugin-json': 6.0.1(rollup@3.29.4)
+      '@rollup/plugin-node-resolve': 15.2.2(rollup@3.29.4)
+      '@sveltejs/kit': 1.25.2(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0))
+      rollup: 3.29.4
+
+  '@sveltejs/kit@1.25.2(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0))':
+    dependencies:
+      '@sveltejs/vite-plugin-svelte': 2.4.6(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0))
+      '@types/cookie': 0.5.3
+      cookie: 0.5.0
+      devalue: 4.3.2
+      esm-env: 1.0.0
+      kleur: 4.1.5
+      magic-string: 0.30.4
+      mime: 3.0.0
+      sade: 1.8.1
+      set-cookie-parser: 2.6.0
+      sirv: 2.0.3
+      svelte: 3.59.2
+      tiny-glob: 0.2.9
+      undici: 6.11.1
+      vite: 4.5.3(@types/node@20.8.2)(sass@1.68.0)
+    transitivePeerDependencies:
+      - supports-color
+
+  '@sveltejs/vite-plugin-svelte-inspector@1.0.4(@sveltejs/vite-plugin-svelte@2.4.6(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0)))(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0))':
+    dependencies:
+      '@sveltejs/vite-plugin-svelte': 2.4.6(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0))
+      debug: 4.3.4
+      svelte: 3.59.2
+      vite: 4.5.3(@types/node@20.8.2)(sass@1.68.0)
+    transitivePeerDependencies:
+      - supports-color
+
+  '@sveltejs/vite-plugin-svelte@2.4.6(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0))':
+    dependencies:
+      '@sveltejs/vite-plugin-svelte-inspector': 1.0.4(@sveltejs/vite-plugin-svelte@2.4.6(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0)))(svelte@3.59.2)(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0))
+      debug: 4.3.4
+      deepmerge: 4.3.1
+      kleur: 4.1.5
+      magic-string: 0.30.4
+      svelte: 3.59.2
+      svelte-hmr: 0.15.3(svelte@3.59.2)
+      vite: 4.5.3(@types/node@20.8.2)(sass@1.68.0)
+      vitefu: 0.2.4(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0))
+    transitivePeerDependencies:
+      - supports-color
+
+  '@types/chai-subset@1.3.3':
+    dependencies:
+      '@types/chai': 4.3.6
+
+  '@types/chai@4.3.6': {}
+
+  '@types/cookie@0.5.3': {}
+
+  '@types/estree@1.0.2': {}
+
+  '@types/json-schema@7.0.13': {}
+
+  '@types/libsodium-wrappers@0.7.11': {}
+
+  '@types/node@20.8.2': {}
+
+  '@types/prismjs@1.26.0': {}
+
+  '@types/pug@2.0.7': {}
+
+  '@types/resolve@1.20.2': {}
+
+  '@types/sass@1.45.0':
+    dependencies:
+      sass: 1.68.0
+
+  '@types/semver@7.5.3': {}
+
+  '@typescript-eslint/eslint-plugin@5.62.0(@typescript-eslint/parser@5.62.0(eslint@8.44.0)(typescript@4.9.5))(eslint@8.44.0)(typescript@4.9.5)':
+    dependencies:
+      '@eslint-community/regexpp': 4.9.1
+      '@typescript-eslint/parser': 5.62.0(eslint@8.44.0)(typescript@4.9.5)
+      '@typescript-eslint/scope-manager': 5.62.0
+      '@typescript-eslint/type-utils': 5.62.0(eslint@8.44.0)(typescript@4.9.5)
+      '@typescript-eslint/utils': 5.62.0(eslint@8.44.0)(typescript@4.9.5)
+      debug: 4.3.4
+      eslint: 8.44.0
+      graphemer: 1.4.0
+      ignore: 5.2.4
+      natural-compare-lite: 1.4.0
+      semver: 7.5.4
+      tsutils: 3.21.0(typescript@4.9.5)
+    optionalDependencies:
+      typescript: 4.9.5
+    transitivePeerDependencies:
+      - supports-color
+
+  '@typescript-eslint/parser@5.62.0(eslint@8.44.0)(typescript@4.9.5)':
+    dependencies:
+      '@typescript-eslint/scope-manager': 5.62.0
+      '@typescript-eslint/types': 5.62.0
+      '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5)
+      debug: 4.3.4
+      eslint: 8.44.0
+    optionalDependencies:
+      typescript: 4.9.5
+    transitivePeerDependencies:
+      - supports-color
+
+  '@typescript-eslint/scope-manager@5.62.0':
+    dependencies:
+      '@typescript-eslint/types': 5.62.0
+      '@typescript-eslint/visitor-keys': 5.62.0
+
+  '@typescript-eslint/type-utils@5.62.0(eslint@8.44.0)(typescript@4.9.5)':
+    dependencies:
+      '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5)
+      '@typescript-eslint/utils': 5.62.0(eslint@8.44.0)(typescript@4.9.5)
+      debug: 4.3.4
+      eslint: 8.44.0
+      tsutils: 3.21.0(typescript@4.9.5)
+    optionalDependencies:
+      typescript: 4.9.5
+    transitivePeerDependencies:
+      - supports-color
+
+  '@typescript-eslint/types@5.62.0': {}
+
+  '@typescript-eslint/typescript-estree@5.62.0(typescript@4.9.5)':
+    dependencies:
+      '@typescript-eslint/types': 5.62.0
+      '@typescript-eslint/visitor-keys': 5.62.0
+      debug: 4.3.4
+      globby: 11.1.0
+      is-glob: 4.0.3
+      semver: 7.5.4
+      tsutils: 3.21.0(typescript@4.9.5)
+    optionalDependencies:
+      typescript: 4.9.5
+    transitivePeerDependencies:
+      - supports-color
+
+  '@typescript-eslint/utils@5.62.0(eslint@8.44.0)(typescript@4.9.5)':
+    dependencies:
+      '@eslint-community/eslint-utils': 4.4.0(eslint@8.44.0)
+      '@types/json-schema': 7.0.13
+      '@types/semver': 7.5.3
+      '@typescript-eslint/scope-manager': 5.62.0
+      '@typescript-eslint/types': 5.62.0
+      '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5)
+      eslint: 8.44.0
+      eslint-scope: 5.1.1
+      semver: 7.5.4
+    transitivePeerDependencies:
+      - supports-color
+      - typescript
+
+  '@typescript-eslint/visitor-keys@5.62.0':
+    dependencies:
+      '@typescript-eslint/types': 5.62.0
+      eslint-visitor-keys: 3.4.3
+
+  '@vitest/expect@0.33.0':
+    dependencies:
+      '@vitest/spy': 0.33.0
+      '@vitest/utils': 0.33.0
+      chai: 4.3.10
+
+  '@vitest/runner@0.33.0':
+    dependencies:
+      '@vitest/utils': 0.33.0
+      p-limit: 4.0.0
+      pathe: 1.1.1
+
+  '@vitest/snapshot@0.33.0':
+    dependencies:
+      magic-string: 0.30.4
+      pathe: 1.1.1
+      pretty-format: 29.7.0
+
+  '@vitest/spy@0.33.0':
+    dependencies:
+      tinyspy: 2.2.0
+
+  '@vitest/utils@0.33.0':
+    dependencies:
+      diff-sequences: 29.6.3
+      loupe: 2.3.6
+      pretty-format: 29.7.0
+
+  acorn-jsx@5.3.2(acorn@8.10.0):
+    dependencies:
+      acorn: 8.10.0
+
+  acorn-walk@8.2.0: {}
+
+  acorn@8.10.0: {}
+
+  ajv@6.12.6:
+    dependencies:
+      fast-deep-equal: 3.1.3
+      fast-json-stable-stringify: 2.1.0
+      json-schema-traverse: 0.4.1
+      uri-js: 4.4.1
+
+  ansi-regex@5.0.1: {}
+
+  ansi-styles@4.3.0:
+    dependencies:
+      color-convert: 2.0.1
+
+  ansi-styles@5.2.0: {}
+
+  any-promise@1.3.0: {}
+
+  anymatch@3.1.3:
+    dependencies:
+      normalize-path: 3.0.0
+      picomatch: 2.3.1
+
+  arg@5.0.2: {}
+
+  argparse@2.0.1: {}
+
+  array-union@2.1.0: {}
+
+  assertion-error@1.1.0: {}
+
+  autoprefixer@10.4.14(postcss@8.4.31):
+    dependencies:
+      browserslist: 4.22.1
+      caniuse-lite: 1.0.30001546
+      fraction.js: 4.3.6
+      normalize-range: 0.1.2
+      picocolors: 1.0.0
+      postcss: 8.4.31
+      postcss-value-parser: 4.2.0
+
+  balanced-match@1.0.2: {}
+
+  binary-extensions@2.2.0: {}
+
+  brace-expansion@1.1.11:
+    dependencies:
+      balanced-match: 1.0.2
+      concat-map: 0.0.1
+
+  brace-expansion@2.0.1:
+    dependencies:
+      balanced-match: 1.0.2
+
+  braces@3.0.2:
+    dependencies:
+      fill-range: 7.0.1
+
+  browserslist@4.22.1:
+    dependencies:
+      caniuse-lite: 1.0.30001546
+      electron-to-chromium: 1.4.543
+      node-releases: 2.0.13
+      update-browserslist-db: 1.0.13(browserslist@4.22.1)
+
+  buffer-crc32@0.2.13: {}
+
+  builtin-modules@3.3.0: {}
+
+  cac@6.7.14: {}
+
+  callsites@3.1.0: {}
+
+  camelcase-css@2.0.1: {}
+
+  caniuse-lite@1.0.30001546: {}
+
+  capnp-ts@0.7.0:
+    dependencies:
+      debug: 4.3.4
+      tslib: 2.6.2
+    transitivePeerDependencies:
+      - supports-color
+
+  chai@4.3.10:
+    dependencies:
+      assertion-error: 1.1.0
+      check-error: 1.0.3
+      deep-eql: 4.1.3
+      get-func-name: 2.0.2
+      loupe: 2.3.6
+      pathval: 1.1.1
+      type-detect: 4.0.8
+
+  chalk@4.1.2:
+    dependencies:
+      ansi-styles: 4.3.0
+      supports-color: 7.2.0
+
+  check-error@1.0.3:
+    dependencies:
+      get-func-name: 2.0.2
+
+  chokidar@3.5.3:
+    dependencies:
+      anymatch: 3.1.3
+      braces: 3.0.2
+      glob-parent: 5.1.2
+      is-binary-path: 2.1.0
+      is-glob: 4.0.3
+      normalize-path: 3.0.0
+      readdirp: 3.6.0
+    optionalDependencies:
+      fsevents: 2.3.3
+
+  color-convert@2.0.1:
+    dependencies:
+      color-name: 1.1.4
+
+  color-name@1.1.4: {}
+
+  commander@4.1.1: {}
+
+  commondir@1.0.1: {}
+
+  concat-map@0.0.1: {}
+
+  cookie@0.5.0: {}
+
+  cross-spawn@7.0.3:
+    dependencies:
+      path-key: 3.1.1
+      shebang-command: 2.0.0
+      which: 2.0.2
+
+  cssesc@3.0.0: {}
+
+  debug@4.3.4:
+    dependencies:
+      ms: 2.1.2
+
+  dedent-js@1.0.1: {}
+
+  deep-eql@4.1.3:
+    dependencies:
+      type-detect: 4.0.8
+
+  deep-is@0.1.4: {}
+
+  deepmerge@4.3.1: {}
+
+  detect-indent@6.1.0: {}
+
+  devalue@4.3.2: {}
+
+  didyoumean@1.2.2: {}
+
+  diff-sequences@29.6.3: {}
+
+  dir-glob@3.0.1:
+    dependencies:
+      path-type: 4.0.0
+
+  dlv@1.1.3: {}
+
+  doctrine@3.0.0:
+    dependencies:
+      esutils: 2.0.3
+
+  dotenv@16.3.1: {}
+
+  electron-to-chromium@1.4.543: {}
+
+  es6-promise@3.3.1: {}
+
+  esbuild@0.18.13:
+    optionalDependencies:
+      '@esbuild/android-arm': 0.18.13
+      '@esbuild/android-arm64': 0.18.13
+      '@esbuild/android-x64': 0.18.13
+      '@esbuild/darwin-arm64': 0.18.13
+      '@esbuild/darwin-x64': 0.18.13
+      '@esbuild/freebsd-arm64': 0.18.13
+      '@esbuild/freebsd-x64': 0.18.13
+      '@esbuild/linux-arm': 0.18.13
+      '@esbuild/linux-arm64': 0.18.13
+      '@esbuild/linux-ia32': 0.18.13
+      '@esbuild/linux-loong64': 0.18.13
+      '@esbuild/linux-mips64el': 0.18.13
+      '@esbuild/linux-ppc64': 0.18.13
+      '@esbuild/linux-riscv64': 0.18.13
+      '@esbuild/linux-s390x': 0.18.13
+      '@esbuild/linux-x64': 0.18.13
+      '@esbuild/netbsd-x64': 0.18.13
+      '@esbuild/openbsd-x64': 0.18.13
+      '@esbuild/sunos-x64': 0.18.13
+      '@esbuild/win32-arm64': 0.18.13
+      '@esbuild/win32-ia32': 0.18.13
+      '@esbuild/win32-x64': 0.18.13
+
+  escalade@3.1.1: {}
+
+  escape-string-regexp@4.0.0: {}
+
+  eslint-config-prettier@8.8.0(eslint@8.44.0):
+    dependencies:
+      eslint: 8.44.0
+
+  eslint-plugin-svelte3@4.0.0(eslint@8.44.0)(svelte@3.59.2):
+    dependencies:
+      eslint: 8.44.0
+      svelte: 3.59.2
+
+  eslint-scope@5.1.1:
+    dependencies:
+      esrecurse: 4.3.0
+      estraverse: 4.3.0
+
+  eslint-scope@7.2.2:
+    dependencies:
+      esrecurse: 4.3.0
+      estraverse: 5.3.0
+
+  eslint-visitor-keys@3.4.3: {}
+
+  eslint@8.44.0:
+    dependencies:
+      '@eslint-community/eslint-utils': 4.4.0(eslint@8.44.0)
+      '@eslint-community/regexpp': 4.9.1
+      '@eslint/eslintrc': 2.1.2
+      '@eslint/js': 8.44.0
+      '@humanwhocodes/config-array': 0.11.11
+      '@humanwhocodes/module-importer': 1.0.1
+      '@nodelib/fs.walk': 1.2.8
+      ajv: 6.12.6
+      chalk: 4.1.2
+      cross-spawn: 7.0.3
+      debug: 4.3.4
+      doctrine: 3.0.0
+      escape-string-regexp: 4.0.0
+      eslint-scope: 7.2.2
+      eslint-visitor-keys: 3.4.3
+      espree: 9.6.1
+      esquery: 1.5.0
+      esutils: 2.0.3
+      fast-deep-equal: 3.1.3
+      file-entry-cache: 6.0.1
+      find-up: 5.0.0
+      glob-parent: 6.0.2
+      globals: 13.23.0
+      graphemer: 1.4.0
+      ignore: 5.2.4
+      import-fresh: 3.3.0
+      imurmurhash: 0.1.4
+      is-glob: 4.0.3
+      is-path-inside: 3.0.3
+      js-yaml: 4.1.0
+      json-stable-stringify-without-jsonify: 1.0.1
+      levn: 0.4.1
+      lodash.merge: 4.6.2
+      minimatch: 3.1.2
+      natural-compare: 1.4.0
+      optionator: 0.9.3
+      strip-ansi: 6.0.1
+      strip-json-comments: 3.1.1
+      text-table: 0.2.0
+    transitivePeerDependencies:
+      - supports-color
+
+  esm-env@1.0.0: {}
+
+  espree@9.6.1:
+    dependencies:
+      acorn: 8.10.0
+      acorn-jsx: 5.3.2(acorn@8.10.0)
+      eslint-visitor-keys: 3.4.3
+
+  esquery@1.5.0:
+    dependencies:
+      estraverse: 5.3.0
+
+  esrecurse@4.3.0:
+    dependencies:
+      estraverse: 5.3.0
+
+  estraverse@4.3.0: {}
+
+  estraverse@5.3.0: {}
+
+  estree-walker@2.0.2: {}
+
+  esutils@2.0.3: {}
+
+  fast-deep-equal@3.1.3: {}
+
+  fast-glob@3.3.1:
+    dependencies:
+      '@nodelib/fs.stat': 2.0.5
+      '@nodelib/fs.walk': 1.2.8
+      glob-parent: 5.1.2
+      merge2: 1.4.1
+      micromatch: 4.0.5
+
+  fast-json-stable-stringify@2.1.0: {}
+
+  fast-levenshtein@2.0.6: {}
+
+  fastq@1.15.0:
+    dependencies:
+      reusify: 1.0.4
+
+  file-entry-cache@6.0.1:
+    dependencies:
+      flat-cache: 3.1.0
+
+  fill-range@7.0.1:
+    dependencies:
+      to-regex-range: 5.0.1
+
+  find-up@5.0.0:
+    dependencies:
+      locate-path: 6.0.0
+      path-exists: 4.0.0
+
+  flat-cache@3.1.0:
+    dependencies:
+      flatted: 3.2.9
+      keyv: 4.5.3
+      rimraf: 3.0.2
+
+  flatted@3.2.9: {}
+
+  fraction.js@4.3.6: {}
+
+  fs.realpath@1.0.0: {}
+
+  fsevents@2.3.2:
+    optional: true
+
+  fsevents@2.3.3:
+    optional: true
+
+  get-func-name@2.0.2: {}
+
+  glob-parent@5.1.2:
+    dependencies:
+      is-glob: 4.0.3
+
+  glob-parent@6.0.2:
+    dependencies:
+      is-glob: 4.0.3
+
+  glob@7.1.6:
+    dependencies:
+      fs.realpath: 1.0.0
+      inflight: 1.0.6
+      inherits: 2.0.4
+      minimatch: 3.1.2
+      once: 1.4.0
+      path-is-absolute: 1.0.1
+
+  glob@7.2.3:
+    dependencies:
+      fs.realpath: 1.0.0
+      inflight: 1.0.6
+      inherits: 2.0.4
+      minimatch: 3.1.2
+      once: 1.4.0
+      path-is-absolute: 1.0.1
+
+  glob@8.1.0:
+    dependencies:
+      fs.realpath: 1.0.0
+      inflight: 1.0.6
+      inherits: 2.0.4
+      minimatch: 5.1.6
+      once: 1.4.0
+
+  globals@13.23.0:
+    dependencies:
+      type-fest: 0.20.2
+
+  globalyzer@0.1.0: {}
+
+  globby@11.1.0:
+    dependencies:
+      array-union: 2.1.0
+      dir-glob: 3.0.1
+      fast-glob: 3.3.1
+      ignore: 5.2.4
+      merge2: 1.4.1
+      slash: 3.0.0
+
+  globrex@0.1.2: {}
+
+  graceful-fs@4.2.11: {}
+
+  graphemer@1.4.0: {}
+
+  has-flag@4.0.0: {}
+
+  has@1.0.4: {}
+
+  ignore@5.2.4: {}
+
+  immutable@4.3.4: {}
+
+  import-fresh@3.3.0:
+    dependencies:
+      parent-module: 1.0.1
+      resolve-from: 4.0.0
+
+  imurmurhash@0.1.4: {}
+
+  inflight@1.0.6:
+    dependencies:
+      once: 1.4.0
+      wrappy: 1.0.2
+
+  inherits@2.0.4: {}
+
+  is-binary-path@2.1.0:
+    dependencies:
+      binary-extensions: 2.2.0
+
+  is-builtin-module@3.2.1:
+    dependencies:
+      builtin-modules: 3.3.0
+
+  is-core-module@2.13.0:
+    dependencies:
+      has: 1.0.4
+
+  is-extglob@2.1.1: {}
+
+  is-glob@4.0.3:
+    dependencies:
+      is-extglob: 2.1.1
+
+  is-module@1.0.0: {}
+
+  is-number@7.0.0: {}
+
+  is-path-inside@3.0.3: {}
+
+  is-reference@1.2.1:
+    dependencies:
+      '@types/estree': 1.0.2
+
+  isexe@2.0.0: {}
+
+  jiti@1.20.0: {}
+
+  js-yaml@4.1.0:
+    dependencies:
+      argparse: 2.0.1
+
+  json-buffer@3.0.1: {}
+
+  json-schema-traverse@0.4.1: {}
+
+  json-stable-stringify-without-jsonify@1.0.1: {}
+
+  jsonc-parser@3.2.0: {}
+
+  just-debounce-it@3.2.0: {}
+
+  keyv@4.5.3:
+    dependencies:
+      json-buffer: 3.0.1
+
+  kleur@4.1.5: {}
+
+  ky@0.33.3: {}
+
+  levn@0.4.1:
+    dependencies:
+      prelude-ls: 1.2.1
+      type-check: 0.4.0
+
+  libsodium-wrappers@0.7.14:
+    dependencies:
+      libsodium: 0.7.14
+
+  libsodium@0.7.14: {}
+
+  lilconfig@2.1.0: {}
+
+  lines-and-columns@1.2.4: {}
+
+  local-pkg@0.4.3: {}
+
+  locate-path@6.0.0:
+    dependencies:
+      p-locate: 5.0.0
+
+  lodash.merge@4.6.2: {}
+
+  loupe@2.3.6:
+    dependencies:
+      get-func-name: 2.0.2
+
+  lower-case@2.0.2:
+    dependencies:
+      tslib: 2.6.2
+
+  lru-cache@6.0.0:
+    dependencies:
+      yallist: 4.0.0
+
+  magic-string@0.25.9:
+    dependencies:
+      sourcemap-codec: 1.4.8
+
+  magic-string@0.27.0:
+    dependencies:
+      '@jridgewell/sourcemap-codec': 1.4.15
+
+  magic-string@0.30.4:
+    dependencies:
+      '@jridgewell/sourcemap-codec': 1.4.15
+
+  merge2@1.4.1: {}
+
+  micromatch@4.0.5:
+    dependencies:
+      braces: 3.0.2
+      picomatch: 2.3.1
+
+  mime@3.0.0: {}
+
+  min-indent@1.0.1: {}
+
+  minimatch@3.1.2:
+    dependencies:
+      brace-expansion: 1.1.11
+
+  minimatch@5.1.6:
+    dependencies:
+      brace-expansion: 2.0.1
+
+  minimist@1.2.8: {}
+
+  mkdirp@0.5.6:
+    dependencies:
+      minimist: 1.2.8
+
+  mlly@1.4.2:
+    dependencies:
+      acorn: 8.10.0
+      pathe: 1.1.1
+      pkg-types: 1.0.3
+      ufo: 1.3.1
+
+  mri@1.2.0: {}
+
+  mrmime@1.0.1: {}
+
+  ms@2.1.2: {}
+
+  mz@2.7.0:
+    dependencies:
+      any-promise: 1.3.0
+      object-assign: 4.1.1
+      thenify-all: 1.6.0
+
+  nanoid@3.3.6: {}
+
+  natural-compare-lite@1.4.0: {}
+
+  natural-compare@1.4.0: {}
+
+  no-case@3.0.4:
+    dependencies:
+      lower-case: 2.0.2
+      tslib: 2.6.2
+
+  node-releases@2.0.13: {}
+
+  normalize-path@3.0.0: {}
+
+  normalize-range@0.1.2: {}
+
+  object-assign@4.1.1: {}
+
+  object-hash@3.0.0: {}
+
+  once@1.4.0:
+    dependencies:
+      wrappy: 1.0.2
+
+  optionator@0.9.3:
+    dependencies:
+      '@aashutoshrathi/word-wrap': 1.2.6
+      deep-is: 0.1.4
+      fast-levenshtein: 2.0.6
+      levn: 0.4.1
+      prelude-ls: 1.2.1
+      type-check: 0.4.0
+
+  p-limit@3.1.0:
+    dependencies:
+      yocto-queue: 0.1.0
+
+  p-limit@4.0.0:
+    dependencies:
+      yocto-queue: 1.0.0
+
+  p-locate@5.0.0:
+    dependencies:
+      p-limit: 3.1.0
+
+  parent-module@1.0.1:
+    dependencies:
+      callsites: 3.1.0
+
+  pascal-case@3.1.2:
+    dependencies:
+      no-case: 3.0.4
+      tslib: 2.6.2
+
+  path-exists@4.0.0: {}
+
+  path-is-absolute@1.0.1: {}
+
+  path-key@3.1.1: {}
+
+  path-parse@1.0.7: {}
+
+  path-type@4.0.0: {}
+
+  pathe@1.1.1: {}
+
+  pathval@1.1.1: {}
+
+  picocolors@1.0.0: {}
+
+  picomatch@2.3.1: {}
+
+  pify@2.3.0: {}
+
+  pirates@4.0.6: {}
+
+  pkg-types@1.0.3:
+    dependencies:
+      jsonc-parser: 3.2.0
+      mlly: 1.4.2
+      pathe: 1.1.1
+
+  playwright-core@1.36.1: {}
+
+  postcss-import@14.1.0(postcss@8.4.31):
+    dependencies:
+      postcss: 8.4.31
+      postcss-value-parser: 4.2.0
+      read-cache: 1.0.0
+      resolve: 1.22.6
+
+  postcss-js@4.0.1(postcss@8.4.31):
+    dependencies:
+      camelcase-css: 2.0.1
+      postcss: 8.4.31
+
+  postcss-load-config@3.1.4(postcss@8.4.31):
+    dependencies:
+      lilconfig: 2.1.0
+      yaml: 1.10.2
+    optionalDependencies:
+      postcss: 8.4.31
+
+  postcss-nested@6.0.0(postcss@8.4.31):
+    dependencies:
+      postcss: 8.4.31
+      postcss-selector-parser: 6.0.13
+
+  postcss-selector-parser@6.0.13:
+    dependencies:
+      cssesc: 3.0.0
+      util-deprecate: 1.0.2
+
+  postcss-value-parser@4.2.0: {}
+
+  postcss@8.4.31:
+    dependencies:
+      nanoid: 3.3.6
+      picocolors: 1.0.0
+      source-map-js: 1.0.2
+
+  prelude-ls@1.2.1: {}
+
+  prettier-plugin-svelte@2.10.1(prettier@2.8.8)(svelte@3.59.2):
+    dependencies:
+      prettier: 2.8.8
+      svelte: 3.59.2
+
+  prettier@2.8.8: {}
+
+  pretty-format@29.7.0:
+    dependencies:
+      '@jest/schemas': 29.6.3
+      ansi-styles: 5.2.0
+      react-is: 18.2.0
+
+  prismjs@1.29.0: {}
+
+  punycode@2.3.0: {}
+
+  queue-microtask@1.2.3: {}
+
+  quick-lru@5.1.1: {}
+
+  react-is@18.2.0: {}
+
+  read-cache@1.0.0:
+    dependencies:
+      pify: 2.3.0
+
+  readdirp@3.6.0:
+    dependencies:
+      picomatch: 2.3.1
+
+  resolve-from@4.0.0: {}
+
+  resolve@1.22.6:
+    dependencies:
+      is-core-module: 2.13.0
+      path-parse: 1.0.7
+      supports-preserve-symlinks-flag: 1.0.0
+
+  reusify@1.0.4: {}
+
+  rimraf@2.7.1:
+    dependencies:
+      glob: 7.2.3
+
+  rimraf@3.0.2:
+    dependencies:
+      glob: 7.2.3
+
+  rollup@3.29.4:
+    optionalDependencies:
+      fsevents: 2.3.3
+
+  run-parallel@1.2.0:
+    dependencies:
+      queue-microtask: 1.2.3
+
+  sade@1.8.1:
+    dependencies:
+      mri: 1.2.0
+
+  sander@0.5.1:
+    dependencies:
+      es6-promise: 3.3.1
+      graceful-fs: 4.2.11
+      mkdirp: 0.5.6
+      rimraf: 2.7.1
+
+  sass@1.68.0:
+    dependencies:
+      chokidar: 3.5.3
+      immutable: 4.3.4
+      source-map-js: 1.0.2
+
+  semver@7.5.4:
+    dependencies:
+      lru-cache: 6.0.0
+
+  set-cookie-parser@2.6.0: {}
+
+  shebang-command@2.0.0:
+    dependencies:
+      shebang-regex: 3.0.0
+
+  shebang-regex@3.0.0: {}
+
+  siginfo@2.0.0: {}
+
+  sirv@2.0.3:
+    dependencies:
+      '@polka/url': 1.0.0-next.23
+      mrmime: 1.0.1
+      totalist: 3.0.1
+
+  slash@3.0.0: {}
+
+  sorcery@0.10.0:
+    dependencies:
+      buffer-crc32: 0.2.13
+      minimist: 1.2.8
+      sander: 0.5.1
+      sourcemap-codec: 1.4.8
+
+  source-map-js@1.0.2: {}
+
+  sourcemap-codec@1.4.8: {}
+
+  stackback@0.0.2: {}
+
+  std-env@3.4.3: {}
+
+  strip-ansi@6.0.1:
+    dependencies:
+      ansi-regex: 5.0.1
+
+  strip-indent@3.0.0:
+    dependencies:
+      min-indent: 1.0.1
+
+  strip-json-comments@3.1.1: {}
+
+  strip-literal@1.3.0:
+    dependencies:
+      acorn: 8.10.0
+
+  sucrase@3.34.0:
+    dependencies:
+      '@jridgewell/gen-mapping': 0.3.3
+      commander: 4.1.1
+      glob: 7.1.6
+      lines-and-columns: 1.2.4
+      mz: 2.7.0
+      pirates: 4.0.6
+      ts-interface-checker: 0.1.13
+
+  supports-color@7.2.0:
+    dependencies:
+      has-flag: 4.0.0
+
+  supports-preserve-symlinks-flag@1.0.0: {}
+
+  svelte-hmr@0.15.3(svelte@3.59.2):
+    dependencies:
+      svelte: 3.59.2
+
+  svelte-preprocess@4.10.7(postcss-load-config@3.1.4(postcss@8.4.31))(postcss@8.4.31)(sass@1.68.0)(svelte@3.59.2)(typescript@4.9.5):
+    dependencies:
+      '@types/pug': 2.0.7
+      '@types/sass': 1.45.0
+      detect-indent: 6.1.0
+      magic-string: 0.25.9
+      sorcery: 0.10.0
+      strip-indent: 3.0.0
+      svelte: 3.59.2
+    optionalDependencies:
+      postcss: 8.4.31
+      postcss-load-config: 3.1.4(postcss@8.4.31)
+      sass: 1.68.0
+      typescript: 4.9.5
+
+  svelte-search@2.0.1: {}
+
+  svelte2tsx@0.6.23(svelte@3.59.2)(typescript@4.9.5):
+    dependencies:
+      dedent-js: 1.0.1
+      pascal-case: 3.1.2
+      svelte: 3.59.2
+      typescript: 4.9.5
+
+  svelte@3.59.2: {}
+
+  tailwindcss@3.3.1(postcss@8.4.31):
+    dependencies:
+      arg: 5.0.2
+      chokidar: 3.5.3
+      color-name: 1.1.4
+      didyoumean: 1.2.2
+      dlv: 1.1.3
+      fast-glob: 3.3.1
+      glob-parent: 6.0.2
+      is-glob: 4.0.3
+      jiti: 1.20.0
+      lilconfig: 2.1.0
+      micromatch: 4.0.5
+      normalize-path: 3.0.0
+      object-hash: 3.0.0
+      picocolors: 1.0.0
+      postcss: 8.4.31
+      postcss-import: 14.1.0(postcss@8.4.31)
+      postcss-js: 4.0.1(postcss@8.4.31)
+      postcss-load-config: 3.1.4(postcss@8.4.31)
+      postcss-nested: 6.0.0(postcss@8.4.31)
+      postcss-selector-parser: 6.0.13
+      postcss-value-parser: 4.2.0
+      quick-lru: 5.1.1
+      resolve: 1.22.6
+      sucrase: 3.34.0
+    transitivePeerDependencies:
+      - ts-node
+
+  text-table@0.2.0: {}
+
+  thenify-all@1.6.0:
+    dependencies:
+      thenify: 3.3.1
+
+  thenify@3.3.1:
+    dependencies:
+      any-promise: 1.3.0
+
+  tiny-glob@0.2.9:
+    dependencies:
+      globalyzer: 0.1.0
+      globrex: 0.1.2
+
+  tinybench@2.5.1: {}
+
+  tinypool@0.6.0: {}
+
+  tinyspy@2.2.0: {}
+
+  to-regex-range@5.0.1:
+    dependencies:
+      is-number: 7.0.0
+
+  totalist@3.0.1: {}
+
+  ts-interface-checker@0.1.13: {}
+
+  tslib@1.14.1: {}
+
+  tslib@2.6.2: {}
+
+  tsutils@3.21.0(typescript@4.9.5):
+    dependencies:
+      tslib: 1.14.1
+      typescript: 4.9.5
+
+  type-check@0.4.0:
+    dependencies:
+      prelude-ls: 1.2.1
+
+  type-detect@4.0.8: {}
+
+  type-fest@0.20.2: {}
+
+  typescript-svelte-plugin@0.3.34(svelte@3.59.2)(typescript@4.9.5):
+    dependencies:
+      '@jridgewell/sourcemap-codec': 1.4.15
+      svelte2tsx: 0.6.23(svelte@3.59.2)(typescript@4.9.5)
+    transitivePeerDependencies:
+      - svelte
+      - typescript
+
+  typescript@4.9.5: {}
+
+  ufo@1.3.1: {}
+
+  undici@6.11.1: {}
+
+  update-browserslist-db@1.0.13(browserslist@4.22.1):
+    dependencies:
+      browserslist: 4.22.1
+      escalade: 3.1.1
+      picocolors: 1.0.0
+
+  uri-js@4.4.1:
+    dependencies:
+      punycode: 2.3.0
+
+  util-deprecate@1.0.2: {}
+
+  uuid@9.0.0: {}
+
+  vite-node@0.33.0(@types/node@20.8.2)(sass@1.68.0):
+    dependencies:
+      cac: 6.7.14
+      debug: 4.3.4
+      mlly: 1.4.2
+      pathe: 1.1.1
+      picocolors: 1.0.0
+      vite: 4.5.3(@types/node@20.8.2)(sass@1.68.0)
+    transitivePeerDependencies:
+      - '@types/node'
+      - less
+      - lightningcss
+      - sass
+      - stylus
+      - sugarss
+      - supports-color
+      - terser
+
+  vite@4.5.3(@types/node@20.8.2)(sass@1.68.0):
+    dependencies:
+      esbuild: 0.18.13
+      postcss: 8.4.31
+      rollup: 3.29.4
+    optionalDependencies:
+      '@types/node': 20.8.2
+      fsevents: 2.3.3
+      sass: 1.68.0
+
+  vitefu@0.2.4(vite@4.5.3(@types/node@20.8.2)(sass@1.68.0)):
+    optionalDependencies:
+      vite: 4.5.3(@types/node@20.8.2)(sass@1.68.0)
+
+  vitest@0.33.0(sass@1.68.0):
+    dependencies:
+      '@types/chai': 4.3.6
+      '@types/chai-subset': 1.3.3
+      '@types/node': 20.8.2
+      '@vitest/expect': 0.33.0
+      '@vitest/runner': 0.33.0
+      '@vitest/snapshot': 0.33.0
+      '@vitest/spy': 0.33.0
+      '@vitest/utils': 0.33.0
+      acorn: 8.10.0
+      acorn-walk: 8.2.0
+      cac: 6.7.14
+      chai: 4.3.10
+      debug: 4.3.4
+      local-pkg: 0.4.3
+      magic-string: 0.30.4
+      pathe: 1.1.1
+      picocolors: 1.0.0
+      std-env: 3.4.3
+      strip-literal: 1.3.0
+      tinybench: 2.5.1
+      tinypool: 0.6.0
+      vite: 4.5.3(@types/node@20.8.2)(sass@1.68.0)
+      vite-node: 0.33.0(@types/node@20.8.2)(sass@1.68.0)
+      why-is-node-running: 2.2.2
+    transitivePeerDependencies:
+      - less
+      - lightningcss
+      - sass
+      - stylus
+      - sugarss
+      - supports-color
+      - terser
+
+  which@2.0.2:
+    dependencies:
+      isexe: 2.0.0
+
+  why-is-node-running@2.2.2:
+    dependencies:
+      siginfo: 2.0.0
+      stackback: 0.0.2
+
+  wrappy@1.0.2: {}
+
+  yallist@4.0.0: {}
+
+  yaml@1.10.2: {}
+
+  yocto-queue@0.1.0: {}
+
+  yocto-queue@1.0.0: {}
diff --git a/packages/grid/frontend/src/_routes/(app)/account/+page.svelte b/packages/grid/frontend/src/_routes/(app)/account/+page.svelte
index 10780f2c120..f9771b7d562 100644
--- a/packages/grid/frontend/src/_routes/(app)/account/+page.svelte
+++ b/packages/grid/frontend/src/_routes/(app)/account/+page.svelte
@@ -6,7 +6,7 @@
   import GreenCheck from '$lib/components/icons/GreenCheck.svelte';
   import Info from '$lib/components/icons/Info.svelte';
 
-  let showDeleteNodeModal = false;
+  let showDeleteServerModal = false;
   let showDeleteAccountModal = false;
   let showDeleteConfirmModal = false;
 
@@ -23,7 +23,7 @@
   >
     
     

-      Your profile information is public-facing information that other users and node owners can
+      Your profile information is public-facing information that other users and server owners can
       see.

@@ -85,8 +85,8 @@

       When you delete your user account all information relating to you will be deleted as well
-      as any permissions and requests. If you are the domain owner the domain node will be
-      deleted as well and will be closed to all users. To transfer ownership of a domain node
+      as any permissions and requests. If you are the datasite owner the datasite server will be
+      deleted as well and will be closed to all users. To transfer ownership of a datasite server
       before deleting your account you can follow the instructions here

@@ -127,27 +127,27 @@
   {/if}
-  {#if showDeleteNodeModal}
+  {#if showDeleteServerModal}
-

Are you sure you want to delete your node?

+

Are you sure you want to delete your server?

-          Because you are the domain owner, the domain node along with all uploaded datasets, user
+          Because you are the datasite owner, the datasite server along with all uploaded datasets, user
           accounts, and requests will be deleted. All network memberships will also be removed. If you
-          would like to keep this domain node but no longer want to be an owner press “cancel” and
-          follow the instructions here to transfer ownership of your domain node.
+          would like to keep this datasite server but no longer want to be an owner press “cancel” and
+          follow the instructions here to transfer ownership of your datasite server.

          Cancel
@@ -165,7 +165,7 @@

          To help us improve future experiences could you share with us any frustrations or
-          suggestions you have with or for the PyGridUI Platform?
+          suggestions you have with or for the Syft UI Platform?

diff --git a/packages/grid/frontend/src/_routes/(app)/config/+page.svelte b/packages/grid/frontend/src/_routes/(app)/config/+page.svelte index 03469b6e007..80b79071e63 100644 --- a/packages/grid/frontend/src/_routes/(app)/config/+page.svelte +++ b/packages/grid/frontend/src/_routes/(app)/config/+page.svelte @@ -19,7 +19,7 @@ let current_user = data.current_user let tabs = [ - { label: "Domain", id: "tab1" }, + { label: "Datasite", id: "tab1" }, // { label: 'Connection', id: 'tab2' } // { label: 'Permissions', id: 'tab3' }, ] @@ -27,7 +27,7 @@ let currentTab = tabs[0].id $: profileInformation = [ - { label: "Domain name", id: "domain_name", value: metadata?.name }, + { label: "Datasite name", id: "datasite_name", value: metadata?.name }, { label: "Organization", id: "organization", @@ -44,7 +44,7 @@ const handleUpdate = async () => { let newMetadata = { - name: openModal === "domain_name" ? name : null, + name: openModal === "datasite_name" ? name : null, organization: openModal === "organization" ? organization : null, description: openModal === "description" ? description : null, } @@ -69,7 +69,7 @@ $: current_user = data.current_user $: metadata = data.metadata - $: domainInitials = getInitials(metadata?.name) + $: datasiteInitials = getInitials(metadata?.name) $: userInitials = getInitials(current_user?.name) @@ -82,7 +82,7 @@ Loading... {:else} - +
@@ -121,7 +121,7 @@

ID#:

-            {metadata.node_id}
+            {metadata.server_id}

Deployed on:

@@ -141,11 +141,11 @@
              Turned off

-            When remote execution is turned on for the domain, it means that you are allowing
+            When remote execution is turned on for the datasite, it means that you are allowing
             PySyft to execute code submitted by users as is against the real private data
             instead of the mock data when a request is approved. If this is a third-party
             user please review the function and policy code carefully before approving this request. You can
-            turn off "Remote Execution" for your domain by going to your "Domain Settings" or
+            turn off "Remote Execution" for your datasite by going to your "Datasite Settings" or
             clicking the link below.

@@ -167,8 +167,8 @@
- {#if openModal === "domain_name"} - + {#if openModal === "datasite_name"} + {:else if openModal === "organization"} { diff --git a/packages/grid/frontend/src/_routes/(app)/users/[slug]/+page.svelte b/packages/grid/frontend/src/_routes/(app)/users/[slug]/+page.svelte index f893eece0e9..a261ab5ebae 100644 --- a/packages/grid/frontend/src/_routes/(app)/users/[slug]/+page.svelte +++ b/packages/grid/frontend/src/_routes/(app)/users/[slug]/+page.svelte @@ -3,7 +3,7 @@ import Badge from "$lib/components/Badge.svelte" import CaretLeft from "$lib/components/icons/CaretLeft.svelte" import { getInitials, getUserRole } from "$lib/utils" - import type { UserView } from "../../../../types/domain/users" + import type { UserView } from "../../../../types/datasite/users" import type { PageData } from "./$types" export let data: PageData diff --git a/packages/grid/frontend/src/_routes/(auth)/login/+page.server.ts b/packages/grid/frontend/src/_routes/(auth)/login/+page.server.ts index 55ae3091828..38c9793b054 100644 --- a/packages/grid/frontend/src/_routes/(auth)/login/+page.server.ts +++ b/packages/grid/frontend/src/_routes/(auth)/login/+page.server.ts @@ -8,7 +8,7 @@ export const actions: Actions = { default: async ({ cookies, request }) => { try { const data = await request.formData() - const { email, password, node_id } = get_form_data_values(data) + const { email, password, server_id } = get_form_data_values(data) if ( !email || @@ -16,14 +16,14 @@ export const actions: Actions = { typeof email !== "string" || typeof password !== "string" ) { - throw new Error(`invalid form data: email:${email} node:${node_id}`) + throw new Error(`invalid form data: email:${email} server:${server_id}`) } const { signing_key, uid } = await login({ email, password }) const cookie_user = { uid, - node_id, + server_id, } cookies.set( diff --git a/packages/grid/frontend/src/_routes/(auth)/login/+page.svelte b/packages/grid/frontend/src/_routes/(auth)/login/+page.svelte index 1b6ddaf5b69..26b2438d9f2 100644 --- a/packages/grid/frontend/src/_routes/(auth)/login/+page.svelte +++ b/packages/grid/frontend/src/_routes/(auth)/login/+page.svelte @@ -2,10 +2,10 @@ import { enhance } from "$app/forms" import Button from "$lib/components/Button.svelte" import Modal from "$lib/components/Modal.svelte" - import DomainMetadataPanel from "$lib/components/authentication/DomainMetadataPanel.svelte" + import DatasiteMetadataPanel from "$lib/components/authentication/DatasiteMetadataPanel.svelte" import Input from "$lib/components/Input.svelte" - import DomainOnlineIndicator from "$lib/components/DomainOnlineIndicator.svelte" - import type { DomainOnlineStatus } from "../../../types/domain/onlineIndicator" + import DatasiteOnlineIndicator from "$lib/components/DatasiteOnlineIndicator.svelte" + import type { DatasiteOnlineStatus } from "../../../types/datasite/onlineIndicator" import type { PageData, ActionData } from "./$types" export let data: PageData @@ -13,13 +13,13 @@ const { metadata } = data - let status: DomainOnlineStatus = "online" + let status: DatasiteOnlineStatus = "online"
- +
@@ -38,16 +38,16 @@

{/if}
- +

        {#if status === "pending"}
          Checking connection
        {:else}
-          Domain {status}
+          Datasite {status}
        {/if}

- + import { enhance } from "$app/forms" import Button from "$lib/components/Button.svelte" - import DomainMetadataPanel from "$lib/components/authentication/DomainMetadataPanel.svelte" + import DatasiteMetadataPanel from "$lib/components/authentication/DatasiteMetadataPanel.svelte" import Input from "$lib/components/Input.svelte" import Modal from "$lib/components/Modal.svelte" import type { ActionData, PageData } from "./$types" @@ -15,7 +15,7 @@
- +
{ try { const { page_size, page_index } = get_url_page_params(url) - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const dataset = await jsSyftCall({ path: "dataset.get_all", payload: { page_size, page_index }, - node_id, + server_id, signing_key, }) diff --git a/packages/grid/frontend/src/_routes/_syft_api/metadata/+server.ts b/packages/grid/frontend/src/_routes/_syft_api/metadata/+server.ts index c774c41899b..366cf4dba1a 100644 --- a/packages/grid/frontend/src/_routes/_syft_api/metadata/+server.ts +++ b/packages/grid/frontend/src/_routes/_syft_api/metadata/+server.ts @@ -17,9 +17,9 @@ export const GET: RequestHandler = async () => { highest_version: metadata_raw?.highest_version, lowest_version: metadata_raw?.lowest_version, name: metadata_raw?.name, - node_id: metadata_raw?.id?.value, - node_side: metadata_raw?.node_side_type, - node_type: metadata_raw?.node_type?.value, + server_id: metadata_raw?.id?.value, + server_side: metadata_raw?.server_side_type, + server_type: metadata_raw?.server_type?.value, organization: metadata_raw?.organization, signup_enabled: metadata_raw?.signup_enabled, syft_version: metadata_raw?.syft_version, @@ -32,7 +32,7 @@ export const GET: RequestHandler = async () => { export const PATCH: RequestHandler = async ({ cookies, request }) => { try { - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const metadata = await request.json() @@ -41,10 +41,10 @@ export const PATCH: RequestHandler = async ({ cookies, request }) => { payload: { settings: { ...metadata, - fqn: "syft.service.settings.settings.NodeSettingsUpdate", + fqn: "syft.service.settings.settings.ServerSettingsUpdate", }, }, - node_id, + server_id, signing_key, }) diff --git a/packages/grid/frontend/src/_routes/_syft_api/users/+server.ts b/packages/grid/frontend/src/_routes/_syft_api/users/+server.ts index f9351d70286..f757af854cf 100644 --- a/packages/grid/frontend/src/_routes/_syft_api/users/+server.ts +++ b/packages/grid/frontend/src/_routes/_syft_api/users/+server.ts @@ -8,12 +8,12 @@ export const GET: RequestHandler = async ({ cookies, url }) => { const page_size = parseInt(url.searchParams.get("page_size") || "10") const page_index = parseInt(url.searchParams.get("page_index") || "0") - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const users = await jsSyftCall({ path: "user.get_all", payload: { page_size, page_index }, - node_id, + server_id, signing_key, }) @@ -26,7 +26,7 @@ export const GET: RequestHandler = async ({ cookies, url }) => { export const POST: RequestHandler = async ({ cookies, request }) => { try { - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const new_user = await request.json() @@ -35,7 +35,7 @@ export const POST: RequestHandler = async ({ cookies, request }) => { payload: { user_create: { ...new_user, fqn: "syft.service.user.user.UserCreate" }, }, - node_id, + server_id, signing_key, }) diff --git a/packages/grid/frontend/src/_routes/_syft_api/users/[uid]/+server.ts b/packages/grid/frontend/src/_routes/_syft_api/users/[uid]/+server.ts index 491d773808a..2523a2776db 100644 --- a/packages/grid/frontend/src/_routes/_syft_api/users/[uid]/+server.ts +++ b/packages/grid/frontend/src/_routes/_syft_api/users/[uid]/+server.ts @@ -7,12 +7,12 @@ export const GET: RequestHandler = async ({ cookies, params 
}) => { try { const requested_uid = params.uid - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const user = await jsSyftCall({ path: "user.view", payload: { uid: { value: requested_uid, fqn: "syft.types.uid.UID" } }, - node_id, + server_id, signing_key, }) @@ -34,7 +34,7 @@ export const GET: RequestHandler = async ({ cookies, params }) => { export const PUT: RequestHandler = async ({ cookies, params, request }) => { try { - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const requested_uid = params.uid const body = await request.json() @@ -54,7 +54,7 @@ export const PUT: RequestHandler = async ({ cookies, params, request }) => { fqn: "syft.service.user.user.UserUpdate", }, }, - node_id, + server_id, signing_key, }) diff --git a/packages/grid/frontend/src/_routes/_syft_api/users/search/+server.ts b/packages/grid/frontend/src/_routes/_syft_api/users/search/+server.ts index 19678b3c292..7ec0322ff51 100644 --- a/packages/grid/frontend/src/_routes/_syft_api/users/search/+server.ts +++ b/packages/grid/frontend/src/_routes/_syft_api/users/search/+server.ts @@ -9,7 +9,7 @@ export const GET: RequestHandler = async ({ cookies, url }) => { const page_index = parseInt(url.searchParams.get("page_index") || "0") const name = url.searchParams.get("name") - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const users = await jsSyftCall({ path: "user.search", @@ -21,7 +21,7 @@ export const GET: RequestHandler = async ({ cookies, url }) => { page_index, page_size, }, - node_id, + server_id, signing_key, }) diff --git a/packages/grid/frontend/src/app.html b/packages/grid/frontend/src/app.html index 5ee015f0cf1..e1934dec53b 100644 --- a/packages/grid/frontend/src/app.html +++ b/packages/grid/frontend/src/app.html @@ -5,7 +5,7 @@ %sveltekit.head% - PyGrid + Syft UI
%sveltekit.body%
diff --git a/packages/grid/frontend/src/lib/api/syft_api.ts b/packages/grid/frontend/src/lib/api/syft_api.ts index f7ddec743f1..2261bf99455 100644 --- a/packages/grid/frontend/src/lib/api/syft_api.ts +++ b/packages/grid/frontend/src/lib/api/syft_api.ts @@ -7,7 +7,7 @@ const SYFT_MSG_URL = `${API_BASE_URL}/api_call` const FQN = { VERIFY_KEY: "nacl.signing.VerifyKey", - SYFT_VERIFY_KEY: "syft.node.credentials.SyftVerifyKey", + SYFT_VERIFY_KEY: "syft.server.credentials.SyftVerifyKey", UID: "syft.types.uid.UID", SYFT_API_CALL: "syft.client.api.SyftAPICall", SIGNED_SYFT_API_CALL: "syft.client.api.SignedSyftAPICall", @@ -17,7 +17,7 @@ interface SyftAPICall { path: string payload: Record signing_key: string | Uint8Array - node_id: string + server_id: string } const getKeyPair = (signing_key: string | Uint8Array) => @@ -28,12 +28,12 @@ const getKeyPair = (signing_key: string | Uint8Array) => const getSignedMessage = ({ path, payload, - node_id, + server_id, signing_key, }: Omit & { signing_key: Uint8Array }) => { const syftAPIPayload = { path, - node_uid: { value: node_id, fqn: FQN.UID }, + server_uid: { value: server_id, fqn: FQN.UID }, args: [], kwargs: new Map(Object.entries(payload)), fqn: FQN.SYFT_API_CALL, @@ -87,13 +87,13 @@ export const jsSyftCall = async ({ path, payload, signing_key, - node_id, + server_id, }: SyftAPICall): Promise => { const key = getKeyPair(signing_key) const signedMessage = getSignedMessage({ path, payload, - node_id, + server_id, signing_key: key, }) return await send(signedMessage) diff --git a/packages/grid/frontend/src/lib/client/jsPyClassMap.js b/packages/grid/frontend/src/lib/client/jsPyClassMap.js index 91bce7c022d..2cc055b774f 100644 --- a/packages/grid/frontend/src/lib/client/jsPyClassMap.js +++ b/packages/grid/frontend/src/lib/client/jsPyClassMap.js @@ -3,7 +3,7 @@ import { UUID } from "$lib/client/objects/uid.ts" import { APICall } from "$lib/client/messages/syftMessage.ts" export const classMapping = { - "syft.node.credentials.SyftVerifyKey": SyftVerifyKey, + "syft.server.credentials.SyftVerifyKey": SyftVerifyKey, "nacl.signing.VerifyKey": VerifyKey, "syft.types.uid.UID": UUID, "syft.client.api.SyftAPICall": APICall, diff --git a/packages/grid/frontend/src/lib/client/jsclient/jsClient.svelte b/packages/grid/frontend/src/lib/client/jsclient/jsClient.svelte index c68c8364ead..3baa59e61db 100644 --- a/packages/grid/frontend/src/lib/client/jsclient/jsClient.svelte +++ b/packages/grid/frontend/src/lib/client/jsclient/jsClient.svelte @@ -25,7 +25,7 @@ } this.userId = window.localStorage.getItem('id'); - this.nodeId = window.localStorage.getItem('nodeId'); + this.serverId = window.localStorage.getItem('serverId'); return this; } @@ -150,7 +150,7 @@ // Create a new object called 'newMetadata' with updated fields and a new property called 'fqn' with a value. const updateMetadata = { ...newMetadata, - fqn: 'syft.service.metadata.node_metadata.NodeMetadataUpdate' + fqn: 'syft.service.metadata.server_metadata.ServerMetadataUpdate' }; // Create a new object called 'reqFields' with one property: 'metadata', which is set to 'updateMetadata'. @@ -249,7 +249,7 @@ * @throws {Error} An error is thrown if the message signature and public key don't match. */ async send(args, kwargs, path) { - const signedCall = new APICall(this.nodeId, path, args, kwargs).sign(this.key, this.serde); + const signedCall = new APICall(this.serverId, path, args, kwargs).sign(this.key, this.serde); try { // Make a POST request to the server with the signed call. 
diff --git a/packages/grid/frontend/src/lib/client/messages/syftMessage.ts b/packages/grid/frontend/src/lib/client/messages/syftMessage.ts index 1dbb2a82285..24d5dabd56c 100644 --- a/packages/grid/frontend/src/lib/client/messages/syftMessage.ts +++ b/packages/grid/frontend/src/lib/client/messages/syftMessage.ts @@ -30,14 +30,14 @@ export class SignedAPICall { } export class APICall { - node_uid: UUID + server_uid: UUID path: string args: object kwargs: object blocking: boolean constructor(id, path, args, kwargs, blocking = true) { - this.node_uid = new UUID(id) + this.server_uid = new UUID(id) this.path = path this.args = args if (kwargs) { diff --git a/packages/grid/frontend/src/lib/client/objects/key.ts b/packages/grid/frontend/src/lib/client/objects/key.ts index a340db2a9c0..964d0debb5b 100644 --- a/packages/grid/frontend/src/lib/client/objects/key.ts +++ b/packages/grid/frontend/src/lib/client/objects/key.ts @@ -14,6 +14,6 @@ export class SyftVerifyKey { constructor(verify_key: Uint8Array) { this.verify_key = new VerifyKey(verify_key) - this.fqn = "syft.node.credentials.SyftVerifyKey" + this.fqn = "syft.server.credentials.SyftVerifyKey" } } diff --git a/packages/grid/frontend/src/lib/client/objects/userCode.ts b/packages/grid/frontend/src/lib/client/objects/userCode.ts index 733487eb1ed..49e4e96cc6e 100644 --- a/packages/grid/frontend/src/lib/client/objects/userCode.ts +++ b/packages/grid/frontend/src/lib/client/objects/userCode.ts @@ -39,7 +39,7 @@ interface InputObject { class InputPolicy { id: UUID inputs: Map - node_id?: UUID + server_id?: UUID /** * Creates a new instance of InputPolicy. diff --git a/packages/grid/frontend/src/lib/components/AccountSettings.svelte b/packages/grid/frontend/src/lib/components/AccountSettings.svelte index 0b08334b172..9aa88814ef4 100644 --- a/packages/grid/frontend/src/lib/components/AccountSettings.svelte +++ b/packages/grid/frontend/src/lib/components/AccountSettings.svelte @@ -296,8 +296,8 @@

      When you delete your user account all information relating to you will be deleted as well as
-      any permissions and requests. If you are the domain owner the domain node will be deleted as
-      well and will be closed to all users. To transfer ownership of a domain node before deleting
+      any permissions and requests. If you are the datasite owner the datasite server will be deleted as
+      well and will be closed to all users. To transfer ownership of a datasite server before deleting
      your account you can follow the instructions here.

diff --git a/packages/grid/frontend/src/lib/components/Datasets/DatasetListItem.svelte b/packages/grid/frontend/src/lib/components/Datasets/DatasetListItem.svelte index 606ebebce17..b4b1da788e0 100644 --- a/packages/grid/frontend/src/lib/components/Datasets/DatasetListItem.svelte +++ b/packages/grid/frontend/src/lib/components/Datasets/DatasetListItem.svelte @@ -3,7 +3,7 @@ import TableIcon from '$lib/components/icons/TableIcon.svelte'; import Tooltip from '$lib/components/Tooltip.svelte'; import TooltipText from '../TooltipText.svelte'; - import type { Dataset } from '../../../types/domain/dataset'; + import type { Dataset } from '../../../types/datasite/dataset'; export let dataset: Dataset; diff --git a/packages/grid/frontend/src/lib/components/Datasets/DatasetModalNew.svelte b/packages/grid/frontend/src/lib/components/Datasets/DatasetModalNew.svelte index 66587a5fd63..3b0cb81318a 100644 --- a/packages/grid/frontend/src/lib/components/Datasets/DatasetModalNew.svelte +++ b/packages/grid/frontend/src/lib/components/Datasets/DatasetModalNew.svelte @@ -43,10 +43,10 @@ > 2
-

Install HAGrid by running the code below in your Jupyter Notebook

+

-

pip install -U hagrid

+

  • @@ -57,12 +57,10 @@ 3
  • - Once HAGrid is installed open the "Upload Dataset" quickstart tutorial notebook by - running the code below in your Jupyter Notebook.

    -

    hagrid quickstart

    +

    diff --git a/packages/grid/frontend/src/lib/components/Datasets/DatasetNoneFound.svelte b/packages/grid/frontend/src/lib/components/Datasets/DatasetNoneFound.svelte index fe02b87ef03..21977baa9ea 100644 --- a/packages/grid/frontend/src/lib/components/Datasets/DatasetNoneFound.svelte +++ b/packages/grid/frontend/src/lib/components/Datasets/DatasetNoneFound.svelte @@ -1,7 +1,7 @@

    No Datasets Uploaded

-      To begin adding datasets to this domain node please click the "+" button and follow
+      To begin adding datasets to this datasite server please click the "+" button and follow
       instructions.

    diff --git a/packages/grid/frontend/src/lib/components/DatasiteOnlineIndicator.svelte b/packages/grid/frontend/src/lib/components/DatasiteOnlineIndicator.svelte new file mode 100644 index 00000000000..b534a481813 --- /dev/null +++ b/packages/grid/frontend/src/lib/components/DatasiteOnlineIndicator.svelte @@ -0,0 +1,13 @@ + + +
    + +
    diff --git a/packages/grid/frontend/src/lib/components/DomainOnlineIndicator.svelte b/packages/grid/frontend/src/lib/components/DomainOnlineIndicator.svelte deleted file mode 100644 index 1232283ac6c..00000000000 --- a/packages/grid/frontend/src/lib/components/DomainOnlineIndicator.svelte +++ /dev/null @@ -1,13 +0,0 @@ - - -
    - -
    diff --git a/packages/grid/frontend/src/lib/components/Navigation/SideNavDOHandbook.svelte b/packages/grid/frontend/src/lib/components/Navigation/SideNavDOHandbook.svelte index f72f9dddd45..081139b9cd8 100644 --- a/packages/grid/frontend/src/lib/components/Navigation/SideNavDOHandbook.svelte +++ b/packages/grid/frontend/src/lib/components/Navigation/SideNavDOHandbook.svelte @@ -27,7 +27,7 @@

    Data Owner handbook

        Check out the data owner handbook to learn more tips & tricks about how to manage your
-        domain node.
+        datasite server.

    diff --git a/packages/grid/frontend/src/lib/components/Navigation/TopNav.svelte b/packages/grid/frontend/src/lib/components/Navigation/TopNav.svelte index 9e127766cf3..1a3dc762b4e 100644 --- a/packages/grid/frontend/src/lib/components/Navigation/TopNav.svelte +++ b/packages/grid/frontend/src/lib/components/Navigation/TopNav.svelte @@ -13,7 +13,7 @@ { href: "", icon: BellIcon, disabled: true }, ] - $: domainInitials = getInitials(metadata.name) + $: datasiteInitials = getInitials(metadata.name) $: userInitials = getInitials(user.name) @@ -21,7 +21,7 @@ class="w-full py-2 px-6 flex items-center justify-between tablet:justify-end shadow-topbar-1 tablet:shadow-none" >
    - +
      {#each links as link} diff --git a/packages/grid/frontend/src/lib/components/OnboardingModal.svelte b/packages/grid/frontend/src/lib/components/OnboardingModal.svelte index 8e381c18630..9f2fe27871f 100644 --- a/packages/grid/frontend/src/lib/components/OnboardingModal.svelte +++ b/packages/grid/frontend/src/lib/components/OnboardingModal.svelte @@ -7,7 +7,7 @@ import Progress from "$lib/components/Progress.svelte" import Input from "$lib/components/Input.svelte" import ButtonGhost from "$lib/components/ButtonGhost.svelte" - import NodeIcon from "$lib/components/icons/NodeIcon.svelte" + import ServerIcon from "$lib/components/icons/ServerIcon.svelte" import CheckIcon from "$lib/components/icons/CheckIcon.svelte" export let metadata @@ -23,15 +23,15 @@ website: "", } - let domainSettings = { + let datasiteSettings = { name: "", description: "", organization: "", on_board: false, } - let checkRequiredDomainFields = () => { - return domainSettings.name !== "" ? true : false + let checkRequiredDatasiteFields = () => { + return datasiteSettings.name !== "" ? true : false } let checkRequiredUserFields = () => { @@ -49,7 +49,7 @@ headers: { "Content-Type": "application/json", }, - body: JSON.stringify(domainSettings), + body: JSON.stringify(datasiteSettings), }) await fetch(`/_syft_api/users/${userSettings.id}`, { @@ -86,7 +86,7 @@ website: "", } - domainSettings = { + datasiteSettings = { name: "", description: "", organization: "", @@ -105,13 +105,13 @@ Welcome to PyGrid

-            Welcome to PyGrid Admin!
+            Welcome to Syft UI!

    Step 1 of 4

    @@ -125,7 +125,7 @@

-            Congratulations on logging into {metadata?.name ?? ""} node. This wizard
+            Congratulations on logging into {metadata?.name ?? ""} server. This wizard
             will help get you started in setting up your user account. You can skip
             this wizard by pressing “Cancel” below. You can edit any of your responses
             later by going to "Account Settings" indicated by your avatar in the top
@@ -148,10 +148,10 @@

    - +
    -

    Domain Profile

    +

    Datasite Profile

    Step 2 of 4

    @@ -182,28 +182,28 @@

-              Let's begin by describing some basic information about this domain
-              node. This information will be shown to outside users to help them
-              find and understand what your domain offers.
+              Let's begin by describing some basic information about this datasite
+              server. This information will be shown to outside users to help them
+              find and understand what your datasite offers.

    @@ -232,7 +232,7 @@

    - Now that we have described our domain, let's update our password and + Now that we have described our datasite, let's update our password and describe some basic information about ourselves for our "User Profile". User profile information will be shown to teammates and collaborators when working on studies together. diff --git a/packages/grid/frontend/src/lib/components/Users/UserCreateModal.svelte b/packages/grid/frontend/src/lib/components/Users/UserCreateModal.svelte index 77ebcc3fb70..05c4a8ee18c 100644 --- a/packages/grid/frontend/src/lib/components/Users/UserCreateModal.svelte +++ b/packages/grid/frontend/src/lib/components/Users/UserCreateModal.svelte @@ -16,19 +16,19 @@ roleId: 2, title: "Data Scientist", description: - "This role is for users who will be performing computations on your datasets. They may be users you know directly or those who found your domain through search and discovery. By default this user can see a list of your datasets and can request to get results.", + "This role is for users who will be performing computations on your datasets. They may be users you know directly or those who found your datasite through search and discovery. By default this user can see a list of your datasets and can request to get results.", }, { roleId: 32, title: "Data Owner", description: - "This role is for users on your team who will be responsible for uploading data to the domain.", + "This role is for users on your team who will be responsible for uploading data to the datasite.", }, { roleId: 128, title: "Admin", description: - "This role is for users who will help you manage your node. This should be users you trust as they will be users who will have full permissions to the node.", + "This role is for users who will help you manage your server. This should be users you trust as they will be users who will have full permissions to the server.", }, ] @@ -105,7 +105,7 @@

              To begin let's select the role this user is going to have on your
-              domain node.
+              datasite server.

    {#each cardsContent as { title, description, roleId }} @@ -221,8 +221,8 @@ Welcome to {metadata?.name} {name},
-              You are formally invited you to join {metadata?.name} Domain. Below is
-              your login credentials and the URL to the domain. After logging in you
+              You are formally invited to join {metadata?.name} Datasite. Below are
+              your login credentials and the URL to the datasite. After logging in you
               will be prompted to customize your account.

    {href} diff --git a/packages/grid/frontend/src/lib/components/Users/UserListItem.svelte b/packages/grid/frontend/src/lib/components/Users/UserListItem.svelte index b5ca6737d6f..ad34edaf1cb 100644 --- a/packages/grid/frontend/src/lib/components/Users/UserListItem.svelte +++ b/packages/grid/frontend/src/lib/components/Users/UserListItem.svelte @@ -1,6 +1,6 @@ + +
    + {#if metadata} +
    +
    + +
    + +
    +

    {metadata.name}

    + {#if metadata.organization} +

    {metadata.organization}

    + {/if} +
    +
    +
    +

    Id#:

    + {metadata.server_id} +
    +
    +

    Side:

    +

    {metadata.server_side}

    +
    +
    +

    Type:

    +

    {metadata.server_type}

    +
    +
    + {/if} +
    diff --git a/packages/grid/frontend/src/lib/components/authentication/DomainMetadataPanel.svelte b/packages/grid/frontend/src/lib/components/authentication/DomainMetadataPanel.svelte deleted file mode 100644 index 50cce5ff0f2..00000000000 --- a/packages/grid/frontend/src/lib/components/authentication/DomainMetadataPanel.svelte +++ /dev/null @@ -1,44 +0,0 @@ - - -
    - {#if metadata} -
    -
    - -
    - -
    -

    {metadata.name}

    - {#if metadata.organization} -

    {metadata.organization}

    - {/if} -
    -
    -
    -

    Id#:

    - {metadata.node_id} -
    -
    -

    Side:

    -

    {metadata.node_side}

    -
    -
    -

    Type:

    -

    {metadata.node_type}

    -
    -
    - {/if} -
    diff --git a/packages/grid/frontend/src/lib/components/authentication/Nav.svelte b/packages/grid/frontend/src/lib/components/authentication/Nav.svelte index da591ae26f7..5b2ed997dfa 100644 --- a/packages/grid/frontend/src/lib/components/authentication/Nav.svelte +++ b/packages/grid/frontend/src/lib/components/authentication/Nav.svelte @@ -9,7 +9,7 @@
    PyGrid logo {#if version} diff --git a/packages/grid/frontend/src/lib/components/icons/NodeIcon.svelte b/packages/grid/frontend/src/lib/components/icons/ServerIcon.svelte similarity index 100% rename from packages/grid/frontend/src/lib/components/icons/NodeIcon.svelte rename to packages/grid/frontend/src/lib/components/icons/ServerIcon.svelte diff --git a/packages/grid/frontend/src/lib/utils.ts b/packages/grid/frontend/src/lib/utils.ts index 2ffed30dec5..cd8dcc89b52 100644 --- a/packages/grid/frontend/src/lib/utils.ts +++ b/packages/grid/frontend/src/lib/utils.ts @@ -1,4 +1,4 @@ -import { ServiceRoles } from "../types/domain/users" +import { ServiceRoles } from "../types/datasite/users" import { COOKIES } from "./constants" import type { CookieSerializeOptions } from "cookie" import type { Cookies } from "@sveltejs/kit" @@ -33,7 +33,7 @@ export function getInitials(name: string) { export function logout() { window.localStorage.removeItem("id") - window.localStorage.removeItem("nodeId") + window.localStorage.removeItem("serverId") window.localStorage.removeItem("key") } @@ -51,7 +51,7 @@ export const default_cookie_config: CookieSerializeOptions = { interface CookieData { uid: string - node_id: string + server_id: string signing_key: string } diff --git a/packages/grid/frontend/src/routes/+page.svelte b/packages/grid/frontend/src/routes/+page.svelte index 3637adb7bd1..da607f84d1b 100644 --- a/packages/grid/frontend/src/routes/+page.svelte +++ b/packages/grid/frontend/src/routes/+page.svelte @@ -15,5 +15,5 @@
    - PyGrid + Syft UI
diff --git a/packages/grid/frontend/src/routes/[...all]/+page.svelte b/packages/grid/frontend/src/routes/[...all]/+page.svelte
index b1bb3b1cb01..0994b12a178 100644
--- a/packages/grid/frontend/src/routes/[...all]/+page.svelte
+++ b/packages/grid/frontend/src/routes/[...all]/+page.svelte
@@ -1,6 +1,6 @@
+
+
diff --git a/packages/syft-extras/.archive/examples/chat/requirements.txt b/packages/syft-extras/.archive/examples/chat/requirements.txt
new file mode 100644
index 00000000000..235b7870ea2
--- /dev/null
+++ b/packages/syft-extras/.archive/examples/chat/requirements.txt
@@ -0,0 +1,17 @@
+authlib
+db-dtypes
+fastapi
+google-cloud-bigquery
+httpx
+itsdangerous
+jinja2
+langchain
+langchain-core
+langchain-experimental
+langchain-huggingface
+langchain-ollama
+langchain-openai
+magika
+python-jose
+python-multipart
+uvicorn
diff --git a/packages/syft-extras/.archive/examples/chat/run.pid b/packages/syft-extras/.archive/examples/chat/run.pid
new file mode 100644
index 00000000000..5b95659cc50
--- /dev/null
+++ b/packages/syft-extras/.archive/examples/chat/run.pid
@@ -0,0 +1 @@
+78005
\ No newline at end of file
diff --git a/packages/syft-extras/.archive/examples/chat/run.sh b/packages/syft-extras/.archive/examples/chat/run.sh
new file mode 100755
index 00000000000..a719517d28d
--- /dev/null
+++ b/packages/syft-extras/.archive/examples/chat/run.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+uv venv
+uv pip install -r requirements.txt
+uv run main.py
diff --git a/packages/syft-extras/.archive/examples/fedhr/_.syftperm b/packages/syft-extras/.archive/examples/fedhr/_.syftperm
new file mode 100644
index 00000000000..e63bf5cd23d
--- /dev/null
+++ b/packages/syft-extras/.archive/examples/fedhr/_.syftperm
@@ -0,0 +1 @@
+{"admin": ["madhava@openmined.org"], "read": ["madhava@openmined.org"], "write": ["madhava@openmined.org"], "terminal": false}
\ No newline at end of file
diff --git a/packages/syft-extras/.archive/examples/fedhr/data/data.py b/packages/syft-extras/.archive/examples/fedhr/data/data.py
new file mode 100644
index 00000000000..8b805130e86
--- /dev/null
+++ b/packages/syft-extras/.archive/examples/fedhr/data/data.py
@@ -0,0 +1,73 @@
+from fastapi.responses import JSONResponse
+from pathlib import Path
+import pandas as pd
+
+current_dir = Path(__file__).parent
+
+
+async def handler(request, form_data=None):
+    csv_file_path = current_dir / ".." / "hr.csv"
+    df = pd.read_csv(csv_file_path, header=None, names=["timestamp", "value"])
+
+    # Convert the timestamp column to datetime
+    df["timestamp"] = pd.to_datetime(df["timestamp"], errors="coerce")
+
+    # Filter out any rows where the timestamp could not be parsed
+    df = df.dropna(subset=["timestamp"])
+
+    # Convert the value column to numeric, coercing errors to NaN
+    df["value"] = pd.to_numeric(df["value"], errors="coerce")
+
+    # Drop rows where the value could not be converted to a number or is missing
+    df = df.dropna(subset=["value"])
+
+    # Check if the timestamp column is already timezone-aware
+    if df["timestamp"].dt.tz is None:
+        # Make it timezone-aware if not
+        df["timestamp"] = df["timestamp"].dt.tz_localize("UTC")
+    else:
+        # Convert to UTC if it is already timezone-aware
+        df["timestamp"] = df["timestamp"].dt.tz_convert("UTC")
+
+    # Set the timestamp as the index
+    df.set_index("timestamp", inplace=True)
+
+    # Resample the data to 1-minute intervals, averaging the values
+    resampled_df = df.resample("1T").mean()
+
+    # Filter out rows with NaN values after resampling
+    resampled_df = resampled_df.dropna()
+
+    # Get the current time as a timezone-aware datetime
+    now_utc = pd.Timestamp.now(tz="UTC")
+
+    # Keep only the last 12 hours of data
+    recent = resampled_df.loc[now_utc - pd.Timedelta(hours=12) :]
+
+    # Reset the index to have timestamp as a column
+    recent.reset_index(inplace=True)
+
+    # Convert the timestamp to a string in ISO format
+    recent["timestamp"] = recent["timestamp"].astype(str)
+
+    # Convert the DataFrame to a JSON format
+    json_output = recent.to_dict(orient="records")
+
+    # Return JSON response
+    return JSONResponse(content=json_output)
+
+
+# data = [
+#     {"timestamp": "2024-11-04 12:18:00", "value": 91.0},
+#     {"timestamp": "2024-11-04 12:22:00", "value": 88.0},
+#     {"timestamp": "2024-11-04 12:26:55", "value": 66.0},
+#     {"timestamp": "2024-11-04 12:28:55", "value": 98.5},
+#     {"timestamp": "2024-11-04 12:29:55", "value": 100.98},
+#     {"timestamp": "2024-11-04 12:30:55", "value": 107.5},
+#     {"timestamp": "2024-11-04 12:31:55", "value": 104.0},
+#     {"timestamp": "2024-11-04 12:33:55", "value": 93.0},
+#     {"timestamp": "2024-11-04 12:35:00", "value": 90.0},
+#     {"timestamp": "2024-11-04 12:41:00", "value": 93.0},
+#     {"timestamp": "2024-11-04 12:48:31", "value": 90.0},
+#     {"timestamp": "2024-11-04 12:52:00", "value": 73.0},
+# ]
diff --git a/packages/syft-extras/.archive/examples/fedhr/data/routes.yaml b/packages/syft-extras/.archive/examples/fedhr/data/routes.yaml
new file mode 100644
index 00000000000..8ff45405743
--- /dev/null
+++ b/packages/syft-extras/.archive/examples/fedhr/data/routes.yaml
@@ -0,0 +1,5 @@
+routes:
+  read:
+    file: data.py
+    methods:
+      GET: {}
diff --git a/packages/syft-extras/.archive/examples/fedhr/healthkit_importer.py b/packages/syft-extras/.archive/examples/fedhr/healthkit_importer.py
new file mode 100644
index 00000000000..000bcdd1b2a
--- /dev/null
+++ b/packages/syft-extras/.archive/examples/fedhr/healthkit_importer.py
@@ -0,0 +1,107 @@
+import json
+import os
+from datetime import datetime
+from pathlib import Path
+
+import pandas as pd
+from fastapi.responses import HTMLResponse, JSONResponse
+
+current_dir = Path(__file__).parent
+
+
+async def get_page():
+    with open(current_dir / "page.html") as f:
+        return f.read()
+
+
+async def handler(request, form_data=None):
+    request_type = request.method
+    headers = dict(request.headers)
+
+    response = {}
+    if request_type == "GET":
+        response = await get_page()
+        return HTMLResponse(response)
+    elif request_type == "POST":
+        try:
+            response = await handle_post(request)
+        except Exception as e:
+            print("Error handling POST request:", e)
+            response = {"error": f"Bad request: {e}", "cookies": request.cookies}
+    else:
+        response = {"error": f"unexpected method: {request.method}"}
+
+    log_data = f"Timestamp: {datetime.now()}\n"
+    log_data += f"Request Type: {request.method}\n"
+    log_data += "Headers:\n"
+    for header, value in headers.items():
+        log_data += f" {header}: {value}\n"
+
+    log_data += "Body:\n"
+    body = await request.body()
+    if isinstance(body, bytes):
+        body = body.decode("utf-8")
+    else:
+        body = str(body)  # Convert to string if it's not bytes
+    log_data += body + "\n"
+
+    # Write log to a file
+    with open(current_dir / "request_log.txt", "a") as log_file:
+        log_file.write(log_data + "\n" + str(response) + "-" * 50 + "\n")
+    return JSONResponse(response)
+
+
+async def handle_post(request):
+    csv_file_path = current_dir / "hr.csv"
+
+    # Read the body and check if it's JSON
+    try:
+        body = await request.json()
+
+        # Check if the JSON has the structure for metrics data
+        if "data" in body and "metrics" in body["data"]:
+            metrics_data = body["data"]["metrics"][0]["data"]
+
+            # Filter data to only keep "date" and "Avg" (renamed to "avg")
+            filtered_data = [
+                {"date": entry["date"], "avg": entry["Avg"]}
+                for entry in metrics_data
+                if "date" in entry and "Avg" in entry
+            ]
+
+            # Convert to DataFrame
+            new_df = pd.DataFrame(filtered_data)
+
+            # Read existing CSV file if it exists
+            if os.path.exists(csv_file_path):
+                existing_df = pd.read_csv(csv_file_path)
+                # Append new data and remove duplicates
+                combined_df = pd.concat([existing_df, new_df]).drop_duplicates(
+                    subset=["date"]
+                )
+            else:
+                combined_df = new_df
+
+            # Ensure the date column has a space before the timezone for consistent formatting
+            combined_df["date"] = combined_df["date"].str.replace(
+                r"(\d{2}:\d{2}:\d{2})(\+\d{4})", r"\1 \2", regex=True
+            )
+
+            # Convert the date column to datetime, allowing for mixed formats
+            combined_df["date"] = pd.to_datetime(combined_df["date"], format="mixed")
+
+            # Sort the combined DataFrame by the "date" column
+            combined_df = combined_df.sort_values(by="date")
+
+            # Save the combined DataFrame back to the CSV
+            combined_df.to_csv(csv_file_path, index=False)
+
+            # Add a message indicating that data was saved
+            response = {
+                "message": "Filtered, sorted, and appended data saved to hr.csv"
+            }
+
+    except (json.JSONDecodeError, ValueError) as e:
+        response = {"error": f"Error decoding or processing data: {e}"}
+
+    return response
diff --git a/packages/syft-extras/.archive/examples/fedhr/hr.csv b/packages/syft-extras/.archive/examples/fedhr/hr.csv
new file mode 100644
index 00000000000..987f4294035
--- /dev/null
+++ b/packages/syft-extras/.archive/examples/fedhr/hr.csv
@@ -0,0 +1,3890 @@
+date,avg +2024-10-29 00:03:00+10:00,63.0 +2024-10-29 00:03:00+10:00,63.0 +2024-10-29 00:09:00+10:00,60.0 +2024-10-29 00:09:00+10:00,60.0 +2024-10-29 00:11:00+10:00,64.0 +2024-10-29 00:11:00+10:00,64.0 +2024-10-29 00:17:00+10:00,55.58563232421875 +2024-10-29 00:17:00+10:00,55.58563232421875 +2024-10-29 00:19:00+10:00,54.0 +2024-10-29 00:19:00+10:00,54.0 +2024-10-29 00:21:00+10:00,54.0 +2024-10-29 00:21:00+10:00,54.0 +2024-10-29 00:27:00+10:00,53.0 +2024-10-29 00:27:00+10:00,53.0 +2024-10-29 00:33:00+10:00,53.0 +2024-10-29 00:33:00+10:00,53.0 +2024-10-29 00:35:00+10:00,53.0 +2024-10-29 00:35:00+10:00,53.0 +2024-10-29 00:38:00+10:00,52.0 +2024-10-29
00:38:00+10:00,52.0 +2024-10-29 00:44:00+10:00,53.0 +2024-10-29 00:44:00+10:00,53.0 +2024-10-29 00:45:00+10:00,52.0 +2024-10-29 00:45:00+10:00,52.0 +2024-10-29 00:51:00+10:00,49.0 +2024-10-29 00:51:00+10:00,49.0 +2024-10-29 00:57:00+10:00,49.0 +2024-10-29 00:57:00+10:00,49.0 +2024-10-29 01:02:00+10:00,53.0 +2024-10-29 01:02:00+10:00,53.0 +2024-10-29 01:03:00+10:00,49.0 +2024-10-29 01:03:00+10:00,49.0 +2024-10-29 01:08:00+10:00,52.0 +2024-10-29 01:08:00+10:00,52.0 +2024-10-29 01:12:00+10:00,49.0 +2024-10-29 01:12:00+10:00,49.0 +2024-10-29 01:19:00+10:00,48.0 +2024-10-29 01:19:00+10:00,48.0 +2024-10-29 01:23:00+10:00,50.0 +2024-10-29 01:23:00+10:00,50.0 +2024-10-29 01:26:00+10:00,50.0 +2024-10-29 01:26:00+10:00,50.0 +2024-10-29 01:32:00+10:00,50.0 +2024-10-29 01:32:00+10:00,50.0 +2024-10-29 01:33:00+10:00,52.0 +2024-10-29 01:33:00+10:00,52.0 +2024-10-29 01:35:00+10:00,51.888980865478516 +2024-10-29 01:35:00+10:00,51.88898086547851 +2024-10-29 01:38:00+10:00,52.0 +2024-10-29 01:38:00+10:00,52.0 +2024-10-29 01:42:00+10:00,52.0 +2024-10-29 01:42:00+10:00,52.0 +2024-10-29 01:50:00+10:00,53.0 +2024-10-29 01:50:00+10:00,53.0 +2024-10-29 01:54:00+10:00,54.0 +2024-10-29 01:54:00+10:00,54.0 +2024-10-29 01:56:00+10:00,55.0 +2024-10-29 01:56:00+10:00,55.0 +2024-10-29 02:03:00+10:00,63.0 +2024-10-29 02:03:00+10:00,63.0 +2024-10-29 02:04:00+10:00,64.0 +2024-10-29 02:04:00+10:00,64.0 +2024-10-29 02:08:00+10:00,63.0 +2024-10-29 02:08:00+10:00,63.0 +2024-10-29 02:10:00+10:00,63.0 +2024-10-29 02:10:00+10:00,63.0 +2024-10-29 02:16:00+10:00,62.00000000000001 +2024-10-29 02:16:00+10:00,62.00000000000001 +2024-10-29 02:20:00+10:00,62.00000000000001 +2024-10-29 02:20:00+10:00,62.00000000000001 +2024-10-29 02:25:00+10:00,57.0 +2024-10-29 02:25:00+10:00,57.0 +2024-10-29 02:30:00+10:00,56.0 +2024-10-29 02:30:00+10:00,56.0 +2024-10-29 02:33:00+10:00,57.0 +2024-10-29 02:33:00+10:00,57.0 +2024-10-29 02:36:00+10:00,55.0 +2024-10-29 02:36:00+10:00,55.0 +2024-10-29 02:42:00+10:00,53.0 +2024-10-29 02:42:00+10:00,53.0 +2024-10-29 02:47:00+10:00,50.0 +2024-10-29 02:47:00+10:00,50.0 +2024-10-29 02:55:00+10:00,52.0 +2024-10-29 02:55:00+10:00,52.0 +2024-10-29 02:58:00+10:00,52.0 +2024-10-29 02:58:00+10:00,52.0 +2024-10-29 03:00:00+10:00,53.0 +2024-10-29 03:00:00+10:00,53.0 +2024-10-29 03:03:00+10:00,52.0 +2024-10-29 03:03:00+10:00,52.0 +2024-10-29 03:05:00+10:00,52.0 +2024-10-29 03:05:00+10:00,52.0 +2024-10-29 03:15:00+10:00,53.0 +2024-10-29 03:15:00+10:00,53.0 +2024-10-29 03:18:00+10:00,53.0 +2024-10-29 03:18:00+10:00,53.0 +2024-10-29 03:21:00+10:00,53.0 +2024-10-29 03:21:00+10:00,53.0 +2024-10-29 03:27:00+10:00,55.0 +2024-10-29 03:27:00+10:00,55.0 +2024-10-29 03:31:00+10:00,55.0 +2024-10-29 03:31:00+10:00,55.0 +2024-10-29 03:33:00+10:00,51.0 +2024-10-29 03:33:00+10:00,51.0 +2024-10-29 03:35:00+10:00,54.01382827758789 +2024-10-29 03:35:00+10:00,54.01382827758789 +2024-10-29 03:39:00+10:00,53.0 +2024-10-29 03:39:00+10:00,53.0 +2024-10-29 03:42:00+10:00,61.0 +2024-10-29 03:42:00+10:00,61.0 +2024-10-29 03:48:00+10:00,63.0 +2024-10-29 03:48:00+10:00,63.0 +2024-10-29 03:52:00+10:00,63.0 +2024-10-29 03:52:00+10:00,63.0 +2024-10-29 03:59:00+10:00,63.0 +2024-10-29 03:59:00+10:00,63.0 +2024-10-29 04:03:00+10:00,60.0 +2024-10-29 04:03:00+10:00,60.0 +2024-10-29 04:04:00+10:00,58.0 +2024-10-29 04:04:00+10:00,58.0 +2024-10-29 04:06:00+10:00,66.0 +2024-10-29 04:06:00+10:00,66.0 +2024-10-29 04:11:00+10:00,60.0 +2024-10-29 04:11:00+10:00,60.0 +2024-10-29 04:16:00+10:00,58.0 +2024-10-29 04:16:00+10:00,58.0 +2024-10-29 04:20:00+10:00,52.0 
+2024-10-29 04:20:00+10:00,52.0 +2024-10-29 04:30:00+10:00,52.0 +2024-10-29 04:30:00+10:00,52.0 +2024-10-29 04:31:00+10:00,52.0 +2024-10-29 04:31:00+10:00,52.0 +2024-10-29 04:39:00+10:00,53.0 +2024-10-29 04:39:00+10:00,53.0 +2024-10-29 04:42:00+10:00,53.0 +2024-10-29 04:42:00+10:00,53.0 +2024-10-29 04:46:00+10:00,52.0 +2024-10-29 04:46:00+10:00,52.0 +2024-10-29 04:54:00+10:00,52.0 +2024-10-29 04:54:00+10:00,52.0 +2024-10-29 04:56:00+10:00,51.0 +2024-10-29 04:56:00+10:00,51.0 +2024-10-29 05:00:00+10:00,49.0 +2024-10-29 05:00:00+10:00,49.0 +2024-10-29 05:06:00+10:00,52.0 +2024-10-29 05:06:00+10:00,52.0 +2024-10-29 05:08:00+10:00,53.0 +2024-10-29 05:08:00+10:00,53.0 +2024-10-29 05:10:00+10:00,69.0 +2024-10-29 05:10:00+10:00,69.0 +2024-10-29 05:19:00+10:00,65.0 +2024-10-29 05:19:00+10:00,65.0 +2024-10-29 05:20:00+10:00,60.0 +2024-10-29 05:20:00+10:00,60.0 +2024-10-29 05:27:00+10:00,58.0 +2024-10-29 05:27:00+10:00,58.0 +2024-10-29 05:33:00+10:00,61.0 +2024-10-29 05:33:00+10:00,61.0 +2024-10-29 05:35:00+10:00,57.186943054199226 +2024-10-29 05:35:00+10:00,57.186943054199226 +2024-10-29 05:36:00+10:00,54.0 +2024-10-29 05:36:00+10:00,54.0 +2024-10-29 05:37:00+10:00,56.0 +2024-10-29 05:37:00+10:00,56.0 +2024-10-29 05:41:00+10:00,55.0 +2024-10-29 05:41:00+10:00,55.0 +2024-10-29 05:48:00+10:00,55.0 +2024-10-29 05:48:00+10:00,55.0 +2024-10-29 05:53:00+10:00,53.0 +2024-10-29 05:53:00+10:00,53.0 +2024-10-29 05:58:00+10:00,52.0 +2024-10-29 05:58:00+10:00,52.0 +2024-10-29 06:00:00+10:00,53.0 +2024-10-29 06:00:00+10:00,53.0 +2024-10-29 06:06:00+10:00,72.0 +2024-10-29 06:06:00+10:00,72.0 +2024-10-29 06:09:00+10:00,52.0 +2024-10-29 06:09:00+10:00,52.0 +2024-10-29 06:14:00+10:00,52.0 +2024-10-29 06:14:00+10:00,52.0 +2024-10-29 06:15:00+10:00,52.0 +2024-10-29 06:15:00+10:00,52.0 +2024-10-29 06:24:00+10:00,52.0 +2024-10-29 06:24:00+10:00,52.0 +2024-10-29 06:25:00+10:00,52.0 +2024-10-29 06:25:00+10:00,52.0 +2024-10-29 06:30:00+10:00,52.0 +2024-10-29 06:30:00+10:00,52.0 +2024-10-29 06:36:00+10:00,52.0 +2024-10-29 06:36:00+10:00,52.0 +2024-10-29 06:37:00+10:00,53.0 +2024-10-29 06:37:00+10:00,53.0 +2024-10-29 06:40:00+10:00,53.0 +2024-10-29 06:40:00+10:00,53.0 +2024-10-29 07:29:00+10:00,56.0 +2024-10-29 07:29:00+10:00,56.0 +2024-10-29 07:31:00+10:00,57.0 +2024-10-29 07:31:00+10:00,57.0 +2024-10-29 07:38:00+10:00,58.5 +2024-10-29 07:38:00+10:00,58.5 +2024-10-29 07:44:00+10:00,56.0 +2024-10-29 07:44:00+10:00,56.0 +2024-10-29 07:48:00+10:00,57.0 +2024-10-29 07:48:00+10:00,57.0 +2024-10-29 07:51:00+10:00,89.0 +2024-10-29 07:51:00+10:00,89.0 +2024-10-29 08:05:00+10:00,92.00000000000001 +2024-10-29 08:05:00+10:00,92.0 +2024-10-29 08:12:00+10:00,94.0 +2024-10-29 08:12:00+10:00,94.0 +2024-10-29 08:20:00+10:00,86.0 +2024-10-29 08:20:00+10:00,86.0 +2024-10-29 08:27:00+10:00,84.0 +2024-10-29 08:27:00+10:00,84.0 +2024-10-29 08:35:00+10:00,65.0 +2024-10-29 08:35:00+10:00,65.0 +2024-10-29 08:37:00+10:00,76.0 +2024-10-29 08:37:00+10:00,76.0 +2024-10-29 08:43:00+10:00,70.0 +2024-10-29 08:43:00+10:00,70.0 +2024-10-29 08:54:00+10:00,74.0 +2024-10-29 08:54:00+10:00,74.0 +2024-10-29 08:57:00+10:00,65.0 +2024-10-29 08:57:00+10:00,65.0 +2024-10-29 09:00:00+10:00,72.0 +2024-10-29 09:00:00+10:00,72.0 +2024-10-29 09:07:00+10:00,63.0 +2024-10-29 09:07:00+10:00,63.0 +2024-10-29 09:11:00+10:00,74.0 +2024-10-29 09:11:00+10:00,74.0 +2024-10-29 09:16:00+10:00,69.0 +2024-10-29 09:16:00+10:00,69.0 +2024-10-29 09:21:00+10:00,66.0 +2024-10-29 09:21:00+10:00,66.0 +2024-10-29 09:25:00+10:00,58.0 +2024-10-29 09:25:00+10:00,58.0 +2024-10-29 
09:30:00+10:00,59.0 +2024-10-29 09:30:00+10:00,59.0 +2024-10-29 09:39:00+10:00,65.0 +2024-10-29 09:39:00+10:00,65.0 +2024-10-29 09:40:00+10:00,59.9101036381781 +2024-10-29 09:40:00+10:00,59.9101036381781 +2024-10-29 09:49:00+10:00,65.0 +2024-10-29 09:49:00+10:00,65.0 +2024-10-29 09:54:00+10:00,59.0 +2024-10-29 09:54:00+10:00,59.0 +2024-10-29 09:57:00+10:00,59.0 +2024-10-29 09:57:00+10:00,59.0 +2024-10-29 10:01:00+10:00,59.0 +2024-10-29 10:01:00+10:00,59.0 +2024-10-29 10:09:00+10:00,71.0 +2024-10-29 10:09:00+10:00,71.0 +2024-10-29 10:11:00+10:00,67.0 +2024-10-29 10:11:00+10:00,67.0 +2024-10-29 10:14:00+10:00,102.0 +2024-10-29 10:14:00+10:00,102.0 +2024-10-29 10:15:00+10:00,93.6091953994088 +2024-10-29 10:15:00+10:00,93.60919539940879 +2024-10-29 10:16:00+10:00,96.0 +2024-10-29 10:16:00+10:00,96.0 +2024-10-29 10:21:00+10:00,77.34426229508198 +2024-10-29 10:21:00+10:00,77.34426229508198 +2024-10-29 10:22:00+10:00,73.62676056338029 +2024-10-29 10:22:00+10:00,73.62676056338029 +2024-10-29 10:24:00+10:00,74.0 +2024-10-29 10:24:00+10:00,74.0 +2024-10-29 10:26:00+10:00,67.0 +2024-10-29 10:26:00+10:00,67.0 +2024-10-29 10:28:00+10:00,107.0 +2024-10-29 10:28:00+10:00,107.0 +2024-10-29 10:29:00+10:00,84.78089887640449 +2024-10-29 10:29:00+10:00,84.78089887640449 +2024-10-29 10:30:00+10:00,87.8622828784119 +2024-10-29 10:30:00+10:00,87.8622828784119 +2024-10-29 10:31:00+10:00,99.4659090909091 +2024-10-29 10:31:00+10:00,99.4659090909091 +2024-10-29 10:32:00+10:00,79.2 +2024-10-29 10:32:00+10:00,79.2 +2024-10-29 10:33:00+10:00,80.21428571428572 +2024-10-29 10:33:00+10:00,80.21428571428572 +2024-10-29 10:39:00+10:00,66.0 +2024-10-29 10:39:00+10:00,66.0 +2024-10-29 10:40:00+10:00,65.0 +2024-10-29 10:40:00+10:00,65.0 +2024-10-29 10:48:00+10:00,68.0 +2024-10-29 10:48:00+10:00,68.0 +2024-10-29 10:59:00+10:00,59.0 +2024-10-29 10:59:00+10:00,59.0 +2024-10-29 11:00:00+10:00,66.89412689208984 +2024-10-29 11:00:00+10:00,66.89412689208984 +2024-10-29 11:04:00+10:00,60.0 +2024-10-29 11:04:00+10:00,60.0 +2024-10-29 11:10:00+10:00,56.0 +2024-10-29 11:10:00+10:00,56.0 +2024-10-29 11:13:00+10:00,59.0 +2024-10-29 11:13:00+10:00,59.0 +2024-10-29 11:15:00+10:00,60.0 +2024-10-29 11:15:00+10:00,60.0 +2024-10-29 11:24:00+10:00,62.00000000000001 +2024-10-29 11:24:00+10:00,62.00000000000001 +2024-10-29 11:29:00+10:00,63.0 +2024-10-29 11:29:00+10:00,63.0 +2024-10-29 11:33:00+10:00,58.0 +2024-10-29 11:33:00+10:00,58.0 +2024-10-29 11:39:00+10:00,58.0 +2024-10-29 11:39:00+10:00,58.0 +2024-10-29 11:42:00+10:00,57.0 +2024-10-29 11:42:00+10:00,57.0 +2024-10-29 11:45:00+10:00,56.0 +2024-10-29 11:45:00+10:00,56.0 +2024-10-29 11:51:00+10:00,56.0 +2024-10-29 11:51:00+10:00,56.0 +2024-10-29 11:57:00+10:00,77.00000000000001 +2024-10-29 11:57:00+10:00,77.00000000000001 +2024-10-29 12:03:00+10:00,64.0 +2024-10-29 12:03:00+10:00,64.0 +2024-10-29 12:07:00+10:00,80.0 +2024-10-29 12:07:00+10:00,80.0 +2024-10-29 12:16:00+10:00,59.0 +2024-10-29 12:16:00+10:00,59.0 +2024-10-29 12:23:00+10:00,63.0 +2024-10-29 12:23:00+10:00,63.0 +2024-10-29 12:30:00+10:00,65.0 +2024-10-29 12:30:00+10:00,65.0 +2024-10-29 12:33:00+10:00,63.0 +2024-10-29 12:33:00+10:00,63.0 +2024-10-29 12:40:00+10:00,65.0 +2024-10-29 12:40:00+10:00,65.0 +2024-10-29 12:42:00+10:00,66.0 +2024-10-29 12:42:00+10:00,66.0 +2024-10-29 12:47:00+10:00,64.0 +2024-10-29 12:47:00+10:00,64.0 +2024-10-29 12:50:00+10:00,68.0 +2024-10-29 12:50:00+10:00,68.0 +2024-10-29 12:59:00+10:00,63.0 +2024-10-29 12:59:00+10:00,63.0 +2024-10-29 13:00:00+10:00,65.0 +2024-10-29 13:00:00+10:00,65.0 +2024-10-29 
13:05:00+10:00,66.0 +2024-10-29 13:05:00+10:00,66.0 +2024-10-29 13:14:00+10:00,60.0 +2024-10-29 13:14:00+10:00,60.0 +2024-10-29 13:19:00+10:00,60.0 +2024-10-29 13:19:00+10:00,60.0 +2024-10-29 13:20:00+10:00,61.0 +2024-10-29 13:20:00+10:00,61.0 +2024-10-29 13:25:00+10:00,59.0 +2024-10-29 13:25:00+10:00,59.0 +2024-10-29 13:32:00+10:00,61.0 +2024-10-29 13:32:00+10:00,61.0 +2024-10-29 13:37:00+10:00,64.0 +2024-10-29 13:37:00+10:00,64.0 +2024-10-29 13:44:00+10:00,59.0 +2024-10-29 13:44:00+10:00,59.0 +2024-10-29 13:47:00+10:00,59.0 +2024-10-29 13:47:00+10:00,59.0 +2024-10-29 13:52:00+10:00,81.0 +2024-10-29 13:52:00+10:00,81.0 +2024-10-29 13:57:00+10:00,78.0 +2024-10-29 13:57:00+10:00,78.0 +2024-10-29 14:00:00+10:00,90.0 +2024-10-29 14:00:00+10:00,90.0 +2024-10-29 14:09:00+10:00,84.0 +2024-10-29 14:09:00+10:00,84.0 +2024-10-29 14:12:00+10:00,87.0 +2024-10-29 14:12:00+10:00,87.0 +2024-10-29 14:15:00+10:00,88.0 +2024-10-29 14:15:00+10:00,88.0 +2024-10-29 14:23:00+10:00,63.0 +2024-10-29 14:23:00+10:00,63.0 +2024-10-29 14:27:00+10:00,67.0 +2024-10-29 14:27:00+10:00,67.0 +2024-10-29 14:31:00+10:00,65.0 +2024-10-29 14:31:00+10:00,65.0 +2024-10-29 14:39:00+10:00,64.0 +2024-10-29 14:39:00+10:00,64.0 +2024-10-29 14:41:00+10:00,64.0 +2024-10-29 14:41:00+10:00,64.0 +2024-10-29 14:46:00+10:00,66.23700714111328 +2024-10-29 14:46:00+10:00,66.23700714111328 +2024-10-29 14:47:00+10:00,66.0 +2024-10-29 14:47:00+10:00,66.0 +2024-10-29 14:51:00+10:00,74.0 +2024-10-29 14:51:00+10:00,74.0 +2024-10-29 14:59:00+10:00,64.0 +2024-10-29 14:59:00+10:00,64.0 +2024-10-29 15:00:00+10:00,73.0 +2024-10-29 15:00:00+10:00,73.0 +2024-10-29 15:06:00+10:00,66.0 +2024-10-29 15:06:00+10:00,66.0 +2024-10-29 15:12:00+10:00,65.0 +2024-10-29 15:12:00+10:00,65.0 +2024-10-29 15:20:00+10:00,60.0 +2024-10-29 15:20:00+10:00,60.0 +2024-10-29 15:23:00+10:00,71.0 +2024-10-29 15:23:00+10:00,71.0 +2024-10-29 15:30:00+10:00,64.0 +2024-10-29 15:30:00+10:00,64.0 +2024-10-29 15:35:00+10:00,63.0 +2024-10-29 15:35:00+10:00,63.0 +2024-10-29 15:39:00+10:00,61.777677536010735 +2024-10-29 15:39:00+10:00,61.777677536010735 +2024-10-29 15:43:00+10:00,60.0 +2024-10-29 15:43:00+10:00,60.0 +2024-10-29 15:47:00+10:00,63.0 +2024-10-29 15:47:00+10:00,63.0 +2024-10-29 15:48:00+10:00,65.0 +2024-10-29 15:48:00+10:00,65.0 +2024-10-29 15:53:00+10:00,62.00000000000001 +2024-10-29 15:53:00+10:00,62.00000000000001 +2024-10-29 15:57:00+10:00,64.0 +2024-10-29 15:57:00+10:00,64.0 +2024-10-29 16:04:00+10:00,60.0 +2024-10-29 16:04:00+10:00,60.0 +2024-10-29 16:07:00+10:00,65.0 +2024-10-29 16:07:00+10:00,65.0 +2024-10-29 16:11:00+10:00,66.0 +2024-10-29 16:11:00+10:00,66.0 +2024-10-29 16:17:00+10:00,66.0 +2024-10-29 16:17:00+10:00,66.0 +2024-10-29 16:21:00+10:00,59.0 +2024-10-29 16:21:00+10:00,59.0 +2024-10-29 16:27:00+10:00,92.375 +2024-10-29 16:27:00+10:00,92.375 +2024-10-29 16:34:00+10:00,79.0 +2024-10-29 16:34:00+10:00,79.0 +2024-10-29 16:39:00+10:00,66.0 +2024-10-29 16:39:00+10:00,66.0 +2024-10-29 16:45:00+10:00,99.44705882563308 +2024-10-29 16:45:00+10:00,99.44705882563309 +2024-10-29 16:51:00+10:00,94.88194444444444 +2024-10-29 16:51:00+10:00,94.88194444444444 +2024-10-29 16:52:00+10:00,91.0 +2024-10-29 16:52:00+10:00,91.0 +2024-10-29 17:00:00+10:00,102.0 +2024-10-29 17:00:00+10:00,102.0 +2024-10-29 17:02:00+10:00,95.0 +2024-10-29 17:02:00+10:00,95.0 +2024-10-29 17:34:00+10:00,79.0 +2024-10-29 17:34:00+10:00,79.0 +2024-10-29 17:39:00+10:00,71.0 +2024-10-29 17:39:00+10:00,71.0 +2024-10-29 17:40:00+10:00,70.0 +2024-10-29 17:40:00+10:00,70.0 +2024-10-29 17:47:00+10:00,66.0 
+2024-10-29 17:47:00+10:00,66.0 +2024-10-29 17:50:00+10:00,75.76883697509766 +2024-10-29 17:50:00+10:00,75.76883697509766 +2024-10-29 17:52:00+10:00,67.0 +2024-10-29 17:52:00+10:00,67.0 +2024-10-29 17:59:00+10:00,72.0 +2024-10-29 17:59:00+10:00,72.0 +2024-10-29 18:02:00+10:00,64.0 +2024-10-29 18:02:00+10:00,64.0 +2024-10-29 18:07:00+10:00,67.0 +2024-10-29 18:07:00+10:00,67.0 +2024-10-29 18:10:00+10:00,72.0 +2024-10-29 18:10:00+10:00,72.0 +2024-10-29 18:18:00+10:00,72.0 +2024-10-29 18:18:00+10:00,72.0 +2024-10-29 18:24:00+10:00,74.0 +2024-10-29 18:24:00+10:00,74.0 +2024-10-29 18:26:00+10:00,72.0 +2024-10-29 18:26:00+10:00,72.0 +2024-10-29 18:33:00+10:00,71.0 +2024-10-29 18:33:00+10:00,71.0 +2024-10-29 18:37:00+10:00,76.0 +2024-10-29 18:37:00+10:00,76.0 +2024-10-29 18:41:00+10:00,83.0 +2024-10-29 18:41:00+10:00,83.0 +2024-10-29 18:47:00+10:00,89.0 +2024-10-29 18:47:00+10:00,89.0 +2024-10-29 18:52:00+10:00,76.0 +2024-10-29 18:52:00+10:00,76.0 +2024-10-29 18:55:00+10:00,89.0 +2024-10-29 18:55:00+10:00,89.0 +2024-10-29 19:00:00+10:00,94.0 +2024-10-29 19:00:00+10:00,94.0 +2024-10-29 19:07:00+10:00,90.0 +2024-10-29 19:07:00+10:00,90.0 +2024-10-29 19:19:00+10:00,77.00000000000001 +2024-10-29 19:19:00+10:00,77.00000000000001 +2024-10-29 19:21:00+10:00,75.0 +2024-10-29 19:21:00+10:00,75.0 +2024-10-29 19:26:00+10:00,80.0 +2024-10-29 19:26:00+10:00,80.0 +2024-10-29 19:30:00+10:00,74.0 +2024-10-29 19:30:00+10:00,74.0 +2024-10-29 19:32:00+10:00,73.0 +2024-10-29 19:32:00+10:00,73.0 +2024-10-29 19:35:00+10:00,67.0 +2024-10-29 19:35:00+10:00,67.0 +2024-10-29 19:38:00+10:00,72.64804077148439 +2024-10-29 19:38:00+10:00,72.64804077148439 +2024-10-29 19:40:00+10:00,70.0 +2024-10-29 19:40:00+10:00,70.0 +2024-10-29 19:49:00+10:00,63.0 +2024-10-29 19:49:00+10:00,63.0 +2024-10-29 19:50:00+10:00,63.0 +2024-10-29 19:50:00+10:00,63.0 +2024-10-29 19:55:00+10:00,67.0 +2024-10-29 19:55:00+10:00,67.0 +2024-10-29 20:03:00+10:00,60.0 +2024-10-29 20:03:00+10:00,60.0 +2024-10-29 20:05:00+10:00,65.0 +2024-10-29 20:05:00+10:00,65.0 +2024-10-29 20:13:00+10:00,64.0 +2024-10-29 20:13:00+10:00,64.0 +2024-10-29 20:19:00+10:00,59.0 +2024-10-29 20:19:00+10:00,59.0 +2024-10-29 20:22:00+10:00,58.0 +2024-10-29 20:22:00+10:00,58.0 +2024-10-29 20:25:00+10:00,60.0 +2024-10-29 20:25:00+10:00,60.0 +2024-10-29 20:30:00+10:00,61.0 +2024-10-29 20:30:00+10:00,61.0 +2024-10-29 20:37:00+10:00,59.0 +2024-10-29 20:37:00+10:00,59.0 +2024-10-29 20:40:00+10:00,53.0 +2024-10-29 20:40:00+10:00,53.0 +2024-10-29 20:45:00+10:00,53.0 +2024-10-29 20:45:00+10:00,53.0 +2024-10-29 20:52:00+10:00,75.0 +2024-10-29 20:52:00+10:00,75.0 +2024-10-29 21:47:00+10:00,60.042831420898445 +2024-10-29 21:47:00+10:00,60.042831420898445 +2024-10-29 21:48:00+10:00,57.0 +2024-10-29 21:48:00+10:00,57.0 +2024-10-29 21:50:00+10:00,59.0 +2024-10-29 21:50:00+10:00,59.0 +2024-10-29 21:51:00+10:00,59.0 +2024-10-29 21:51:00+10:00,59.0 +2024-10-29 21:58:00+10:00,60.0 +2024-10-29 21:58:00+10:00,60.0 +2024-10-29 22:04:00+10:00,59.0 +2024-10-29 22:04:00+10:00,59.0 +2024-10-29 22:06:00+10:00,59.0 +2024-10-29 22:06:00+10:00,59.0 +2024-10-29 22:12:00+10:00,59.0 +2024-10-29 22:12:00+10:00,59.0 +2024-10-29 22:16:00+10:00,63.0 +2024-10-29 22:16:00+10:00,63.0 +2024-10-29 22:21:00+10:00,63.0 +2024-10-29 22:21:00+10:00,63.0 +2024-10-29 22:26:00+10:00,61.99999999999999 +2024-10-29 22:26:00+10:00,61.99999999999999 +2024-10-29 22:28:00+10:00,60.0 +2024-10-29 22:28:00+10:00,60.0 +2024-10-29 22:30:00+10:00,60.0 +2024-10-29 22:30:00+10:00,60.0 +2024-10-29 22:35:00+10:00,60.0 +2024-10-29 
22:35:00+10:00,60.0 +2024-10-29 22:43:00+10:00,62.00000000000001 +2024-10-29 22:43:00+10:00,62.00000000000001 +2024-10-29 22:49:00+10:00,63.0 +2024-10-29 22:49:00+10:00,63.0 +2024-10-29 22:51:00+10:00,66.0 +2024-10-29 22:51:00+10:00,66.0 +2024-10-29 22:55:00+10:00,67.0 +2024-10-29 22:55:00+10:00,67.0 +2024-10-29 22:56:00+10:00,71.0 +2024-10-29 22:56:00+10:00,71.0 +2024-10-29 23:00:00+10:00,68.0 +2024-10-29 23:00:00+10:00,68.0 +2024-10-29 23:05:00+10:00,72.0 +2024-10-29 23:05:00+10:00,72.0 +2024-10-29 23:10:00+10:00,79.0 +2024-10-29 23:10:00+10:00,79.0 +2024-10-29 23:18:00+10:00,72.0 +2024-10-29 23:18:00+10:00,72.0 +2024-10-29 23:22:00+10:00,79.0 +2024-10-29 23:22:00+10:00,79.0 +2024-10-29 23:25:00+10:00,76.0 +2024-10-29 23:25:00+10:00,76.0 +2024-10-29 23:27:00+10:00,69.0 +2024-10-29 23:27:00+10:00,69.0 +2024-10-29 23:31:00+10:00,68.0 +2024-10-29 23:31:00+10:00,68.0 +2024-10-29 23:35:00+10:00,66.21392059326172 +2024-10-29 23:35:00+10:00,66.21392059326172 +2024-10-29 23:37:00+10:00,64.0 +2024-10-29 23:37:00+10:00,64.0 +2024-10-29 23:41:00+10:00,63.0 +2024-10-29 23:41:00+10:00,63.0 +2024-10-29 23:45:00+10:00,60.0 +2024-10-29 23:45:00+10:00,60.0 +2024-10-29 23:50:00+10:00,56.0 +2024-10-29 23:50:00+10:00,56.0 +2024-10-29 23:57:00+10:00,57.0 +2024-10-29 23:57:00+10:00,57.0 +2024-10-30 00:02:00+10:00,56.0 +2024-10-30 00:02:00+10:00,56.0 +2024-10-30 00:05:00+10:00,57.0 +2024-10-30 00:05:00+10:00,57.0 +2024-10-30 00:13:00+10:00,55.0 +2024-10-30 00:13:00+10:00,55.0 +2024-10-30 00:15:00+10:00,54.0 +2024-10-30 00:15:00+10:00,54.0 +2024-10-30 00:20:00+10:00,58.0 +2024-10-30 00:20:00+10:00,58.0 +2024-10-30 00:27:00+10:00,57.0 +2024-10-30 00:27:00+10:00,57.0 +2024-10-30 00:30:00+10:00,58.0 +2024-10-30 00:30:00+10:00,58.0 +2024-10-30 00:35:00+10:00,56.0 +2024-10-30 00:35:00+10:00,56.0 +2024-10-30 00:41:00+10:00,57.0 +2024-10-30 00:41:00+10:00,57.0 +2024-10-30 00:45:00+10:00,58.0 +2024-10-30 00:45:00+10:00,58.0 +2024-10-30 00:50:00+10:00,57.0 +2024-10-30 00:50:00+10:00,57.0 +2024-10-30 00:55:00+10:00,56.0 +2024-10-30 00:55:00+10:00,56.0 +2024-10-30 00:57:00+10:00,57.0 +2024-10-30 00:57:00+10:00,57.0 +2024-10-30 01:00:00+10:00,56.0 +2024-10-30 01:00:00+10:00,56.0 +2024-10-30 01:06:00+10:00,61.0 +2024-10-30 01:06:00+10:00,61.0 +2024-10-30 01:11:00+10:00,67.0 +2024-10-30 01:11:00+10:00,67.0 +2024-10-30 01:18:00+10:00,60.0 +2024-10-30 01:18:00+10:00,60.0 +2024-10-30 01:20:00+10:00,59.0 +2024-10-30 01:20:00+10:00,59.0 +2024-10-30 01:26:00+10:00,62.00000000000001 +2024-10-30 01:26:00+10:00,62.00000000000001 +2024-10-30 01:27:00+10:00,65.0 +2024-10-30 01:27:00+10:00,65.0 +2024-10-30 01:30:00+10:00,63.0 +2024-10-30 01:30:00+10:00,63.0 +2024-10-30 01:35:00+10:00,61.77190399169922 +2024-10-30 01:35:00+10:00,61.77190399169922 +2024-10-30 01:39:00+10:00,58.0 +2024-10-30 01:39:00+10:00,58.0 +2024-10-30 01:43:00+10:00,54.0 +2024-10-30 01:43:00+10:00,54.0 +2024-10-30 01:46:00+10:00,52.0 +2024-10-30 01:46:00+10:00,52.0 +2024-10-30 01:54:00+10:00,52.0 +2024-10-30 01:54:00+10:00,52.0 +2024-10-30 01:56:00+10:00,49.0 +2024-10-30 01:56:00+10:00,49.0 +2024-10-30 01:57:00+10:00,51.0 +2024-10-30 01:57:00+10:00,51.0 +2024-10-30 02:00:00+10:00,49.0 +2024-10-30 02:00:00+10:00,49.0 +2024-10-30 02:08:00+10:00,52.0 +2024-10-30 02:08:00+10:00,52.0 +2024-10-30 02:13:00+10:00,52.0 +2024-10-30 02:13:00+10:00,52.0 +2024-10-30 02:15:00+10:00,52.0 +2024-10-30 02:15:00+10:00,52.0 +2024-10-30 02:22:00+10:00,53.0 +2024-10-30 02:22:00+10:00,53.0 +2024-10-30 02:28:00+10:00,49.0 +2024-10-30 02:28:00+10:00,49.0 +2024-10-30 02:30:00+10:00,49.0 
+2024-10-30 02:30:00+10:00,49.0 +2024-10-30 02:32:00+10:00,48.0 +2024-10-30 02:32:00+10:00,48.0 +2024-10-30 02:36:00+10:00,48.0 +2024-10-30 02:36:00+10:00,48.0 +2024-10-30 02:40:00+10:00,47.0 +2024-10-30 02:40:00+10:00,47.0 +2024-10-30 02:46:00+10:00,49.0 +2024-10-30 02:46:00+10:00,49.0 +2024-10-30 02:52:00+10:00,52.0 +2024-10-30 02:52:00+10:00,52.0 +2024-10-30 02:59:00+10:00,50.0 +2024-10-30 02:59:00+10:00,50.0 +2024-10-30 03:00:00+10:00,50.0 +2024-10-30 03:00:00+10:00,50.0 +2024-10-30 03:08:00+10:00,52.0 +2024-10-30 03:08:00+10:00,52.0 +2024-10-30 03:10:00+10:00,50.0 +2024-10-30 03:10:00+10:00,50.0 +2024-10-30 03:15:00+10:00,50.0 +2024-10-30 03:15:00+10:00,50.0 +2024-10-30 03:20:00+10:00,49.0 +2024-10-30 03:20:00+10:00,49.0 +2024-10-30 03:25:00+10:00,48.0 +2024-10-30 03:25:00+10:00,48.0 +2024-10-30 03:31:00+10:00,49.0 +2024-10-30 03:31:00+10:00,49.0 +2024-10-30 03:35:00+10:00,54.70035362243652 +2024-10-30 03:35:00+10:00,54.70035362243652 +2024-10-30 03:44:00+10:00,60.0 +2024-10-30 03:44:00+10:00,60.0 +2024-10-30 03:47:00+10:00,53.0 +2024-10-30 03:47:00+10:00,53.0 +2024-10-30 03:50:00+10:00,54.0 +2024-10-30 03:50:00+10:00,54.0 +2024-10-30 03:55:00+10:00,55.0 +2024-10-30 03:55:00+10:00,55.0 +2024-10-30 04:01:00+10:00,57.0 +2024-10-30 04:01:00+10:00,57.0 +2024-10-30 04:09:00+10:00,57.0 +2024-10-30 04:09:00+10:00,57.0 +2024-10-30 04:10:00+10:00,60.0 +2024-10-30 04:10:00+10:00,60.0 +2024-10-30 04:16:00+10:00,52.0 +2024-10-30 04:16:00+10:00,52.0 +2024-10-30 04:22:00+10:00,52.0 +2024-10-30 04:22:00+10:00,52.0 +2024-10-30 04:28:00+10:00,54.0 +2024-10-30 04:28:00+10:00,54.0 +2024-10-30 04:30:00+10:00,55.0 +2024-10-30 04:30:00+10:00,55.0 +2024-10-30 04:38:00+10:00,54.0 +2024-10-30 04:38:00+10:00,54.0 +2024-10-30 04:41:00+10:00,53.0 +2024-10-30 04:41:00+10:00,53.0 +2024-10-30 04:44:00+10:00,53.0 +2024-10-30 04:44:00+10:00,53.0 +2024-10-30 04:45:00+10:00,55.0 +2024-10-30 04:45:00+10:00,55.0 +2024-10-30 04:53:00+10:00,52.0 +2024-10-30 04:53:00+10:00,52.0 +2024-10-30 04:56:00+10:00,52.0 +2024-10-30 04:56:00+10:00,52.0 +2024-10-30 05:01:00+10:00,54.0 +2024-10-30 05:01:00+10:00,54.0 +2024-10-30 05:06:00+10:00,55.0 +2024-10-30 05:06:00+10:00,55.0 +2024-10-30 05:11:00+10:00,55.0 +2024-10-30 05:11:00+10:00,55.0 +2024-10-30 05:14:00+10:00,55.0 +2024-10-30 05:14:00+10:00,55.0 +2024-10-30 05:19:00+10:00,46.00000000000001 +2024-10-30 05:19:00+10:00,46.00000000000001 +2024-10-30 05:21:00+10:00,53.0 +2024-10-30 05:21:00+10:00,53.0 +2024-10-30 05:30:00+10:00,55.0 +2024-10-30 05:30:00+10:00,55.0 +2024-10-30 05:31:00+10:00,55.0 +2024-10-30 05:31:00+10:00,55.0 +2024-10-30 05:35:00+10:00,57.648590087890625 +2024-10-30 05:35:00+10:00,57.648590087890625 +2024-10-30 05:36:00+10:00,56.0 +2024-10-30 05:36:00+10:00,56.0 +2024-10-30 05:41:00+10:00,55.00000000000001 +2024-10-30 05:41:00+10:00,55.00000000000001 +2024-10-30 05:48:00+10:00,56.0 +2024-10-30 05:48:00+10:00,56.0 +2024-10-30 05:54:00+10:00,53.0 +2024-10-30 05:54:00+10:00,53.0 +2024-10-30 05:57:00+10:00,55.0 +2024-10-30 05:57:00+10:00,55.0 +2024-10-30 06:04:00+10:00,53.0 +2024-10-30 06:04:00+10:00,53.0 +2024-10-30 06:08:00+10:00,52.0 +2024-10-30 06:08:00+10:00,52.0 +2024-10-30 06:11:00+10:00,51.0 +2024-10-30 06:11:00+10:00,51.0 +2024-10-30 06:13:00+10:00,55.0 +2024-10-30 06:13:00+10:00,55.0 +2024-10-30 06:15:00+10:00,54.0 +2024-10-30 06:15:00+10:00,54.0 +2024-10-30 06:20:00+10:00,49.0 +2024-10-30 06:20:00+10:00,49.0 +2024-10-30 06:28:00+10:00,59.0 +2024-10-30 06:28:00+10:00,59.0 +2024-10-30 06:31:00+10:00,58.0 +2024-10-30 06:31:00+10:00,58.0 +2024-10-30 
06:36:00+10:00,54.0 +2024-10-30 06:36:00+10:00,54.0 +2024-10-30 06:41:00+10:00,53.0 +2024-10-30 06:41:00+10:00,53.0 +2024-10-30 06:49:00+10:00,47.0 +2024-10-30 06:49:00+10:00,47.0 +2024-10-30 06:52:00+10:00,50.0 +2024-10-30 06:52:00+10:00,50.0 +2024-10-30 06:59:00+10:00,52.0 +2024-10-30 06:59:00+10:00,52.0 +2024-10-30 07:04:00+10:00,68.0 +2024-10-30 07:04:00+10:00,68.0 +2024-10-30 07:08:00+10:00,57.0 +2024-10-30 07:08:00+10:00,57.0 +2024-10-30 07:11:00+10:00,52.0 +2024-10-30 07:11:00+10:00,52.0 +2024-10-30 07:16:00+10:00,56.0 +2024-10-30 07:16:00+10:00,56.0 +2024-10-30 07:20:00+10:00,49.0 +2024-10-30 07:20:00+10:00,49.0 +2024-10-30 07:22:00+10:00,48.0 +2024-10-30 07:22:00+10:00,48.0 +2024-10-30 07:25:00+10:00,49.0 +2024-10-30 07:25:00+10:00,49.0 +2024-10-30 07:35:00+10:00,82.0 +2024-10-30 07:35:00+10:00,82.0 +2024-10-30 07:45:00+10:00,75.0 +2024-10-30 07:45:00+10:00,75.0 +2024-10-30 07:49:00+10:00,82.5 +2024-10-30 07:49:00+10:00,82.5 +2024-10-30 07:54:00+10:00,74.0 +2024-10-30 07:54:00+10:00,74.0 +2024-10-30 07:58:00+10:00,76.0 +2024-10-30 07:58:00+10:00,76.0 +2024-10-30 08:04:00+10:00,69.0 +2024-10-30 08:04:00+10:00,69.0 +2024-10-30 08:08:00+10:00,75.0 +2024-10-30 08:08:00+10:00,75.0 +2024-10-30 08:12:00+10:00,68.0 +2024-10-30 08:12:00+10:00,68.0 +2024-10-30 08:18:00+10:00,72.5 +2024-10-30 08:18:00+10:00,72.5 +2024-10-30 08:21:00+10:00,67.0 +2024-10-30 08:21:00+10:00,67.0 +2024-10-30 08:27:00+10:00,65.0 +2024-10-30 08:27:00+10:00,65.0 +2024-10-30 08:33:00+10:00,59.0 +2024-10-30 08:33:00+10:00,59.0 +2024-10-30 08:40:00+10:00,58.0 +2024-10-30 08:40:00+10:00,58.0 +2024-10-30 08:42:00+10:00,64.0 +2024-10-30 08:42:00+10:00,64.0 +2024-10-30 08:47:00+10:00,69.0 +2024-10-30 08:47:00+10:00,69.0 +2024-10-30 08:49:00+10:00,60.0 +2024-10-30 08:49:00+10:00,60.0 +2024-10-30 08:54:00+10:00,56.0 +2024-10-30 08:54:00+10:00,56.0 +2024-10-30 08:56:00+10:00,58.0 +2024-10-30 08:56:00+10:00,58.0 +2024-10-30 09:00:00+10:00,60.0 +2024-10-30 09:00:00+10:00,60.0 +2024-10-30 09:18:00+10:00,58.0 +2024-10-30 09:18:00+10:00,58.0 +2024-10-30 09:19:00+10:00,61.0 +2024-10-30 09:19:00+10:00,61.0 +2024-10-30 09:29:00+10:00,62.00000000000001 +2024-10-30 09:29:00+10:00,62.00000000000001 +2024-10-30 09:33:00+10:00,83.0 +2024-10-30 09:33:00+10:00,83.0 +2024-10-30 09:35:00+10:00,89.0 +2024-10-30 09:35:00+10:00,89.0 +2024-10-30 09:38:00+10:00,83.0 +2024-10-30 09:38:00+10:00,83.0 +2024-10-30 09:44:00+10:00,66.0 +2024-10-30 09:44:00+10:00,66.0 +2024-10-30 09:45:00+10:00,65.01284790039062 +2024-10-30 09:45:00+10:00,65.01284790039062 +2024-10-30 09:49:00+10:00,58.0 +2024-10-30 09:49:00+10:00,58.0 +2024-10-30 09:50:00+10:00,58.0 +2024-10-30 09:50:00+10:00,58.0 +2024-10-30 09:54:00+10:00,58.0 +2024-10-30 09:54:00+10:00,58.0 +2024-10-30 09:55:00+10:00,59.0 +2024-10-30 09:55:00+10:00,59.0 +2024-10-30 10:02:00+10:00,63.0 +2024-10-30 10:02:00+10:00,63.0 +2024-10-30 10:10:00+10:00,66.0 +2024-10-30 10:10:00+10:00,66.0 +2024-10-30 10:14:00+10:00,59.0 +2024-10-30 10:14:00+10:00,59.0 +2024-10-30 10:16:00+10:00,60.0 +2024-10-30 10:16:00+10:00,60.0 +2024-10-30 10:20:00+10:00,63.0 +2024-10-30 10:20:00+10:00,63.0 +2024-10-30 10:21:00+10:00,63.0 +2024-10-30 10:21:00+10:00,63.0 +2024-10-30 10:25:00+10:00,65.0 +2024-10-30 10:25:00+10:00,65.0 +2024-10-30 10:33:00+10:00,58.0 +2024-10-30 10:33:00+10:00,58.0 +2024-10-30 10:35:00+10:00,59.0 +2024-10-30 10:35:00+10:00,59.0 +2024-10-30 10:44:00+10:00,57.0 +2024-10-30 10:44:00+10:00,57.0 +2024-10-30 10:46:00+10:00,54.0 +2024-10-30 10:46:00+10:00,54.0 +2024-10-30 10:53:00+10:00,58.0 +2024-10-30 
10:53:00+10:00,58.0 +2024-10-30 10:58:00+10:00,66.0 +2024-10-30 10:58:00+10:00,66.0 +2024-10-30 10:59:00+10:00,76.0 +2024-10-30 10:59:00+10:00,76.0 +2024-10-30 11:01:00+10:00,89.0 +2024-10-30 11:01:00+10:00,89.0 +2024-10-30 11:08:00+10:00,72.0 +2024-10-30 11:08:00+10:00,72.0 +2024-10-30 11:10:00+10:00,64.0 +2024-10-30 11:10:00+10:00,64.0 +2024-10-30 11:18:00+10:00,68.0 +2024-10-30 11:18:00+10:00,68.0 +2024-10-30 11:25:00+10:00,69.0 +2024-10-30 11:25:00+10:00,69.0 +2024-10-30 11:32:00+10:00,69.0 +2024-10-30 11:32:00+10:00,69.0 +2024-10-30 11:39:00+10:00,72.0 +2024-10-30 11:39:00+10:00,72.0 +2024-10-30 11:40:00+10:00,69.0 +2024-10-30 11:40:00+10:00,69.0 +2024-10-30 11:46:00+10:00,76.0 +2024-10-30 11:46:00+10:00,76.0 +2024-10-30 11:54:00+10:00,70.0 +2024-10-30 11:54:00+10:00,70.0 +2024-10-30 11:55:00+10:00,71.0 +2024-10-30 11:55:00+10:00,71.0 +2024-10-30 12:03:00+10:00,69.0 +2024-10-30 12:03:00+10:00,69.0 +2024-10-30 12:08:00+10:00,75.0 +2024-10-30 12:08:00+10:00,75.0 +2024-10-30 12:11:00+10:00,78.0 +2024-10-30 12:11:00+10:00,78.0 +2024-10-30 12:15:00+10:00,73.0 +2024-10-30 12:15:00+10:00,73.0 +2024-10-30 12:23:00+10:00,73.0 +2024-10-30 12:23:00+10:00,73.0 +2024-10-30 12:27:00+10:00,73.0 +2024-10-30 12:27:00+10:00,73.0 +2024-10-30 12:34:00+10:00,68.0 +2024-10-30 12:34:00+10:00,68.0 +2024-10-30 12:40:00+10:00,65.0 +2024-10-30 12:40:00+10:00,65.0 +2024-10-30 12:43:00+10:00,67.0 +2024-10-30 12:43:00+10:00,67.0 +2024-10-30 12:47:00+10:00,72.0 +2024-10-30 12:47:00+10:00,72.0 +2024-10-30 12:48:00+10:00,101.0 +2024-10-30 12:48:00+10:00,101.0 +2024-10-30 12:52:00+10:00,89.0 +2024-10-30 12:52:00+10:00,89.0 +2024-10-30 12:55:00+10:00,69.0 +2024-10-30 12:55:00+10:00,69.0 +2024-10-30 13:01:00+10:00,63.0 +2024-10-30 13:01:00+10:00,63.0 +2024-10-30 13:08:00+10:00,65.0 +2024-10-30 13:08:00+10:00,65.0 +2024-10-30 13:11:00+10:00,63.0 +2024-10-30 13:11:00+10:00,63.0 +2024-10-30 13:19:00+10:00,61.0 +2024-10-30 13:19:00+10:00,61.0 +2024-10-30 13:21:00+10:00,61.0 +2024-10-30 13:21:00+10:00,61.0 +2024-10-30 13:28:00+10:00,63.0 +2024-10-30 13:28:00+10:00,63.0 +2024-10-30 13:31:00+10:00,63.0 +2024-10-30 13:31:00+10:00,63.0 +2024-10-30 13:39:00+10:00,74.0 +2024-10-30 13:39:00+10:00,74.0 +2024-10-30 13:42:00+10:00,60.0 +2024-10-30 13:42:00+10:00,60.0 +2024-10-30 13:48:00+10:00,62.00000000000001 +2024-10-30 13:48:00+10:00,62.00000000000001 +2024-10-30 13:53:00+10:00,60.0 +2024-10-30 13:53:00+10:00,60.0 +2024-10-30 13:57:00+10:00,60.0 +2024-10-30 13:57:00+10:00,60.0 +2024-10-30 14:03:00+10:00,58.0 +2024-10-30 14:03:00+10:00,58.0 +2024-10-30 14:05:00+10:00,60.0 +2024-10-30 14:05:00+10:00,60.0 +2024-10-30 14:20:00+10:00,65.0 +2024-10-30 14:20:00+10:00,65.0 +2024-10-30 14:23:00+10:00,65.0 +2024-10-30 14:23:00+10:00,65.0 +2024-10-30 14:28:00+10:00,67.0 +2024-10-30 14:28:00+10:00,67.0 +2024-10-30 14:32:00+10:00,67.0 +2024-10-30 14:32:00+10:00,67.0 +2024-10-30 14:37:00+10:00,57.0 +2024-10-30 14:37:00+10:00,57.0 +2024-10-30 14:40:00+10:00,67.0 +2024-10-30 14:40:00+10:00,67.0 +2024-10-30 14:47:00+10:00,64.0 +2024-10-30 14:47:00+10:00,64.0 +2024-10-30 14:53:00+10:00,64.0 +2024-10-30 14:53:00+10:00,64.0 +2024-10-30 14:55:00+10:00,68.0 +2024-10-30 14:55:00+10:00,68.0 +2024-10-30 15:02:00+10:00,63.0 +2024-10-30 15:02:00+10:00,63.0 +2024-10-30 15:08:00+10:00,56.0 +2024-10-30 15:08:00+10:00,56.0 +2024-10-30 15:13:00+10:00,68.0 +2024-10-30 15:13:00+10:00,68.0 +2024-10-30 15:16:00+10:00,63.0 +2024-10-30 15:16:00+10:00,63.0 +2024-10-30 15:22:00+10:00,68.0 +2024-10-30 15:22:00+10:00,68.0 +2024-10-30 15:26:00+10:00,58.0 +2024-10-30 
15:26:00+10:00,58.0 +2024-10-30 15:32:00+10:00,65.0 +2024-10-30 15:32:00+10:00,65.0 +2024-10-30 15:37:00+10:00,64.0 +2024-10-30 15:37:00+10:00,64.0 +2024-10-30 15:44:00+10:00,63.0 +2024-10-30 15:44:00+10:00,63.0 +2024-10-30 15:45:00+10:00,63.0 +2024-10-30 15:45:00+10:00,63.0 +2024-10-30 15:50:00+10:00,62.00000000000001 +2024-10-30 15:50:00+10:00,62.00000000000001 +2024-10-30 15:56:00+10:00,59.0 +2024-10-30 15:56:00+10:00,59.0 +2024-10-30 16:00:00+10:00,56.0 +2024-10-30 16:00:00+10:00,56.0 +2024-10-30 16:08:00+10:00,61.0 +2024-10-30 16:08:00+10:00,61.0 +2024-10-30 16:13:00+10:00,60.0 +2024-10-30 16:13:00+10:00,60.0 +2024-10-30 16:15:00+10:00,66.0 +2024-10-30 16:15:00+10:00,66.0 +2024-10-30 16:23:00+10:00,65.0 +2024-10-30 16:23:00+10:00,65.0 +2024-10-30 16:28:00+10:00,65.0 +2024-10-30 16:28:00+10:00,65.0 +2024-10-30 16:37:00+10:00,57.0 +2024-10-30 16:37:00+10:00,57.0 +2024-10-30 16:44:00+10:00,78.0 +2024-10-30 16:44:00+10:00,78.0 +2024-10-30 16:50:00+10:00,59.0 +2024-10-30 16:50:00+10:00,59.0 +2024-10-30 16:52:00+10:00,61.0 +2024-10-30 16:52:00+10:00,61.0 +2024-10-30 16:58:00+10:00,56.0 +2024-10-30 16:58:00+10:00,56.0 +2024-10-30 17:01:00+10:00,56.0 +2024-10-30 17:01:00+10:00,56.0 +2024-10-30 17:08:00+10:00,52.0 +2024-10-30 17:08:00+10:00,52.0 +2024-10-30 17:10:00+10:00,51.0 +2024-10-30 17:10:00+10:00,51.0 +2024-10-30 17:16:00+10:00,55.0 +2024-10-30 17:16:00+10:00,55.0 +2024-10-30 17:23:00+10:00,81.0 +2024-10-30 17:23:00+10:00,81.0 +2024-10-30 17:25:00+10:00,50.0 +2024-10-30 17:25:00+10:00,50.0 +2024-10-30 17:33:00+10:00,55.0 +2024-10-30 17:33:00+10:00,55.0 +2024-10-30 17:35:00+10:00,56.0 +2024-10-30 17:35:00+10:00,56.0 +2024-10-30 17:42:00+10:00,56.0 +2024-10-30 17:42:00+10:00,56.0 +2024-10-30 17:46:00+10:00,56.0 +2024-10-30 17:46:00+10:00,56.0 +2024-10-30 17:47:00+10:00,60.134883880615234 +2024-10-30 17:47:00+10:00,60.134883880615234 +2024-10-30 17:48:00+10:00,57.75754547119141 +2024-10-30 17:48:00+10:00,57.75754547119141 +2024-10-30 17:51:00+10:00,56.0 +2024-10-30 17:51:00+10:00,56.0 +2024-10-30 17:55:00+10:00,55.0 +2024-10-30 17:55:00+10:00,55.0 +2024-10-30 18:00:00+10:00,51.0 +2024-10-30 18:00:00+10:00,51.0 +2024-10-30 18:07:00+10:00,55.0 +2024-10-30 18:07:00+10:00,55.0 +2024-10-30 18:12:00+10:00,63.0 +2024-10-30 18:12:00+10:00,63.0 +2024-10-30 18:17:00+10:00,74.0 +2024-10-30 18:17:00+10:00,74.0 +2024-10-30 18:25:00+10:00,80.0 +2024-10-30 18:25:00+10:00,80.0 +2024-10-30 18:34:00+10:00,79.0 +2024-10-30 18:34:00+10:00,79.0 +2024-10-30 18:38:00+10:00,86.0 +2024-10-30 18:38:00+10:00,86.0 +2024-10-30 18:40:00+10:00,86.0 +2024-10-30 18:40:00+10:00,86.0 +2024-10-30 18:56:00+10:00,87.0 +2024-10-30 18:56:00+10:00,87.0 +2024-10-30 19:03:00+10:00,83.0 +2024-10-30 19:03:00+10:00,83.0 +2024-10-30 19:09:00+10:00,92.0 +2024-10-30 19:09:00+10:00,92.00000000000001 +2024-10-30 19:10:00+10:00,96.0 +2024-10-30 19:10:00+10:00,96.0 +2024-10-30 19:18:00+10:00,86.0 +2024-10-30 19:18:00+10:00,86.0 +2024-10-30 19:22:00+10:00,83.0 +2024-10-30 19:22:00+10:00,83.0 +2024-10-30 19:26:00+10:00,86.0 +2024-10-30 19:26:00+10:00,86.0 +2024-10-30 19:35:00+10:00,93.0 +2024-10-30 19:35:00+10:00,93.0 +2024-10-30 19:48:00+10:00,83.0 +2024-10-30 19:48:00+10:00,83.0 +2024-10-30 19:54:00+10:00,79.20919799804688 +2024-10-30 19:54:00+10:00,79.20919799804688 +2024-10-30 19:55:00+10:00,80.0 +2024-10-30 19:55:00+10:00,80.0 +2024-10-30 19:56:00+10:00,79.0 +2024-10-30 19:56:00+10:00,79.0 +2024-10-30 20:01:00+10:00,80.0 +2024-10-30 20:01:00+10:00,80.0 +2024-10-30 20:09:00+10:00,74.0 +2024-10-30 20:09:00+10:00,74.0 +2024-10-30 
20:12:00+10:00,72.0 +2024-10-30 20:12:00+10:00,72.0 +2024-10-30 20:18:00+10:00,85.0 +2024-10-30 20:18:00+10:00,85.0 +2024-10-30 20:20:00+10:00,81.0 +2024-10-30 20:20:00+10:00,81.0 +2024-10-30 20:28:00+10:00,73.0 +2024-10-30 20:28:00+10:00,73.0 +2024-10-30 20:33:00+10:00,73.0 +2024-10-30 20:33:00+10:00,73.0 +2024-10-30 20:36:00+10:00,72.0 +2024-10-30 20:36:00+10:00,72.0 +2024-10-30 20:40:00+10:00,78.0 +2024-10-30 20:40:00+10:00,78.0 +2024-10-30 20:45:00+10:00,80.0 +2024-10-30 20:45:00+10:00,80.0 +2024-10-30 20:50:00+10:00,73.0 +2024-10-30 20:50:00+10:00,73.0 +2024-10-30 20:55:00+10:00,84.0 +2024-10-30 20:55:00+10:00,84.0 +2024-10-30 21:02:00+10:00,84.0 +2024-10-30 21:02:00+10:00,84.0 +2024-10-30 21:06:00+10:00,80.0 +2024-10-30 21:06:00+10:00,80.0 +2024-10-30 21:11:00+10:00,76.0 +2024-10-30 21:11:00+10:00,76.0 +2024-10-30 21:15:00+10:00,77.00000000000001 +2024-10-30 21:15:00+10:00,77.00000000000001 +2024-10-30 21:18:00+10:00,71.0 +2024-10-30 21:18:00+10:00,71.0 +2024-10-30 21:20:00+10:00,72.0 +2024-10-30 21:20:00+10:00,72.0 +2024-10-30 21:26:00+10:00,79.0 +2024-10-30 21:26:00+10:00,79.0 +2024-10-30 22:10:00+10:00,68.56257861195195 +2024-10-30 22:10:00+10:00,68.56257861195195 +2024-10-30 22:12:00+10:00,64.0 +2024-10-30 22:12:00+10:00,64.0 +2024-10-30 22:18:00+10:00,67.0 +2024-10-30 22:18:00+10:00,67.0 +2024-10-30 22:20:00+10:00,63.0 +2024-10-30 22:20:00+10:00,63.0 +2024-10-30 22:25:00+10:00,63.0 +2024-10-30 22:25:00+10:00,63.0 +2024-10-30 22:34:00+10:00,58.0 +2024-10-30 22:34:00+10:00,58.0 +2024-10-30 22:35:00+10:00,58.0 +2024-10-30 22:35:00+10:00,58.0 +2024-10-30 22:40:00+10:00,54.0 +2024-10-30 22:40:00+10:00,54.0 +2024-10-30 22:49:00+10:00,55.0 +2024-10-30 22:49:00+10:00,55.0 +2024-10-30 22:50:00+10:00,56.0 +2024-10-30 22:50:00+10:00,56.0 +2024-10-30 22:56:00+10:00,58.0 +2024-10-30 22:56:00+10:00,58.0 +2024-10-30 23:02:00+10:00,57.0 +2024-10-30 23:02:00+10:00,57.0 +2024-10-30 23:07:00+10:00,58.0 +2024-10-30 23:07:00+10:00,58.0 +2024-10-30 23:10:00+10:00,55.0 +2024-10-30 23:10:00+10:00,55.0 +2024-10-30 23:17:00+10:00,64.0 +2024-10-30 23:17:00+10:00,64.0 +2024-10-30 23:21:00+10:00,64.50000000000001 +2024-10-30 23:21:00+10:00,64.50000000000001 +2024-10-30 23:28:00+10:00,68.0 +2024-10-30 23:28:00+10:00,68.0 +2024-10-30 23:31:00+10:00,66.0 +2024-10-30 23:31:00+10:00,66.0 +2024-10-30 23:35:00+10:00,65.09080505371094 +2024-10-30 23:35:00+10:00,65.09080505371094 +2024-10-30 23:36:00+10:00,66.0 +2024-10-30 23:36:00+10:00,66.0 +2024-10-30 23:40:00+10:00,63.0 +2024-10-30 23:40:00+10:00,63.0 +2024-10-30 23:46:00+10:00,63.0 +2024-10-30 23:46:00+10:00,63.0 +2024-10-30 23:51:00+10:00,59.0 +2024-10-30 23:51:00+10:00,59.0 +2024-10-30 23:54:00+10:00,63.0 +2024-10-30 23:54:00+10:00,63.0 +2024-10-30 23:57:00+10:00,59.0 +2024-10-30 23:57:00+10:00,59.0 +2024-10-31 00:01:00+10:00,59.0 +2024-10-31 00:01:00+10:00,59.0 +2024-10-31 00:05:00+10:00,58.0 +2024-10-31 00:05:00+10:00,58.0 +2024-10-31 00:13:00+10:00,55.0 +2024-10-31 00:13:00+10:00,55.0 +2024-10-31 00:17:00+10:00,56.0 +2024-10-31 00:17:00+10:00,56.0 +2024-10-31 00:21:00+10:00,56.0 +2024-10-31 00:21:00+10:00,56.0 +2024-10-31 00:22:00+10:00,55.0 +2024-10-31 00:22:00+10:00,55.0 +2024-10-31 00:26:00+10:00,55.0 +2024-10-31 00:26:00+10:00,55.0 +2024-10-31 00:32:00+10:00,55.0 +2024-10-31 00:32:00+10:00,55.0 +2024-10-31 00:38:00+10:00,58.0 +2024-10-31 00:38:00+10:00,58.0 +2024-10-31 00:40:00+10:00,58.0 +2024-10-31 00:40:00+10:00,58.0 +2024-10-31 00:45:00+10:00,58.0 +2024-10-31 00:45:00+10:00,58.0 +2024-10-31 00:51:00+10:00,56.5 +2024-10-31 00:51:00+10:00,56.5 
+2024-10-31 00:59:00+10:00,66.0 +2024-10-31 00:59:00+10:00,66.0 +2024-10-31 01:05:00+10:00,59.0 +2024-10-31 01:05:00+10:00,59.0 +2024-10-31 01:08:00+10:00,60.0 +2024-10-31 01:08:00+10:00,60.0 +2024-10-31 01:11:00+10:00,60.0 +2024-10-31 01:11:00+10:00,60.0 +2024-10-31 01:16:00+10:00,59.0 +2024-10-31 01:16:00+10:00,59.0 +2024-10-31 01:22:00+10:00,53.0 +2024-10-31 01:22:00+10:00,53.0 +2024-10-31 01:28:00+10:00,54.0 +2024-10-31 01:28:00+10:00,54.0 +2024-10-31 01:29:00+10:00,54.0 +2024-10-31 01:29:00+10:00,54.0 +2024-10-31 01:34:00+10:00,52.0 +2024-10-31 01:34:00+10:00,52.0 +2024-10-31 01:35:00+10:00,51.41911697387695 +2024-10-31 01:35:00+10:00,51.41911697387695 +2024-10-31 01:42:00+10:00,52.0 +2024-10-31 01:42:00+10:00,52.0 +2024-10-31 01:45:00+10:00,52.0 +2024-10-31 01:45:00+10:00,52.0 +2024-10-31 01:51:00+10:00,52.0 +2024-10-31 01:51:00+10:00,52.0 +2024-10-31 01:55:00+10:00,53.0 +2024-10-31 01:55:00+10:00,53.0 +2024-10-31 01:58:00+10:00,54.0 +2024-10-31 01:58:00+10:00,54.0 +2024-10-31 02:01:00+10:00,53.0 +2024-10-31 02:01:00+10:00,53.0 +2024-10-31 02:07:00+10:00,52.0 +2024-10-31 02:07:00+10:00,52.0 +2024-10-31 02:11:00+10:00,52.0 +2024-10-31 02:11:00+10:00,52.0 +2024-10-31 02:15:00+10:00,51.0 +2024-10-31 02:15:00+10:00,51.0 +2024-10-31 02:20:00+10:00,49.0 +2024-10-31 02:20:00+10:00,49.0 +2024-10-31 02:27:00+10:00,47.0 +2024-10-31 02:27:00+10:00,47.0 +2024-10-31 02:32:00+10:00,49.0 +2024-10-31 02:32:00+10:00,49.0 +2024-10-31 02:33:00+10:00,50.0 +2024-10-31 02:33:00+10:00,50.0 +2024-10-31 02:38:00+10:00,49.0 +2024-10-31 02:38:00+10:00,49.0 +2024-10-31 02:42:00+10:00,53.0 +2024-10-31 02:42:00+10:00,53.0 +2024-10-31 02:49:00+10:00,52.0 +2024-10-31 02:49:00+10:00,52.0 +2024-10-31 02:50:00+10:00,54.0 +2024-10-31 02:50:00+10:00,54.0 +2024-10-31 02:58:00+10:00,52.0 +2024-10-31 02:58:00+10:00,52.0 +2024-10-31 03:01:00+10:00,52.0 +2024-10-31 03:01:00+10:00,52.0 +2024-10-31 03:02:00+10:00,51.0 +2024-10-31 03:02:00+10:00,51.0 +2024-10-31 03:06:00+10:00,52.0 +2024-10-31 03:06:00+10:00,52.0 +2024-10-31 03:14:00+10:00,48.0 +2024-10-31 03:14:00+10:00,48.0 +2024-10-31 03:16:00+10:00,49.0 +2024-10-31 03:16:00+10:00,49.0 +2024-10-31 03:23:00+10:00,49.0 +2024-10-31 03:23:00+10:00,49.0 +2024-10-31 03:26:00+10:00,48.0 +2024-10-31 03:26:00+10:00,48.0 +2024-10-31 03:32:00+10:00,48.0 +2024-10-31 03:32:00+10:00,48.0 +2024-10-31 03:33:00+10:00,47.0 +2024-10-31 03:33:00+10:00,47.0 +2024-10-31 03:35:00+10:00,44.90291976928711 +2024-10-31 03:35:00+10:00,44.90291976928711 +2024-10-31 03:38:00+10:00,48.0 +2024-10-31 03:38:00+10:00,48.0 +2024-10-31 03:42:00+10:00,46.00000000000001 +2024-10-31 03:42:00+10:00,46.00000000000001 +2024-10-31 03:47:00+10:00,48.0 +2024-10-31 03:47:00+10:00,48.0 +2024-10-31 03:50:00+10:00,48.0 +2024-10-31 03:50:00+10:00,48.0 +2024-10-31 03:55:00+10:00,49.0 +2024-10-31 03:55:00+10:00,49.0 +2024-10-31 04:00:00+10:00,48.0 +2024-10-31 04:00:00+10:00,48.0 +2024-10-31 04:02:00+10:00,49.0 +2024-10-31 04:02:00+10:00,49.0 +2024-10-31 04:05:00+10:00,48.0 +2024-10-31 04:05:00+10:00,48.0 +2024-10-31 04:10:00+10:00,49.0 +2024-10-31 04:10:00+10:00,49.0 +2024-10-31 04:15:00+10:00,49.0 +2024-10-31 04:15:00+10:00,49.0 +2024-10-31 04:20:00+10:00,63.0 +2024-10-31 04:20:00+10:00,63.0 +2024-10-31 04:25:00+10:00,64.0 +2024-10-31 04:25:00+10:00,64.0 +2024-10-31 04:31:00+10:00,51.0 +2024-10-31 04:31:00+10:00,51.0 +2024-10-31 04:32:00+10:00,59.0 +2024-10-31 04:32:00+10:00,59.0 +2024-10-31 04:36:00+10:00,45.0 +2024-10-31 04:36:00+10:00,45.0 +2024-10-31 04:40:00+10:00,44.0 +2024-10-31 04:40:00+10:00,44.0 +2024-10-31 
04:49:00+10:00,50.0 +2024-10-31 04:49:00+10:00,50.0 +2024-10-31 04:51:00+10:00,49.0 +2024-10-31 04:51:00+10:00,49.0 +2024-10-31 04:55:00+10:00,52.0 +2024-10-31 04:55:00+10:00,52.0 +2024-10-31 05:01:00+10:00,49.0 +2024-10-31 05:01:00+10:00,49.0 +2024-10-31 05:02:00+10:00,48.0 +2024-10-31 05:02:00+10:00,48.0 +2024-10-31 05:09:00+10:00,49.0 +2024-10-31 05:09:00+10:00,49.0 +2024-10-31 05:11:00+10:00,49.0 +2024-10-31 05:11:00+10:00,49.0 +2024-10-31 05:19:00+10:00,50.0 +2024-10-31 05:19:00+10:00,50.0 +2024-10-31 05:20:00+10:00,50.0 +2024-10-31 05:20:00+10:00,50.0 +2024-10-31 05:26:00+10:00,50.0 +2024-10-31 05:26:00+10:00,50.0 +2024-10-31 05:33:00+10:00,49.0 +2024-10-31 05:33:00+10:00,49.0 +2024-10-31 05:35:00+10:00,50.109588623046875 +2024-10-31 05:35:00+10:00,50.109588623046875 +2024-10-31 05:40:00+10:00,50.0 +2024-10-31 05:40:00+10:00,50.0 +2024-10-31 05:45:00+10:00,50.0 +2024-10-31 05:45:00+10:00,50.0 +2024-10-31 05:50:00+10:00,58.0 +2024-10-31 05:50:00+10:00,58.0 +2024-10-31 05:52:00+10:00,44.0 +2024-10-31 05:52:00+10:00,44.0 +2024-10-31 05:55:00+10:00,51.0 +2024-10-31 05:55:00+10:00,51.0 +2024-10-31 06:01:00+10:00,54.0 +2024-10-31 06:01:00+10:00,54.0 +2024-10-31 06:08:00+10:00,64.0 +2024-10-31 06:08:00+10:00,64.0 +2024-10-31 06:18:00+10:00,67.0 +2024-10-31 06:18:00+10:00,67.0 +2024-10-31 06:19:00+10:00,59.0 +2024-10-31 06:19:00+10:00,59.0 +2024-10-31 06:22:00+10:00,66.0 +2024-10-31 06:22:00+10:00,66.0 +2024-10-31 06:26:00+10:00,59.0 +2024-10-31 06:26:00+10:00,59.0 +2024-10-31 06:34:00+10:00,65.0 +2024-10-31 06:34:00+10:00,65.0 +2024-10-31 06:38:00+10:00,74.0 +2024-10-31 06:38:00+10:00,74.0 +2024-10-31 06:43:00+10:00,65.0 +2024-10-31 06:43:00+10:00,65.0 +2024-10-31 06:51:00+10:00,68.0 +2024-10-31 06:51:00+10:00,68.0 +2024-10-31 06:57:00+10:00,62.00000000000001 +2024-10-31 06:57:00+10:00,62.00000000000001 +2024-10-31 07:02:00+10:00,67.0 +2024-10-31 07:02:00+10:00,67.0 +2024-10-31 07:06:00+10:00,61.0 +2024-10-31 07:06:00+10:00,61.0 +2024-10-31 07:14:00+10:00,63.0 +2024-10-31 07:14:00+10:00,63.0 +2024-10-31 07:15:00+10:00,60.0 +2024-10-31 07:15:00+10:00,60.0 +2024-10-31 07:24:00+10:00,58.0 +2024-10-31 07:24:00+10:00,58.0 +2024-10-31 07:27:00+10:00,57.0 +2024-10-31 07:27:00+10:00,57.0 +2024-10-31 07:31:00+10:00,65.0 +2024-10-31 07:31:00+10:00,65.0 +2024-10-31 07:38:00+10:00,66.0 +2024-10-31 07:38:00+10:00,66.0 +2024-10-31 07:46:00+10:00,65.0 +2024-10-31 07:46:00+10:00,65.0 +2024-10-31 07:52:00+10:00,59.0 +2024-10-31 07:52:00+10:00,59.0 +2024-10-31 07:57:00+10:00,60.0 +2024-10-31 07:57:00+10:00,60.0 +2024-10-31 08:04:00+10:00,56.0 +2024-10-31 08:04:00+10:00,56.0 +2024-10-31 08:06:00+10:00,56.0 +2024-10-31 08:06:00+10:00,56.0 +2024-10-31 08:12:00+10:00,57.0 +2024-10-31 08:12:00+10:00,57.0 +2024-10-31 08:13:00+10:00,58.32500457763672 +2024-10-31 08:13:00+10:00,58.32500457763672 +2024-10-31 08:19:00+10:00,59.0 +2024-10-31 08:19:00+10:00,59.0 +2024-10-31 08:22:00+10:00,80.0 +2024-10-31 08:22:00+10:00,80.0 +2024-10-31 08:27:00+10:00,55.0 +2024-10-31 08:27:00+10:00,55.0 +2024-10-31 08:31:00+10:00,60.0 +2024-10-31 08:31:00+10:00,60.0 +2024-10-31 08:35:00+10:00,58.0 +2024-10-31 08:35:00+10:00,58.0 +2024-10-31 08:44:00+10:00,68.0 +2024-10-31 08:44:00+10:00,68.0 +2024-10-31 08:46:00+10:00,54.0 +2024-10-31 08:46:00+10:00,54.0 +2024-10-31 08:50:00+10:00,62.00000000000001 +2024-10-31 08:50:00+10:00,62.00000000000001 +2024-10-31 08:57:00+10:00,58.0 +2024-10-31 08:57:00+10:00,58.0 +2024-10-31 09:04:00+10:00,56.0 +2024-10-31 09:04:00+10:00,56.0 +2024-10-31 09:06:00+10:00,56.0 +2024-10-31 09:06:00+10:00,56.0 
+2024-10-31 09:13:00+10:00,60.0 +2024-10-31 09:13:00+10:00,60.0 +2024-10-31 09:18:00+10:00,60.0 +2024-10-31 09:18:00+10:00,60.0 +2024-10-31 09:22:00+10:00,57.0 +2024-10-31 09:22:00+10:00,57.0 +2024-10-31 09:26:00+10:00,75.18181818181817 +2024-10-31 09:26:00+10:00,75.18181818181817 +2024-10-31 09:28:00+10:00,65.0 +2024-10-31 09:28:00+10:00,65.0 +2024-10-31 09:34:00+10:00,59.0 +2024-10-31 09:34:00+10:00,59.0 +2024-10-31 09:36:00+10:00,57.0 +2024-10-31 09:36:00+10:00,57.0 +2024-10-31 09:45:00+10:00,58.0 +2024-10-31 09:45:00+10:00,58.0 +2024-10-31 09:47:00+10:00,55.0 +2024-10-31 09:47:00+10:00,55.0 +2024-10-31 09:51:00+10:00,66.0 +2024-10-31 09:51:00+10:00,66.0 +2024-10-31 09:56:00+10:00,58.0 +2024-10-31 09:56:00+10:00,58.0 +2024-10-31 10:00:00+10:00,59.0 +2024-10-31 10:00:00+10:00,59.0 +2024-10-31 10:05:00+10:00,56.0 +2024-10-31 10:05:00+10:00,56.0 +2024-10-31 10:10:00+10:00,59.0 +2024-10-31 10:10:00+10:00,59.0 +2024-10-31 10:19:00+10:00,64.0 +2024-10-31 10:19:00+10:00,64.0 +2024-10-31 10:21:00+10:00,62.00000000000001 +2024-10-31 10:21:00+10:00,62.00000000000001 +2024-10-31 10:27:00+10:00,64.0 +2024-10-31 10:27:00+10:00,64.0 +2024-10-31 10:43:00+10:00,65.0 +2024-10-31 10:43:00+10:00,65.0 +2024-10-31 10:46:00+10:00,58.0 +2024-10-31 10:46:00+10:00,58.0 +2024-10-31 10:53:00+10:00,66.0 +2024-10-31 10:53:00+10:00,66.0 +2024-10-31 10:58:00+10:00,54.0 +2024-10-31 10:58:00+10:00,54.0 +2024-10-31 11:02:00+10:00,56.0 +2024-10-31 11:02:00+10:00,56.0 +2024-10-31 11:27:00+10:00,68.0 +2024-10-31 11:27:00+10:00,68.0 +2024-10-31 11:45:00+10:00,71.0 +2024-10-31 11:45:00+10:00,71.0 +2024-10-31 12:09:00+10:00,103.00000000000001 +2024-10-31 12:09:00+10:00,103.0 +2024-10-31 12:11:00+10:00,68.0 +2024-10-31 12:11:00+10:00,68.0 +2024-10-31 12:28:00+10:00,88.0 +2024-10-31 12:28:00+10:00,88.0 +2024-10-31 12:38:00+10:00,82.0 +2024-10-31 12:38:00+10:00,82.0 +2024-10-31 12:40:00+10:00,76.0 +2024-10-31 12:40:00+10:00,76.0 +2024-10-31 12:45:00+10:00,80.0 +2024-10-31 12:45:00+10:00,80.0 +2024-10-31 12:50:00+10:00,77.00000000000001 +2024-10-31 12:50:00+10:00,77.00000000000001 +2024-10-31 12:55:00+10:00,99.0 +2024-10-31 12:55:00+10:00,99.0 +2024-10-31 13:01:00+10:00,102.0 +2024-10-31 13:01:00+10:00,102.0 +2024-10-31 13:13:00+10:00,93.0 +2024-10-31 13:13:00+10:00,93.0 +2024-10-31 13:16:00+10:00,94.0 +2024-10-31 13:16:00+10:00,94.0 +2024-10-31 13:24:00+10:00,102.0 +2024-10-31 13:24:00+10:00,102.0 +2024-10-31 13:27:00+10:00,102.0 +2024-10-31 13:27:00+10:00,102.0 +2024-10-31 13:34:00+10:00,100.0 +2024-10-31 13:34:00+10:00,100.0 +2024-10-31 13:37:00+10:00,109.0 +2024-10-31 13:37:00+10:00,109.0 +2024-10-31 13:41:00+10:00,108.0 +2024-10-31 13:41:00+10:00,108.0 +2024-10-31 13:43:00+10:00,110.78223419189452 +2024-10-31 13:43:00+10:00,110.78223419189453 +2024-10-31 13:47:00+10:00,108.0 +2024-10-31 13:47:00+10:00,108.0 +2024-10-31 13:54:00+10:00,102.0 +2024-10-31 13:54:00+10:00,102.0 +2024-10-31 13:56:00+10:00,100.0 +2024-10-31 13:56:00+10:00,100.0 +2024-10-31 14:02:00+10:00,100.0 +2024-10-31 14:02:00+10:00,100.0 +2024-10-31 14:06:00+10:00,113.0 +2024-10-31 14:06:00+10:00,113.0 +2024-10-31 14:17:00+10:00,108.0 +2024-10-31 14:17:00+10:00,108.0 +2024-10-31 14:23:00+10:00,105.0 +2024-10-31 14:23:00+10:00,105.0 +2024-10-31 14:28:00+10:00,103.0 +2024-10-31 14:28:00+10:00,103.0 +2024-10-31 14:33:00+10:00,100.0 +2024-10-31 14:33:00+10:00,100.0 +2024-10-31 15:00:00+10:00,92.0 +2024-10-31 15:00:00+10:00,92.00000000000001 +2024-10-31 15:04:00+10:00,94.0 +2024-10-31 15:04:00+10:00,94.0 +2024-10-31 15:07:00+10:00,98.0 +2024-10-31 
[CSV data hunk continues: one added row per line in the form "+YYYY-MM-DD HH:MM:SS+10:00,value", with each timestamp appearing as a pair of near-identical readings; the rows shown here span 2024-10-31 15:07:00+10:00 through 2024-11-04 08:30:00+10:00, with values roughly between 44 and 154.]
+2024-11-04 08:30:00+10:00,93.78125 +2024-11-04 08:31:00+10:00,94.0103092783505 +2024-11-04 08:31:00+10:00,94.0103092783505 +2024-11-04 08:32:00+10:00,96.81976744186046 +2024-11-04 08:32:00+10:00,96.81976744186046 +2024-11-04 08:33:00+10:00,94.0 +2024-11-04 08:33:00+10:00,94.0 +2024-11-04 08:37:00+10:00,91.0 +2024-11-04 08:37:00+10:00,91.0 +2024-11-04 08:41:00+10:00,84.0 +2024-11-04 08:41:00+10:00,84.0 +2024-11-04 08:43:00+10:00,71.30162811279297 +2024-11-04 08:43:00+10:00,71.30162811279297 +2024-11-04 08:49:00+10:00,68.0 +2024-11-04 08:49:00+10:00,68.0 +2024-11-04 08:55:00+10:00,63.0 +2024-11-04 08:55:00+10:00,63.0 +2024-11-04 09:00:00+10:00,64.0 +2024-11-04 09:00:00+10:00,64.0 +2024-11-04 09:03:00+10:00,68.0 +2024-11-04 09:03:00+10:00,68.0 +2024-11-04 09:08:00+10:00,65.0 +2024-11-04 09:08:00+10:00,65.0 +2024-11-04 09:12:00+10:00,67.0 +2024-11-04 09:12:00+10:00,67.0 +2024-11-04 09:15:00+10:00,68.0 +2024-11-04 09:15:00+10:00,68.0 +2024-11-04 09:25:00+10:00,65.0 +2024-11-04 09:25:00+10:00,65.0 +2024-11-04 09:30:00+10:00,60.0 +2024-11-04 09:30:00+10:00,60.0 +2024-11-04 09:35:00+10:00,63.0 +2024-11-04 09:35:00+10:00,63.0 +2024-11-04 09:39:00+10:00,71.0 +2024-11-04 09:39:00+10:00,71.0 +2024-11-04 09:45:00+10:00,67.0 +2024-11-04 09:45:00+10:00,67.0 +2024-11-04 09:50:00+10:00,76.0 +2024-11-04 09:50:00+10:00,76.0 +2024-11-04 09:53:00+10:00,75.0 +2024-11-04 09:53:00+10:00,75.0 +2024-11-04 09:56:00+10:00,72.0 +2024-11-04 09:56:00+10:00,72.0 +2024-11-04 10:01:00+10:00,81.0 +2024-11-04 10:01:00+10:00,81.0 +2024-11-04 10:09:00+10:00,65.0 +2024-11-04 10:09:00+10:00,65.0 +2024-11-04 10:14:00+10:00,65.0 +2024-11-04 10:14:00+10:00,65.0 +2024-11-04 10:16:00+10:00,66.0 +2024-11-04 10:16:00+10:00,66.0 +2024-11-04 10:21:00+10:00,63.729063034057624 +2024-11-04 10:21:00+10:00,63.729063034057624 +2024-11-04 10:29:00+10:00,63.0 +2024-11-04 10:29:00+10:00,63.0 +2024-11-04 10:32:00+10:00,57.0 +2024-11-04 10:32:00+10:00,57.0 +2024-11-04 10:38:00+10:00,59.0 +2024-11-04 10:38:00+10:00,59.0 +2024-11-04 10:43:00+10:00,60.0 +2024-11-04 10:43:00+10:00,60.0 +2024-11-04 10:45:00+10:00,59.0 +2024-11-04 10:45:00+10:00,59.0 +2024-11-04 10:48:00+10:00,58.866666666666674 +2024-11-04 10:48:00+10:00,58.866666666666674 +2024-11-04 10:49:00+10:00,59.98255814646566 +2024-11-04 10:49:00+10:00,59.98255814646566 +2024-11-04 10:50:00+10:00,57.02325581395348 +2024-11-04 10:50:00+10:00,57.02325581395348 +2024-11-04 10:51:00+10:00,57.96152938241914 +2024-11-04 10:51:00+10:00,57.96152938241914 +2024-11-04 10:52:00+10:00,58.31240581269315 +2024-11-04 10:52:00+10:00,58.31240581269315 +2024-11-04 10:53:00+10:00,56.610465116279066 +2024-11-04 10:53:00+10:00,56.610465116279066 +2024-11-04 10:54:00+10:00,64.13750000542029 +2024-11-04 10:54:00+10:00,64.13750000542029 +2024-11-04 10:55:00+10:00,56.0 +2024-11-04 10:55:00+10:00,56.0 +2024-11-04 10:56:00+10:00,76.0 +2024-11-04 10:56:00+10:00,76.0 +2024-11-04 10:57:00+10:00,72.39641943734016 +2024-11-04 10:57:00+10:00,72.39641943734016 +2024-11-04 10:58:00+10:00,66.77860696517412 +2024-11-04 10:58:00+10:00,66.77860696517412 +2024-11-04 10:59:00+10:00,61.88481675267844 +2024-11-04 10:59:00+10:00,61.88481675267844 +2024-11-04 11:00:00+10:00,55.31428571428572 +2024-11-04 11:00:00+10:00,55.31428571428572 +2024-11-04 11:01:00+10:00,62.665990011691875 +2024-11-04 11:01:00+10:00,62.665990011691875 +2024-11-04 11:02:00+10:00,64.58425584255842 +2024-11-04 11:02:00+10:00,64.58425584255842 +2024-11-04 11:03:00+10:00,65.50000000000001 +2024-11-04 11:03:00+10:00,65.50000000000001 +2024-11-04 11:13:00+10:00,52.0 
+2024-11-04 11:13:00+10:00,52.0 +2024-11-04 11:17:00+10:00,56.0 +2024-11-04 11:17:00+10:00,56.0 +2024-11-04 11:24:00+10:00,52.0 +2024-11-04 11:24:00+10:00,52.0 +2024-11-04 11:25:00+10:00,52.0 +2024-11-04 11:25:00+10:00,52.0 +2024-11-04 11:29:59+10:00,57.0 +2024-11-04 11:30:00+10:00,57.0 +2024-11-04 11:30:00+10:00,57.0 +2024-11-04 11:35:00+10:00,55.0 +2024-11-04 11:35:00+10:00,55.0 +2024-11-04 11:49:00+10:00,67.0 +2024-11-04 11:49:00+10:00,67.0 +2024-11-04 11:55:00+10:00,78.0 +2024-11-04 11:55:00+10:00,78.0 +2024-11-04 11:59:00+10:00,74.0 +2024-11-04 11:59:00+10:00,74.0 +2024-11-04 12:04:32+10:00,76.0 +2024-11-04 12:05:00+10:00,79.0 +2024-11-04 12:05:00+10:00,79.0 +2024-11-04 12:05:32+10:00,82.0 +2024-11-04 12:06:32+10:00,78.0 +2024-11-04 12:07:00+10:00,78.0 +2024-11-04 12:07:00+10:00,78.0 +2024-11-04 12:10:00+10:00,81.0 +2024-11-04 12:10:00+10:00,81.0 +2024-11-04 12:14:00+10:00,80.0 +2024-11-04 12:14:00+10:00,80.0 +2024-11-04 12:18:00+10:00,91.0 +2024-11-04 12:18:00+10:00,91.0 +2024-11-04 12:22:00+10:00,88.0 +2024-11-04 12:22:00+10:00,88.0 +2024-11-04 12:26:55+10:00,66.0 +2024-11-04 12:27:00+10:00,66.0 +2024-11-04 12:27:00+10:00,66.0 +2024-11-04 12:28:55+10:00,98.5 +2024-11-04 12:29:00+10:00,98.5 +2024-11-04 12:29:00+10:00,98.5 +2024-11-04 12:29:55+10:00,100.98412698412696 +2024-11-04 12:30:00+10:00,100.98412698412696 +2024-11-04 12:30:00+10:00,100.98412698412697 +2024-11-04 12:30:55+10:00,107.5 +2024-11-04 12:31:00+10:00,105.9818181818182 +2024-11-04 12:31:00+10:00,105.9818181818182 +2024-11-04 12:31:55+10:00,104.0 +2024-11-04 12:33:55+10:00,93.0 +2024-11-04 12:34:00+10:00,93.0 +2024-11-04 12:34:00+10:00,93.0 +2024-11-04 12:35:00+10:00,90.0 +2024-11-04 12:35:00+10:00,90.0 +2024-11-04 12:41:00+10:00,93.0 +2024-11-04 12:41:00+10:00,93.0 +2024-11-04 12:48:31+10:00,90.0 +2024-11-04 12:49:00+10:00,90.0 +2024-11-04 12:49:00+10:00,90.0 +2024-11-04 12:52:00+10:00,73.0 +2024-11-04 12:52:00+10:00,73.0 diff --git a/packages/syft-extras/.archive/examples/fedhr/page.html b/packages/syft-extras/.archive/examples/fedhr/page.html new file mode 100644 index 00000000000..620520ecc2a --- /dev/null +++ b/packages/syft-extras/.archive/examples/fedhr/page.html @@ -0,0 +1,852 @@ + + + + + + Personal Heart Rate Monitor + + + + + + + + +
+    [page.html: ❤️ "Madhava's Live Heart Rate" dashboard showing Current Time, Last Reading, and a
+     60s Next Update countdown; stat cards for 📈 Current Rate, 📊 Average Rate, ⬇️ Minimum Rate, and
+     ⬆️ Maximum Rate (each "-- BPM"); and chart panels for "Last 3 Hours" and "Last 48 Hours"]
    + + + + diff --git a/packages/syft-extras/.archive/examples/fedhr/request_log.txt b/packages/syft-extras/.archive/examples/fedhr/request_log.txt new file mode 100644 index 00000000000..4c6aafaa569 --- /dev/null +++ b/packages/syft-extras/.archive/examples/fedhr/request_log.txt @@ -0,0 +1,14084 @@ +Timestamp: 2024-11-04 12:58:31.317294 +Request Type: POST +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + content-length: 389845 + accept: */* + automation-name: SyftBox HR + automation-aggregation: Minutes + automation-id: DED61538-82A2-40CD-99BA-442DFF051D14 + accept-language: en-AU,en;q=0.9 + accept-encoding: gzip, deflate, br + upload-draft-interop-version: 5 + session-id: 20E2BA35-4498-4E3C-87FF-487799969D8E + user-agent: Auto%20Export/20241009 CFNetwork/1568.100.1.2.1 Darwin/24.0.0 + upload-complete: ?1 + automation-period: Since Last Sync + content-type: application/json +Body: +{ + "data" : { + "metrics" : [ + { + "data" : [ + { + "date" : "2024-10-29 00:03:00 +1000", + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Avg" : 63 + }, + { + "date" : "2024-10-29 00:09:00 +1000", + "Avg" : 60, + "Min" : 60, + "source" : "Madhava’s Apple Watch", + "Max" : 60 + }, + { + "Min" : 64, + "Avg" : 64, + "date" : "2024-10-29 00:11:00 +1000", + "Max" : 64, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 55.58563232421875, + "Max" : 55.58563232421875, + "source" : "Madhava’s Apple Watch", + "Min" : 55.58563232421875, + "date" : "2024-10-29 00:17:00 +1000" + }, + { + "date" : "2024-10-29 00:19:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 54, + "Avg" : 54, + "Max" : 54 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "Min" : 54, + "Avg" : 54, + "date" : "2024-10-29 00:21:00 +1000" + }, + { + "Avg" : 53, + "Max" : 53, + "Min" : 53, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 00:27:00 +1000" + }, + { + "Avg" : 53, + "date" : "2024-10-29 00:33:00 +1000", + "Min" : 53, + "source" : "Madhava’s Apple Watch", + "Max" : 53 + }, + { + "Max" : 53, + "Min" : 53, + "date" : "2024-10-29 00:35:00 +1000", + "Avg" : 53, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 52, + "Min" : 52, + "source" : "Madhava’s Apple Watch", + "Avg" : 52, + "date" : "2024-10-29 00:38:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 53, + "Avg" : 53, + "Min" : 53, + "date" : "2024-10-29 00:44:00 +1000" + }, + { + "Avg" : 52, + "Max" : 52, + "Min" : 52, + "date" : "2024-10-29 00:45:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 00:51:00 +1000", + "Max" : 49, + "Min" : 49, + "source" : "Madhava’s Apple Watch", + "Avg" : 49 + }, + { + "Avg" : 49, + "Min" : 49, + "source" : "Madhava’s Apple Watch", + "Max" : 49, + "date" : "2024-10-29 00:57:00 +1000" + }, + { + "date" : "2024-10-29 01:02:00 +1000", + "Avg" : 53, + "Max" : 53, + "Min" : 53, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 01:03:00 +1000", + "Max" : 49, + "Min" : 49, + "source" : "Madhava’s Apple Watch", + "Avg" : 49 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "Avg" : 52, + "Max" : 52, + "date" : "2024-10-29 01:08:00 +1000" + }, + { + "Max" : 49, + "Min" : 49, + "Avg" : 49, + "date" : "2024-10-29 01:12:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 48, + "Max" : 48, + "date" : "2024-10-29 01:19:00 +1000", + "Min" : 48, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 50, + "Avg" : 
50, + "Max" : 50, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 01:23:00 +1000" + }, + { + "date" : "2024-10-29 01:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 50, + "Max" : 50, + "Avg" : 50 + }, + { + "Avg" : 50, + "date" : "2024-10-29 01:32:00 +1000", + "Max" : 50, + "source" : "Madhava’s Apple Watch", + "Min" : 50 + }, + { + "date" : "2024-10-29 01:33:00 +1000", + "Avg" : 52, + "Max" : 52, + "source" : "Madhava’s Apple Watch", + "Min" : 52 + }, + { + "Max" : 51.888980865478516, + "date" : "2024-10-29 01:35:00 +1000", + "Min" : 51.888980865478516, + "Avg" : 51.888980865478516, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 52, + "Avg" : 52, + "Max" : 52, + "date" : "2024-10-29 01:38:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 01:42:00 +1000", + "Min" : 52, + "Avg" : 52, + "Max" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 01:50:00 +1000", + "Max" : 53, + "source" : "Madhava’s Apple Watch", + "Min" : 53, + "Avg" : 53 + }, + { + "date" : "2024-10-29 01:54:00 +1000", + "Avg" : 54, + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "Min" : 54 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 55, + "Max" : 55, + "Min" : 55, + "date" : "2024-10-29 01:56:00 +1000" + }, + { + "Avg" : 63, + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 02:03:00 +1000", + "Min" : 63 + }, + { + "date" : "2024-10-29 02:04:00 +1000", + "Max" : 64, + "source" : "Madhava’s Apple Watch", + "Avg" : 64, + "Min" : 64 + }, + { + "Avg" : 63, + "Max" : 63, + "Min" : 63, + "date" : "2024-10-29 02:08:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Max" : 63, + "Avg" : 63, + "date" : "2024-10-29 02:10:00 +1000" + }, + { + "Min" : 62.000000000000007, + "Avg" : 62.000000000000007, + "Max" : 62.000000000000007, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 02:16:00 +1000" + }, + { + "Avg" : 62.000000000000007, + "Min" : 62.000000000000007, + "Max" : 62.000000000000007, + "date" : "2024-10-29 02:20:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 02:25:00 +1000", + "Min" : 57, + "source" : "Madhava’s Apple Watch", + "Avg" : 57, + "Max" : 57 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 56, + "Max" : 56, + "Avg" : 56, + "date" : "2024-10-29 02:30:00 +1000" + }, + { + "Max" : 57, + "source" : "Madhava’s Apple Watch", + "Min" : 57, + "date" : "2024-10-29 02:33:00 +1000", + "Avg" : 57 + }, + { + "Min" : 55, + "date" : "2024-10-29 02:36:00 +1000", + "Avg" : 55, + "Max" : 55, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 02:42:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 53, + "Max" : 53, + "Min" : 53 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 50, + "Max" : 50, + "Avg" : 50, + "date" : "2024-10-29 02:47:00 +1000" + }, + { + "date" : "2024-10-29 02:55:00 +1000", + "Min" : 52, + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "Max" : 52 + }, + { + "Max" : 52, + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "Avg" : 52, + "date" : "2024-10-29 02:58:00 +1000" + }, + { + "Max" : 53, + "date" : "2024-10-29 03:00:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 53, + "Min" : 53 + }, + { + "Min" : 52, + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "date" : "2024-10-29 03:03:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "Avg" : 52, + "date" : "2024-10-29 03:05:00 +1000", 
+ "Max" : 52 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 03:15:00 +1000", + "Max" : 53, + "Avg" : 53, + "Min" : 53 + }, + { + "Min" : 53, + "Avg" : 53, + "date" : "2024-10-29 03:18:00 +1000", + "Max" : 53, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 53, + "date" : "2024-10-29 03:21:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 53, + "Max" : 53 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Min" : 55, + "date" : "2024-10-29 03:27:00 +1000", + "Avg" : 55 + }, + { + "date" : "2024-10-29 03:31:00 +1000", + "Max" : 55, + "Min" : 55, + "Avg" : 55, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 03:33:00 +1000", + "Max" : 51, + "Avg" : 51, + "Min" : 51 + }, + { + "Max" : 54.013828277587891, + "Min" : 54.013828277587891, + "date" : "2024-10-29 03:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 54.013828277587891 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 53, + "Max" : 53, + "date" : "2024-10-29 03:39:00 +1000", + "Avg" : 53 + }, + { + "Min" : 61, + "date" : "2024-10-29 03:42:00 +1000", + "Max" : 61, + "source" : "Madhava’s Apple Watch", + "Avg" : 61 + }, + { + "Avg" : 63, + "Max" : 63, + "date" : "2024-10-29 03:48:00 +1000", + "Min" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Min" : 63, + "date" : "2024-10-29 03:52:00 +1000" + }, + { + "Min" : 63, + "date" : "2024-10-29 03:59:00 +1000", + "Max" : 63, + "Avg" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 60, + "date" : "2024-10-29 04:03:00 +1000", + "Min" : 60, + "Max" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 58, + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 04:04:00 +1000", + "Min" : 58 + }, + { + "date" : "2024-10-29 04:06:00 +1000", + "Max" : 66, + "Avg" : 66, + "source" : "Madhava’s Apple Watch", + "Min" : 66 + }, + { + "Max" : 60, + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "date" : "2024-10-29 04:11:00 +1000", + "Avg" : 60 + }, + { + "date" : "2024-10-29 04:16:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "Min" : 58, + "Max" : 58 + }, + { + "Max" : 52, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 04:20:00 +1000", + "Avg" : 52, + "Min" : 52 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 04:30:00 +1000", + "Min" : 52, + "Avg" : 52, + "Max" : 52 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "Min" : 52, + "Avg" : 52, + "date" : "2024-10-29 04:31:00 +1000" + }, + { + "Avg" : 53, + "source" : "Madhava’s Apple Watch", + "Max" : 53, + "Min" : 53, + "date" : "2024-10-29 04:39:00 +1000" + }, + { + "Max" : 53, + "source" : "Madhava’s Apple Watch", + "Avg" : 53, + "Min" : 53, + "date" : "2024-10-29 04:42:00 +1000" + }, + { + "Min" : 52, + "Max" : 52, + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 04:46:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 52, + "date" : "2024-10-29 04:54:00 +1000", + "Min" : 52, + "Max" : 52 + }, + { + "Max" : 51, + "Avg" : 51, + "Min" : 51, + "date" : "2024-10-29 04:56:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 49, + "date" : "2024-10-29 05:00:00 +1000", + "Min" : 49, + "Max" : 49, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 05:06:00 +1000", + "Min" : 52, + "Avg" : 52, + "Max" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s 
Apple Watch", + "date" : "2024-10-29 05:08:00 +1000", + "Max" : 53, + "Min" : 53, + "Avg" : 53 + }, + { + "Min" : 69, + "Avg" : 69, + "date" : "2024-10-29 05:10:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 69 + }, + { + "date" : "2024-10-29 05:19:00 +1000", + "Max" : 65, + "Avg" : 65, + "Min" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 60, + "Min" : 60, + "date" : "2024-10-29 05:20:00 +1000", + "Max" : 60 + }, + { + "Avg" : 58, + "Min" : 58, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 05:27:00 +1000", + "Max" : 58 + }, + { + "Avg" : 61, + "Max" : 61, + "Min" : 61, + "date" : "2024-10-29 05:33:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 57.186943054199226, + "source" : "Madhava’s Apple Watch", + "Max" : 57.186943054199226, + "date" : "2024-10-29 05:35:00 +1000", + "Min" : 57.186943054199226 + }, + { + "date" : "2024-10-29 05:36:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "Avg" : 54, + "Min" : 54 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 05:37:00 +1000", + "Max" : 56, + "Min" : 56, + "Avg" : 56 + }, + { + "Avg" : 55, + "Max" : 55, + "Min" : 55, + "date" : "2024-10-29 05:41:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 55, + "date" : "2024-10-29 05:48:00 +1000", + "Max" : 55, + "source" : "Madhava’s Apple Watch", + "Min" : 55 + }, + { + "date" : "2024-10-29 05:53:00 +1000", + "Max" : 53, + "source" : "Madhava’s Apple Watch", + "Min" : 53, + "Avg" : 53 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 05:58:00 +1000", + "Min" : 52, + "Avg" : 52, + "Max" : 52 + }, + { + "date" : "2024-10-29 06:00:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 53, + "Avg" : 53, + "Max" : 53 + }, + { + "Avg" : 72, + "date" : "2024-10-29 06:06:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "Max" : 72 + }, + { + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 06:09:00 +1000", + "Min" : 52, + "Max" : 52 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "Min" : 52, + "date" : "2024-10-29 06:14:00 +1000", + "Avg" : 52 + }, + { + "Max" : 52, + "Min" : 52, + "Avg" : 52, + "date" : "2024-10-29 06:15:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 06:24:00 +1000", + "Avg" : 52, + "Min" : 52, + "source" : "Madhava’s Apple Watch", + "Max" : 52 + }, + { + "Max" : 52, + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "date" : "2024-10-29 06:25:00 +1000", + "Avg" : 52 + }, + { + "date" : "2024-10-29 06:30:00 +1000", + "Avg" : 52, + "Min" : 52, + "Max" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 52, + "date" : "2024-10-29 06:36:00 +1000", + "Min" : 52, + "source" : "Madhava’s Apple Watch", + "Avg" : 52 + }, + { + "Max" : 53, + "Avg" : 53, + "Min" : 53, + "date" : "2024-10-29 06:37:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 53, + "Avg" : 53, + "date" : "2024-10-29 06:40:00 +1000", + "Max" : 53 + }, + { + "Min" : 56, + "date" : "2024-10-29 07:29:00 +1000", + "Max" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 57, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 07:31:00 +1000", + "Avg" : 57, + "Max" : 57 + }, + { + "date" : "2024-10-29 07:38:00 +1000", + "Max" : 59, + "Min" : 58, + "Avg" : 58.5, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 56, + "source" : "Madhava’s Apple Watch", + "date" : 
"2024-10-29 07:44:00 +1000", + "Max" : 56, + "Avg" : 56 + }, + { + "Min" : 57, + "Max" : 57, + "source" : "Madhava’s Apple Watch", + "Avg" : 57, + "date" : "2024-10-29 07:48:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 89, + "date" : "2024-10-29 07:51:00 +1000", + "Max" : 89, + "Min" : 89 + }, + { + "Avg" : 92.000000000000014, + "source" : "Madhava’s Apple Watch", + "Max" : 92.000000000000014, + "date" : "2024-10-29 08:05:00 +1000", + "Min" : 92.000000000000014 + }, + { + "Min" : 94, + "Avg" : 94, + "Max" : 94, + "date" : "2024-10-29 08:12:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 86, + "date" : "2024-10-29 08:20:00 +1000", + "Min" : 86, + "source" : "Madhava’s Apple Watch", + "Max" : 86 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 84, + "date" : "2024-10-29 08:27:00 +1000", + "Min" : 84, + "Max" : 84 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 65, + "Min" : 65, + "date" : "2024-10-29 08:35:00 +1000", + "Avg" : 65 + }, + { + "Avg" : 76, + "Max" : 76, + "Min" : 76, + "date" : "2024-10-29 08:37:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 08:43:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 70, + "Min" : 70, + "Max" : 70 + }, + { + "Avg" : 74, + "source" : "Madhava’s Apple Watch", + "Min" : 74, + "date" : "2024-10-29 08:54:00 +1000", + "Max" : 74 + }, + { + "Min" : 65, + "date" : "2024-10-29 08:57:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 65, + "Avg" : 65 + }, + { + "Avg" : 72, + "date" : "2024-10-29 09:00:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "Max" : 72 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-10-29 09:07:00 +1000", + "Min" : 63, + "Avg" : 63 + }, + { + "Avg" : 74, + "Max" : 74, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 09:11:00 +1000", + "Min" : 74 + }, + { + "Min" : 69, + "Avg" : 69, + "Max" : 69, + "date" : "2024-10-29 09:16:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 09:21:00 +1000", + "Min" : 66, + "Avg" : 66, + "source" : "Madhava’s Apple Watch", + "Max" : 66 + }, + { + "date" : "2024-10-29 09:25:00 +1000", + "Avg" : 58, + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58 + }, + { + "Max" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 09:30:00 +1000", + "Min" : 59, + "Avg" : 59 + }, + { + "date" : "2024-10-29 09:39:00 +1000", + "Max" : 65, + "Avg" : 65, + "Min" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 62.767238616943359, + "Avg" : 59.9101036381781, + "Min" : 57, + "date" : "2024-10-29 09:40:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 65, + "Avg" : 65, + "date" : "2024-10-29 09:49:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 65 + }, + { + "Max" : 59, + "Avg" : 59, + "date" : "2024-10-29 09:54:00 +1000", + "Min" : 59, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "Max" : 59, + "date" : "2024-10-29 09:57:00 +1000", + "Avg" : 59 + }, + { + "Max" : 59, + "source" : "Madhava’s Apple Watch", + "Avg" : 59, + "date" : "2024-10-29 10:01:00 +1000", + "Min" : 59 + }, + { + "Min" : 71, + "Avg" : 71, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 10:09:00 +1000", + "Max" : 71 + }, + { + "date" : "2024-10-29 10:11:00 +1000", + "Avg" : 67, + "Max" : 67, + "source" : "Madhava’s Apple Watch", + "Min" : 67 + }, + { + "Max" : 102, + "Avg" : 102, + "Min" : 102, + "date" : "2024-10-29 10:14:00 
+1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 89, + "Avg" : 93.60919539940879, + "source" : "Madhava’s Apple Watch", + "Max" : 95, + "date" : "2024-10-29 10:15:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 96, + "Min" : 96, + "Avg" : 96, + "date" : "2024-10-29 10:16:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 77.344262295081975, + "Max" : 79, + "date" : "2024-10-29 10:21:00 +1000", + "Min" : 75 + }, + { + "Min" : 71, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 10:22:00 +1000", + "Max" : 75, + "Avg" : 73.626760563380287 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 74, + "Min" : 74, + "date" : "2024-10-29 10:24:00 +1000", + "Avg" : 74 + }, + { + "Max" : 67, + "date" : "2024-10-29 10:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Avg" : 67 + }, + { + "Avg" : 107, + "date" : "2024-10-29 10:28:00 +1000", + "Min" : 105, + "source" : "Madhava’s Apple Watch", + "Max" : 109 + }, + { + "Min" : 77.000000000000014, + "Avg" : 84.780898876404493, + "date" : "2024-10-29 10:29:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 94 + }, + { + "Min" : 80, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 10:30:00 +1000", + "Max" : 98, + "Avg" : 87.862282878411904 + }, + { + "Avg" : 99.465909090909093, + "source" : "Madhava’s Apple Watch", + "Max" : 127, + "date" : "2024-10-29 10:31:00 +1000", + "Min" : 79 + }, + { + "date" : "2024-10-29 10:32:00 +1000", + "Max" : 80, + "Min" : 77.000000000000014, + "Avg" : 79.200000000000003, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 79, + "Avg" : 80.214285714285722, + "source" : "Madhava’s Apple Watch", + "Max" : 81, + "date" : "2024-10-29 10:33:00 +1000" + }, + { + "Max" : 66, + "Min" : 66, + "Avg" : 66, + "date" : "2024-10-29 10:39:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 10:40:00 +1000", + "Min" : 65, + "source" : "Madhava’s Apple Watch", + "Max" : 65, + "Avg" : 65 + }, + { + "date" : "2024-10-29 10:48:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 68, + "Avg" : 68, + "Min" : 68 + }, + { + "date" : "2024-10-29 10:59:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "Avg" : 59, + "Max" : 59 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 66.894126892089844, + "Min" : 65, + "Max" : 68.788253784179688, + "date" : "2024-10-29 11:00:00 +1000" + }, + { + "Min" : 60, + "source" : "Madhava’s Apple Watch", + "Avg" : 60, + "Max" : 60, + "date" : "2024-10-29 11:04:00 +1000" + }, + { + "Max" : 56, + "source" : "Madhava’s Apple Watch", + "Avg" : 56, + "Min" : 56, + "date" : "2024-10-29 11:10:00 +1000" + }, + { + "Min" : 59, + "Avg" : 59, + "Max" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 11:13:00 +1000" + }, + { + "date" : "2024-10-29 11:15:00 +1000", + "Min" : 60, + "source" : "Madhava’s Apple Watch", + "Avg" : 60, + "Max" : 60 + }, + { + "date" : "2024-10-29 11:24:00 +1000", + "Max" : 62.000000000000007, + "Avg" : 62.000000000000007, + "source" : "Madhava’s Apple Watch", + "Min" : 62.000000000000007 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 11:29:00 +1000", + "Max" : 63, + "Avg" : 63, + "Min" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Avg" : 58, + "date" : "2024-10-29 11:33:00 +1000", + "Min" : 58 + }, + { + "Avg" : 58, + "date" : "2024-10-29 11:39:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 58, + "Max" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 57, + 
"Min" : 57, + "Max" : 57, + "date" : "2024-10-29 11:42:00 +1000" + }, + { + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 11:45:00 +1000", + "Max" : 56 + }, + { + "Min" : 56, + "Max" : 56, + "source" : "Madhava’s Apple Watch", + "Avg" : 56, + "date" : "2024-10-29 11:51:00 +1000" + }, + { + "date" : "2024-10-29 11:57:00 +1000", + "Min" : 77.000000000000014, + "Max" : 77.000000000000014, + "source" : "Madhava’s Apple Watch", + "Avg" : 77.000000000000014 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 64, + "date" : "2024-10-29 12:03:00 +1000", + "Min" : 64, + "Avg" : 64 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 80, + "date" : "2024-10-29 12:07:00 +1000", + "Min" : 80, + "Avg" : 80 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "date" : "2024-10-29 12:16:00 +1000", + "Max" : 59, + "Avg" : 59 + }, + { + "Min" : 63, + "date" : "2024-10-29 12:23:00 +1000", + "Max" : 63, + "Avg" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 65, + "date" : "2024-10-29 12:30:00 +1000", + "Avg" : 65, + "source" : "Madhava’s Apple Watch", + "Min" : 65 + }, + { + "Avg" : 63, + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 12:33:00 +1000", + "Min" : 63 + }, + { + "Min" : 65, + "source" : "Madhava’s Apple Watch", + "Avg" : 65, + "Max" : 65, + "date" : "2024-10-29 12:40:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 66, + "Avg" : 66, + "date" : "2024-10-29 12:42:00 +1000", + "Max" : 66 + }, + { + "Max" : 64, + "Min" : 64, + "Avg" : 64, + "date" : "2024-10-29 12:47:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 68, + "Avg" : 68, + "Max" : 68, + "date" : "2024-10-29 12:50:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 63, + "Min" : 63, + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 12:59:00 +1000" + }, + { + "Min" : 65, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 13:00:00 +1000", + "Max" : 65, + "Avg" : 65 + }, + { + "date" : "2024-10-29 13:05:00 +1000", + "Max" : 66, + "source" : "Madhava’s Apple Watch", + "Avg" : 66, + "Min" : 66 + }, + { + "Min" : 60, + "Avg" : 60, + "Max" : 60, + "date" : "2024-10-29 13:14:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "date" : "2024-10-29 13:19:00 +1000", + "Min" : 60, + "Avg" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 61, + "date" : "2024-10-29 13:20:00 +1000", + "Min" : 61, + "Max" : 61 + }, + { + "date" : "2024-10-29 13:25:00 +1000", + "Min" : 59, + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "Max" : 59 + }, + { + "date" : "2024-10-29 13:32:00 +1000", + "Min" : 61, + "Avg" : 61, + "source" : "Madhava’s Apple Watch", + "Max" : 61 + }, + { + "Avg" : 64, + "date" : "2024-10-29 13:37:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "Max" : 64 + }, + { + "Avg" : 59, + "Max" : 59, + "date" : "2024-10-29 13:44:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 59 + }, + { + "Min" : 59, + "date" : "2024-10-29 13:47:00 +1000", + "Avg" : 59, + "Max" : 59, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 13:52:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 81, + "Min" : 81, + "Max" : 81 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 13:57:00 +1000", + "Max" : 78, + "Min" : 78, + "Avg" : 78 + }, + { + "Avg" : 90, + "Min" : 90, + "Max" : 90, + "date" : "2024-10-29 14:00:00 +1000", + "source" : 
"Madhava’s Apple Watch" + }, + { + "Max" : 84, + "Avg" : 84, + "source" : "Madhava’s Apple Watch", + "Min" : 84, + "date" : "2024-10-29 14:09:00 +1000" + }, + { + "Max" : 87, + "Min" : 87, + "source" : "Madhava’s Apple Watch", + "Avg" : 87, + "date" : "2024-10-29 14:12:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 88, + "Min" : 88, + "Avg" : 88, + "date" : "2024-10-29 14:15:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Max" : 63, + "date" : "2024-10-29 14:23:00 +1000", + "Avg" : 63 + }, + { + "Min" : 67, + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Avg" : 67, + "date" : "2024-10-29 14:27:00 +1000" + }, + { + "Max" : 65, + "date" : "2024-10-29 14:31:00 +1000", + "Min" : 65, + "Avg" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 14:39:00 +1000", + "Max" : 64, + "Avg" : 64, + "Min" : 64, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 64, + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "Max" : 64, + "date" : "2024-10-29 14:41:00 +1000" + }, + { + "Avg" : 66.237007141113281, + "Min" : 66.237007141113281, + "Max" : 66.237007141113281, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 14:46:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 66, + "Max" : 66, + "Min" : 66, + "date" : "2024-10-29 14:47:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 74, + "date" : "2024-10-29 14:51:00 +1000", + "Avg" : 74, + "Min" : 74 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "date" : "2024-10-29 14:59:00 +1000", + "Avg" : 64, + "Max" : 64 + }, + { + "Min" : 73, + "source" : "Madhava’s Apple Watch", + "Avg" : 73, + "date" : "2024-10-29 15:00:00 +1000", + "Max" : 73 + }, + { + "date" : "2024-10-29 15:06:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 66, + "Min" : 66, + "Avg" : 66 + }, + { + "Avg" : 65, + "date" : "2024-10-29 15:12:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 65, + "Min" : 65 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 15:20:00 +1000", + "Avg" : 60, + "Max" : 60, + "Min" : 60 + }, + { + "Avg" : 71, + "date" : "2024-10-29 15:23:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Min" : 71 + }, + { + "date" : "2024-10-29 15:30:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "Max" : 64, + "Avg" : 64 + }, + { + "Avg" : 63, + "Max" : 63, + "date" : "2024-10-29 15:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 63 + }, + { + "Min" : 60, + "source" : "Madhava’s Apple Watch", + "Max" : 63.555355072021477, + "Avg" : 61.777677536010735, + "date" : "2024-10-29 15:39:00 +1000" + }, + { + "Max" : 60, + "date" : "2024-10-29 15:43:00 +1000", + "Avg" : 60, + "source" : "Madhava’s Apple Watch", + "Min" : 60 + }, + { + "date" : "2024-10-29 15:47:00 +1000", + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Min" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "Max" : 65, + "date" : "2024-10-29 15:48:00 +1000", + "Avg" : 65 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 62.000000000000007, + "Avg" : 62.000000000000007, + "date" : "2024-10-29 15:53:00 +1000", + "Max" : 62.000000000000007 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 64, + "Min" : 64, + "Avg" : 64, + "date" : "2024-10-29 15:57:00 +1000" + }, + { + "date" : "2024-10-29 16:04:00 +1000", + "Avg" : 60, + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "Max" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 
16:07:00 +1000", + "Avg" : 65, + "Min" : 65, + "Max" : 65 + }, + { + "date" : "2024-10-29 16:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 66, + "Min" : 66, + "Avg" : 66 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 66, + "Max" : 66, + "date" : "2024-10-29 16:17:00 +1000", + "Avg" : 66 + }, + { + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "date" : "2024-10-29 16:21:00 +1000", + "Min" : 59 + }, + { + "Max" : 97, + "Min" : 88, + "date" : "2024-10-29 16:27:00 +1000", + "Avg" : 92.375, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 79, + "date" : "2024-10-29 16:34:00 +1000", + "Avg" : 79, + "source" : "Madhava’s Apple Watch", + "Max" : 79 + }, + { + "Max" : 66, + "source" : "Madhava’s Apple Watch", + "Avg" : 66, + "Min" : 66, + "date" : "2024-10-29 16:39:00 +1000" + }, + { + "date" : "2024-10-29 16:45:00 +1000", + "Avg" : 99.447058825633093, + "source" : "Madhava’s Apple Watch", + "Min" : 96, + "Max" : 102 + }, + { + "date" : "2024-10-29 16:51:00 +1000", + "Min" : 88, + "source" : "Madhava’s Apple Watch", + "Avg" : 94.881944444444443, + "Max" : 100 + }, + { + "date" : "2024-10-29 16:52:00 +1000", + "Avg" : 91, + "source" : "Madhava’s Apple Watch", + "Min" : 91, + "Max" : 91 + }, + { + "Max" : 102, + "date" : "2024-10-29 17:00:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 102, + "Avg" : 102 + }, + { + "date" : "2024-10-29 17:02:00 +1000", + "Min" : 95, + "Avg" : 95, + "source" : "Madhava’s Apple Watch", + "Max" : 95 + }, + { + "Max" : 79, + "date" : "2024-10-29 17:34:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 79, + "Avg" : 79 + }, + { + "date" : "2024-10-29 17:39:00 +1000", + "Min" : 71, + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Avg" : 71 + }, + { + "Min" : 70, + "Avg" : 70, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 17:40:00 +1000", + "Max" : 70 + }, + { + "Max" : 66, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 17:47:00 +1000", + "Min" : 66, + "Avg" : 66 + }, + { + "Max" : 75.768836975097656, + "date" : "2024-10-29 17:50:00 +1000", + "Min" : 75.768836975097656, + "source" : "Madhava’s Apple Watch", + "Avg" : 75.768836975097656 + }, + { + "date" : "2024-10-29 17:52:00 +1000", + "Avg" : 67, + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Max" : 67 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "Max" : 72, + "date" : "2024-10-29 17:59:00 +1000", + "Min" : 72 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 64, + "Avg" : 64, + "Min" : 64, + "date" : "2024-10-29 18:02:00 +1000" + }, + { + "date" : "2024-10-29 18:07:00 +1000", + "Min" : 67, + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Avg" : 67 + }, + { + "Min" : 72, + "Avg" : 72, + "Max" : 72, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 18:10:00 +1000" + }, + { + "date" : "2024-10-29 18:18:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 72, + "Min" : 72, + "Avg" : 72 + }, + { + "date" : "2024-10-29 18:24:00 +1000", + "Max" : 74, + "Min" : 74, + "Avg" : 74, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 72, + "Min" : 72, + "Avg" : 72, + "date" : "2024-10-29 18:26:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 71, + "date" : "2024-10-29 18:33:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 71, + "Min" : 71 + }, + { + "Min" : 76, + "Max" : 76, + "date" : "2024-10-29 18:37:00 +1000", + "Avg" : 76, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 83, + "source" : "Madhava’s Apple Watch", + "date" : 
"2024-10-29 18:41:00 +1000", + "Avg" : 83, + "Max" : 83 + }, + { + "Max" : 89, + "Avg" : 89, + "source" : "Madhava’s Apple Watch", + "Min" : 89, + "date" : "2024-10-29 18:47:00 +1000" + }, + { + "Min" : 76, + "Avg" : 76, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 18:52:00 +1000", + "Max" : 76 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 18:55:00 +1000", + "Min" : 89, + "Max" : 89, + "Avg" : 89 + }, + { + "Min" : 94, + "Avg" : 94, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 19:00:00 +1000", + "Max" : 94 + }, + { + "Min" : 90, + "source" : "Madhava’s Apple Watch", + "Max" : 90, + "Avg" : 90, + "date" : "2024-10-29 19:07:00 +1000" + }, + { + "date" : "2024-10-29 19:19:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 77.000000000000014, + "Max" : 77.000000000000014, + "Min" : 77.000000000000014 + }, + { + "date" : "2024-10-29 19:21:00 +1000", + "Max" : 75, + "Avg" : 75, + "source" : "Madhava’s Apple Watch", + "Min" : 75 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 80, + "date" : "2024-10-29 19:26:00 +1000", + "Min" : 80, + "Avg" : 80 + }, + { + "date" : "2024-10-29 19:30:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 74, + "Avg" : 74, + "Min" : 74 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 19:32:00 +1000", + "Min" : 73, + "Avg" : 73, + "Max" : 73 + }, + { + "date" : "2024-10-29 19:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Min" : 67, + "Avg" : 67 + }, + { + "date" : "2024-10-29 19:38:00 +1000", + "Avg" : 72.648040771484389, + "Max" : 72.648040771484389, + "Min" : 72.648040771484389, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 70, + "date" : "2024-10-29 19:40:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 70, + "Avg" : 70 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-10-29 19:49:00 +1000", + "Min" : 63, + "Avg" : 63 + }, + { + "Min" : 63, + "date" : "2024-10-29 19:50:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 63, + "Max" : 63 + }, + { + "Max" : 67, + "date" : "2024-10-29 19:55:00 +1000", + "Avg" : 67, + "Min" : 67, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 20:03:00 +1000", + "Max" : 60, + "Avg" : 60, + "Min" : 60 + }, + { + "Min" : 65, + "Max" : 65, + "date" : "2024-10-29 20:05:00 +1000", + "Avg" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 20:13:00 +1000", + "Max" : 64, + "Avg" : 64, + "source" : "Madhava’s Apple Watch", + "Min" : 64 + }, + { + "Max" : 59, + "date" : "2024-10-29 20:19:00 +1000", + "Min" : 59, + "Avg" : 59, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 20:22:00 +1000", + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 58 + }, + { + "date" : "2024-10-29 20:25:00 +1000", + "Avg" : 60, + "Max" : 60, + "Min" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 61, + "Avg" : 61, + "Max" : 61, + "date" : "2024-10-29 20:30:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "date" : "2024-10-29 20:37:00 +1000", + "Min" : 59, + "Avg" : 59 + }, + { + "Max" : 53, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 20:40:00 +1000", + "Avg" : 53, + "Min" : 53 + }, + { + "Min" : 53, + "source" : "Madhava’s Apple Watch", + "Max" : 53, + "Avg" : 53, + "date" : "2024-10-29 20:45:00 +1000" + }, + { + "Avg" : 75, + "Min" : 75, + "Max" : 75, + "date" : "2024-10-29 
20:52:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 21:47:00 +1000", + "Max" : 60.042831420898445, + "Avg" : 60.042831420898445, + "source" : "Madhava’s Apple Watch", + "Min" : 60.042831420898445 + }, + { + "Min" : 57, + "Max" : 57, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 21:48:00 +1000", + "Avg" : 57 + }, + { + "date" : "2024-10-29 21:50:00 +1000", + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "Avg" : 59, + "Max" : 59 + }, + { + "date" : "2024-10-29 21:51:00 +1000", + "Min" : 59, + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "Max" : 59 + }, + { + "Min" : 60, + "Avg" : 60, + "Max" : 60, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 21:58:00 +1000" + }, + { + "Avg" : 59, + "Min" : 59, + "Max" : 59, + "date" : "2024-10-29 22:04:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 59, + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 22:06:00 +1000", + "Max" : 59 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "Avg" : 59, + "Min" : 59, + "date" : "2024-10-29 22:12:00 +1000" + }, + { + "Avg" : 63, + "Max" : 63, + "Min" : 63, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 22:16:00 +1000" + }, + { + "Max" : 63, + "date" : "2024-10-29 22:21:00 +1000", + "Min" : 63, + "Avg" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 61.999999999999993, + "Avg" : 61.999999999999993, + "Max" : 61.999999999999993, + "date" : "2024-10-29 22:26:00 +1000" + }, + { + "Max" : 60, + "date" : "2024-10-29 22:28:00 +1000", + "Min" : 60, + "Avg" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "date" : "2024-10-29 22:30:00 +1000", + "Min" : 60, + "Avg" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "Avg" : 60, + "Min" : 60, + "date" : "2024-10-29 22:35:00 +1000" + }, + { + "Min" : 62.000000000000007, + "date" : "2024-10-29 22:43:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 62.000000000000007, + "Max" : 62.000000000000007 + }, + { + "date" : "2024-10-29 22:49:00 +1000", + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Avg" : 63 + }, + { + "Min" : 66, + "source" : "Madhava’s Apple Watch", + "Avg" : 66, + "Max" : 66, + "date" : "2024-10-29 22:51:00 +1000" + }, + { + "Avg" : 67, + "date" : "2024-10-29 22:55:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Min" : 67 + }, + { + "date" : "2024-10-29 22:56:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 71, + "Min" : 71, + "Max" : 71 + }, + { + "Min" : 68, + "date" : "2024-10-29 23:00:00 +1000", + "Max" : 68, + "source" : "Madhava’s Apple Watch", + "Avg" : 68 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "Min" : 72, + "date" : "2024-10-29 23:05:00 +1000", + "Max" : 72 + }, + { + "Avg" : 79, + "date" : "2024-10-29 23:10:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 79, + "Min" : 79 + }, + { + "date" : "2024-10-29 23:18:00 +1000", + "Max" : 72, + "Min" : 72, + "source" : "Madhava’s Apple Watch", + "Avg" : 72 + }, + { + "Max" : 79, + "date" : "2024-10-29 23:22:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 79, + "Min" : 79 + }, + { + "date" : "2024-10-29 23:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 76, + "Avg" : 76, + "Min" : 76 + }, + { + "Max" : 69, + "Avg" : 69, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 23:27:00 +1000", + "Min" : 69 + }, + { + "date" : 
"2024-10-29 23:31:00 +1000", + "Min" : 68, + "Max" : 68, + "Avg" : 68, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-29 23:35:00 +1000", + "Avg" : 66.213920593261719, + "source" : "Madhava’s Apple Watch", + "Max" : 66.213920593261719, + "Min" : 66.213920593261719 + }, + { + "Max" : 64, + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "Avg" : 64, + "date" : "2024-10-29 23:37:00 +1000" + }, + { + "Max" : 63, + "Avg" : 63, + "date" : "2024-10-29 23:41:00 +1000", + "Min" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 23:45:00 +1000", + "Max" : 60, + "Min" : 60, + "Avg" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-29 23:50:00 +1000", + "Max" : 56, + "Min" : 56, + "Avg" : 56 + }, + { + "date" : "2024-10-29 23:57:00 +1000", + "Min" : 57, + "Avg" : 57, + "Max" : 57, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 00:02:00 +1000", + "Max" : 56, + "Min" : 56, + "source" : "Madhava’s Apple Watch", + "Avg" : 56 + }, + { + "Max" : 57, + "Min" : 57, + "Avg" : 57, + "date" : "2024-10-30 00:05:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Min" : 55, + "date" : "2024-10-30 00:13:00 +1000", + "Avg" : 55 + }, + { + "Min" : 54, + "Avg" : 54, + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "date" : "2024-10-30 00:15:00 +1000" + }, + { + "Min" : 58, + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 00:20:00 +1000", + "Avg" : 58 + }, + { + "Max" : 57, + "Avg" : 57, + "date" : "2024-10-30 00:27:00 +1000", + "Min" : 57, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 58, + "date" : "2024-10-30 00:30:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "Max" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 56, + "Max" : 56, + "date" : "2024-10-30 00:35:00 +1000", + "Avg" : 56 + }, + { + "Min" : 57, + "Avg" : 57, + "date" : "2024-10-30 00:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 57 + }, + { + "Max" : 58, + "Min" : 58, + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 00:45:00 +1000" + }, + { + "Max" : 57, + "Avg" : 57, + "Min" : 57, + "date" : "2024-10-30 00:50:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 00:55:00 +1000", + "Avg" : 56, + "Min" : 56, + "Max" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 57, + "date" : "2024-10-30 00:57:00 +1000", + "Avg" : 57, + "Max" : 57 + }, + { + "date" : "2024-10-30 01:00:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 56, + "Max" : 56, + "Min" : 56 + }, + { + "Min" : 61, + "source" : "Madhava’s Apple Watch", + "Max" : 61, + "Avg" : 61, + "date" : "2024-10-30 01:06:00 +1000" + }, + { + "Min" : 67, + "Avg" : 67, + "date" : "2024-10-30 01:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 67 + }, + { + "date" : "2024-10-30 01:18:00 +1000", + "Avg" : 60, + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "Min" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 01:20:00 +1000", + "Max" : 59, + "Avg" : 59, + "Min" : 59 + }, + { + "Max" : 62.000000000000007, + "Min" : 62.000000000000007, + "date" : "2024-10-30 01:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 62.000000000000007 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 65, + "Min" : 65, + "Max" : 65, + "date" : "2024-10-30 01:27:00 +1000" + }, + { + "Avg" : 63, 
+ "Min" : 63, + "Max" : 63, + "date" : "2024-10-30 01:30:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 61.771903991699219, + "Min" : 61.771903991699219, + "Avg" : 61.771903991699219, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 01:35:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 58, + "date" : "2024-10-30 01:39:00 +1000", + "Avg" : 58 + }, + { + "Avg" : 54, + "Max" : 54, + "Min" : 54, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 01:43:00 +1000" + }, + { + "Max" : 52, + "date" : "2024-10-30 01:46:00 +1000", + "Min" : 52, + "Avg" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "date" : "2024-10-30 01:54:00 +1000", + "Min" : 52, + "Avg" : 52 + }, + { + "Min" : 49, + "Avg" : 49, + "date" : "2024-10-30 01:56:00 +1000", + "Max" : 49, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 51, + "date" : "2024-10-30 01:57:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 51, + "Avg" : 51 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 49, + "Min" : 49, + "date" : "2024-10-30 02:00:00 +1000", + "Avg" : 49 + }, + { + "Min" : 52, + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "date" : "2024-10-30 02:08:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 02:13:00 +1000", + "Max" : 52, + "Min" : 52, + "Avg" : 52 + }, + { + "date" : "2024-10-30 02:15:00 +1000", + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "Max" : 52 + }, + { + "Min" : 53, + "date" : "2024-10-30 02:22:00 +1000", + "Max" : 53, + "Avg" : 53, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 49, + "Max" : 49, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 02:28:00 +1000", + "Min" : 49 + }, + { + "date" : "2024-10-30 02:30:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 49, + "Max" : 49, + "Min" : 49 + }, + { + "Min" : 48, + "date" : "2024-10-30 02:32:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 48, + "Avg" : 48 + }, + { + "date" : "2024-10-30 02:36:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 48, + "Min" : 48, + "Avg" : 48 + }, + { + "date" : "2024-10-30 02:40:00 +1000", + "Max" : 47, + "source" : "Madhava’s Apple Watch", + "Min" : 47, + "Avg" : 47 + }, + { + "Avg" : 49, + "Max" : 49, + "Min" : 49, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 02:46:00 +1000" + }, + { + "date" : "2024-10-30 02:52:00 +1000", + "Max" : 52, + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "Avg" : 52 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 02:59:00 +1000", + "Max" : 50, + "Min" : 50, + "Avg" : 50 + }, + { + "Max" : 50, + "Min" : 50, + "Avg" : 50, + "date" : "2024-10-30 03:00:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 52, + "Min" : 52, + "Max" : 52, + "date" : "2024-10-30 03:08:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 50, + "Avg" : 50, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 03:10:00 +1000", + "Max" : 50 + }, + { + "date" : "2024-10-30 03:15:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 50, + "Avg" : 50, + "Max" : 50 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 03:20:00 +1000", + "Max" : 49, + "Min" : 49, + "Avg" : 49 + }, + { + "date" : "2024-10-30 03:25:00 +1000", + "Max" : 48, + "Min" : 48, + "source" : "Madhava’s Apple Watch", + "Avg" : 48 + }, + { + "date" : "2024-10-30 03:31:00 +1000", + "Min" : 47, + "source" 
: "Madhava’s Apple Watch", + "Avg" : 49, + "Max" : 51 + }, + { + "Max" : 55, + "date" : "2024-10-30 03:35:00 +1000", + "Min" : 54.400707244873054, + "Avg" : 54.700353622436523, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 60, + "Max" : 60, + "Avg" : 60, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 03:44:00 +1000" + }, + { + "Max" : 53, + "Avg" : 53, + "Min" : 53, + "date" : "2024-10-30 03:47:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 03:50:00 +1000", + "Min" : 54, + "Avg" : 54 + }, + { + "Avg" : 55, + "date" : "2024-10-30 03:55:00 +1000", + "Max" : 55, + "source" : "Madhava’s Apple Watch", + "Min" : 55 + }, + { + "Avg" : 57, + "date" : "2024-10-30 04:01:00 +1000", + "Max" : 59, + "source" : "Madhava’s Apple Watch", + "Min" : 55 + }, + { + "Min" : 57, + "source" : "Madhava’s Apple Watch", + "Avg" : 57, + "date" : "2024-10-30 04:09:00 +1000", + "Max" : 57 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "Min" : 60, + "Avg" : 60, + "date" : "2024-10-30 04:10:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "Min" : 52, + "Avg" : 52, + "date" : "2024-10-30 04:16:00 +1000" + }, + { + "Min" : 52, + "Avg" : 52, + "date" : "2024-10-30 04:22:00 +1000", + "Max" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 04:28:00 +1000", + "Min" : 54, + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "Avg" : 54 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 04:30:00 +1000", + "Max" : 55, + "Min" : 55, + "Avg" : 55 + }, + { + "date" : "2024-10-30 04:38:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 54, + "Max" : 54, + "Avg" : 54 + }, + { + "Min" : 53, + "Avg" : 53, + "date" : "2024-10-30 04:41:00 +1000", + "Max" : 53, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 53, + "Avg" : 53, + "Max" : 53, + "date" : "2024-10-30 04:44:00 +1000" + }, + { + "Max" : 55, + "Avg" : 55, + "Min" : 55, + "date" : "2024-10-30 04:45:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 52, + "date" : "2024-10-30 04:53:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "Avg" : 52 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 04:56:00 +1000", + "Avg" : 52, + "Max" : 52, + "Min" : 52 + }, + { + "Max" : 54, + "Avg" : 54, + "Min" : 54, + "date" : "2024-10-30 05:01:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 05:06:00 +1000", + "Max" : 55, + "Min" : 55, + "Avg" : 55, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 55, + "date" : "2024-10-30 05:11:00 +1000", + "Min" : 55, + "Max" : 55 + }, + { + "Avg" : 55, + "Max" : 55, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 05:14:00 +1000", + "Min" : 55 + }, + { + "Avg" : 46.000000000000007, + "date" : "2024-10-30 05:19:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 46.000000000000007, + "Min" : 46.000000000000007 + }, + { + "date" : "2024-10-30 05:21:00 +1000", + "Max" : 53, + "Min" : 53, + "source" : "Madhava’s Apple Watch", + "Avg" : 53 + }, + { + "Min" : 55, + "Max" : 55, + "date" : "2024-10-30 05:30:00 +1000", + "Avg" : 55, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 55, + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "Max" : 55, + "date" : "2024-10-30 05:31:00 +1000" + }, + { + "Avg" : 57.648590087890625, + "date" : "2024-10-30 05:35:00 +1000", + "Max" : 
57.648590087890625, + "source" : "Madhava’s Apple Watch", + "Min" : 57.648590087890625 + }, + { + "Max" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 05:36:00 +1000", + "Min" : 56 + }, + { + "date" : "2024-10-30 05:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 54, + "Max" : 56, + "Avg" : 55.000000000000007 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 56, + "date" : "2024-10-30 05:48:00 +1000", + "Avg" : 56, + "Min" : 56 + }, + { + "date" : "2024-10-30 05:54:00 +1000", + "Max" : 53, + "Min" : 53, + "source" : "Madhava’s Apple Watch", + "Avg" : 53 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Avg" : 55, + "Min" : 55, + "date" : "2024-10-30 05:57:00 +1000" + }, + { + "Max" : 53, + "Avg" : 53, + "Min" : 53, + "date" : "2024-10-30 06:04:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 06:08:00 +1000", + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "Max" : 52 + }, + { + "Avg" : 51, + "source" : "Madhava’s Apple Watch", + "Max" : 51, + "date" : "2024-10-30 06:11:00 +1000", + "Min" : 51 + }, + { + "Min" : 55, + "date" : "2024-10-30 06:13:00 +1000", + "Avg" : 55, + "source" : "Madhava’s Apple Watch", + "Max" : 55 + }, + { + "Min" : 54, + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "Avg" : 54, + "date" : "2024-10-30 06:15:00 +1000" + }, + { + "date" : "2024-10-30 06:20:00 +1000", + "Max" : 49, + "source" : "Madhava’s Apple Watch", + "Min" : 49, + "Avg" : 49 + }, + { + "date" : "2024-10-30 06:28:00 +1000", + "Max" : 59, + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "Avg" : 59 + }, + { + "Min" : 58, + "Avg" : 58, + "Max" : 58, + "date" : "2024-10-30 06:31:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 54, + "Avg" : 54, + "Max" : 54, + "date" : "2024-10-30 06:36:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 06:41:00 +1000", + "Avg" : 53, + "Max" : 53, + "Min" : 53, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 47, + "Max" : 47, + "Min" : 47, + "date" : "2024-10-30 06:49:00 +1000" + }, + { + "Max" : 50, + "date" : "2024-10-30 06:52:00 +1000", + "Min" : 50, + "Avg" : 50, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 52, + "date" : "2024-10-30 06:59:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 52, + "Min" : 52 + }, + { + "Min" : 68, + "Max" : 68, + "date" : "2024-10-30 07:04:00 +1000", + "Avg" : 68, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 57, + "Avg" : 57, + "Min" : 57, + "date" : "2024-10-30 07:08:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 07:11:00 +1000", + "Min" : 52, + "Max" : 52, + "Avg" : 52 + }, + { + "Max" : 56, + "Min" : 56, + "date" : "2024-10-30 07:16:00 +1000", + "Avg" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 07:20:00 +1000", + "Avg" : 49, + "Min" : 49, + "Max" : 49 + }, + { + "Max" : 48, + "date" : "2024-10-30 07:22:00 +1000", + "Avg" : 48, + "source" : "Madhava’s Apple Watch", + "Min" : 48 + }, + { + "date" : "2024-10-30 07:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 49, + "Max" : 49, + "Min" : 49 + }, + { + "Max" : 82, + "Min" : 82, + "date" : "2024-10-30 07:35:00 +1000", + "Avg" : 82, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 07:45:00 +1000", + "Max" : 75, + "Min" : 75, + "Avg" : 75, + "source" : "Madhava’s Apple 
Watch" + }, + { + "date" : "2024-10-30 07:49:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 84, + "Avg" : 82.5, + "Min" : 81 + }, + { + "date" : "2024-10-30 07:54:00 +1000", + "Min" : 74, + "Avg" : 74, + "source" : "Madhava’s Apple Watch", + "Max" : 74 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 76, + "Avg" : 76, + "date" : "2024-10-30 07:58:00 +1000", + "Min" : 76 + }, + { + "date" : "2024-10-30 08:04:00 +1000", + "Min" : 69, + "source" : "Madhava’s Apple Watch", + "Max" : 69, + "Avg" : 69 + }, + { + "Avg" : 75, + "source" : "Madhava’s Apple Watch", + "Min" : 75, + "date" : "2024-10-30 08:08:00 +1000", + "Max" : 75 + }, + { + "date" : "2024-10-30 08:12:00 +1000", + "Min" : 68, + "Max" : 68, + "source" : "Madhava’s Apple Watch", + "Avg" : 68 + }, + { + "date" : "2024-10-30 08:18:00 +1000", + "Max" : 73, + "Min" : 72, + "source" : "Madhava’s Apple Watch", + "Avg" : 72.5 + }, + { + "Max" : 67, + "Avg" : 67, + "date" : "2024-10-30 08:21:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67 + }, + { + "Avg" : 65, + "date" : "2024-10-30 08:27:00 +1000", + "Min" : 65, + "Max" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 59, + "Max" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 08:33:00 +1000", + "Avg" : 59 + }, + { + "Avg" : 58, + "Min" : 58, + "date" : "2024-10-30 08:40:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 08:42:00 +1000", + "Min" : 64, + "Avg" : 64, + "Max" : 64 + }, + { + "Max" : 69, + "Avg" : 69, + "Min" : 69, + "date" : "2024-10-30 08:47:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 08:49:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "Max" : 60, + "Avg" : 60 + }, + { + "Avg" : 56, + "date" : "2024-10-30 08:54:00 +1000", + "Max" : 56, + "Min" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 58, + "date" : "2024-10-30 08:56:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Avg" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "date" : "2024-10-30 09:00:00 +1000", + "Avg" : 60, + "Min" : 60 + }, + { + "date" : "2024-10-30 09:18:00 +1000", + "Avg" : 58, + "Max" : 58, + "Min" : 58, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 61, + "Min" : 61, + "Avg" : 61, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 09:19:00 +1000" + }, + { + "Avg" : 62.000000000000007, + "date" : "2024-10-30 09:29:00 +1000", + "Max" : 62.000000000000007, + "Min" : 62.000000000000007, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 09:33:00 +1000", + "Min" : 83, + "source" : "Madhava’s Apple Watch", + "Avg" : 83, + "Max" : 83 + }, + { + "Min" : 89, + "Max" : 89, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 09:35:00 +1000", + "Avg" : 89 + }, + { + "Min" : 83, + "Avg" : 83, + "date" : "2024-10-30 09:38:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 83 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 09:44:00 +1000", + "Min" : 66, + "Max" : 66, + "Avg" : 66 + }, + { + "Avg" : 65.012847900390625, + "Min" : 65.012847900390625, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 09:45:00 +1000", + "Max" : 65.012847900390625 + }, + { + "Avg" : 58, + "Min" : 58, + "date" : "2024-10-30 09:49:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 09:50:00 +1000", + "Max" : 58, + "Min" : 58, + "Avg" : 58, + "source" : "Madhava’s 
Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 58, + "Avg" : 58, + "date" : "2024-10-30 09:54:00 +1000" + }, + { + "Min" : 59, + "date" : "2024-10-30 09:55:00 +1000", + "Max" : 59, + "Avg" : 59, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 63, + "date" : "2024-10-30 10:02:00 +1000", + "Max" : 63, + "Min" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 10:10:00 +1000", + "Max" : 66, + "Avg" : 66, + "Min" : 66 + }, + { + "Min" : 59, + "date" : "2024-10-30 10:14:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 59, + "Max" : 59 + }, + { + "Avg" : 60, + "Min" : 60, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 10:16:00 +1000", + "Max" : 60 + }, + { + "Max" : 63, + "date" : "2024-10-30 10:20:00 +1000", + "Min" : 63, + "source" : "Madhava’s Apple Watch", + "Avg" : 63 + }, + { + "Min" : 63, + "date" : "2024-10-30 10:21:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Avg" : 63 + }, + { + "Max" : 65, + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "Avg" : 65, + "date" : "2024-10-30 10:25:00 +1000" + }, + { + "Avg" : 58, + "date" : "2024-10-30 10:33:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58 + }, + { + "Max" : 59, + "Min" : 59, + "date" : "2024-10-30 10:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 59 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 57, + "Min" : 57, + "Avg" : 57, + "date" : "2024-10-30 10:44:00 +1000" + }, + { + "Min" : 54, + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "Avg" : 54, + "date" : "2024-10-30 10:46:00 +1000" + }, + { + "Max" : 58, + "date" : "2024-10-30 10:53:00 +1000", + "Min" : 58, + "Avg" : 58, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 10:58:00 +1000", + "Avg" : 66, + "Max" : 66, + "Min" : 66 + }, + { + "date" : "2024-10-30 10:59:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 76, + "Avg" : 76, + "Max" : 76 + }, + { + "Min" : 89, + "source" : "Madhava’s Apple Watch", + "Max" : 89, + "date" : "2024-10-30 11:01:00 +1000", + "Avg" : 89 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "date" : "2024-10-30 11:08:00 +1000", + "Max" : 72, + "Min" : 72 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 64, + "Avg" : 64, + "date" : "2024-10-30 11:10:00 +1000", + "Min" : 64 + }, + { + "Max" : 68, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 11:18:00 +1000", + "Avg" : 68, + "Min" : 68 + }, + { + "Avg" : 69, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 11:25:00 +1000", + "Min" : 69, + "Max" : 69 + }, + { + "Avg" : 69, + "source" : "Madhava’s Apple Watch", + "Min" : 69, + "date" : "2024-10-30 11:32:00 +1000", + "Max" : 69 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "Max" : 72, + "date" : "2024-10-30 11:39:00 +1000", + "Avg" : 72 + }, + { + "Max" : 69, + "source" : "Madhava’s Apple Watch", + "Min" : 69, + "Avg" : 69, + "date" : "2024-10-30 11:40:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 76, + "Max" : 76, + "Avg" : 76, + "date" : "2024-10-30 11:46:00 +1000" + }, + { + "date" : "2024-10-30 11:54:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 70, + "Min" : 70, + "Avg" : 70 + }, + { + "Avg" : 71, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 11:55:00 +1000", + "Max" : 71, + "Min" : 71 + }, + { + "date" : "2024-10-30 12:03:00 +1000", + "source" : "Madhava’s Apple 
Watch", + "Max" : 69, + "Avg" : 69, + "Min" : 69 + }, + { + "Avg" : 75, + "Max" : 75, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 12:08:00 +1000", + "Min" : 75 + }, + { + "Min" : 78, + "source" : "Madhava’s Apple Watch", + "Max" : 78, + "Avg" : 78, + "date" : "2024-10-30 12:11:00 +1000" + }, + { + "Max" : 73, + "Avg" : 73, + "date" : "2024-10-30 12:15:00 +1000", + "Min" : 73, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 73, + "date" : "2024-10-30 12:23:00 +1000", + "Max" : 73, + "source" : "Madhava’s Apple Watch", + "Avg" : 73 + }, + { + "date" : "2024-10-30 12:27:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 73, + "Max" : 73, + "Min" : 73 + }, + { + "Avg" : 68, + "Min" : 68, + "source" : "Madhava’s Apple Watch", + "Max" : 68, + "date" : "2024-10-30 12:34:00 +1000" + }, + { + "Avg" : 65, + "date" : "2024-10-30 12:40:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 65, + "Min" : 65 + }, + { + "Avg" : 67, + "Min" : 67, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 12:43:00 +1000", + "Max" : 67 + }, + { + "date" : "2024-10-30 12:47:00 +1000", + "Min" : 72, + "Avg" : 72, + "source" : "Madhava’s Apple Watch", + "Max" : 72 + }, + { + "Max" : 101, + "Min" : 101, + "source" : "Madhava’s Apple Watch", + "Avg" : 101, + "date" : "2024-10-30 12:48:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 89, + "Avg" : 89, + "Max" : 89, + "date" : "2024-10-30 12:52:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 69, + "date" : "2024-10-30 12:55:00 +1000", + "Min" : 69, + "Max" : 69 + }, + { + "Min" : 63, + "Avg" : 63, + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 13:01:00 +1000" + }, + { + "Min" : 65, + "Avg" : 65, + "Max" : 65, + "date" : "2024-10-30 13:08:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 63, + "date" : "2024-10-30 13:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Avg" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 61, + "date" : "2024-10-30 13:19:00 +1000", + "Avg" : 61, + "Min" : 61 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 61, + "Min" : 61, + "Max" : 61, + "date" : "2024-10-30 13:21:00 +1000" + }, + { + "Min" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-10-30 13:28:00 +1000", + "Avg" : 63 + }, + { + "Max" : 63, + "date" : "2024-10-30 13:31:00 +1000", + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "Min" : 63 + }, + { + "date" : "2024-10-30 13:39:00 +1000", + "Avg" : 74, + "Min" : 74, + "source" : "Madhava’s Apple Watch", + "Max" : 74 + }, + { + "date" : "2024-10-30 13:42:00 +1000", + "Max" : 60, + "Avg" : 60, + "Min" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 13:48:00 +1000", + "Avg" : 62.000000000000007, + "source" : "Madhava’s Apple Watch", + "Max" : 62.000000000000007, + "Min" : 62.000000000000007 + }, + { + "date" : "2024-10-30 13:53:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "Max" : 60, + "Avg" : 60 + }, + { + "Min" : 60, + "Avg" : 60, + "date" : "2024-10-30 13:57:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 60 + }, + { + "Min" : 58, + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "date" : "2024-10-30 14:03:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 14:05:00 +1000", + "Max" : 60, + "Min" : 60, + "Avg" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 65, + "Min" : 65, + "Max" : 65, + "date" : "2024-10-30 14:20:00 
+1000" + }, + { + "Min" : 65, + "Max" : 65, + "date" : "2024-10-30 14:23:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 65 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 14:28:00 +1000", + "Max" : 67, + "Min" : 67, + "Avg" : 67 + }, + { + "Avg" : 67, + "Max" : 67, + "Min" : 67, + "date" : "2024-10-30 14:32:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 14:37:00 +1000", + "Min" : 57, + "source" : "Madhava’s Apple Watch", + "Avg" : 57, + "Max" : 57 + }, + { + "date" : "2024-10-30 14:40:00 +1000", + "Min" : 67, + "Avg" : 67, + "source" : "Madhava’s Apple Watch", + "Max" : 67 + }, + { + "Min" : 64, + "source" : "Madhava’s Apple Watch", + "Max" : 64, + "date" : "2024-10-30 14:47:00 +1000", + "Avg" : 64 + }, + { + "Max" : 64, + "Min" : 64, + "Avg" : 64, + "date" : "2024-10-30 14:53:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 68, + "Min" : 68, + "Avg" : 68, + "date" : "2024-10-30 14:55:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 15:02:00 +1000", + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "Avg" : 63, + "Min" : 63 + }, + { + "Min" : 56, + "Max" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 15:08:00 +1000", + "Avg" : 56 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 68, + "Min" : 68, + "date" : "2024-10-30 15:13:00 +1000", + "Avg" : 68 + }, + { + "date" : "2024-10-30 15:16:00 +1000", + "Max" : 63, + "Min" : 63, + "Avg" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 68, + "Min" : 68, + "Max" : 68, + "date" : "2024-10-30 15:22:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 58, + "date" : "2024-10-30 15:26:00 +1000", + "Avg" : 58 + }, + { + "Min" : 65, + "Max" : 65, + "Avg" : 65, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 15:32:00 +1000" + }, + { + "Avg" : 64, + "date" : "2024-10-30 15:37:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "Max" : 64 + }, + { + "Min" : 63, + "source" : "Madhava’s Apple Watch", + "Avg" : 63, + "date" : "2024-10-30 15:44:00 +1000", + "Max" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-10-30 15:45:00 +1000", + "Min" : 63, + "Avg" : 63 + }, + { + "Min" : 62.000000000000007, + "Max" : 62.000000000000007, + "source" : "Madhava’s Apple Watch", + "Avg" : 62.000000000000007, + "date" : "2024-10-30 15:50:00 +1000" + }, + { + "date" : "2024-10-30 15:56:00 +1000", + "Avg" : 59, + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "Max" : 59 + }, + { + "Max" : 56, + "source" : "Madhava’s Apple Watch", + "Min" : 56, + "Avg" : 56, + "date" : "2024-10-30 16:00:00 +1000" + }, + { + "Avg" : 61, + "date" : "2024-10-30 16:08:00 +1000", + "Max" : 61, + "Min" : 61, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 60, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 16:13:00 +1000", + "Min" : 60, + "Avg" : 60 + }, + { + "Min" : 66, + "Avg" : 66, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 16:15:00 +1000", + "Max" : 66 + }, + { + "Avg" : 65, + "Max" : 65, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 16:23:00 +1000", + "Min" : 65 + }, + { + "Max" : 65, + "source" : "Madhava’s Apple Watch", + "Avg" : 65, + "Min" : 65, + "date" : "2024-10-30 16:28:00 +1000" + }, + { + "Min" : 57, + "source" : "Madhava’s Apple Watch", + "Max" : 57, + "date" : "2024-10-30 16:37:00 +1000", + "Avg" : 57 + }, + { + "Max" : 78, + "source" : 
"Madhava’s Apple Watch", + "date" : "2024-10-30 16:44:00 +1000", + "Min" : 78, + "Avg" : 78 + }, + { + "date" : "2024-10-30 16:50:00 +1000", + "Min" : 59, + "Max" : 59, + "Avg" : 59, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 61, + "source" : "Madhava’s Apple Watch", + "Max" : 61, + "date" : "2024-10-30 16:52:00 +1000", + "Avg" : 61 + }, + { + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 16:58:00 +1000", + "Max" : 56, + "Min" : 56 + }, + { + "Max" : 56, + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 17:01:00 +1000" + }, + { + "date" : "2024-10-30 17:08:00 +1000", + "Min" : 52, + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "Avg" : 52 + }, + { + "Avg" : 51, + "Max" : 51, + "Min" : 51, + "date" : "2024-10-30 17:10:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 55, + "Max" : 55, + "source" : "Madhava’s Apple Watch", + "Avg" : 55, + "date" : "2024-10-30 17:16:00 +1000" + }, + { + "date" : "2024-10-30 17:23:00 +1000", + "Max" : 81, + "Min" : 81, + "Avg" : 81, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 17:25:00 +1000", + "Avg" : 50, + "Max" : 50, + "source" : "Madhava’s Apple Watch", + "Min" : 50 + }, + { + "Max" : 55, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 17:33:00 +1000", + "Avg" : 55, + "Min" : 55 + }, + { + "Max" : 56, + "Min" : 56, + "source" : "Madhava’s Apple Watch", + "Avg" : 56, + "date" : "2024-10-30 17:35:00 +1000" + }, + { + "date" : "2024-10-30 17:42:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 56, + "Min" : 56, + "Avg" : 56 + }, + { + "date" : "2024-10-30 17:46:00 +1000", + "Max" : 56, + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-30 17:47:00 +1000", + "Min" : 60.134883880615234, + "source" : "Madhava’s Apple Watch", + "Avg" : 60.134883880615234, + "Max" : 60.134883880615234 + }, + { + "date" : "2024-10-30 17:48:00 +1000", + "Min" : 57.757545471191413, + "Avg" : 57.757545471191413, + "source" : "Madhava’s Apple Watch", + "Max" : 57.757545471191413 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 56, + "Max" : 56, + "Avg" : 56, + "date" : "2024-10-30 17:51:00 +1000" + }, + { + "Avg" : 55, + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "Max" : 55, + "date" : "2024-10-30 17:55:00 +1000" + }, + { + "date" : "2024-10-30 18:00:00 +1000", + "Min" : 51, + "Max" : 51, + "Avg" : 51, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 55, + "date" : "2024-10-30 18:07:00 +1000", + "Avg" : 55, + "source" : "Madhava’s Apple Watch", + "Min" : 55 + }, + { + "Min" : 63, + "Avg" : 63, + "Max" : 63, + "date" : "2024-10-30 18:12:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 74, + "date" : "2024-10-30 18:17:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 74, + "Avg" : 74 + }, + { + "Max" : 80, + "Avg" : 80, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 18:25:00 +1000", + "Min" : 80 + }, + { + "Avg" : 79, + "Min" : 79, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 18:34:00 +1000", + "Max" : 79 + }, + { + "date" : "2024-10-30 18:38:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 86, + "Max" : 86, + "Min" : 86 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 86, + "date" : "2024-10-30 18:40:00 +1000", + "Min" : 86, + "Max" : 86 + }, + { + "Max" : 87, + "date" : "2024-10-30 18:56:00 +1000", + "Min" : 87, + "Avg" : 87, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple 
Watch", + "date" : "2024-10-30 19:03:00 +1000", + "Avg" : 83, + "Min" : 83, + "Max" : 83 + }, + { + "Min" : 92.000000000000014, + "date" : "2024-10-30 19:09:00 +1000", + "Max" : 92.000000000000014, + "Avg" : 92.000000000000014, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 96, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 19:10:00 +1000", + "Max" : 96, + "Avg" : 96 + }, + { + "date" : "2024-10-30 19:18:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 86, + "Avg" : 86, + "Min" : 86 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 83, + "Min" : 83, + "date" : "2024-10-30 19:22:00 +1000", + "Avg" : 83 + }, + { + "Min" : 86, + "Avg" : 86, + "Max" : 86, + "date" : "2024-10-30 19:26:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 93, + "date" : "2024-10-30 19:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 93, + "Min" : 93 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 83, + "Max" : 83, + "Avg" : 83, + "date" : "2024-10-30 19:48:00 +1000" + }, + { + "Min" : 78.41839599609375, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 19:54:00 +1000", + "Max" : 80, + "Avg" : 79.209197998046875 + }, + { + "date" : "2024-10-30 19:55:00 +1000", + "Max" : 80, + "Min" : 80, + "source" : "Madhava’s Apple Watch", + "Avg" : 80 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 19:56:00 +1000", + "Max" : 79, + "Avg" : 79, + "Min" : 79 + }, + { + "Min" : 80, + "Max" : 80, + "Avg" : 80, + "date" : "2024-10-30 20:01:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 74, + "date" : "2024-10-30 20:09:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 74, + "Avg" : 74 + }, + { + "Min" : 72, + "source" : "Madhava’s Apple Watch", + "Max" : 72, + "Avg" : 72, + "date" : "2024-10-30 20:12:00 +1000" + }, + { + "Max" : 85, + "source" : "Madhava’s Apple Watch", + "Min" : 85, + "date" : "2024-10-30 20:18:00 +1000", + "Avg" : 85 + }, + { + "Avg" : 81, + "source" : "Madhava’s Apple Watch", + "Max" : 81, + "date" : "2024-10-30 20:20:00 +1000", + "Min" : 81 + }, + { + "date" : "2024-10-30 20:28:00 +1000", + "Avg" : 73, + "Min" : 73, + "source" : "Madhava’s Apple Watch", + "Max" : 73 + }, + { + "Max" : 73, + "source" : "Madhava’s Apple Watch", + "Min" : 73, + "date" : "2024-10-30 20:33:00 +1000", + "Avg" : 73 + }, + { + "Min" : 72, + "Avg" : 72, + "source" : "Madhava’s Apple Watch", + "Max" : 72, + "date" : "2024-10-30 20:36:00 +1000" + }, + { + "Min" : 78, + "source" : "Madhava’s Apple Watch", + "Max" : 78, + "Avg" : 78, + "date" : "2024-10-30 20:40:00 +1000" + }, + { + "Max" : 80, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 20:45:00 +1000", + "Min" : 80, + "Avg" : 80 + }, + { + "date" : "2024-10-30 20:50:00 +1000", + "Min" : 73, + "Avg" : 73, + "Max" : 73, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 84, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 20:55:00 +1000", + "Min" : 84, + "Max" : 84 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 84, + "Avg" : 84, + "date" : "2024-10-30 21:02:00 +1000", + "Max" : 84 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 80, + "date" : "2024-10-30 21:06:00 +1000", + "Min" : 80, + "Max" : 80 + }, + { + "date" : "2024-10-30 21:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 76, + "Min" : 76, + "Avg" : 76 + }, + { + "Max" : 77.000000000000014, + "Min" : 77.000000000000014, + "source" : "Madhava’s Apple Watch", + "Avg" : 77.000000000000014, + "date" : "2024-10-30 21:15:00 +1000" + }, + { + 
"date" : "2024-10-30 21:18:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Min" : 71, + "Avg" : 71 + }, + { + "Avg" : 72, + "Max" : 72, + "Min" : 72, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 21:20:00 +1000" + }, + { + "Max" : 79, + "date" : "2024-10-30 21:26:00 +1000", + "Avg" : 79, + "Min" : 79, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 68.562578611951949, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 22:10:00 +1000", + "Min" : 66.871124267578125, + "Max" : 69 + }, + { + "date" : "2024-10-30 22:12:00 +1000", + "Max" : 64, + "Min" : 64, + "source" : "Madhava’s Apple Watch", + "Avg" : 64 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Min" : 67, + "date" : "2024-10-30 22:18:00 +1000", + "Avg" : 67 + }, + { + "Max" : 63, + "date" : "2024-10-30 22:20:00 +1000", + "Min" : 63, + "source" : "Madhava’s Apple Watch", + "Avg" : 63 + }, + { + "Avg" : 63, + "date" : "2024-10-30 22:25:00 +1000", + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "Min" : 63 + }, + { + "date" : "2024-10-30 22:34:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "Min" : 58 + }, + { + "date" : "2024-10-30 22:35:00 +1000", + "Min" : 58, + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Avg" : 58 + }, + { + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 22:40:00 +1000", + "Min" : 54, + "Avg" : 54 + }, + { + "Max" : 55, + "Avg" : 55, + "Min" : 55, + "date" : "2024-10-30 22:49:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 56, + "Avg" : 56, + "date" : "2024-10-30 22:50:00 +1000", + "Max" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 58, + "date" : "2024-10-30 22:56:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58 + }, + { + "date" : "2024-10-30 23:02:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 57, + "Min" : 57, + "Avg" : 57 + }, + { + "Avg" : 58, + "date" : "2024-10-30 23:07:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 58 + }, + { + "Avg" : 55, + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "date" : "2024-10-30 23:10:00 +1000", + "Min" : 55 + }, + { + "date" : "2024-10-30 23:17:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "Max" : 64, + "Avg" : 64 + }, + { + "Avg" : 64.500000000000014, + "date" : "2024-10-30 23:21:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Min" : 62.000000000000007 + }, + { + "date" : "2024-10-30 23:28:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 68, + "Avg" : 68, + "Min" : 68 + }, + { + "Avg" : 66, + "Max" : 66, + "Min" : 66, + "date" : "2024-10-30 23:31:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 65.090805053710938, + "Max" : 65.090805053710938, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 23:35:00 +1000", + "Min" : 65.090805053710938 + }, + { + "Min" : 66, + "date" : "2024-10-30 23:36:00 +1000", + "Max" : 66, + "Avg" : 66, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-30 23:40:00 +1000", + "Max" : 63, + "Min" : 63 + }, + { + "Min" : 63, + "date" : "2024-10-30 23:46:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Avg" : 63 + }, + { + "date" : "2024-10-30 23:51:00 +1000", + "Min" : 59, + "Avg" : 59, + "Max" : 59, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 63, + "Avg" : 63, + "date" : "2024-10-30 23:54:00 +1000", + "Min" : 63, + "source" : "Madhava’s Apple 
Watch" + }, + { + "Max" : 59, + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "date" : "2024-10-30 23:57:00 +1000" + }, + { + "Min" : 59, + "Max" : 59, + "date" : "2024-10-31 00:01:00 +1000", + "Avg" : 59, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 00:05:00 +1000", + "Min" : 58, + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "Max" : 58 + }, + { + "date" : "2024-10-31 00:13:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Min" : 55, + "Avg" : 55 + }, + { + "Min" : 56, + "Avg" : 56, + "Max" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 00:17:00 +1000" + }, + { + "Avg" : 56, + "Max" : 56, + "date" : "2024-10-31 00:21:00 +1000", + "Min" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 55, + "source" : "Madhava’s Apple Watch", + "Avg" : 55, + "Max" : 55, + "date" : "2024-10-31 00:22:00 +1000" + }, + { + "Min" : 55, + "date" : "2024-10-31 00:26:00 +1000", + "Max" : 55, + "source" : "Madhava’s Apple Watch", + "Avg" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 55, + "date" : "2024-10-31 00:32:00 +1000", + "Max" : 55, + "Min" : 55 + }, + { + "date" : "2024-10-31 00:38:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 58, + "Avg" : 58, + "Max" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 00:40:00 +1000", + "Avg" : 58, + "Min" : 58, + "Max" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 00:45:00 +1000", + "Min" : 58, + "Avg" : 58, + "Max" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 00:51:00 +1000", + "Max" : 57, + "Avg" : 56.5, + "Min" : 56 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 00:59:00 +1000", + "Max" : 66, + "Avg" : 66, + "Min" : 66 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "Min" : 59, + "Avg" : 59, + "date" : "2024-10-31 01:05:00 +1000" + }, + { + "date" : "2024-10-31 01:08:00 +1000", + "Max" : 60, + "source" : "Madhava’s Apple Watch", + "Avg" : 60, + "Min" : 60 + }, + { + "Min" : 60, + "Avg" : 60, + "Max" : 60, + "date" : "2024-10-31 01:11:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "Min" : 59, + "date" : "2024-10-31 01:16:00 +1000" + }, + { + "Max" : 53, + "date" : "2024-10-31 01:22:00 +1000", + "Min" : 53, + "Avg" : 53, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 01:28:00 +1000", + "Min" : 54, + "Avg" : 54, + "Max" : 54 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "Avg" : 54, + "date" : "2024-10-31 01:29:00 +1000", + "Min" : 54 + }, + { + "Avg" : 52, + "Min" : 52, + "date" : "2024-10-31 01:34:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 52 + }, + { + "date" : "2024-10-31 01:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 52.838233947753906, + "Avg" : 51.419116973876953, + "Min" : 50 + }, + { + "Min" : 52, + "Max" : 52, + "date" : "2024-10-31 01:42:00 +1000", + "Avg" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 52, + "Min" : 52, + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 01:45:00 +1000" + }, + { + "Max" : 52, + "Min" : 52, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 01:51:00 +1000", + "Avg" : 52 + }, + { + "date" : "2024-10-31 01:55:00 +1000", + "Min" : 53, + "source" : "Madhava’s Apple Watch", + "Avg" : 53, + "Max" : 53 + }, + { + "Min" : 54, + "Max" : 54, + "source" : 
"Madhava’s Apple Watch", + "date" : "2024-10-31 01:58:00 +1000", + "Avg" : 54 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 53, + "Min" : 53, + "Avg" : 53, + "date" : "2024-10-31 02:01:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "Min" : 52, + "Avg" : 52, + "date" : "2024-10-31 02:07:00 +1000" + }, + { + "Avg" : 52, + "Min" : 52, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 02:11:00 +1000", + "Max" : 52 + }, + { + "Max" : 51, + "Avg" : 51, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 02:15:00 +1000", + "Min" : 51 + }, + { + "Max" : 49, + "Min" : 49, + "Avg" : 49, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 02:20:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 47, + "Avg" : 47, + "Min" : 47, + "date" : "2024-10-31 02:27:00 +1000" + }, + { + "date" : "2024-10-31 02:32:00 +1000", + "Max" : 49, + "Min" : 49, + "source" : "Madhava’s Apple Watch", + "Avg" : 49 + }, + { + "Avg" : 50, + "Max" : 50, + "Min" : 50, + "date" : "2024-10-31 02:33:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 02:38:00 +1000", + "Max" : 49, + "Min" : 49, + "Avg" : 49, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 53, + "Min" : 53, + "Avg" : 53, + "date" : "2024-10-31 02:42:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 02:49:00 +1000", + "Min" : 52, + "Avg" : 52, + "Max" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 54, + "Min" : 54, + "source" : "Madhava’s Apple Watch", + "Avg" : 54, + "date" : "2024-10-31 02:50:00 +1000" + }, + { + "Avg" : 52, + "date" : "2024-10-31 02:58:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "Max" : 52 + }, + { + "Min" : 52, + "Avg" : 52, + "Max" : 52, + "date" : "2024-10-31 03:01:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 51, + "Max" : 51, + "date" : "2024-10-31 03:02:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 51 + }, + { + "date" : "2024-10-31 03:06:00 +1000", + "Max" : 52, + "Min" : 52, + "Avg" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 03:14:00 +1000", + "Min" : 48, + "Max" : 48, + "Avg" : 48 + }, + { + "Avg" : 49, + "date" : "2024-10-31 03:16:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 49, + "Max" : 49 + }, + { + "Max" : 49, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 03:23:00 +1000", + "Avg" : 49, + "Min" : 49 + }, + { + "date" : "2024-10-31 03:26:00 +1000", + "Min" : 48, + "source" : "Madhava’s Apple Watch", + "Max" : 48, + "Avg" : 48 + }, + { + "Max" : 48, + "source" : "Madhava’s Apple Watch", + "Min" : 48, + "date" : "2024-10-31 03:32:00 +1000", + "Avg" : 48 + }, + { + "Avg" : 47, + "source" : "Madhava’s Apple Watch", + "Max" : 47, + "date" : "2024-10-31 03:33:00 +1000", + "Min" : 47 + }, + { + "date" : "2024-10-31 03:35:00 +1000", + "Avg" : 44.902919769287109, + "Max" : 44.902919769287109, + "Min" : 44.902919769287109, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 48, + "date" : "2024-10-31 03:38:00 +1000", + "Avg" : 48, + "source" : "Madhava’s Apple Watch", + "Min" : 48 + }, + { + "Avg" : 46.000000000000007, + "source" : "Madhava’s Apple Watch", + "Max" : 46.000000000000007, + "date" : "2024-10-31 03:42:00 +1000", + "Min" : 46.000000000000007 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 48, + "Avg" : 48, + "Min" : 48, + "date" : "2024-10-31 03:47:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + 
"date" : "2024-10-31 03:50:00 +1000", + "Min" : 48, + "Avg" : 48, + "Max" : 48 + }, + { + "Avg" : 49, + "Max" : 49, + "Min" : 49, + "date" : "2024-10-31 03:55:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 04:00:00 +1000", + "Min" : 48, + "Max" : 48, + "source" : "Madhava’s Apple Watch", + "Avg" : 48 + }, + { + "Min" : 49, + "Avg" : 49, + "Max" : 49, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 04:02:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 48, + "date" : "2024-10-31 04:05:00 +1000", + "Max" : 48, + "Min" : 48 + }, + { + "date" : "2024-10-31 04:10:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 49, + "Max" : 49, + "Avg" : 49 + }, + { + "Avg" : 49, + "date" : "2024-10-31 04:15:00 +1000", + "Max" : 49, + "source" : "Madhava’s Apple Watch", + "Min" : 49 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 04:20:00 +1000", + "Min" : 63, + "Max" : 63, + "Avg" : 63 + }, + { + "Min" : 64, + "source" : "Madhava’s Apple Watch", + "Max" : 64, + "Avg" : 64, + "date" : "2024-10-31 04:25:00 +1000" + }, + { + "Min" : 51, + "date" : "2024-10-31 04:31:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 51, + "Max" : 51 + }, + { + "Max" : 59, + "Avg" : 59, + "Min" : 59, + "date" : "2024-10-31 04:32:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 04:36:00 +1000", + "Max" : 45, + "source" : "Madhava’s Apple Watch", + "Min" : 45, + "Avg" : 45 + }, + { + "Avg" : 44, + "date" : "2024-10-31 04:40:00 +1000", + "Max" : 44, + "source" : "Madhava’s Apple Watch", + "Min" : 44 + }, + { + "Max" : 50, + "Min" : 50, + "date" : "2024-10-31 04:49:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 50 + }, + { + "Max" : 49, + "Min" : 49, + "Avg" : 49, + "date" : "2024-10-31 04:51:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 52, + "date" : "2024-10-31 04:55:00 +1000", + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "Max" : 52 + }, + { + "Avg" : 49, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 05:01:00 +1000", + "Max" : 49, + "Min" : 49 + }, + { + "Avg" : 48, + "source" : "Madhava’s Apple Watch", + "Max" : 48, + "date" : "2024-10-31 05:02:00 +1000", + "Min" : 48 + }, + { + "date" : "2024-10-31 05:09:00 +1000", + "Max" : 49, + "Min" : 49, + "source" : "Madhava’s Apple Watch", + "Avg" : 49 + }, + { + "Min" : 49, + "date" : "2024-10-31 05:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 49, + "Max" : 49 + }, + { + "Avg" : 50, + "date" : "2024-10-31 05:19:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 50, + "Min" : 50 + }, + { + "date" : "2024-10-31 05:20:00 +1000", + "Min" : 50, + "Avg" : 50, + "Max" : 50, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 50, + "Min" : 50, + "source" : "Madhava’s Apple Watch", + "Max" : 50, + "date" : "2024-10-31 05:26:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 49, + "Avg" : 49, + "date" : "2024-10-31 05:33:00 +1000", + "Max" : 49 + }, + { + "Min" : 50, + "Avg" : 50.109588623046875, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 05:35:00 +1000", + "Max" : 50.21917724609375 + }, + { + "Avg" : 50, + "date" : "2024-10-31 05:40:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 50, + "Min" : 50 + }, + { + "date" : "2024-10-31 05:45:00 +1000", + "Max" : 50, + "Min" : 50, + "Avg" : 50, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 58, + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "Max" : 58, + "date" : "2024-10-31 
05:50:00 +1000" + }, + { + "Max" : 44, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 05:52:00 +1000", + "Min" : 44, + "Avg" : 44 + }, + { + "Min" : 51, + "Avg" : 51, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 05:55:00 +1000", + "Max" : 51 + }, + { + "Min" : 54, + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "Avg" : 54, + "date" : "2024-10-31 06:01:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "Max" : 64, + "date" : "2024-10-31 06:08:00 +1000", + "Avg" : 64 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 67, + "Max" : 67, + "date" : "2024-10-31 06:18:00 +1000", + "Min" : 67 + }, + { + "date" : "2024-10-31 06:19:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "Avg" : 59, + "Max" : 59 + }, + { + "Min" : 66, + "date" : "2024-10-31 06:22:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 66, + "Max" : 66 + }, + { + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "Avg" : 59, + "Max" : 59, + "date" : "2024-10-31 06:26:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "date" : "2024-10-31 06:34:00 +1000", + "Avg" : 65, + "Max" : 65 + }, + { + "Max" : 74, + "source" : "Madhava’s Apple Watch", + "Avg" : 74, + "Min" : 74, + "date" : "2024-10-31 06:38:00 +1000" + }, + { + "Max" : 65, + "Min" : 65, + "date" : "2024-10-31 06:43:00 +1000", + "Avg" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 06:51:00 +1000", + "Avg" : 68, + "Min" : 68, + "Max" : 68, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 62.000000000000007, + "date" : "2024-10-31 06:57:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 62.000000000000007, + "Min" : 62.000000000000007 + }, + { + "Avg" : 67, + "date" : "2024-10-31 07:02:00 +1000", + "Min" : 67, + "Max" : 67, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 07:06:00 +1000", + "Min" : 61, + "Avg" : 61, + "source" : "Madhava’s Apple Watch", + "Max" : 61 + }, + { + "Max" : 63, + "Min" : 63, + "date" : "2024-10-31 07:14:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "date" : "2024-10-31 07:15:00 +1000", + "Min" : 60, + "Avg" : 60 + }, + { + "Max" : 58, + "Min" : 58, + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "date" : "2024-10-31 07:24:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 07:27:00 +1000", + "Min" : 57, + "Avg" : 57, + "Max" : 57 + }, + { + "date" : "2024-10-31 07:31:00 +1000", + "Min" : 65, + "Max" : 65, + "Avg" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 66, + "Min" : 66, + "date" : "2024-10-31 07:38:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 66 + }, + { + "date" : "2024-10-31 07:46:00 +1000", + "Avg" : 65, + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "Max" : 65 + }, + { + "Avg" : 59, + "date" : "2024-10-31 07:52:00 +1000", + "Max" : 59, + "source" : "Madhava’s Apple Watch", + "Min" : 59 + }, + { + "Max" : 60, + "date" : "2024-10-31 07:57:00 +1000", + "Avg" : 60, + "Min" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 56, + "date" : "2024-10-31 08:04:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 56, + "Avg" : 56 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 08:06:00 +1000", + "Max" : 56, + "Avg" : 56, + "Min" : 56 + }, + { + "Max" : 57, + "Min" : 57, + "date" : "2024-10-31 08:12:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 57 + }, + { + "Avg" : 
58.325004577636719, + "date" : "2024-10-31 08:13:00 +1000", + "Max" : 60.650009155273445, + "source" : "Madhava’s Apple Watch", + "Min" : 56 + }, + { + "date" : "2024-10-31 08:19:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "Avg" : 59, + "Min" : 59 + }, + { + "date" : "2024-10-31 08:22:00 +1000", + "Min" : 80, + "Avg" : 80, + "source" : "Madhava’s Apple Watch", + "Max" : 80 + }, + { + "date" : "2024-10-31 08:27:00 +1000", + "Max" : 55, + "Min" : 55, + "source" : "Madhava’s Apple Watch", + "Avg" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "Avg" : 60, + "Min" : 60, + "date" : "2024-10-31 08:31:00 +1000" + }, + { + "date" : "2024-10-31 08:35:00 +1000", + "Max" : 58, + "Min" : 58, + "source" : "Madhava’s Apple Watch", + "Avg" : 58 + }, + { + "Max" : 68, + "source" : "Madhava’s Apple Watch", + "Min" : 68, + "date" : "2024-10-31 08:44:00 +1000", + "Avg" : 68 + }, + { + "date" : "2024-10-31 08:46:00 +1000", + "Max" : 54, + "Min" : 54, + "Avg" : 54, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 08:50:00 +1000", + "Max" : 62.000000000000007, + "source" : "Madhava’s Apple Watch", + "Min" : 62.000000000000007, + "Avg" : 62.000000000000007 + }, + { + "date" : "2024-10-31 08:57:00 +1000", + "Avg" : 58, + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58 + }, + { + "date" : "2024-10-31 09:04:00 +1000", + "Min" : 56, + "Max" : 56, + "source" : "Madhava’s Apple Watch", + "Avg" : 56 + }, + { + "Min" : 56, + "date" : "2024-10-31 09:06:00 +1000", + "Max" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 09:13:00 +1000", + "Max" : 60, + "Min" : 60, + "Avg" : 60 + }, + { + "Max" : 60, + "Min" : 60, + "Avg" : 60, + "date" : "2024-10-31 09:18:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 57, + "Max" : 57, + "date" : "2024-10-31 09:22:00 +1000", + "Min" : 57 + }, + { + "Avg" : 75.181818181818173, + "Max" : 80, + "Min" : 71, + "date" : "2024-10-31 09:26:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 65, + "Max" : 65, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 09:28:00 +1000", + "Min" : 65 + }, + { + "Max" : 59, + "date" : "2024-10-31 09:34:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "Avg" : 59 + }, + { + "Min" : 57, + "date" : "2024-10-31 09:36:00 +1000", + "Avg" : 57, + "source" : "Madhava’s Apple Watch", + "Max" : 57 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "date" : "2024-10-31 09:45:00 +1000", + "Min" : 58, + "Max" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "date" : "2024-10-31 09:47:00 +1000", + "Min" : 55, + "Avg" : 55 + }, + { + "Avg" : 66, + "Max" : 66, + "Min" : 66, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 09:51:00 +1000" + }, + { + "Min" : 58, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 09:56:00 +1000", + "Avg" : 58, + "Max" : 58 + }, + { + "Max" : 59, + "date" : "2024-10-31 10:00:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "Avg" : 59 + }, + { + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 10:05:00 +1000", + "Max" : 56, + "Min" : 56 + }, + { + "Max" : 59, + "date" : "2024-10-31 10:10:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 59, + "Min" : 59 + }, + { + "Max" : 64, + "Avg" : 64, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 10:19:00 +1000", + "Min" : 64 + }, + { + "Max" : 
62.000000000000007, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 10:21:00 +1000", + "Min" : 62.000000000000007, + "Avg" : 62.000000000000007 + }, + { + "date" : "2024-10-31 10:27:00 +1000", + "Max" : 64, + "Avg" : 64, + "source" : "Madhava’s Apple Watch", + "Min" : 64 + }, + { + "date" : "2024-10-31 10:43:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "Avg" : 65, + "Max" : 65 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 58, + "date" : "2024-10-31 10:46:00 +1000", + "Avg" : 58, + "Max" : 58 + }, + { + "Max" : 66, + "date" : "2024-10-31 10:53:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 66, + "Avg" : 66 + }, + { + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 10:58:00 +1000", + "Avg" : 54, + "Min" : 54 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 56, + "Avg" : 56, + "date" : "2024-10-31 11:02:00 +1000", + "Min" : 56 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 68, + "date" : "2024-10-31 11:27:00 +1000", + "Avg" : 68, + "Max" : 68 + }, + { + "date" : "2024-10-31 11:45:00 +1000", + "Min" : 71, + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Avg" : 71 + }, + { + "Min" : 100, + "Max" : 104, + "date" : "2024-10-31 12:09:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 103.00000000000001 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 68, + "date" : "2024-10-31 12:11:00 +1000", + "Min" : 68, + "Max" : 68 + }, + { + "Min" : 88, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 12:28:00 +1000", + "Avg" : 88, + "Max" : 88 + }, + { + "Avg" : 82, + "date" : "2024-10-31 12:38:00 +1000", + "Min" : 82, + "source" : "Madhava’s Apple Watch", + "Max" : 82 + }, + { + "Max" : 76, + "Avg" : 76, + "source" : "Madhava’s Apple Watch", + "Min" : 76, + "date" : "2024-10-31 12:40:00 +1000" + }, + { + "Max" : 80, + "Min" : 80, + "date" : "2024-10-31 12:45:00 +1000", + "Avg" : 80, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 77.000000000000014, + "Avg" : 77.000000000000014, + "Max" : 77.000000000000014, + "date" : "2024-10-31 12:50:00 +1000" + }, + { + "Max" : 99, + "Min" : 99, + "Avg" : 99, + "date" : "2024-10-31 12:55:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 13:01:00 +1000", + "Max" : 102, + "Min" : 102, + "source" : "Madhava’s Apple Watch", + "Avg" : 102 + }, + { + "Max" : 93, + "Avg" : 93, + "source" : "Madhava’s Apple Watch", + "Min" : 93, + "date" : "2024-10-31 13:13:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 13:16:00 +1000", + "Avg" : 94, + "Max" : 94, + "Min" : 94 + }, + { + "Min" : 102, + "Max" : 102, + "date" : "2024-10-31 13:24:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 102 + }, + { + "Max" : 102, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 13:27:00 +1000", + "Avg" : 102, + "Min" : 102 + }, + { + "date" : "2024-10-31 13:34:00 +1000", + "Min" : 100, + "Max" : 100, + "source" : "Madhava’s Apple Watch", + "Avg" : 100 + }, + { + "date" : "2024-10-31 13:37:00 +1000", + "Min" : 109, + "source" : "Madhava’s Apple Watch", + "Max" : 109, + "Avg" : 109 + }, + { + "Min" : 108, + "date" : "2024-10-31 13:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 108, + "Avg" : 108 + }, + { + "Min" : 110.78223419189453, + "Avg" : 110.78223419189453, + "Max" : 110.78223419189453, + "date" : "2024-10-31 13:43:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 108, + "date" : "2024-10-31 13:47:00 +1000", + 
"source" : "Madhava’s Apple Watch", + "Max" : 108, + "Min" : 108 + }, + { + "date" : "2024-10-31 13:54:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 102, + "Min" : 102, + "Avg" : 102 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 100, + "Avg" : 100, + "date" : "2024-10-31 13:56:00 +1000", + "Max" : 100 + }, + { + "Min" : 100, + "source" : "Madhava’s Apple Watch", + "Avg" : 100, + "date" : "2024-10-31 14:02:00 +1000", + "Max" : 100 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 14:06:00 +1000", + "Max" : 113, + "Avg" : 113, + "Min" : 113 + }, + { + "Max" : 108, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 14:17:00 +1000", + "Min" : 108, + "Avg" : 108 + }, + { + "Max" : 105, + "source" : "Madhava’s Apple Watch", + "Avg" : 105, + "date" : "2024-10-31 14:23:00 +1000", + "Min" : 105 + }, + { + "Max" : 103, + "Avg" : 103, + "Min" : 103, + "date" : "2024-10-31 14:28:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 100, + "date" : "2024-10-31 14:33:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 100, + "Min" : 100 + }, + { + "Avg" : 92.000000000000014, + "Min" : 92.000000000000014, + "source" : "Madhava’s Apple Watch", + "Max" : 92.000000000000014, + "date" : "2024-10-31 15:00:00 +1000" + }, + { + "Max" : 94, + "Min" : 94, + "Avg" : 94, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 15:04:00 +1000" + }, + { + "Max" : 98, + "date" : "2024-10-31 15:07:00 +1000", + "Avg" : 98, + "Min" : 98, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 15:13:00 +1000", + "Max" : 85, + "Min" : 85, + "Avg" : 85 + }, + { + "Min" : 85, + "source" : "Madhava’s Apple Watch", + "Avg" : 85, + "Max" : 85, + "date" : "2024-10-31 15:17:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 90, + "date" : "2024-10-31 15:22:00 +1000", + "Avg" : 90, + "Min" : 90 + }, + { + "Max" : 83, + "date" : "2024-10-31 15:26:00 +1000", + "Min" : 83, + "source" : "Madhava’s Apple Watch", + "Avg" : 83 + }, + { + "Max" : 81, + "Min" : 81, + "Avg" : 81, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 15:32:00 +1000" + }, + { + "Avg" : 87.584941864013658, + "date" : "2024-10-31 15:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 84.169883728027344, + "Max" : 91 + }, + { + "date" : "2024-10-31 15:40:00 +1000", + "Avg" : 91, + "Min" : 91, + "Max" : 91, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 15:47:00 +1000", + "Min" : 84, + "Max" : 84, + "Avg" : 84, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 87, + "Min" : 87, + "Max" : 87, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 15:51:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 79, + "Avg" : 79, + "date" : "2024-10-31 15:59:00 +1000", + "Max" : 79 + }, + { + "Avg" : 85, + "source" : "Madhava’s Apple Watch", + "Min" : 85, + "date" : "2024-10-31 16:04:00 +1000", + "Max" : 85 + }, + { + "Avg" : 84, + "Min" : 84, + "Max" : 84, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 16:05:00 +1000" + }, + { + "Max" : 82, + "Min" : 82, + "Avg" : 82, + "date" : "2024-10-31 16:11:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 86, + "Avg" : 86, + "date" : "2024-10-31 16:19:00 +1000", + "Max" : 86 + }, + { + "Min" : 83, + "Avg" : 83, + "Max" : 83, + "date" : "2024-10-31 16:21:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 
64, + "Min" : 64, + "Max" : 64, + "date" : "2024-10-31 16:28:00 +1000" + }, + { + "date" : "2024-10-31 16:30:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 69, + "Max" : 69, + "Avg" : 69 + }, + { + "Min" : 81, + "Max" : 81, + "source" : "Madhava’s Apple Watch", + "Avg" : 81, + "date" : "2024-10-31 16:36:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 83, + "Min" : 83, + "date" : "2024-10-31 16:44:00 +1000", + "Avg" : 83 + }, + { + "Max" : 78, + "date" : "2024-10-31 16:49:00 +1000", + "Min" : 78, + "Avg" : 78, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 76, + "Max" : 76, + "source" : "Madhava’s Apple Watch", + "Min" : 76, + "date" : "2024-10-31 16:53:00 +1000" + }, + { + "Max" : 75, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 16:59:00 +1000", + "Min" : 75, + "Avg" : 75 + }, + { + "Max" : 74, + "date" : "2024-10-31 17:01:00 +1000", + "Min" : 74, + "Avg" : 74, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 76, + "date" : "2024-10-31 17:05:00 +1000", + "Min" : 76, + "Avg" : 76 + }, + { + "Min" : 86, + "source" : "Madhava’s Apple Watch", + "Avg" : 86, + "date" : "2024-10-31 17:12:00 +1000", + "Max" : 86 + }, + { + "Min" : 83, + "Avg" : 83, + "Max" : 83, + "date" : "2024-10-31 17:20:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 17:24:00 +1000", + "Max" : 80, + "Min" : 80, + "Avg" : 80 + }, + { + "Min" : 73, + "Avg" : 73, + "Max" : 73, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 17:30:00 +1000" + }, + { + "Max" : 88, + "source" : "Madhava’s Apple Watch", + "Min" : 88, + "Avg" : 88, + "date" : "2024-10-31 17:34:00 +1000" + }, + { + "Max" : 96, + "Avg" : 96, + "Min" : 96, + "date" : "2024-10-31 17:36:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 17:43:00 +1000", + "Max" : 90, + "Min" : 90, + "Avg" : 90 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 107.00000000000001, + "Max" : 107.00000000000001, + "Min" : 107.00000000000001, + "date" : "2024-10-31 17:53:00 +1000" + }, + { + "Avg" : 98, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 18:05:00 +1000", + "Min" : 98, + "Max" : 98 + }, + { + "Max" : 105, + "source" : "Madhava’s Apple Watch", + "Min" : 105, + "Avg" : 105, + "date" : "2024-10-31 18:09:00 +1000" + }, + { + "Avg" : 104, + "Min" : 104, + "date" : "2024-10-31 18:12:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 104 + }, + { + "Max" : 104, + "source" : "Madhava’s Apple Watch", + "Min" : 104, + "Avg" : 104, + "date" : "2024-10-31 18:17:00 +1000" + }, + { + "Min" : 106, + "date" : "2024-10-31 18:23:00 +1000", + "Max" : 106, + "source" : "Madhava’s Apple Watch", + "Avg" : 106 + }, + { + "Max" : 114, + "Avg" : 114, + "Min" : 114, + "date" : "2024-10-31 18:28:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 110, + "date" : "2024-10-31 18:31:00 +1000", + "Avg" : 110, + "source" : "Madhava’s Apple Watch", + "Max" : 110 + }, + { + "Min" : 95, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 18:38:00 +1000", + "Max" : 95, + "Avg" : 95 + }, + { + "Avg" : 105, + "date" : "2024-10-31 18:42:00 +1000", + "Max" : 105, + "source" : "Madhava’s Apple Watch", + "Min" : 105 + }, + { + "Avg" : 104, + "Min" : 104, + "Max" : 104, + "date" : "2024-10-31 18:48:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 104, + "date" : "2024-10-31 18:52:00 +1000", + "Min" : 104, + 
"Max" : 104, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 93, + "date" : "2024-10-31 19:05:00 +1000", + "Avg" : 93, + "source" : "Madhava’s Apple Watch", + "Max" : 93 + }, + { + "Max" : 106, + "date" : "2024-10-31 19:11:00 +1000", + "Min" : 106, + "source" : "Madhava’s Apple Watch", + "Avg" : 106 + }, + { + "Max" : 108, + "date" : "2024-10-31 19:17:00 +1000", + "Min" : 108, + "Avg" : 108, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 84, + "Max" : 84, + "Avg" : 84, + "date" : "2024-10-31 19:34:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 80, + "Avg" : 80, + "source" : "Madhava’s Apple Watch", + "Max" : 80, + "date" : "2024-10-31 19:38:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 80.598564147949219, + "Avg" : 80.598564147949219, + "Max" : 80.598564147949219, + "date" : "2024-10-31 19:39:00 +1000" + }, + { + "date" : "2024-10-31 19:40:00 +1000", + "Min" : 80, + "Max" : 80, + "source" : "Madhava’s Apple Watch", + "Avg" : 80 + }, + { + "Min" : 80, + "date" : "2024-10-31 19:44:00 +1000", + "Avg" : 80, + "Max" : 80, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 84, + "date" : "2024-10-31 19:48:00 +1000", + "Min" : 84, + "Avg" : 84 + }, + { + "Max" : 79, + "date" : "2024-10-31 19:52:00 +1000", + "Avg" : 79, + "Min" : 79, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 74, + "Max" : 74, + "source" : "Madhava’s Apple Watch", + "Avg" : 74, + "date" : "2024-10-31 19:59:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 81, + "date" : "2024-10-31 20:01:00 +1000", + "Min" : 81, + "Max" : 81 + }, + { + "Max" : 66, + "date" : "2024-10-31 20:08:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 66, + "Avg" : 66 + }, + { + "Max" : 72, + "Min" : 72, + "Avg" : 72, + "date" : "2024-10-31 20:12:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 71, + "Max" : 71, + "date" : "2024-10-31 20:14:00 +1000", + "Avg" : 71, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 72, + "Avg" : 72, + "source" : "Madhava’s Apple Watch", + "Max" : 72, + "date" : "2024-10-31 20:16:00 +1000" + }, + { + "Min" : 73, + "Avg" : 73, + "date" : "2024-10-31 20:23:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 73 + }, + { + "Avg" : 72, + "Max" : 72, + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "date" : "2024-10-31 20:26:00 +1000" + }, + { + "Min" : 73, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 20:34:00 +1000", + "Max" : 73, + "Avg" : 73 + }, + { + "Max" : 73, + "Avg" : 73, + "Min" : 73, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 20:37:00 +1000" + }, + { + "date" : "2024-10-31 20:42:00 +1000", + "Max" : 72, + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "Avg" : 72 + }, + { + "Max" : 65, + "date" : "2024-10-31 20:46:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 65, + "Min" : 65 + }, + { + "Avg" : 63, + "date" : "2024-10-31 20:50:00 +1000", + "Min" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63 + }, + { + "Max" : 71, + "Min" : 71, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 20:56:00 +1000", + "Avg" : 71 + }, + { + "Max" : 66, + "Min" : 66, + "Avg" : 66, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 21:01:00 +1000" + }, + { + "Avg" : 67, + "Max" : 67, + "Min" : 67, + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 21:06:00 +1000" + }, + { + "Max" : 67, + "date" : "2024-10-31 21:11:00 +1000", + "Avg" : 67, + "source" : "Madhava’s Apple Watch", + "Min" 
: 67 + }, + { + "Max" : 67, + "Min" : 67, + "date" : "2024-10-31 21:17:00 +1000", + "Avg" : 67, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-10-31 22:02:00 +1000", + "Min" : 67.797615051269531, + "source" : "Madhava’s Apple Watch", + "Max" : 67.797615051269531, + "Avg" : 67.797615051269531 + }, + { + "date" : "2024-10-31 22:03:00 +1000", + "Avg" : 69, + "Max" : 69, + "Min" : 69, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 71, + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Avg" : 71, + "date" : "2024-10-31 22:04:00 +1000" + }, + { + "date" : "2024-10-31 22:06:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Min" : 67, + "Avg" : 67 + }, + { + "Min" : 67, + "date" : "2024-10-31 22:14:00 +1000", + "Max" : 67, + "Avg" : 67, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Avg" : 67, + "date" : "2024-10-31 22:20:00 +1000", + "Max" : 67 + }, + { + "Max" : 67, + "Avg" : 67, + "date" : "2024-10-31 22:22:00 +1000", + "Min" : 67, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 70, + "Min" : 70, + "date" : "2024-10-31 22:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 70 + }, + { + "date" : "2024-10-31 22:32:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 68, + "Max" : 68, + "Avg" : 68 + }, + { + "Min" : 71, + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Avg" : 71, + "date" : "2024-10-31 22:33:00 +1000" + }, + { + "Min" : 71, + "Avg" : 71, + "Max" : 71, + "date" : "2024-10-31 22:35:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 69, + "date" : "2024-10-31 22:43:00 +1000", + "Min" : 69, + "Avg" : 69 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 70, + "Avg" : 70, + "date" : "2024-10-31 22:45:00 +1000", + "Min" : 70 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 69, + "Avg" : 69, + "date" : "2024-10-31 22:50:00 +1000", + "Max" : 69 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 67, + "Max" : 67, + "date" : "2024-10-31 22:57:00 +1000", + "Min" : 67 + }, + { + "Min" : 68, + "Avg" : 68, + "date" : "2024-10-31 23:04:00 +1000", + "Max" : 68, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 67, + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "date" : "2024-10-31 23:09:00 +1000", + "Avg" : 67 + }, + { + "date" : "2024-10-31 23:15:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Min" : 63, + "Avg" : 63 + }, + { + "Min" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-10-31 23:16:00 +1000", + "Avg" : 63 + }, + { + "date" : "2024-10-31 23:20:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "Max" : 72, + "Avg" : 72 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 61, + "date" : "2024-10-31 23:21:00 +1000", + "Avg" : 61, + "Max" : 61 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-10-31 23:26:00 +1000", + "Avg" : 70, + "Max" : 70, + "Min" : 70 + }, + { + "Max" : 67, + "Avg" : 67, + "date" : "2024-10-31 23:31:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67 + }, + { + "date" : "2024-10-31 23:36:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Avg" : 63, + "Min" : 63 + }, + { + "date" : "2024-10-31 23:40:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 61.494770050048828, + "Max" : 61.494770050048828, + "Min" : 61.494770050048828 + }, + { + "date" : "2024-10-31 23:43:00 +1000", + "Max" : 62.000000000000007, + "source" : "Madhava’s Apple Watch", + "Min" 
: 62.000000000000007, + "Avg" : 62.000000000000007 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 62.000000000000007, + "date" : "2024-10-31 23:46:00 +1000", + "Min" : 62.000000000000007, + "Avg" : 62.000000000000007 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Min" : 63, + "date" : "2024-10-31 23:50:00 +1000", + "Avg" : 63 + }, + { + "date" : "2024-10-31 23:51:00 +1000", + "Avg" : 61, + "Min" : 61, + "source" : "Madhava’s Apple Watch", + "Max" : 61 + }, + { + "Avg" : 63, + "date" : "2024-10-31 23:59:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Max" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Max" : 63, + "Avg" : 63, + "date" : "2024-11-01 00:00:00 +1000" + }, + { + "date" : "2024-11-01 00:06:00 +1000", + "Avg" : 63, + "Min" : 63, + "Max" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 63, + "Max" : 63, + "Avg" : 63, + "date" : "2024-11-01 00:10:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Min" : 63, + "date" : "2024-11-01 00:19:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 65, + "Min" : 65, + "Avg" : 65, + "date" : "2024-11-01 00:21:00 +1000" + }, + { + "Min" : 63, + "date" : "2024-11-01 00:24:00 +1000", + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63 + }, + { + "date" : "2024-11-01 00:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Avg" : 63, + "Max" : 63 + }, + { + "date" : "2024-11-01 00:31:00 +1000", + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Max" : 63 + }, + { + "date" : "2024-11-01 00:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Avg" : 67, + "Min" : 67 + }, + { + "Avg" : 64, + "Min" : 64, + "date" : "2024-11-01 00:42:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 64 + }, + { + "date" : "2024-11-01 00:46:00 +1000", + "Min" : 64, + "Max" : 64, + "Avg" : 64, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 66, + "Max" : 66, + "date" : "2024-11-01 00:51:00 +1000", + "Min" : 66, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 63, + "date" : "2024-11-01 00:54:00 +1000", + "Min" : 63, + "Avg" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 64, + "date" : "2024-11-01 00:55:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "Avg" : 64 + }, + { + "Avg" : 63, + "Max" : 63, + "Min" : 63, + "date" : "2024-11-01 01:01:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 69, + "Avg" : 69, + "date" : "2024-11-01 01:05:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 69 + }, + { + "Max" : 79, + "date" : "2024-11-01 01:12:00 +1000", + "Min" : 79, + "Avg" : 79, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "date" : "2024-11-01 01:15:00 +1000", + "Min" : 71, + "Avg" : 71 + }, + { + "Avg" : 72, + "source" : "Madhava’s Apple Watch", + "Max" : 72, + "Min" : 72, + "date" : "2024-11-01 01:20:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 73, + "Min" : 73, + "Avg" : 73, + "date" : "2024-11-01 01:21:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 65, + "Max" : 65, + "Min" : 65, + "date" : "2024-11-01 01:26:00 +1000" + }, + { + "date" : "2024-11-01 01:35:00 +1000", + "Min" : 63, + "Avg" : 63, + "Max" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 62.000000000000007, + "Max" : 62.000000000000007, + "Avg" : 62.000000000000007, + "date" : "2024-11-01 
01:37:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 67.96148681640625, + "Avg" : 67.96148681640625, + "Max" : 67.96148681640625, + "date" : "2024-11-01 01:40:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 01:44:00 +1000", + "Min" : 63, + "Max" : 63, + "Avg" : 63 + }, + { + "date" : "2024-11-01 01:45:00 +1000", + "Min" : 65, + "Max" : 65, + "Avg" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 01:55:00 +1000", + "Avg" : 58, + "Min" : 58, + "source" : "Madhava’s Apple Watch", + "Max" : 58 + }, + { + "Avg" : 60, + "Max" : 60, + "date" : "2024-11-01 01:56:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 60 + }, + { + "Max" : 59, + "date" : "2024-11-01 02:01:00 +1000", + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "Avg" : 59 + }, + { + "Min" : 57, + "Avg" : 57, + "source" : "Madhava’s Apple Watch", + "Max" : 57, + "date" : "2024-11-01 02:07:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 54, + "Avg" : 54, + "Max" : 54, + "date" : "2024-11-01 02:10:00 +1000" + }, + { + "date" : "2024-11-01 02:16:00 +1000", + "Max" : 57, + "Min" : 57, + "Avg" : 57, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 02:21:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Min" : 55, + "Avg" : 55 + }, + { + "Max" : 58, + "date" : "2024-11-01 02:28:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "Min" : 58 + }, + { + "date" : "2024-11-01 02:30:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 58, + "Avg" : 58 + }, + { + "Avg" : 58, + "date" : "2024-11-01 02:36:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58 + }, + { + "date" : "2024-11-01 02:41:00 +1000", + "Min" : 52, + "Avg" : 52, + "Max" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 64, + "Avg" : 64, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 02:45:00 +1000", + "Min" : 64 + }, + { + "date" : "2024-11-01 02:46:00 +1000", + "Min" : 59, + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "Max" : 59 + }, + { + "Max" : 55, + "date" : "2024-11-01 02:53:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "Avg" : 55 + }, + { + "Max" : 55, + "date" : "2024-11-01 02:55:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "Avg" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 03:00:00 +1000", + "Min" : 56, + "Max" : 56, + "Avg" : 56 + }, + { + "Min" : 58, + "date" : "2024-11-01 03:10:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "Max" : 58 + }, + { + "date" : "2024-11-01 03:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 58, + "Avg" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 54, + "Avg" : 54, + "date" : "2024-11-01 03:17:00 +1000", + "Max" : 54 + }, + { + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 03:20:00 +1000", + "Max" : 52, + "Min" : 52 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "date" : "2024-11-01 03:27:00 +1000", + "Min" : 52, + "Avg" : 52 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 60, + "Min" : 60, + "Max" : 60, + "date" : "2024-11-01 03:30:00 +1000" + }, + { + "Max" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 03:35:00 +1000", + "Min" : 56 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 59.185033798217773, + "Max" : 59.370067596435547, + "Min" : 59, + 
"date" : "2024-11-01 03:41:00 +1000" + }, + { + "Min" : 58, + "Avg" : 58, + "date" : "2024-11-01 03:48:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 58 + }, + { + "Max" : 63, + "date" : "2024-11-01 03:52:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Avg" : 63 + }, + { + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 03:54:00 +1000", + "Max" : 56 + }, + { + "Min" : 54, + "Avg" : 54, + "date" : "2024-11-01 04:00:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 54 + }, + { + "Max" : 53, + "Min" : 53, + "date" : "2024-11-01 04:05:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 53 + }, + { + "Max" : 54, + "Avg" : 54, + "source" : "Madhava’s Apple Watch", + "Min" : 54, + "date" : "2024-11-01 04:07:00 +1000" + }, + { + "Max" : 55, + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "Avg" : 55, + "date" : "2024-11-01 04:12:00 +1000" + }, + { + "date" : "2024-11-01 04:19:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "Min" : 54, + "Avg" : 54 + }, + { + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "Min" : 54, + "Avg" : 54, + "date" : "2024-11-01 04:22:00 +1000" + }, + { + "Max" : 51, + "Avg" : 51, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 04:24:00 +1000", + "Min" : 51 + }, + { + "Avg" : 54, + "Min" : 54, + "date" : "2024-11-01 04:26:00 +1000", + "Max" : 54, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 55, + "date" : "2024-11-01 04:32:00 +1000", + "Avg" : 55, + "source" : "Madhava’s Apple Watch", + "Min" : 55 + }, + { + "date" : "2024-11-01 04:36:00 +1000", + "Max" : 55, + "source" : "Madhava’s Apple Watch", + "Avg" : 55, + "Min" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "Avg" : 55, + "Max" : 55, + "date" : "2024-11-01 04:44:00 +1000" + }, + { + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "Avg" : 54, + "Min" : 54, + "date" : "2024-11-01 04:47:00 +1000" + }, + { + "Max" : 54, + "date" : "2024-11-01 04:54:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 54, + "Avg" : 54 + }, + { + "Max" : 46.000000000000007, + "Avg" : 46.000000000000007, + "Min" : 46.000000000000007, + "date" : "2024-11-01 04:56:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 52, + "date" : "2024-11-01 05:00:00 +1000", + "Min" : 52, + "source" : "Madhava’s Apple Watch", + "Max" : 52 + }, + { + "date" : "2024-11-01 05:07:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Avg" : 55, + "Min" : 55 + }, + { + "Max" : 54, + "date" : "2024-11-01 05:13:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 54, + "Min" : 54 + }, + { + "Max" : 54, + "date" : "2024-11-01 05:20:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 54, + "Avg" : 54 + }, + { + "Min" : 55, + "Max" : 55, + "date" : "2024-11-01 05:23:00 +1000", + "Avg" : 55, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 05:24:00 +1000", + "Max" : 58, + "Min" : 58 + }, + { + "Avg" : 53, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 05:26:00 +1000", + "Min" : 53, + "Max" : 53 + }, + { + "Min" : 52, + "date" : "2024-11-01 05:33:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "Avg" : 52 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 59, + "Min" : 59, + "date" : "2024-11-01 05:38:00 +1000", + "Max" : 59 + }, + { + "date" : "2024-11-01 05:40:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 57.72718620300293, + "Max" : 58, + "Min" : 
57.454372406005859 + }, + { + "Min" : 89, + "source" : "Madhava’s Apple Watch", + "Avg" : 89, + "date" : "2024-11-01 05:49:00 +1000", + "Max" : 89 + }, + { + "date" : "2024-11-01 05:55:00 +1000", + "Avg" : 66, + "source" : "Madhava’s Apple Watch", + "Max" : 66, + "Min" : 66 + }, + { + "Max" : 70, + "Min" : 70, + "source" : "Madhava’s Apple Watch", + "Avg" : 70, + "date" : "2024-11-01 06:00:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 75, + "date" : "2024-11-01 06:01:00 +1000", + "Avg" : 75, + "Min" : 75 + }, + { + "date" : "2024-11-01 06:17:00 +1000", + "Min" : 80, + "Avg" : 80, + "Max" : 80, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 91, + "Max" : 91, + "date" : "2024-11-01 06:18:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 91 + }, + { + "Max" : 92.000000000000014, + "Min" : 92.000000000000014, + "source" : "Madhava’s Apple Watch", + "Avg" : 92.000000000000014, + "date" : "2024-11-01 06:30:00 +1000" + }, + { + "Min" : 74, + "source" : "Madhava’s Apple Watch", + "Avg" : 75, + "date" : "2024-11-01 06:34:00 +1000", + "Max" : 76 + }, + { + "Avg" : 75, + "date" : "2024-11-01 06:42:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 75, + "Min" : 75 + }, + { + "Max" : 70, + "date" : "2024-11-01 06:54:00 +1000", + "Min" : 70, + "Avg" : 70, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 78, + "date" : "2024-11-01 07:08:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 78, + "Avg" : 78 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 76, + "Avg" : 76, + "date" : "2024-11-01 07:14:00 +1000", + "Max" : 76 + }, + { + "Max" : 75, + "Avg" : 75, + "Min" : 75, + "date" : "2024-11-01 07:15:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 86, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 07:24:00 +1000", + "Min" : 86, + "Avg" : 86 + }, + { + "Min" : 73, + "date" : "2024-11-01 07:26:00 +1000", + "Avg" : 73, + "source" : "Madhava’s Apple Watch", + "Max" : 73 + }, + { + "date" : "2024-11-01 07:32:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 78, + "Min" : 78, + "Avg" : 78 + }, + { + "date" : "2024-11-01 07:38:00 +1000", + "Max" : 122, + "source" : "Madhava’s Apple Watch", + "Min" : 122, + "Avg" : 122 + }, + { + "Min" : 83, + "date" : "2024-11-01 07:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 83, + "Avg" : 83 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 82, + "Max" : 89, + "Avg" : 86.534895716370272, + "date" : "2024-11-01 07:42:00 +1000" + }, + { + "Max" : 91, + "Min" : 83, + "date" : "2024-11-01 07:43:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 87.835000000792746 + }, + { + "Max" : 94, + "Min" : 79, + "Avg" : 86.040404047075441, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 07:44:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 88, + "Avg" : 85.803571428571431, + "date" : "2024-11-01 07:45:00 +1000", + "Min" : 84 + }, + { + "Avg" : 90, + "date" : "2024-11-01 07:48:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 90, + "Min" : 90 + }, + { + "Min" : 81, + "Max" : 81, + "source" : "Madhava’s Apple Watch", + "Avg" : 81, + "date" : "2024-11-01 07:49:00 +1000" + }, + { + "Max" : 86, + "date" : "2024-11-01 07:51:00 +1000", + "Min" : 86, + "Avg" : 86, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 89, + "date" : "2024-11-01 07:52:00 +1000", + "Avg" : 86.78947368421052, + "source" : "Madhava’s Apple Watch", + "Min" : 85 + }, + { + "Max" : 94, + "Avg" : 89.270833333333343, + "Min" : 83, + "date" : 
"2024-11-01 07:53:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 87.698019801980209, + "source" : "Madhava’s Apple Watch", + "Max" : 93, + "Min" : 86, + "date" : "2024-11-01 07:54:00 +1000" + }, + { + "Avg" : 87.445121951219505, + "date" : "2024-11-01 07:55:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 89, + "Min" : 86 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 93, + "Min" : 88, + "Avg" : 91.077507598784194, + "date" : "2024-11-01 07:58:00 +1000" + }, + { + "Min" : 88, + "date" : "2024-11-01 07:59:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 89, + "Avg" : 88.5 + }, + { + "Avg" : 88.446428571428569, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 08:00:00 +1000", + "Max" : 91, + "Min" : 85 + }, + { + "Min" : 81, + "date" : "2024-11-01 08:01:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 86.303921568627445, + "Max" : 91 + }, + { + "date" : "2024-11-01 08:02:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 84, + "Max" : 96, + "Avg" : 87.872448979591837 + }, + { + "date" : "2024-11-01 08:03:00 +1000", + "Min" : 85, + "Avg" : 87.87912087912089, + "Max" : 91, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 90.381927710843371, + "Max" : 95, + "date" : "2024-11-01 08:04:00 +1000", + "Min" : 88 + }, + { + "Max" : 88, + "Min" : 83, + "Avg" : 86.191011235955045, + "date" : "2024-11-01 08:05:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 101, + "Avg" : 101, + "date" : "2024-11-01 08:06:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 101 + }, + { + "date" : "2024-11-01 08:12:00 +1000", + "Avg" : 101, + "source" : "Madhava’s Apple Watch", + "Max" : 101, + "Min" : 101 + }, + { + "Avg" : 113.37068965517241, + "Max" : 116, + "Min" : 111, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 08:15:00 +1000" + }, + { + "Max" : 117, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 08:16:00 +1000", + "Avg" : 115.64705882352942, + "Min" : 114 + }, + { + "Max" : 91, + "Min" : 91, + "date" : "2024-11-01 08:20:00 +1000", + "Avg" : 91, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 93, + "Max" : 93, + "date" : "2024-11-01 08:23:00 +1000", + "Avg" : 93, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 100, + "date" : "2024-11-01 08:28:00 +1000", + "Max" : 100, + "Avg" : 100 + }, + { + "Min" : 78, + "date" : "2024-11-01 08:40:00 +1000", + "Max" : 78, + "source" : "Madhava’s Apple Watch", + "Avg" : 78 + }, + { + "date" : "2024-11-01 08:43:00 +1000", + "Max" : 86, + "Min" : 86, + "Avg" : 86, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Min" : 67, + "Avg" : 67, + "date" : "2024-11-01 08:48:00 +1000" + }, + { + "Min" : 65, + "date" : "2024-11-01 08:55:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 66.25201034545897, + "Max" : 67.504020690917969 + }, + { + "Max" : 69, + "date" : "2024-11-01 08:56:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Avg" : 68 + }, + { + "Min" : 76, + "Max" : 76, + "date" : "2024-11-01 09:03:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 76 + }, + { + "date" : "2024-11-01 09:08:00 +1000", + "Avg" : 62.000000000000007, + "Max" : 62.000000000000007, + "source" : "Madhava’s Apple Watch", + "Min" : 62.000000000000007 + }, + { + "Min" : 67, + "Avg" : 67, + "date" : "2024-11-01 09:12:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 67 + }, + { + "Max" : 
65, + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "date" : "2024-11-01 09:20:00 +1000", + "Avg" : 65 + }, + { + "date" : "2024-11-01 09:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 71, + "Avg" : 71, + "Max" : 71 + }, + { + "Avg" : 65.500000000000014, + "date" : "2024-11-01 09:30:00 +1000", + "Max" : 67, + "source" : "Madhava’s Apple Watch", + "Min" : 64 + }, + { + "Avg" : 69, + "source" : "Madhava’s Apple Watch", + "Min" : 69, + "date" : "2024-11-01 09:31:00 +1000", + "Max" : 69 + }, + { + "Max" : 63, + "Min" : 63, + "Avg" : 63, + "date" : "2024-11-01 09:40:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 66, + "date" : "2024-11-01 09:42:00 +1000", + "Max" : 66, + "Avg" : 66, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 70, + "date" : "2024-11-01 09:50:00 +1000", + "Max" : 70, + "source" : "Madhava’s Apple Watch", + "Avg" : 70 + }, + { + "Avg" : 71, + "date" : "2024-11-01 09:52:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Min" : 71 + }, + { + "Avg" : 69, + "Max" : 69, + "Min" : 69, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 09:59:00 +1000" + }, + { + "Avg" : 62.000000000000007, + "Max" : 62.000000000000007, + "source" : "Madhava’s Apple Watch", + "Min" : 62.000000000000007, + "date" : "2024-11-01 10:04:00 +1000" + }, + { + "Avg" : 64, + "date" : "2024-11-01 10:06:00 +1000", + "Min" : 64, + "source" : "Madhava’s Apple Watch", + "Max" : 64 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 10:15:00 +1000", + "Avg" : 63, + "Max" : 63, + "Min" : 63 + }, + { + "Max" : 66, + "date" : "2024-11-01 10:18:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 66, + "Avg" : 66 + }, + { + "Min" : 66, + "Max" : 66, + "date" : "2024-11-01 10:22:00 +1000", + "Avg" : 66, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 66, + "Min" : 66, + "Max" : 66, + "date" : "2024-11-01 10:28:00 +1000" + }, + { + "Min" : 66, + "date" : "2024-11-01 10:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 66, + "Avg" : 66 + }, + { + "date" : "2024-11-01 10:37:00 +1000", + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Min" : 63 + }, + { + "Max" : 61, + "source" : "Madhava’s Apple Watch", + "Min" : 61, + "Avg" : 61, + "date" : "2024-11-01 10:41:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 62.000000000000007, + "Avg" : 62.000000000000007, + "date" : "2024-11-01 10:46:00 +1000", + "Min" : 62.000000000000007 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 10:51:00 +1000", + "Max" : 60, + "Min" : 60, + "Avg" : 60 + }, + { + "Max" : 56, + "date" : "2024-11-01 10:58:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 56, + "Min" : 56 + }, + { + "date" : "2024-11-01 11:05:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Avg" : 63, + "Min" : 63 + }, + { + "date" : "2024-11-01 11:07:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 62.000000000000007, + "Max" : 62.000000000000007, + "Min" : 62.000000000000007 + }, + { + "date" : "2024-11-01 11:13:00 +1000", + "Min" : 56, + "Max" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 11:15:00 +1000", + "Max" : 61, + "Min" : 61, + "Avg" : 61 + }, + { + "Max" : 60, + "Min" : 60, + "date" : "2024-11-01 11:23:00 +1000", + "Avg" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 11:26:00 +1000", + "source" : "Madhava’s Apple 
Watch", + "Avg" : 57, + "Max" : 57, + "Min" : 57 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "date" : "2024-11-01 11:34:00 +1000", + "Avg" : 58, + "Min" : 58 + }, + { + "Avg" : 61, + "Min" : 61, + "date" : "2024-11-01 11:40:00 +1000", + "Max" : 61, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 11:41:00 +1000", + "Max" : 60, + "Avg" : 60, + "source" : "Madhava’s Apple Watch", + "Min" : 60 + }, + { + "Max" : 56, + "Min" : 56, + "date" : "2024-11-01 11:49:00 +1000", + "Avg" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 56, + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 11:51:00 +1000" + }, + { + "Avg" : 56, + "date" : "2024-11-01 11:55:00 +1000", + "Min" : 56, + "Max" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 12:05:00 +1000", + "Min" : 55, + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Avg" : 55 + }, + { + "date" : "2024-11-01 12:08:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Avg" : 55, + "Min" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "Min" : 59, + "date" : "2024-11-01 12:14:00 +1000", + "Avg" : 59 + }, + { + "Avg" : 63, + "Min" : 63, + "date" : "2024-11-01 12:18:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 63, + "date" : "2024-11-01 12:21:00 +1000", + "Min" : 63, + "Max" : 63 + }, + { + "Avg" : 65, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 12:27:00 +1000", + "Max" : 65, + "Min" : 65 + }, + { + "date" : "2024-11-01 12:31:00 +1000", + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "Avg" : 59 + }, + { + "date" : "2024-11-01 12:37:00 +1000", + "Avg" : 61, + "source" : "Madhava’s Apple Watch", + "Max" : 61, + "Min" : 61 + }, + { + "date" : "2024-11-01 12:41:00 +1000", + "Max" : 64, + "Avg" : 64, + "Min" : 64, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 57, + "date" : "2024-11-01 12:49:00 +1000", + "Max" : 57, + "source" : "Madhava’s Apple Watch", + "Avg" : 57 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 53, + "Min" : 53, + "date" : "2024-11-01 12:55:00 +1000", + "Max" : 53 + }, + { + "Max" : 59, + "date" : "2024-11-01 12:58:00 +1000", + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "Min" : 59 + }, + { + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "Avg" : 63, + "date" : "2024-11-01 13:02:00 +1000", + "Min" : 63 + }, + { + "Min" : 71, + "Avg" : 71, + "Max" : 71, + "date" : "2024-11-01 13:10:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 72.366622924804688, + "Avg" : 72.68331146240233, + "Max" : 73, + "date" : "2024-11-01 13:11:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 13:17:00 +1000", + "Avg" : 74, + "Min" : 74, + "source" : "Madhava’s Apple Watch", + "Max" : 74 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 91, + "date" : "2024-11-01 13:42:00 +1000", + "Max" : 91, + "Avg" : 91 + }, + { + "date" : "2024-11-01 14:13:00 +1000", + "Avg" : 100.77631578947367, + "Max" : 105, + "source" : "Madhava’s Apple Watch", + "Min" : 97 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 92.499999999999986, + "Max" : 94, + "date" : "2024-11-01 14:14:00 +1000", + "Min" : 91 + }, + { + "date" : "2024-11-01 14:24:00 +1000", + "Min" : 60, + "Avg" : 60, + "Max" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 14:27:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "Max" : 
60, + "Avg" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 61, + "Avg" : 61, + "Max" : 61, + "date" : "2024-11-01 14:33:00 +1000" + }, + { + "Min" : 65, + "Max" : 65, + "date" : "2024-11-01 14:36:00 +1000", + "Avg" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 79, + "Min" : 79, + "Max" : 79, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 14:41:00 +1000" + }, + { + "Min" : 88, + "date" : "2024-11-01 14:59:00 +1000", + "Max" : 88, + "Avg" : 88, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 84, + "source" : "Madhava’s Apple Watch", + "Max" : 84, + "date" : "2024-11-01 15:03:00 +1000", + "Min" : 84 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 84, + "date" : "2024-11-01 15:10:00 +1000", + "Avg" : 84, + "Max" : 84 + }, + { + "Min" : 84, + "date" : "2024-11-01 15:13:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 84, + "Max" : 84 + }, + { + "date" : "2024-11-01 15:20:00 +1000", + "Min" : 80, + "Max" : 81.575431823730469, + "Avg" : 80.787715911865234, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 81, + "date" : "2024-11-01 15:25:00 +1000", + "Min" : 81, + "Avg" : 81, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 75, + "date" : "2024-11-01 15:28:00 +1000", + "Avg" : 75, + "Min" : 75 + }, + { + "Max" : 76, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 15:30:00 +1000", + "Avg" : 76, + "Min" : 76 + }, + { + "Min" : 72, + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "date" : "2024-11-01 15:39:00 +1000", + "Max" : 72 + }, + { + "Avg" : 73, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 15:45:00 +1000", + "Max" : 73, + "Min" : 73 + }, + { + "date" : "2024-11-01 15:47:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 72, + "Min" : 72, + "Avg" : 72 + }, + { + "Min" : 73, + "date" : "2024-11-01 15:50:00 +1000", + "Avg" : 73.014484405517578, + "Max" : 73.028968811035156, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 15:52:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 73, + "Min" : 73, + "Avg" : 73 + }, + { + "date" : "2024-11-01 15:55:00 +1000", + "Max" : 67, + "Avg" : 67, + "source" : "Madhava’s Apple Watch", + "Min" : 67 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 84, + "Max" : 84, + "Min" : 84, + "date" : "2024-11-01 16:03:00 +1000" + }, + { + "Avg" : 74, + "Max" : 74, + "Min" : 74, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 16:06:00 +1000" + }, + { + "Min" : 61, + "Avg" : 61, + "date" : "2024-11-01 16:23:00 +1000", + "Max" : 61, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 63, + "Min" : 63, + "date" : "2024-11-01 16:29:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 63 + }, + { + "Max" : 72, + "Avg" : 72, + "Min" : 72, + "date" : "2024-11-01 16:35:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 16:38:00 +1000", + "Min" : 64, + "Avg" : 64, + "source" : "Madhava’s Apple Watch", + "Max" : 64 + }, + { + "Avg" : 63, + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 16:44:00 +1000", + "Min" : 63 + }, + { + "Min" : 59, + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 16:45:00 +1000", + "Max" : 59 + }, + { + "Avg" : 79, + "Max" : 79, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 16:52:00 +1000", + "Min" : 79 + }, + { + "Avg" : 64, + "date" : "2024-11-01 16:57:00 +1000", + "Min" : 64, + "Max" : 64, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" 
: 59, + "date" : "2024-11-01 17:05:00 +1000", + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "Min" : 59 + }, + { + "Min" : 76, + "date" : "2024-11-01 17:13:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 76, + "Avg" : 76 + }, + { + "Max" : 78, + "Min" : 74, + "Avg" : 75.719999999999999, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 17:14:00 +1000" + }, + { + "Max" : 82, + "Min" : 77.000000000000014, + "Avg" : 78.965346532251985, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 17:15:00 +1000" + }, + { + "Max" : 91, + "source" : "Madhava’s Apple Watch", + "Avg" : 84.301020408163282, + "date" : "2024-11-01 17:16:00 +1000", + "Min" : 80 + }, + { + "Min" : 87, + "Avg" : 91.116687579476022, + "Max" : 94, + "date" : "2024-11-01 17:17:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 88, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 17:18:00 +1000", + "Avg" : 82.37113402061857, + "Min" : 78 + }, + { + "Max" : 84, + "Min" : 76, + "date" : "2024-11-01 17:19:00 +1000", + "Avg" : 80.152173913043484, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 17:20:00 +1000", + "Avg" : 78.754901960784323, + "source" : "Madhava’s Apple Watch", + "Min" : 76, + "Max" : 86 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 79.680000000000007, + "Max" : 81, + "Min" : 78, + "date" : "2024-11-01 17:21:00 +1000" + }, + { + "Max" : 78, + "Min" : 78, + "date" : "2024-11-01 17:22:00 +1000", + "Avg" : 78, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 76, + "Avg" : 79.947368419401542, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 17:23:00 +1000", + "Max" : 81 + }, + { + "Avg" : 80.349462364757528, + "source" : "Madhava’s Apple Watch", + "Max" : 84, + "Min" : 78, + "date" : "2024-11-01 17:24:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 81, + "Min" : 78, + "Avg" : 79.787878787878796, + "date" : "2024-11-01 17:25:00 +1000" + }, + { + "Min" : 80, + "source" : "Madhava’s Apple Watch", + "Max" : 85, + "Avg" : 82.432098765432102, + "date" : "2024-11-01 17:26:00 +1000" + }, + { + "Max" : 92.000000000000014, + "date" : "2024-11-01 17:27:00 +1000", + "Min" : 84, + "source" : "Madhava’s Apple Watch", + "Avg" : 88.777777779583971 + }, + { + "Max" : 94, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 17:28:00 +1000", + "Min" : 89, + "Avg" : 91.356435643564353 + }, + { + "Avg" : 93.6328125, + "Max" : 95, + "Min" : 92.000000000000014, + "date" : "2024-11-01 17:29:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 99, + "date" : "2024-11-01 17:32:00 +1000", + "Max" : 99, + "Avg" : 99 + }, + { + "Min" : 98, + "Avg" : 99.484126984126988, + "date" : "2024-11-01 17:33:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 103 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 17:34:00 +1000", + "Min" : 102, + "Max" : 104, + "Avg" : 103.00000000000001 + }, + { + "Avg" : 96.453333333333319, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 17:35:00 +1000", + "Min" : 92.000000000000014, + "Max" : 101 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 99, + "Avg" : 88.670212765957459, + "date" : "2024-11-01 17:36:00 +1000", + "Min" : 82 + }, + { + "date" : "2024-11-01 17:37:00 +1000", + "Min" : 84, + "source" : "Madhava’s Apple Watch", + "Max" : 88, + "Avg" : 85.938271604938251 + }, + { + "Max" : 75, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 17:45:00 +1000", + "Min" : 75, + 
"Avg" : 75 + }, + { + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58, + "Avg" : 58, + "date" : "2024-11-01 17:49:00 +1000" + }, + { + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "date" : "2024-11-01 17:52:00 +1000", + "Avg" : 59 + }, + { + "Max" : 59, + "date" : "2024-11-01 17:58:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "Avg" : 59 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Avg" : 63, + "Max" : 63, + "date" : "2024-11-01 18:00:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "Avg" : 60, + "date" : "2024-11-01 18:08:00 +1000", + "Max" : 60 + }, + { + "date" : "2024-11-01 18:13:00 +1000", + "Avg" : 58.101131439208984, + "Min" : 57.202262878417969, + "Max" : 59, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 55, + "Min" : 55, + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "date" : "2024-11-01 18:17:00 +1000" + }, + { + "date" : "2024-11-01 18:22:00 +1000", + "Max" : 59, + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "Avg" : 59 + }, + { + "Min" : 82, + "date" : "2024-11-01 18:40:00 +1000", + "Avg" : 82, + "Max" : 82, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 89, + "Avg" : 89, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 18:44:00 +1000", + "Min" : 89 + }, + { + "Min" : 85, + "Avg" : 85, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 18:49:00 +1000", + "Max" : 85 + }, + { + "Min" : 88, + "source" : "Madhava’s Apple Watch", + "Max" : 88, + "date" : "2024-11-01 18:52:00 +1000", + "Avg" : 88 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 87, + "date" : "2024-11-01 19:04:00 +1000", + "Min" : 87, + "Avg" : 87 + }, + { + "Min" : 85, + "Max" : 85, + "Avg" : 85, + "date" : "2024-11-01 19:07:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 86, + "Avg" : 86, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 19:14:00 +1000", + "Min" : 86 + }, + { + "date" : "2024-11-01 19:16:00 +1000", + "Max" : 96, + "source" : "Madhava’s Apple Watch", + "Avg" : 96, + "Min" : 96 + }, + { + "Min" : 85, + "date" : "2024-11-01 19:21:00 +1000", + "Avg" : 85, + "source" : "Madhava’s Apple Watch", + "Max" : 85 + }, + { + "Avg" : 85, + "Max" : 85, + "Min" : 85, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 19:26:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 91, + "date" : "2024-11-01 19:33:00 +1000", + "Max" : 91, + "Avg" : 91 + }, + { + "Min" : 77.000000000000014, + "Avg" : 77.000000000000014, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 20:00:00 +1000", + "Max" : 77.000000000000014 + }, + { + "Avg" : 67, + "date" : "2024-11-01 20:04:00 +1000", + "Max" : 67, + "source" : "Madhava’s Apple Watch", + "Min" : 67 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "date" : "2024-11-01 20:05:00 +1000", + "Min" : 67, + "Avg" : 67 + }, + { + "date" : "2024-11-01 20:13:00 +1000", + "Max" : 80, + "Avg" : 80, + "Min" : 80, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 90, + "source" : "Madhava’s Apple Watch", + "Max" : 90, + "Avg" : 90, + "date" : "2024-11-01 20:16:00 +1000" + }, + { + "Max" : 80, + "Min" : 80, + "date" : "2024-11-01 20:23:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 80 + }, + { + "Avg" : 80, + "date" : "2024-11-01 20:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 80, + "Max" : 80 + }, + { + "Max" : 73, + "Avg" : 73, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 20:27:00 +1000", + "Min" : 73 + }, + 
{ + "Min" : 72, + "Avg" : 72, + "date" : "2024-11-01 20:33:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 72 + }, + { + "date" : "2024-11-01 20:35:00 +1000", + "Avg" : 73, + "source" : "Madhava’s Apple Watch", + "Min" : 73, + "Max" : 73 + }, + { + "date" : "2024-11-01 20:40:00 +1000", + "Min" : 80, + "Avg" : 80, + "source" : "Madhava’s Apple Watch", + "Max" : 80 + }, + { + "Min" : 73, + "Max" : 73, + "date" : "2024-11-01 20:45:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 73 + }, + { + "Max" : 73, + "date" : "2024-11-01 20:51:00 +1000", + "Min" : 73, + "Avg" : 73, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 20:59:00 +1000", + "Min" : 70, + "source" : "Madhava’s Apple Watch", + "Max" : 70, + "Avg" : 70 + }, + { + "date" : "2024-11-01 21:04:00 +1000", + "Max" : 102, + "Min" : 102, + "Avg" : 102, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 68, + "Max" : 68, + "Min" : 68, + "date" : "2024-11-01 21:33:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 75, + "date" : "2024-11-01 21:34:00 +1000", + "Avg" : 75, + "Min" : 75 + }, + { + "Avg" : 74, + "Max" : 74, + "source" : "Madhava’s Apple Watch", + "Min" : 74, + "date" : "2024-11-01 21:37:00 +1000" + }, + { + "Max" : 76.033309936523438, + "Min" : 76.033309936523438, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 21:40:00 +1000", + "Avg" : 76.033309936523438 + }, + { + "Max" : 72, + "Min" : 72, + "date" : "2024-11-01 21:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 72 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 77.000000000000014, + "Max" : 77.000000000000014, + "Avg" : 77.000000000000014, + "date" : "2024-11-01 21:48:00 +1000" + }, + { + "date" : "2024-11-01 21:51:00 +1000", + "Min" : 71, + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Avg" : 71 + }, + { + "Min" : 73, + "Avg" : 73, + "date" : "2024-11-01 21:55:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 73 + }, + { + "Max" : 72, + "Avg" : 72, + "Min" : 72, + "date" : "2024-11-01 22:02:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 22:04:00 +1000", + "Min" : 73, + "Avg" : 73, + "source" : "Madhava’s Apple Watch", + "Max" : 73 + }, + { + "Max" : 73, + "date" : "2024-11-01 22:05:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 73, + "Min" : 73 + }, + { + "date" : "2024-11-01 22:13:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 79, + "Min" : 79, + "Avg" : 79 + }, + { + "Max" : 80, + "Min" : 80, + "Avg" : 80, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 22:18:00 +1000" + }, + { + "Max" : 79, + "source" : "Madhava’s Apple Watch", + "Min" : 79, + "Avg" : 79, + "date" : "2024-11-01 22:20:00 +1000" + }, + { + "date" : "2024-11-01 22:28:00 +1000", + "Min" : 72, + "Avg" : 72, + "source" : "Madhava’s Apple Watch", + "Max" : 72 + }, + { + "date" : "2024-11-01 22:31:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 71, + "Avg" : 71, + "Max" : 71 + }, + { + "Min" : 69, + "Max" : 69, + "date" : "2024-11-01 22:34:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 69 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 68, + "date" : "2024-11-01 22:39:00 +1000", + "Min" : 68, + "Avg" : 68 + }, + { + "Min" : 64, + "source" : "Madhava’s Apple Watch", + "Avg" : 64, + "date" : "2024-11-01 22:45:00 +1000", + "Max" : 64 + }, + { + "Avg" : 63, + "Max" : 63, + "Min" : 63, + "date" : "2024-11-01 22:47:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + 
{ + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "date" : "2024-11-01 22:50:00 +1000", + "Max" : 64, + "Avg" : 64 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "date" : "2024-11-01 22:56:00 +1000", + "Avg" : 63, + "Max" : 63 + }, + { + "date" : "2024-11-01 23:00:00 +1000", + "Avg" : 64, + "Max" : 64, + "source" : "Madhava’s Apple Watch", + "Min" : 64 + }, + { + "date" : "2024-11-01 23:04:00 +1000", + "Avg" : 60, + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "Min" : 60 + }, + { + "Min" : 62.000000000000007, + "Avg" : 62.000000000000007, + "Max" : 62.000000000000007, + "date" : "2024-11-01 23:06:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 23:12:00 +1000", + "Max" : 55, + "Avg" : 55, + "source" : "Madhava’s Apple Watch", + "Min" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 64, + "date" : "2024-11-01 23:17:00 +1000", + "Max" : 64, + "Min" : 64 + }, + { + "Avg" : 66, + "Min" : 66, + "date" : "2024-11-01 23:20:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 66 + }, + { + "Max" : 67, + "date" : "2024-11-01 23:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Avg" : 67 + }, + { + "date" : "2024-11-01 23:31:00 +1000", + "Min" : 64, + "Max" : 64, + "Avg" : 64, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Avg" : 63, + "date" : "2024-11-01 23:37:00 +1000", + "Min" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-01 23:39:00 +1000", + "Avg" : 61, + "Max" : 61, + "Min" : 61 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 61, + "date" : "2024-11-01 23:40:00 +1000", + "Min" : 60.486045837402344, + "Avg" : 60.743022918701172 + }, + { + "Avg" : 60, + "Max" : 60, + "Min" : 60, + "date" : "2024-11-01 23:48:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-01 23:53:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 57, + "Min" : 57, + "Avg" : 57 + }, + { + "date" : "2024-11-01 23:58:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "Min" : 58 + }, + { + "Min" : 58, + "date" : "2024-11-02 00:02:00 +1000", + "Max" : 58, + "Avg" : 58, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 59, + "Avg" : 59, + "Max" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 00:07:00 +1000" + }, + { + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "date" : "2024-11-02 00:11:00 +1000", + "Max" : 59 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 58, + "Avg" : 58, + "date" : "2024-11-02 00:19:00 +1000" + }, + { + "date" : "2024-11-02 00:20:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "Avg" : 59, + "Min" : 59 + }, + { + "date" : "2024-11-02 00:25:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "Min" : 58 + }, + { + "Max" : 52, + "Min" : 52, + "date" : "2024-11-02 00:27:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 52 + }, + { + "date" : "2024-11-02 00:33:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "Max" : 58, + "Min" : 58 + }, + { + "Min" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 00:35:00 +1000", + "Avg" : 56, + "Max" : 56 + }, + { + "Max" : 55, + "Min" : 55, + "date" : "2024-11-02 00:43:00 +1000", + "Avg" : 55, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 55, + "Min" : 55, + "Avg" : 55, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 00:48:00 +1000" + }, + { + "date" : 
"2024-11-02 00:54:00 +1000", + "Max" : 55, + "Min" : 55, + "source" : "Madhava’s Apple Watch", + "Avg" : 55 + }, + { + "Max" : 56, + "Min" : 56, + "source" : "Madhava’s Apple Watch", + "Avg" : 56, + "date" : "2024-11-02 00:56:00 +1000" + }, + { + "date" : "2024-11-02 00:57:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "Max" : 55, + "Avg" : 55 + }, + { + "date" : "2024-11-02 01:03:00 +1000", + "Avg" : 57, + "Max" : 57, + "Min" : 57, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 58, + "date" : "2024-11-02 01:05:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 58, + "Avg" : 58 + }, + { + "Avg" : 56, + "Min" : 56, + "source" : "Madhava’s Apple Watch", + "Max" : 56, + "date" : "2024-11-02 01:11:00 +1000" + }, + { + "Max" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 01:20:00 +1000", + "Min" : 59, + "Avg" : 59 + }, + { + "date" : "2024-11-02 01:23:00 +1000", + "Min" : 58, + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "Max" : 58 + }, + { + "date" : "2024-11-02 01:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 60, + "Min" : 60, + "Max" : 60 + }, + { + "Max" : 61, + "source" : "Madhava’s Apple Watch", + "Avg" : 61, + "Min" : 61, + "date" : "2024-11-02 01:30:00 +1000" + }, + { + "date" : "2024-11-02 01:36:00 +1000", + "Max" : 57, + "Min" : 57, + "source" : "Madhava’s Apple Watch", + "Avg" : 57 + }, + { + "Max" : 62.000000000000007, + "Min" : 60.037975311279304, + "date" : "2024-11-02 01:40:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 61.018987655639656 + }, + { + "date" : "2024-11-02 01:43:00 +1000", + "Min" : 64, + "Avg" : 64, + "source" : "Madhava’s Apple Watch", + "Max" : 64 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 64, + "Min" : 64, + "Max" : 64, + "date" : "2024-11-02 01:49:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "Min" : 54, + "date" : "2024-11-02 01:55:00 +1000", + "Avg" : 54 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 01:56:00 +1000", + "Min" : 53, + "Max" : 53, + "Avg" : 53 + }, + { + "Min" : 54, + "Avg" : 54, + "date" : "2024-11-02 02:00:00 +1000", + "Max" : 54, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "Max" : 55, + "Avg" : 55, + "date" : "2024-11-02 02:06:00 +1000" + }, + { + "date" : "2024-11-02 02:08:00 +1000", + "Max" : 55, + "Avg" : 55, + "source" : "Madhava’s Apple Watch", + "Min" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 53, + "Max" : 53, + "Min" : 53, + "date" : "2024-11-02 02:13:00 +1000" + }, + { + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "Min" : 54, + "date" : "2024-11-02 02:17:00 +1000", + "Avg" : 54 + }, + { + "Min" : 55, + "date" : "2024-11-02 02:21:00 +1000", + "Max" : 55, + "Avg" : 55, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 55, + "date" : "2024-11-02 02:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Avg" : 55 + }, + { + "Max" : 56, + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 02:32:00 +1000" + }, + { + "date" : "2024-11-02 02:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "Max" : 55, + "Avg" : 55 + }, + { + "Max" : 55, + "date" : "2024-11-02 02:36:00 +1000", + "Min" : 55, + "Avg" : 55, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 53, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 02:43:00 +1000", + "Max" : 53, + "Min" : 53 + }, + { + "Min" : 55, + "date" : "2024-11-02 02:45:00 +1000", + 
"source" : "Madhava’s Apple Watch", + "Avg" : 55, + "Max" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "Min" : 59, + "date" : "2024-11-02 02:50:00 +1000", + "Avg" : 59 + }, + { + "Min" : 55, + "date" : "2024-11-02 02:58:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Avg" : 55 + }, + { + "date" : "2024-11-02 03:00:00 +1000", + "Max" : 54, + "Min" : 54, + "Avg" : 54, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 03:06:00 +1000", + "Min" : 61.999999999999993, + "source" : "Madhava’s Apple Watch", + "Max" : 66, + "Avg" : 64 + }, + { + "Min" : 53, + "Avg" : 53, + "source" : "Madhava’s Apple Watch", + "Max" : 53, + "date" : "2024-11-02 03:12:00 +1000" + }, + { + "Avg" : 64, + "date" : "2024-11-02 03:16:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "Max" : 64 + }, + { + "Max" : 67, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 03:22:00 +1000", + "Min" : 67, + "Avg" : 67 + }, + { + "date" : "2024-11-02 03:26:00 +1000", + "Max" : 67, + "Avg" : 67, + "Min" : 67, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 03:33:00 +1000", + "Avg" : 52, + "Min" : 52, + "Max" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 03:36:00 +1000", + "Avg" : 50, + "Min" : 50, + "source" : "Madhava’s Apple Watch", + "Max" : 50 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 52, + "Max" : 52, + "date" : "2024-11-02 03:37:00 +1000", + "Min" : 52 + }, + { + "Avg" : 50.028911590576172, + "Max" : 50.028911590576172, + "date" : "2024-11-02 03:40:00 +1000", + "Min" : 50.028911590576172, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 03:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 51, + "Min" : 51, + "Max" : 51 + }, + { + "Max" : 50, + "Min" : 50, + "Avg" : 50, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 03:48:00 +1000" + }, + { + "Max" : 50, + "date" : "2024-11-02 03:52:00 +1000", + "Min" : 50, + "source" : "Madhava’s Apple Watch", + "Avg" : 50 + }, + { + "Avg" : 52, + "Max" : 52, + "date" : "2024-11-02 03:59:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 52 + }, + { + "date" : "2024-11-02 04:01:00 +1000", + "Min" : 52, + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "Max" : 52 + }, + { + "date" : "2024-11-02 04:06:00 +1000", + "Max" : 55, + "Min" : 55, + "Avg" : 55, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 52, + "date" : "2024-11-02 04:07:00 +1000", + "Max" : 52, + "Min" : 52, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 04:10:00 +1000", + "Max" : 55, + "Min" : 55, + "Avg" : 55 + }, + { + "date" : "2024-11-02 04:16:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 56, + "Avg" : 56, + "Max" : 56 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 62.000000000000007, + "date" : "2024-11-02 04:23:00 +1000", + "Max" : 62.000000000000007, + "Min" : 62.000000000000007 + }, + { + "date" : "2024-11-02 04:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 62.000000000000007, + "Avg" : 62.000000000000007, + "Max" : 62.000000000000007 + }, + { + "date" : "2024-11-02 04:31:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 56, + "Min" : 56, + "Avg" : 56 + }, + { + "date" : "2024-11-02 04:35:00 +1000", + "Min" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Avg" : 63 + }, + { + "Avg" : 63, + "Min" : 63, + "Max" : 63, + "date" : "2024-11-02 04:36:00 +1000", + "source" : "Madhava’s Apple Watch" + 
}, + { + "date" : "2024-11-02 04:45:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58, + "Avg" : 58 + }, + { + "Min" : 52, + "Max" : 52, + "Avg" : 52, + "date" : "2024-11-02 04:46:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 57, + "Avg" : 57, + "date" : "2024-11-02 04:51:00 +1000", + "Max" : 57, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 63, + "Min" : 63, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 04:56:00 +1000", + "Max" : 63 + }, + { + "Max" : 55, + "Min" : 55, + "date" : "2024-11-02 05:02:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 55 + }, + { + "Min" : 57, + "Max" : 57, + "date" : "2024-11-02 05:06:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 57 + }, + { + "Avg" : 53, + "source" : "Madhava’s Apple Watch", + "Max" : 53, + "date" : "2024-11-02 05:07:00 +1000", + "Min" : 53 + }, + { + "date" : "2024-11-02 05:13:00 +1000", + "Max" : 60, + "Min" : 60, + "source" : "Madhava’s Apple Watch", + "Avg" : 60 + }, + { + "date" : "2024-11-02 05:16:00 +1000", + "Max" : 58, + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58 + }, + { + "date" : "2024-11-02 05:21:00 +1000", + "Min" : 60, + "Avg" : 60, + "source" : "Madhava’s Apple Watch", + "Max" : 60 + }, + { + "Max" : 55, + "Avg" : 55, + "date" : "2024-11-02 05:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 55 + }, + { + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "Max" : 56, + "date" : "2024-11-02 05:32:00 +1000", + "Min" : 56 + }, + { + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 05:37:00 +1000", + "Max" : 57, + "Min" : 55 + }, + { + "Avg" : 54.107303619384766, + "Max" : 54.107303619384766, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 05:40:00 +1000", + "Min" : 54.107303619384766 + }, + { + "Min" : 55, + "Max" : 55, + "Avg" : 55, + "date" : "2024-11-02 05:44:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 05:50:00 +1000", + "Max" : 55, + "Min" : 55, + "source" : "Madhava’s Apple Watch", + "Avg" : 55 + }, + { + "Max" : 56, + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 05:53:00 +1000" + }, + { + "Avg" : 55, + "Max" : 55, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 05:55:00 +1000", + "Min" : 55 + }, + { + "Avg" : 55, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 06:00:00 +1000", + "Min" : 55, + "Max" : 55 + }, + { + "Avg" : 58.5, + "Min" : 57, + "date" : "2024-11-02 06:07:00 +1000", + "Max" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 06:14:00 +1000", + "Avg" : 58, + "Max" : 58, + "Min" : 58, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 61, + "source" : "Madhava’s Apple Watch", + "Min" : 61, + "Avg" : 61, + "date" : "2024-11-02 06:16:00 +1000" + }, + { + "Avg" : 66, + "date" : "2024-11-02 06:22:00 +1000", + "Max" : 66, + "source" : "Madhava’s Apple Watch", + "Min" : 66 + }, + { + "date" : "2024-11-02 06:28:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 63, + "Max" : 63, + "Min" : 63 + }, + { + "date" : "2024-11-02 06:32:00 +1000", + "Max" : 65, + "Min" : 65, + "Avg" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 06:35:00 +1000", + "Max" : 60, + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "Avg" : 60 + }, + { + "Max" : 57, + "Min" : 57, + "Avg" : 57, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 06:37:00 +1000" + }, + { + "Avg" : 65, + "source" : "Madhava’s 
Apple Watch", + "Min" : 65, + "date" : "2024-11-02 06:41:00 +1000", + "Max" : 65 + }, + { + "date" : "2024-11-02 06:46:00 +1000", + "Max" : 59, + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "Avg" : 59 + }, + { + "Max" : 67, + "Min" : 67, + "Avg" : 67, + "date" : "2024-11-02 06:53:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-11-02 06:58:00 +1000", + "Avg" : 63, + "Min" : 63 + }, + { + "Max" : 65, + "date" : "2024-11-02 07:02:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 65, + "Min" : 65 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 70, + "Max" : 70, + "date" : "2024-11-02 07:13:00 +1000", + "Avg" : 70 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 81, + "Max" : 81, + "date" : "2024-11-02 07:17:00 +1000", + "Avg" : 81 + }, + { + "Avg" : 81, + "date" : "2024-11-02 07:23:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 81, + "Min" : 81 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 76, + "Max" : 76, + "date" : "2024-11-02 07:33:00 +1000", + "Min" : 76 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 07:37:00 +1000", + "Max" : 75, + "Min" : 75, + "Avg" : 75 + }, + { + "date" : "2024-11-02 07:41:00 +1000", + "Avg" : 76, + "Max" : 76, + "Min" : 76, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 07:47:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 65, + "Max" : 65, + "Min" : 65 + }, + { + "Avg" : 85, + "date" : "2024-11-02 07:51:00 +1000", + "Max" : 85, + "source" : "Madhava’s Apple Watch", + "Min" : 85 + }, + { + "Min" : 70, + "date" : "2024-11-02 07:59:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 70, + "Max" : 70 + }, + { + "date" : "2024-11-02 08:02:00 +1000", + "Max" : 70, + "Min" : 70, + "Avg" : 70, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 75, + "Max" : 75, + "Avg" : 75, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 08:06:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 08:10:00 +1000", + "Min" : 75, + "Avg" : 75, + "Max" : 75 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 84, + "Avg" : 84, + "Min" : 84, + "date" : "2024-11-02 08:14:00 +1000" + }, + { + "Min" : 154.00000000000003, + "Max" : 154.00000000000003, + "date" : "2024-11-02 08:17:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 154.00000000000003 + }, + { + "Avg" : 93.677777777777777, + "date" : "2024-11-02 08:18:00 +1000", + "Max" : 109, + "source" : "Madhava’s Apple Watch", + "Min" : 83 + }, + { + "Max" : 70, + "date" : "2024-11-02 08:21:00 +1000", + "Min" : 70, + "Avg" : 70, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 08:23:00 +1000", + "Min" : 77.000000000000014, + "Avg" : 77.000000000000014, + "source" : "Madhava’s Apple Watch", + "Max" : 77.000000000000014 + }, + { + "date" : "2024-11-02 08:25:00 +1000", + "Max" : 84, + "Min" : 84, + "Avg" : 84, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 83.482758620689665, + "Max" : 87, + "date" : "2024-11-02 08:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 79 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 08:27:00 +1000", + "Min" : 80, + "Max" : 91, + "Avg" : 83.115815682982216 + }, + { + "Max" : 82, + "date" : "2024-11-02 08:28:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 77.000000000000014, + "Avg" : 79.73737373737373 + }, + { + "Avg" : 86.462499999999991, + "source" : "Madhava’s Apple Watch", + "date" : 
"2024-11-02 08:29:00 +1000", + "Max" : 93, + "Min" : 84 + }, + { + "date" : "2024-11-02 08:32:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 90, + "Min" : 90, + "Avg" : 90 + }, + { + "Max" : 104, + "source" : "Madhava’s Apple Watch", + "Min" : 92.000000000000014, + "Avg" : 97.986666666666665, + "date" : "2024-11-02 08:33:00 +1000" + }, + { + "Max" : 96, + "date" : "2024-11-02 08:34:00 +1000", + "Min" : 82, + "source" : "Madhava’s Apple Watch", + "Avg" : 89.785714285714306 + }, + { + "Max" : 105, + "Min" : 92.000000000000014, + "Avg" : 98.000000000000014, + "date" : "2024-11-02 08:42:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 64, + "date" : "2024-11-02 08:54:00 +1000", + "Min" : 64, + "Avg" : 64 + }, + { + "date" : "2024-11-02 08:57:00 +1000", + "Avg" : 65, + "source" : "Madhava’s Apple Watch", + "Max" : 65, + "Min" : 65 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 69, + "Avg" : 69, + "date" : "2024-11-02 09:04:00 +1000", + "Max" : 69 + }, + { + "Min" : 67, + "Avg" : 67, + "Max" : 67, + "date" : "2024-11-02 09:05:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 87, + "date" : "2024-11-02 09:17:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 87, + "Avg" : 87 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 09:21:00 +1000", + "Max" : 73, + "Avg" : 73, + "Min" : 73 + }, + { + "date" : "2024-11-02 09:26:00 +1000", + "Max" : 92.000000000000014, + "Avg" : 92.000000000000014, + "source" : "Madhava’s Apple Watch", + "Min" : 92.000000000000014 + }, + { + "Min" : 86, + "date" : "2024-11-02 09:31:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 86, + "Max" : 86 + }, + { + "date" : "2024-11-02 09:43:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 92.000000000000014, + "Max" : 92.000000000000014, + "Avg" : 92.000000000000014 + }, + { + "date" : "2024-11-02 09:50:00 +1000", + "Min" : 92.000000000000014, + "Max" : 92.000000000000014, + "Avg" : 92.000000000000014, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 105, + "Min" : 105, + "source" : "Madhava’s Apple Watch", + "Max" : 105, + "date" : "2024-11-02 09:51:00 +1000" + }, + { + "Min" : 94, + "Max" : 94, + "date" : "2024-11-02 10:00:00 +1000", + "Avg" : 94, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 96, + "Max" : 96, + "date" : "2024-11-02 10:14:00 +1000", + "Min" : 96, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 98, + "date" : "2024-11-02 10:16:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 98, + "Min" : 98 + }, + { + "date" : "2024-11-02 10:20:00 +1000", + "Min" : 106, + "Max" : 106, + "source" : "Madhava’s Apple Watch", + "Avg" : 106 + }, + { + "Max" : 88, + "date" : "2024-11-02 10:29:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 88, + "Min" : 88 + }, + { + "date" : "2024-11-02 10:32:00 +1000", + "Min" : 98, + "source" : "Madhava’s Apple Watch", + "Max" : 98, + "Avg" : 98 + }, + { + "date" : "2024-11-02 10:35:00 +1000", + "Min" : 100, + "Avg" : 100, + "Max" : 100, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 10:39:00 +1000", + "Avg" : 87, + "Min" : 87, + "Max" : 87 + }, + { + "Min" : 85, + "Avg" : 85, + "date" : "2024-11-02 10:45:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 85 + }, + { + "date" : "2024-11-02 10:47:00 +1000", + "Max" : 87, + "Min" : 87, + "Avg" : 87, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 85, + "date" : "2024-11-02 10:53:00 +1000", 
+ "source" : "Madhava’s Apple Watch", + "Min" : 85, + "Max" : 85 + }, + { + "Avg" : 86, + "Max" : 86, + "Min" : 86, + "date" : "2024-11-02 10:57:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 11:01:00 +1000", + "Min" : 81, + "Max" : 81, + "Avg" : 81 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 79, + "date" : "2024-11-02 11:07:00 +1000", + "Min" : 79, + "Avg" : 79 + }, + { + "Max" : 79, + "Min" : 79, + "Avg" : 79, + "date" : "2024-11-02 11:08:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 77.000000000000014, + "date" : "2024-11-02 11:11:00 +1000", + "Max" : 77.000000000000014, + "Avg" : 77.000000000000014, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 104, + "Avg" : 104, + "Max" : 104, + "date" : "2024-11-02 11:15:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 95, + "Max" : 95, + "source" : "Madhava’s Apple Watch", + "Min" : 95, + "date" : "2024-11-02 11:22:00 +1000" + }, + { + "date" : "2024-11-02 12:01:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 70, + "Max" : 70, + "Min" : 70 + }, + { + "date" : "2024-11-02 12:15:00 +1000", + "Min" : 111, + "Avg" : 114.348484844243, + "source" : "Madhava’s Apple Watch", + "Max" : 117 + }, + { + "Min" : 72, + "Avg" : 72, + "Max" : 72, + "date" : "2024-11-02 12:24:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 73, + "date" : "2024-11-02 12:27:00 +1000", + "Max" : 73, + "source" : "Madhava’s Apple Watch", + "Min" : 73 + }, + { + "Min" : 74.263862609863281, + "Avg" : 74.263862609863281, + "Max" : 74.263862609863281, + "date" : "2024-11-02 12:32:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 74.736595153808594, + "Max" : 76, + "Avg" : 75.368297576904297, + "date" : "2024-11-02 12:33:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 73, + "Max" : 73, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 12:34:00 +1000", + "Avg" : 73 + }, + { + "Max" : 76, + "date" : "2024-11-02 12:38:00 +1000", + "Min" : 76, + "Avg" : 76, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 79, + "Avg" : 79, + "Min" : 79, + "date" : "2024-11-02 12:42:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 76, + "Avg" : 76, + "date" : "2024-11-02 12:46:00 +1000", + "Max" : 76 + }, + { + "Max" : 75, + "Min" : 75, + "Avg" : 75, + "date" : "2024-11-02 12:52:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 12:57:00 +1000", + "Avg" : 74, + "Min" : 74, + "Max" : 74 + }, + { + "Max" : 76, + "date" : "2024-11-02 13:02:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 76, + "Avg" : 76 + }, + { + "Min" : 76, + "Avg" : 76, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 13:09:00 +1000", + "Max" : 76 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 13:10:00 +1000", + "Avg" : 73, + "Max" : 73, + "Min" : 73 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 74, + "Avg" : 74, + "date" : "2024-11-02 13:18:00 +1000", + "Max" : 74 + }, + { + "Avg" : 73, + "Max" : 73, + "Min" : 73, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 13:19:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 73, + "Avg" : 73, + "Max" : 73, + "date" : "2024-11-02 13:24:00 +1000" + }, + { + "Max" : 73, + "Min" : 73, + "Avg" : 73, + "date" : "2024-11-02 13:27:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + 
"source" : "Madhava’s Apple Watch", + "Max" : 70, + "Min" : 70, + "date" : "2024-11-02 13:31:00 +1000", + "Avg" : 70 + }, + { + "date" : "2024-11-02 13:36:00 +1000", + "Min" : 69, + "source" : "Madhava’s Apple Watch", + "Avg" : 69, + "Max" : 69 + }, + { + "Min" : 70, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 13:45:00 +1000", + "Avg" : 70, + "Max" : 70 + }, + { + "Max" : 67, + "date" : "2024-11-02 13:49:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Avg" : 67 + }, + { + "Avg" : 67, + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "date" : "2024-11-02 13:51:00 +1000", + "Max" : 67 + }, + { + "Min" : 68, + "Avg" : 68, + "source" : "Madhava’s Apple Watch", + "Max" : 68, + "date" : "2024-11-02 14:00:00 +1000" + }, + { + "Avg" : 64, + "source" : "Madhava’s Apple Watch", + "Max" : 64, + "date" : "2024-11-02 14:02:00 +1000", + "Min" : 64 + }, + { + "Max" : 67, + "date" : "2024-11-02 14:10:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 67, + "Min" : 67 + }, + { + "Max" : 66, + "Avg" : 66, + "date" : "2024-11-02 14:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 66 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 66, + "Avg" : 66, + "Max" : 66, + "date" : "2024-11-02 14:15:00 +1000" + }, + { + "date" : "2024-11-02 14:20:00 +1000", + "Avg" : 68, + "Min" : 68, + "Max" : 68, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 70.829948425292969, + "Avg" : 70.829948425292969, + "source" : "Madhava’s Apple Watch", + "Max" : 70.829948425292969, + "date" : "2024-11-02 14:25:00 +1000" + }, + { + "Avg" : 70, + "Min" : 70, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 14:29:00 +1000", + "Max" : 70 + }, + { + "date" : "2024-11-02 14:31:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 71, + "Max" : 71, + "Min" : 71 + }, + { + "Avg" : 85, + "Max" : 85, + "Min" : 85, + "date" : "2024-11-02 14:37:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 14:45:00 +1000", + "Avg" : 86, + "Max" : 86, + "Min" : 86 + }, + { + "Avg" : 83, + "Min" : 83, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 14:53:00 +1000", + "Max" : 83 + }, + { + "date" : "2024-11-02 15:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Min" : 71, + "Avg" : 71 + }, + { + "Max" : 89, + "Min" : 89, + "Avg" : 89, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 15:30:00 +1000" + }, + { + "Avg" : 89, + "date" : "2024-11-02 15:33:00 +1000", + "Min" : 89, + "Max" : 89, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 85, + "Avg" : 85, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 15:39:00 +1000", + "Min" : 85 + }, + { + "Max" : 83, + "Min" : 83, + "date" : "2024-11-02 15:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 83 + }, + { + "Min" : 88, + "date" : "2024-11-02 15:50:00 +1000", + "Avg" : 94.873417723782453, + "source" : "Madhava’s Apple Watch", + "Max" : 99 + }, + { + "Avg" : 92.000000000000014, + "Max" : 92.000000000000014, + "date" : "2024-11-02 15:54:00 +1000", + "Min" : 92.000000000000014, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 85, + "Max" : 85, + "date" : "2024-11-02 15:56:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 85 + }, + { + "Max" : 79, + "date" : "2024-11-02 16:00:00 +1000", + "Min" : 79, + "source" : "Madhava’s Apple Watch", + "Avg" : 79 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 76, + "date" : "2024-11-02 16:09:00 +1000", + "Avg" : 76, + "Max" : 
76 + }, + { + "date" : "2024-11-02 16:14:00 +1000", + "Min" : 79, + "source" : "Madhava’s Apple Watch", + "Max" : 79, + "Avg" : 79 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 77.000000000000014, + "Avg" : 77.000000000000014, + "date" : "2024-11-02 16:15:00 +1000", + "Min" : 77.000000000000014 + }, + { + "Avg" : 78, + "Max" : 78, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 16:21:00 +1000", + "Min" : 78 + }, + { + "date" : "2024-11-02 16:31:00 +1000", + "Avg" : 103.83157894611359, + "Min" : 91, + "Max" : 114, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 94, + "date" : "2024-11-02 16:33:00 +1000", + "Max" : 94, + "source" : "Madhava’s Apple Watch", + "Avg" : 94 + }, + { + "Min" : 100, + "Avg" : 100, + "date" : "2024-11-02 16:38:00 +1000", + "Max" : 100, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 87, + "Avg" : 87, + "date" : "2024-11-02 16:45:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 87 + }, + { + "Max" : 92.000000000000014, + "source" : "Madhava’s Apple Watch", + "Min" : 92.000000000000014, + "date" : "2024-11-02 16:54:00 +1000", + "Avg" : 92.000000000000014 + }, + { + "Avg" : 93, + "date" : "2024-11-02 16:58:00 +1000", + "Max" : 93, + "Min" : 93, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 90, + "Max" : 90, + "Min" : 90, + "date" : "2024-11-02 17:04:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 87, + "Avg" : 87, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 17:09:00 +1000", + "Max" : 87 + }, + { + "Min" : 90, + "Max" : 90, + "Avg" : 90, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 17:13:00 +1000" + }, + { + "Avg" : 81, + "Max" : 81, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 17:29:00 +1000", + "Min" : 81 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 90, + "Min" : 90, + "date" : "2024-11-02 17:34:00 +1000", + "Avg" : 90 + }, + { + "Max" : 100, + "Min" : 100, + "Avg" : 100, + "date" : "2024-11-02 17:38:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 99, + "date" : "2024-11-02 17:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 99, + "Avg" : 99 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 102, + "Avg" : 102, + "date" : "2024-11-02 17:45:00 +1000", + "Min" : 102 + }, + { + "Max" : 104, + "source" : "Madhava’s Apple Watch", + "Min" : 104, + "Avg" : 104, + "date" : "2024-11-02 17:53:00 +1000" + }, + { + "Max" : 100, + "Min" : 100, + "date" : "2024-11-02 17:56:00 +1000", + "Avg" : 100, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 97, + "source" : "Madhava’s Apple Watch", + "Min" : 97, + "Max" : 97, + "date" : "2024-11-02 18:04:00 +1000" + }, + { + "date" : "2024-11-02 18:08:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 89, + "Min" : 89, + "Avg" : 89 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 104, + "Min" : 104, + "date" : "2024-11-02 18:13:00 +1000", + "Avg" : 104 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 101, + "Max" : 101, + "date" : "2024-11-02 18:16:00 +1000", + "Avg" : 101 + }, + { + "Avg" : 96, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 18:20:00 +1000", + "Min" : 96, + "Max" : 96 + }, + { + "Min" : 100, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 18:26:00 +1000", + "Max" : 100, + "Avg" : 100 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 104, + "Avg" : 104, + "Max" : 104, + "date" : "2024-11-02 18:34:00 +1000" + }, + { + "Min" : 99, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 
18:36:00 +1000", + "Avg" : 99, + "Max" : 99 + }, + { + "date" : "2024-11-02 18:37:00 +1000", + "Min" : 103.58255767822266, + "Avg" : 103.58255767822266, + "Max" : 103.58255767822266, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 100, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 18:44:00 +1000", + "Avg" : 100, + "Max" : 100 + }, + { + "Max" : 91, + "source" : "Madhava’s Apple Watch", + "Avg" : 91, + "date" : "2024-11-02 18:48:00 +1000", + "Min" : 91 + }, + { + "Avg" : 99, + "source" : "Madhava’s Apple Watch", + "Max" : 99, + "date" : "2024-11-02 18:54:00 +1000", + "Min" : 99 + }, + { + "Avg" : 99, + "Max" : 99, + "date" : "2024-11-02 18:55:00 +1000", + "Min" : 99, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 100, + "Min" : 100, + "source" : "Madhava’s Apple Watch", + "Max" : 100, + "date" : "2024-11-02 19:03:00 +1000" + }, + { + "Min" : 97, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 19:09:00 +1000", + "Max" : 97, + "Avg" : 97 + }, + { + "Min" : 91, + "date" : "2024-11-02 19:13:00 +1000", + "Avg" : 91, + "Max" : 91, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 19:19:00 +1000", + "Max" : 84, + "Min" : 84, + "source" : "Madhava’s Apple Watch", + "Avg" : 84 + }, + { + "Avg" : 85, + "date" : "2024-11-02 19:22:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 85, + "Min" : 85 + }, + { + "Max" : 85, + "Avg" : 85, + "date" : "2024-11-02 19:27:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 85 + }, + { + "Avg" : 87, + "Min" : 87, + "source" : "Madhava’s Apple Watch", + "Max" : 87, + "date" : "2024-11-02 19:30:00 +1000" + }, + { + "Avg" : 99, + "source" : "Madhava’s Apple Watch", + "Max" : 99, + "date" : "2024-11-02 19:37:00 +1000", + "Min" : 99 + }, + { + "date" : "2024-11-02 19:44:00 +1000", + "Max" : 98, + "source" : "Madhava’s Apple Watch", + "Min" : 98, + "Avg" : 98 + }, + { + "date" : "2024-11-02 19:47:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 94, + "Min" : 94, + "Avg" : 94 + }, + { + "date" : "2024-11-02 19:55:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 87, + "Max" : 87, + "Min" : 87 + }, + { + "Max" : 98, + "date" : "2024-11-02 20:02:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 98, + "Avg" : 98 + }, + { + "date" : "2024-11-02 20:09:00 +1000", + "Max" : 99, + "Min" : 99, + "Avg" : 99, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 20:10:00 +1000", + "Avg" : 92.000000000000014, + "Max" : 92.000000000000014, + "Min" : 92.000000000000014, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 93.509307861328125, + "Avg" : 93.509307861328125, + "Min" : 93.509307861328125, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 20:13:00 +1000" + }, + { + "Max" : 106, + "source" : "Madhava’s Apple Watch", + "Min" : 106, + "Avg" : 106, + "date" : "2024-11-02 20:17:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 84, + "Avg" : 84, + "date" : "2024-11-02 20:23:00 +1000", + "Max" : 84 + }, + { + "date" : "2024-11-02 20:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 91, + "Min" : 91, + "Avg" : 91 + }, + { + "date" : "2024-11-02 20:31:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 100, + "Max" : 100, + "Avg" : 100 + }, + { + "Min" : 77.000000000000014, + "Max" : 77.000000000000014, + "Avg" : 77.000000000000014, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 20:38:00 +1000" + }, + { + "date" : "2024-11-02 20:41:00 +1000", + "Avg" : 84, + "Max" : 84, + "Min" : 84, + "source" : 
"Madhava’s Apple Watch" + }, + { + "Avg" : 78, + "source" : "Madhava’s Apple Watch", + "Min" : 78, + "Max" : 78, + "date" : "2024-11-02 20:45:00 +1000" + }, + { + "Min" : 80, + "Avg" : 80, + "Max" : 80, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 20:51:00 +1000" + }, + { + "date" : "2024-11-02 20:57:00 +1000", + "Max" : 73, + "Min" : 73, + "Avg" : 73, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "Avg" : 72, + "date" : "2024-11-02 21:02:00 +1000", + "Max" : 72 + }, + { + "Min" : 73, + "Max" : 73, + "Avg" : 73, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 21:08:00 +1000" + }, + { + "Max" : 70, + "date" : "2024-11-02 21:12:00 +1000", + "Min" : 70, + "source" : "Madhava’s Apple Watch", + "Avg" : 70 + }, + { + "Max" : 79, + "date" : "2024-11-02 21:18:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 79, + "Min" : 79 + }, + { + "Max" : 77.000000000000014, + "Min" : 77.000000000000014, + "Avg" : 77.000000000000014, + "date" : "2024-11-02 21:20:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 79, + "Min" : 79, + "date" : "2024-11-02 21:26:00 +1000", + "Max" : 79, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 80, + "Min" : 80, + "Max" : 80, + "date" : "2024-11-02 21:27:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 21:31:00 +1000", + "Min" : 74, + "Avg" : 74, + "Max" : 74 + }, + { + "Min" : 77.000000000000014, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 21:35:00 +1000", + "Max" : 77.000000000000014, + "Avg" : 77.000000000000014 + }, + { + "date" : "2024-11-02 21:42:00 +1000", + "Min" : 77.59527587890625, + "Avg" : 77.59527587890625, + "source" : "Madhava’s Apple Watch", + "Max" : 77.59527587890625 + }, + { + "Avg" : 80, + "Min" : 80, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 21:45:00 +1000", + "Max" : 80 + }, + { + "Max" : 79, + "source" : "Madhava’s Apple Watch", + "Min" : 79, + "date" : "2024-11-02 21:47:00 +1000", + "Avg" : 79 + }, + { + "date" : "2024-11-02 21:52:00 +1000", + "Max" : 79, + "Min" : 79, + "Avg" : 79, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 76, + "Min" : 76, + "Avg" : 76, + "date" : "2024-11-02 21:56:00 +1000" + }, + { + "Max" : 74, + "source" : "Madhava’s Apple Watch", + "Min" : 74, + "Avg" : 74, + "date" : "2024-11-02 21:58:00 +1000" + }, + { + "Avg" : 73, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 22:01:00 +1000", + "Max" : 73, + "Min" : 73 + }, + { + "date" : "2024-11-02 22:09:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 70, + "Avg" : 70, + "Max" : 70 + }, + { + "Min" : 71, + "Avg" : 71, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 22:12:00 +1000", + "Max" : 71 + }, + { + "Min" : 72, + "source" : "Madhava’s Apple Watch", + "Max" : 72, + "Avg" : 72, + "date" : "2024-11-02 22:16:00 +1000" + }, + { + "Avg" : 70, + "Min" : 70, + "date" : "2024-11-02 22:22:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 70 + }, + { + "Min" : 72, + "Max" : 72, + "date" : "2024-11-02 22:29:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 72 + }, + { + "Avg" : 72, + "date" : "2024-11-02 22:31:00 +1000", + "Min" : 72, + "Max" : 72, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 73, + "Min" : 73, + "date" : "2024-11-02 22:35:00 +1000", + "Avg" : 73 + }, + { + "Max" : 72, + "date" : "2024-11-02 22:43:00 
+1000", + "Min" : 72, + "Avg" : 72, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 71, + "Avg" : 71, + "Max" : 71, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 22:45:00 +1000" + }, + { + "Max" : 72, + "Avg" : 72, + "date" : "2024-11-02 22:52:00 +1000", + "Min" : 72, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-02 22:55:00 +1000", + "Min" : 73, + "Max" : 73, + "source" : "Madhava’s Apple Watch", + "Avg" : 73 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 73, + "Avg" : 73, + "Min" : 73, + "date" : "2024-11-02 22:56:00 +1000" + }, + { + "Max" : 72, + "Avg" : 72, + "date" : "2024-11-02 23:01:00 +1000", + "Min" : 72, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 74, + "Min" : 74, + "date" : "2024-11-02 23:05:00 +1000", + "Max" : 74, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 73, + "Min" : 73, + "date" : "2024-11-02 23:12:00 +1000", + "Avg" : 73, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 73, + "Min" : 73, + "Avg" : 73, + "date" : "2024-11-02 23:15:00 +1000" + }, + { + "Max" : 73, + "Min" : 73, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 23:20:00 +1000", + "Avg" : 73 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 73, + "date" : "2024-11-02 23:25:00 +1000", + "Min" : 73, + "Avg" : 73 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 73, + "date" : "2024-11-02 23:31:00 +1000", + "Avg" : 73, + "Max" : 73 + }, + { + "Max" : 79, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 23:35:00 +1000", + "Avg" : 79, + "Min" : 79 + }, + { + "Max" : 75.013442993164062, + "Avg" : 75.013442993164062, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-02 23:40:00 +1000", + "Min" : 75.013442993164062 + }, + { + "date" : "2024-11-02 23:41:00 +1000", + "Min" : 76, + "Max" : 76, + "Avg" : 76, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "Avg" : 72, + "Max" : 72, + "date" : "2024-11-02 23:43:00 +1000" + }, + { + "Max" : 74, + "date" : "2024-11-02 23:45:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 74, + "Avg" : 74 + }, + { + "date" : "2024-11-02 23:50:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 73, + "Min" : 73, + "Avg" : 73 + }, + { + "date" : "2024-11-02 23:55:00 +1000", + "Max" : 73, + "source" : "Madhava’s Apple Watch", + "Avg" : 73, + "Min" : 73 + }, + { + "date" : "2024-11-03 00:03:00 +1000", + "Avg" : 84, + "Max" : 84, + "Min" : 84, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 00:06:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 83, + "Max" : 83, + "Min" : 83 + }, + { + "date" : "2024-11-03 00:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 83, + "Max" : 83, + "Avg" : 83 + }, + { + "date" : "2024-11-03 00:13:00 +1000", + "Max" : 84, + "Min" : 84, + "source" : "Madhava’s Apple Watch", + "Avg" : 84 + }, + { + "date" : "2024-11-03 00:15:00 +1000", + "Max" : 84, + "source" : "Madhava’s Apple Watch", + "Min" : 84, + "Avg" : 84 + }, + { + "Min" : 80, + "date" : "2024-11-03 00:22:00 +1000", + "Max" : 80, + "source" : "Madhava’s Apple Watch", + "Avg" : 80 + }, + { + "Max" : 80, + "date" : "2024-11-03 00:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 80, + "Avg" : 80 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 80, + "Min" : 80, + "Avg" : 80, + "date" : "2024-11-03 00:30:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "date" : "2024-11-03 00:35:00 +1000", + 
"Avg" : 72, + "Max" : 72 + }, + { + "date" : "2024-11-03 00:40:00 +1000", + "Min" : 72, + "Max" : 72, + "source" : "Madhava’s Apple Watch", + "Avg" : 72 + }, + { + "date" : "2024-11-03 00:45:00 +1000", + "Min" : 79, + "Max" : 79, + "Avg" : 79, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 80, + "source" : "Madhava’s Apple Watch", + "Max" : 80, + "Min" : 80, + "date" : "2024-11-03 00:54:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 74, + "Avg" : 74, + "Max" : 74, + "date" : "2024-11-03 00:55:00 +1000" + }, + { + "Avg" : 73, + "Max" : 73, + "source" : "Madhava’s Apple Watch", + "Min" : 73, + "date" : "2024-11-03 01:02:00 +1000" + }, + { + "Avg" : 73, + "Max" : 73, + "Min" : 73, + "date" : "2024-11-03 01:06:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 72, + "Avg" : 72, + "date" : "2024-11-03 01:10:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 72 + }, + { + "Max" : 69, + "Min" : 69, + "date" : "2024-11-03 01:12:00 +1000", + "Avg" : 69, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 72, + "Max" : 72, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 01:18:00 +1000", + "Min" : 72 + }, + { + "date" : "2024-11-03 01:20:00 +1000", + "Min" : 73, + "Avg" : 73, + "source" : "Madhava’s Apple Watch", + "Max" : 73 + }, + { + "Min" : 72, + "date" : "2024-11-03 01:30:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "Max" : 72 + }, + { + "Avg" : 72, + "Min" : 72, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 01:33:00 +1000", + "Max" : 72 + }, + { + "date" : "2024-11-03 01:35:00 +1000", + "Max" : 72, + "Min" : 72, + "Avg" : 72, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 71.491607666015625, + "Max" : 71.491607666015625, + "date" : "2024-11-03 01:40:00 +1000", + "Avg" : 71.491607666015625, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 72, + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "Max" : 72, + "date" : "2024-11-03 01:41:00 +1000" + }, + { + "Max" : 71, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 01:42:00 +1000", + "Min" : 71, + "Avg" : 71 + }, + { + "Max" : 72, + "Avg" : 72, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 01:46:00 +1000", + "Min" : 72 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 72, + "Min" : 72, + "Avg" : 72, + "date" : "2024-11-03 01:50:00 +1000" + }, + { + "date" : "2024-11-03 01:55:00 +1000", + "Max" : 71, + "source" : "Madhava’s Apple Watch", + "Min" : 71, + "Avg" : 71 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "date" : "2024-11-03 02:04:00 +1000", + "Min" : 67, + "Avg" : 67 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 71, + "date" : "2024-11-03 02:08:00 +1000", + "Avg" : 71, + "Max" : 71 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 02:10:00 +1000", + "Avg" : 73, + "Min" : 73, + "Max" : 73 + }, + { + "Min" : 77, + "Avg" : 77, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 02:12:00 +1000", + "Max" : 77 + }, + { + "date" : "2024-11-03 02:17:00 +1000", + "Max" : 72, + "Min" : 72, + "Avg" : 72, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 02:22:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "Min" : 72, + "Max" : 72 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "Max" : 72, + "date" : "2024-11-03 02:27:00 +1000", + "Min" : 72 + }, + { + "date" : "2024-11-03 02:31:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "Max" : 72, + "Min" : 72 + }, + { + "Avg" : 67, + 
"source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 02:37:00 +1000", + "Max" : 67, + "Min" : 67 + }, + { + "Min" : 58, + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "date" : "2024-11-03 02:41:00 +1000" + }, + { + "Min" : 61.999999999999993, + "Avg" : 61.999999999999993, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 02:42:00 +1000", + "Max" : 61.999999999999993 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "Max" : 60, + "date" : "2024-11-03 02:46:00 +1000", + "Avg" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 58, + "Avg" : 58, + "date" : "2024-11-03 02:52:00 +1000", + "Max" : 58 + }, + { + "Min" : 57, + "source" : "Madhava’s Apple Watch", + "Max" : 57, + "date" : "2024-11-03 02:56:00 +1000", + "Avg" : 57 + }, + { + "Max" : 59, + "Avg" : 59, + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 03:02:00 +1000" + }, + { + "Max" : 64, + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "date" : "2024-11-03 03:06:00 +1000", + "Avg" : 64 + }, + { + "date" : "2024-11-03 03:11:00 +1000", + "Avg" : 60, + "Max" : 60, + "Min" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 60, + "date" : "2024-11-03 03:12:00 +1000", + "Max" : 60, + "Min" : 60 + }, + { + "Avg" : 60, + "date" : "2024-11-03 03:17:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "Max" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 03:20:00 +1000", + "Min" : 61, + "Avg" : 61, + "Max" : 61 + }, + { + "date" : "2024-11-03 03:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 63, + "Min" : 63, + "Max" : 63 + }, + { + "Min" : 69, + "Avg" : 69, + "source" : "Madhava’s Apple Watch", + "Max" : 69, + "date" : "2024-11-03 03:33:00 +1000" + }, + { + "Max" : 54, + "date" : "2024-11-03 03:38:00 +1000", + "Min" : 54, + "Avg" : 54, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 03:40:00 +1000", + "Max" : 68.2105712890625, + "Min" : 68, + "Avg" : 68.10528564453125 + }, + { + "Min" : 71, + "date" : "2024-11-03 03:42:00 +1000", + "Max" : 71, + "source" : "Madhava’s Apple Watch", + "Avg" : 71 + }, + { + "date" : "2024-11-03 03:50:00 +1000", + "Max" : 63, + "Min" : 63, + "source" : "Madhava’s Apple Watch", + "Avg" : 63 + }, + { + "date" : "2024-11-03 03:51:00 +1000", + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Avg" : 63 + }, + { + "Avg" : 62.000000000000007, + "date" : "2024-11-03 03:55:00 +1000", + "Max" : 62.000000000000007, + "source" : "Madhava’s Apple Watch", + "Min" : 62.000000000000007 + }, + { + "Max" : 53, + "date" : "2024-11-03 04:01:00 +1000", + "Avg" : 53, + "Min" : 53, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 67, + "Avg" : 67, + "date" : "2024-11-03 04:09:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 67 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 04:10:00 +1000", + "Min" : 70, + "Avg" : 70, + "Max" : 70 + }, + { + "Max" : 74, + "Min" : 74, + "source" : "Madhava’s Apple Watch", + "Avg" : 74, + "date" : "2024-11-03 04:12:00 +1000" + }, + { + "Min" : 58, + "Max" : 58, + "date" : "2024-11-03 04:17:00 +1000", + "Avg" : 58, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "date" : "2024-11-03 04:21:00 +1000", + "Min" : 52, + "Avg" : 52 + }, + { + "Min" : 57, + "source" : "Madhava’s Apple Watch", + "Avg" : 57, + "date" : "2024-11-03 04:26:00 +1000", + "Max" : 
57 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 53, + "Avg" : 53, + "Max" : 53, + "date" : "2024-11-03 04:35:00 +1000" + }, + { + "Min" : 53, + "Max" : 53, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 04:37:00 +1000", + "Avg" : 53 + }, + { + "Avg" : 57, + "source" : "Madhava’s Apple Watch", + "Max" : 57, + "date" : "2024-11-03 04:42:00 +1000", + "Min" : 57 + }, + { + "Max" : 59, + "Min" : 59, + "date" : "2024-11-03 04:44:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 59 + }, + { + "Avg" : 55, + "date" : "2024-11-03 04:49:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Min" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 04:54:00 +1000", + "Min" : 60, + "Avg" : 60, + "Max" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 60, + "date" : "2024-11-03 04:56:00 +1000", + "Min" : 60, + "Max" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 05:01:00 +1000", + "Min" : 60, + "Avg" : 60, + "Max" : 60 + }, + { + "date" : "2024-11-03 05:05:00 +1000", + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "Max" : 56 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "date" : "2024-11-03 05:12:00 +1000", + "Min" : 58, + "Avg" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 05:16:00 +1000", + "Avg" : 58, + "Min" : 58, + "Max" : 58 + }, + { + "Max" : 58, + "date" : "2024-11-03 05:21:00 +1000", + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58 + }, + { + "Max" : 61, + "Min" : 61, + "Avg" : 61, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 05:26:00 +1000" + }, + { + "Avg" : 61, + "Max" : 61, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 05:27:00 +1000", + "Min" : 61 + }, + { + "Min" : 72, + "Max" : 72, + "Avg" : 72, + "date" : "2024-11-03 05:31:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 05:35:00 +1000", + "Max" : 72, + "Min" : 72, + "Avg" : 72 + }, + { + "Avg" : 73.320995330810547, + "Min" : 73, + "Max" : 73.641990661621094, + "date" : "2024-11-03 05:40:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 63, + "date" : "2024-11-03 05:48:00 +1000", + "Min" : 63, + "Avg" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 69, + "Min" : 69, + "Avg" : 69, + "date" : "2024-11-03 05:52:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 66, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 05:55:00 +1000", + "Max" : 66, + "Avg" : 66 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 05:58:00 +1000", + "Avg" : 68, + "Min" : 68, + "Max" : 68 + }, + { + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 06:02:00 +1000", + "Max" : 63, + "Min" : 63 + }, + { + "Min" : 66, + "Avg" : 66, + "date" : "2024-11-03 06:06:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 66 + }, + { + "Avg" : 60, + "Max" : 60, + "date" : "2024-11-03 06:12:00 +1000", + "Min" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "date" : "2024-11-03 06:15:00 +1000", + "Min" : 58 + }, + { + "Avg" : 72, + "Min" : 72, + "date" : "2024-11-03 06:23:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 72 + }, + { + "date" : "2024-11-03 06:28:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 61, + "Max" : 61, + "Avg" : 61 + }, + { + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + 
"Max" : 63, + "date" : "2024-11-03 06:29:00 +1000", + "Min" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 78, + "Avg" : 78, + "date" : "2024-11-03 06:34:00 +1000", + "Max" : 78 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 73, + "Max" : 73, + "Min" : 73, + "date" : "2024-11-03 06:40:00 +1000" + }, + { + "Max" : 65, + "date" : "2024-11-03 06:43:00 +1000", + "Min" : 65, + "Avg" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 60, + "Avg" : 60, + "date" : "2024-11-03 06:47:00 +1000", + "Max" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch", + "Max" : 56, + "date" : "2024-11-03 06:53:00 +1000" + }, + { + "date" : "2024-11-03 09:00:00 +1000", + "Max" : 76, + "Min" : 76, + "source" : "Madhava’s Apple Watch", + "Avg" : 76 + }, + { + "Max" : 66, + "source" : "Madhava’s Apple Watch", + "Avg" : 66, + "Min" : 66, + "date" : "2024-11-03 09:02:00 +1000" + }, + { + "date" : "2024-11-03 09:07:00 +1000", + "Max" : 98, + "Min" : 98, + "source" : "Madhava’s Apple Watch", + "Avg" : 98 + }, + { + "date" : "2024-11-03 09:08:00 +1000", + "Max" : 96, + "Avg" : 95.296610169491544, + "source" : "Madhava’s Apple Watch", + "Min" : 94 + }, + { + "Min" : 77.000000000000014, + "date" : "2024-11-03 09:11:00 +1000", + "Avg" : 77.000000000000014, + "source" : "Madhava’s Apple Watch", + "Max" : 77.000000000000014 + }, + { + "Min" : 85, + "Avg" : 85, + "Max" : 85, + "date" : "2024-11-03 09:13:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 74, + "Max" : 74, + "Min" : 74, + "date" : "2024-11-03 09:18:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 09:22:00 +1000", + "Max" : 59, + "Min" : 59 + }, + { + "Min" : 69, + "Max" : 69, + "date" : "2024-11-03 09:27:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 69 + }, + { + "date" : "2024-11-03 09:32:00 +1000", + "Avg" : 83, + "Max" : 83, + "Min" : 83, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 63, + "Max" : 63, + "date" : "2024-11-03 09:36:00 +1000", + "Min" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 74, + "date" : "2024-11-03 09:42:00 +1000", + "Avg" : 74, + "Min" : 74 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 09:47:00 +1000", + "Min" : 76, + "Max" : 76, + "Avg" : 76 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 69, + "Max" : 69, + "Min" : 69, + "date" : "2024-11-03 09:51:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 84, + "Avg" : 84, + "Max" : 84, + "date" : "2024-11-03 09:53:00 +1000" + }, + { + "Avg" : 70.799980163574219, + "source" : "Madhava’s Apple Watch", + "Max" : 70.799980163574219, + "date" : "2024-11-03 09:58:00 +1000", + "Min" : 70.799980163574219 + }, + { + "Min" : 68, + "Avg" : 68, + "Max" : 68, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 09:59:00 +1000" + }, + { + "Max" : 76, + "Avg" : 76, + "Min" : 76, + "date" : "2024-11-03 10:02:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 10:07:00 +1000", + "Max" : 76, + "source" : "Madhava’s Apple Watch", + "Avg" : 76, + "Min" : 76 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 74, + "Avg" : 74, + "date" : "2024-11-03 10:14:00 +1000", + "Min" : 74 + }, + { + "Min" : 74, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 10:19:00 +1000", + "Avg" : 74, + "Max" : 74 + }, + { + "Min" : 87, + "date" : "2024-11-03 
10:24:00 +1000", + "Avg" : 87, + "source" : "Madhava’s Apple Watch", + "Max" : 87 + }, + { + "Avg" : 93, + "Min" : 93, + "date" : "2024-11-03 10:27:00 +1000", + "Max" : 93, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 10:35:00 +1000", + "Avg" : 85, + "Min" : 85, + "source" : "Madhava’s Apple Watch", + "Max" : 85 + }, + { + "Max" : 92.000000000000014, + "Min" : 92.000000000000014, + "date" : "2024-11-03 10:38:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 92.000000000000014 + }, + { + "date" : "2024-11-03 10:44:00 +1000", + "Min" : 98, + "Avg" : 98, + "Max" : 98, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 100.00537872314453, + "source" : "Madhava’s Apple Watch", + "Min" : 100.00537872314453, + "Max" : 100.00537872314453, + "date" : "2024-11-03 10:46:00 +1000" + }, + { + "date" : "2024-11-03 10:48:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 100, + "Min" : 100, + "Avg" : 100 + }, + { + "Avg" : 98, + "date" : "2024-11-03 10:52:00 +1000", + "Max" : 98, + "Min" : 98, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 10:57:00 +1000", + "Max" : 103, + "Avg" : 103, + "Min" : 103 + }, + { + "Max" : 104, + "Min" : 104, + "Avg" : 104, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 11:00:00 +1000" + }, + { + "Avg" : 105, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 11:02:00 +1000", + "Max" : 105, + "Min" : 105 + }, + { + "Max" : 105, + "date" : "2024-11-03 11:03:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 105, + "Avg" : 105 + }, + { + "Min" : 109, + "Max" : 109, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 11:05:00 +1000", + "Avg" : 109 + }, + { + "date" : "2024-11-03 11:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 111, + "Avg" : 111, + "Min" : 111 + }, + { + "Min" : 116, + "Avg" : 116, + "date" : "2024-11-03 11:13:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 116 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 116, + "date" : "2024-11-03 11:16:00 +1000", + "Min" : 116, + "Avg" : 116 + }, + { + "Min" : 112, + "Max" : 112, + "Avg" : 112, + "date" : "2024-11-03 11:18:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 114, + "date" : "2024-11-03 11:21:00 +1000", + "Max" : 114, + "Min" : 114, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 11:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 109, + "Min" : 109, + "Max" : 109 + }, + { + "Avg" : 91, + "date" : "2024-11-03 11:27:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 91, + "Min" : 91 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 90, + "Avg" : 90, + "Min" : 90, + "date" : "2024-11-03 11:30:00 +1000" + }, + { + "Min" : 85, + "date" : "2024-11-03 11:36:00 +1000", + "Avg" : 85, + "Max" : 85, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 85, + "source" : "Madhava’s Apple Watch", + "Avg" : 85, + "date" : "2024-11-03 11:43:00 +1000", + "Max" : 85 + }, + { + "date" : "2024-11-03 11:50:00 +1000", + "Max" : 87, + "Avg" : 87, + "source" : "Madhava’s Apple Watch", + "Min" : 87 + }, + { + "date" : "2024-11-03 11:52:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 91, + "Min" : 91, + "Avg" : 91 + }, + { + "Min" : 84, + "Max" : 84, + "date" : "2024-11-03 11:56:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 84 + }, + { + "date" : "2024-11-03 12:01:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 84, + "Min" : 84, + "Avg" : 84 + }, + { + 
"date" : "2024-11-03 12:07:00 +1000", + "Max" : 84, + "Min" : 84, + "source" : "Madhava’s Apple Watch", + "Avg" : 84 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 78.283485412597656, + "Min" : 78.283485412597656, + "Avg" : 78.283485412597656, + "date" : "2024-11-03 12:12:00 +1000" + }, + { + "date" : "2024-11-03 12:14:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 73, + "Avg" : 73, + "Max" : 73 + }, + { + "Avg" : 81, + "Max" : 81, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 12:18:00 +1000", + "Min" : 81 + }, + { + "Min" : 76, + "date" : "2024-11-03 12:21:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 76, + "Max" : 76 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 12:26:00 +1000", + "Max" : 78, + "Min" : 78, + "Avg" : 78 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 91, + "date" : "2024-11-03 12:31:00 +1000", + "Min" : 91, + "Avg" : 91 + }, + { + "date" : "2024-11-03 12:37:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 73, + "Min" : 73, + "Avg" : 73 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 12:43:00 +1000", + "Max" : 72, + "Min" : 72, + "Avg" : 72 + }, + { + "Max" : 68, + "Avg" : 68, + "Min" : 68, + "date" : "2024-11-03 12:46:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 12:51:00 +1000", + "Max" : 71, + "source" : "Madhava’s Apple Watch", + "Min" : 71, + "Avg" : 71 + }, + { + "Min" : 68, + "date" : "2024-11-03 12:58:00 +1000", + "Avg" : 68, + "Max" : 68, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 13:02:00 +1000", + "Avg" : 65, + "Min" : 65, + "Max" : 65 + }, + { + "Min" : 68, + "Avg" : 68, + "date" : "2024-11-03 13:06:00 +1000", + "Max" : 68, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 69, + "Min" : 69, + "source" : "Madhava’s Apple Watch", + "Avg" : 69, + "date" : "2024-11-03 13:10:00 +1000" + }, + { + "Max" : 72, + "Min" : 72, + "date" : "2024-11-03 13:18:00 +1000", + "Avg" : 72, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 66, + "source" : "Madhava’s Apple Watch", + "Avg" : 66, + "Min" : 66, + "date" : "2024-11-03 13:23:00 +1000" + }, + { + "Max" : 68, + "date" : "2024-11-03 13:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 68, + "Avg" : 68 + }, + { + "Avg" : 58, + "date" : "2024-11-03 13:30:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 58 + }, + { + "date" : "2024-11-03 13:37:00 +1000", + "Max" : 66, + "Avg" : 66, + "Min" : 66, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 88, + "Avg" : 88, + "Max" : 88, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 13:45:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 76, + "date" : "2024-11-03 13:47:00 +1000", + "Avg" : 76, + "Min" : 76 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 69, + "Max" : 69, + "Avg" : 69, + "date" : "2024-11-03 13:53:00 +1000" + }, + { + "Max" : 64, + "Min" : 64, + "Avg" : 64, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 13:59:00 +1000" + }, + { + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 14:01:00 +1000", + "Min" : 59, + "Max" : 59 + }, + { + "Avg" : 65, + "date" : "2024-11-03 14:07:00 +1000", + "Max" : 65, + "source" : "Madhava’s Apple Watch", + "Min" : 65 + }, + { + "date" : "2024-11-03 14:13:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 69.454452514648452, + "Min" : 63, + "Avg" : 66.227226257324219 + }, + { + "date" : 
"2024-11-03 14:20:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 66, + "Avg" : 66, + "Min" : 66 + }, + { + "Max" : 66, + "Min" : 66, + "Avg" : 66, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 14:23:00 +1000" + }, + { + "date" : "2024-11-03 14:26:00 +1000", + "Max" : 64, + "Avg" : 64, + "Min" : 64, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 14:38:00 +1000", + "Max" : 94, + "Min" : 94, + "Avg" : 94, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 14:39:00 +1000", + "Avg" : 90, + "Max" : 90, + "Min" : 90, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 76, + "date" : "2024-11-03 15:05:00 +1000", + "Max" : 76, + "Min" : 76, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 70, + "Avg" : 70, + "date" : "2024-11-03 15:12:00 +1000", + "Min" : 70, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 74, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 15:17:00 +1000", + "Min" : 68, + "Avg" : 70.683333344260845 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 70, + "Avg" : 71, + "date" : "2024-11-03 15:18:00 +1000", + "Max" : 72 + }, + { + "Avg" : 98, + "Max" : 98, + "date" : "2024-11-03 15:34:00 +1000", + "Min" : 98, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 15:35:00 +1000", + "Min" : 91, + "Max" : 91, + "Avg" : 91 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 90, + "date" : "2024-11-03 15:36:00 +1000", + "Min" : 83, + "Avg" : 86.934426234393811 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 94, + "Min" : 94, + "Avg" : 94, + "date" : "2024-11-03 15:59:00 +1000" + }, + { + "Avg" : 85, + "Min" : 85, + "Max" : 85, + "date" : "2024-11-03 16:14:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 16:18:00 +1000", + "Avg" : 75, + "source" : "Madhava’s Apple Watch", + "Max" : 75, + "Min" : 75 + }, + { + "date" : "2024-11-03 16:22:00 +1000", + "Min" : 84, + "Max" : 84, + "Avg" : 84, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 16:26:00 +1000", + "Min" : 86, + "Avg" : 86, + "Max" : 86 + }, + { + "Avg" : 79, + "Min" : 79, + "Max" : 79, + "date" : "2024-11-03 16:33:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 16:36:00 +1000", + "Avg" : 77.445884704589844, + "Max" : 77.445884704589844, + "source" : "Madhava’s Apple Watch", + "Min" : 77.445884704589844 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 80, + "Min" : 80, + "date" : "2024-11-03 16:39:00 +1000", + "Avg" : 80 + }, + { + "date" : "2024-11-03 16:45:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 74, + "Min" : 74, + "Max" : 74 + }, + { + "Max" : 79, + "date" : "2024-11-03 16:47:00 +1000", + "Min" : 79, + "Avg" : 79, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 74, + "Avg" : 74, + "date" : "2024-11-03 16:51:00 +1000", + "Max" : 74 + }, + { + "Max" : 75, + "Min" : 75, + "Avg" : 75, + "date" : "2024-11-03 16:55:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "Avg" : 72, + "date" : "2024-11-03 17:01:00 +1000", + "Max" : 72 + }, + { + "Avg" : 93, + "date" : "2024-11-03 17:10:00 +1000", + "Max" : 93, + "source" : "Madhava’s Apple Watch", + "Min" : 93 + }, + { + "Min" : 87, + "source" : "Madhava’s Apple Watch", + "Avg" : 87, + "date" : "2024-11-03 17:13:00 +1000", + "Max" : 87 + }, + { + "Min" : 84, + "source" : 
"Madhava’s Apple Watch", + "Avg" : 84, + "date" : "2024-11-03 17:15:00 +1000", + "Max" : 84 + }, + { + "Min" : 81, + "date" : "2024-11-03 17:23:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 81, + "Max" : 81 + }, + { + "Min" : 90, + "source" : "Madhava’s Apple Watch", + "Max" : 90, + "date" : "2024-11-03 17:26:00 +1000", + "Avg" : 90 + }, + { + "Avg" : 77.000000000000014, + "date" : "2024-11-03 17:34:00 +1000", + "Min" : 77.000000000000014, + "source" : "Madhava’s Apple Watch", + "Max" : 77.000000000000014 + }, + { + "Min" : 80, + "Avg" : 80, + "Max" : 80, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 17:37:00 +1000" + }, + { + "Max" : 99, + "source" : "Madhava’s Apple Watch", + "Avg" : 91.242424242424235, + "date" : "2024-11-03 17:47:00 +1000", + "Min" : 85 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 87, + "Avg" : 87, + "date" : "2024-11-03 17:51:00 +1000", + "Max" : 87 + }, + { + "Max" : 95, + "Avg" : 92.318181818181813, + "Min" : 90, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 17:56:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 88, + "date" : "2024-11-03 17:57:00 +1000", + "Avg" : 89.053571428571431, + "Max" : 90 + }, + { + "date" : "2024-11-03 18:02:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 92.000000000000014, + "Avg" : 100.60000000000001, + "Max" : 111 + }, + { + "Min" : 89, + "date" : "2024-11-03 18:03:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 89, + "Max" : 89 + }, + { + "date" : "2024-11-03 18:05:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 103, + "Min" : 91, + "Avg" : 97 + }, + { + "date" : "2024-11-03 18:06:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 86, + "Avg" : 88.29184549179972, + "Max" : 89 + }, + { + "Max" : 104, + "Avg" : 86.683544303797461, + "date" : "2024-11-03 18:08:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 76 + }, + { + "Max" : 91, + "source" : "Madhava’s Apple Watch", + "Avg" : 86.762376237623755, + "date" : "2024-11-03 18:09:00 +1000", + "Min" : 81 + }, + { + "Max" : 94, + "Min" : 88, + "date" : "2024-11-03 18:10:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 90.939393939393938 + }, + { + "Avg" : 72, + "date" : "2024-11-03 18:12:00 +1000", + "Max" : 72, + "source" : "Madhava’s Apple Watch", + "Min" : 72 + }, + { + "Max" : 92.000000000000014, + "source" : "Madhava’s Apple Watch", + "Avg" : 92.000000000000014, + "date" : "2024-11-03 18:15:00 +1000", + "Min" : 92.000000000000014 + }, + { + "Avg" : 82, + "Min" : 82, + "source" : "Madhava’s Apple Watch", + "Max" : 82, + "date" : "2024-11-03 18:30:00 +1000" + }, + { + "Max" : 87, + "source" : "Madhava’s Apple Watch", + "Avg" : 87, + "date" : "2024-11-03 18:32:00 +1000", + "Min" : 87 + }, + { + "Avg" : 94, + "source" : "Madhava’s Apple Watch", + "Max" : 94, + "date" : "2024-11-03 18:36:00 +1000", + "Min" : 94 + }, + { + "date" : "2024-11-03 18:45:00 +1000", + "Min" : 104, + "Avg" : 104, + "source" : "Madhava’s Apple Watch", + "Max" : 104 + }, + { + "Min" : 97, + "Avg" : 97, + "Max" : 97, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 18:47:00 +1000" + }, + { + "Avg" : 93, + "date" : "2024-11-03 18:54:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 93, + "Min" : 93 + }, + { + "Max" : 92.000000000000014, + "date" : "2024-11-03 18:59:00 +1000", + "Avg" : 92.000000000000014, + "source" : "Madhava’s Apple Watch", + "Min" : 92.000000000000014 + }, + { + "Avg" : 86, + "Max" : 86, + "source" : "Madhava’s Apple Watch", + "Min" : 86, + "date" 
: "2024-11-03 19:04:00 +1000" + }, + { + "Max" : 85, + "date" : "2024-11-03 19:08:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 85, + "Avg" : 85 + }, + { + "Max" : 78.163681030273438, + "date" : "2024-11-03 19:15:00 +1000", + "Avg" : 76.581840515136719, + "source" : "Madhava’s Apple Watch", + "Min" : 75 + }, + { + "Avg" : 76, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 19:20:00 +1000", + "Max" : 76, + "Min" : 76 + }, + { + "Min" : 78, + "Max" : 78, + "Avg" : 78, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 19:25:00 +1000" + }, + { + "date" : "2024-11-03 19:26:00 +1000", + "Min" : 75, + "source" : "Madhava’s Apple Watch", + "Avg" : 75, + "Max" : 75 + }, + { + "Max" : 73, + "Min" : 73, + "date" : "2024-11-03 19:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 73 + }, + { + "Min" : 74, + "Max" : 74, + "Avg" : 74, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 19:37:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 74.412487030029297, + "Max" : 75.824974060058594, + "Min" : 73, + "date" : "2024-11-03 19:40:00 +1000" + }, + { + "date" : "2024-11-03 19:42:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 74, + "Max" : 74, + "Avg" : 74 + }, + { + "Max" : 69, + "Min" : 69, + "Avg" : 69, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 19:47:00 +1000" + }, + { + "Max" : 72, + "Avg" : 72, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 19:50:00 +1000", + "Min" : 72 + }, + { + "Max" : 73, + "source" : "Madhava’s Apple Watch", + "Min" : 73, + "Avg" : 73, + "date" : "2024-11-03 19:56:00 +1000" + }, + { + "Min" : 80, + "date" : "2024-11-03 20:01:00 +1000", + "Max" : 80, + "source" : "Madhava’s Apple Watch", + "Avg" : 80 + }, + { + "Min" : 95, + "source" : "Madhava’s Apple Watch", + "Max" : 95, + "date" : "2024-11-03 20:08:00 +1000", + "Avg" : 95 + }, + { + "Max" : 70, + "Min" : 70, + "Avg" : 70, + "date" : "2024-11-03 20:13:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 20:19:00 +1000", + "Min" : 74, + "Avg" : 74, + "Max" : 74, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-03 20:22:00 +1000", + "Min" : 73, + "Avg" : 73, + "source" : "Madhava’s Apple Watch", + "Max" : 73 + }, + { + "Min" : 66, + "Avg" : 66, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 20:30:00 +1000", + "Max" : 66 + }, + { + "Max" : 67, + "date" : "2024-11-03 20:31:00 +1000", + "Min" : 67, + "Avg" : 67, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 67, + "Avg" : 67, + "date" : "2024-11-03 20:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 67 + }, + { + "date" : "2024-11-03 20:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Avg" : 67, + "Max" : 67 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 68, + "Max" : 68, + "date" : "2024-11-03 20:48:00 +1000", + "Avg" : 68 + }, + { + "Avg" : 71, + "Min" : 71, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 20:55:00 +1000", + "Max" : 71 + }, + { + "Max" : 68, + "Avg" : 68, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 20:56:00 +1000", + "Min" : 68 + }, + { + "Min" : 70, + "Avg" : 70, + "Max" : 70, + "date" : "2024-11-03 21:01:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "date" : "2024-11-03 21:06:00 +1000", + "Max" : 72, + "Min" : 72 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "date" : "2024-11-03 21:14:00 +1000", + "Avg" : 71, + "Min" : 71 + }, 
+ { + "Max" : 72, + "date" : "2024-11-03 21:17:00 +1000", + "Min" : 72, + "Avg" : 72, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 72, + "Avg" : 72, + "date" : "2024-11-03 21:20:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 72 + }, + { + "Avg" : 69, + "Min" : 69, + "source" : "Madhava’s Apple Watch", + "Max" : 69, + "date" : "2024-11-03 21:30:00 +1000" + }, + { + "Avg" : 67, + "date" : "2024-11-03 21:34:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Max" : 67 + }, + { + "Min" : 67, + "Avg" : 67, + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "date" : "2024-11-03 21:38:00 +1000" + }, + { + "date" : "2024-11-03 21:40:00 +1000", + "Min" : 68.21722412109375, + "source" : "Madhava’s Apple Watch", + "Max" : 70, + "Avg" : 69.108612060546875 + }, + { + "Min" : 67, + "Avg" : 67, + "Max" : 67, + "date" : "2024-11-03 21:42:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 68, + "Max" : 68, + "date" : "2024-11-03 21:46:00 +1000", + "Min" : 68, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 65, + "Max" : 65, + "date" : "2024-11-03 21:50:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 65 + }, + { + "Max" : 64, + "date" : "2024-11-03 21:56:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 64, + "Avg" : 64 + }, + { + "Min" : 72, + "source" : "Madhava’s Apple Watch", + "Avg" : 72, + "Max" : 72, + "date" : "2024-11-03 22:03:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "Avg" : 72, + "Max" : 72, + "date" : "2024-11-03 22:06:00 +1000" + }, + { + "Min" : 68, + "Avg" : 68, + "date" : "2024-11-03 22:12:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 68 + }, + { + "Max" : 68, + "source" : "Madhava’s Apple Watch", + "Avg" : 68, + "date" : "2024-11-03 22:17:00 +1000", + "Min" : 68 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-11-03 22:20:00 +1000", + "Avg" : 63, + "Min" : 63 + }, + { + "Max" : 67, + "date" : "2024-11-03 22:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Avg" : 67 + }, + { + "Min" : 71, + "Max" : 71, + "Avg" : 71, + "date" : "2024-11-03 22:30:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 65, + "Min" : 65, + "source" : "Madhava’s Apple Watch", + "Avg" : 65, + "date" : "2024-11-03 22:34:00 +1000" + }, + { + "Min" : 69, + "Max" : 69, + "date" : "2024-11-03 22:35:00 +1000", + "Avg" : 69, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 63, + "date" : "2024-11-03 22:41:00 +1000", + "Min" : 63, + "Avg" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 63, + "Min" : 63, + "date" : "2024-11-03 22:46:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 63 + }, + { + "date" : "2024-11-03 22:50:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "Avg" : 60, + "Min" : 60 + }, + { + "date" : "2024-11-03 22:55:00 +1000", + "Max" : 61, + "Avg" : 61, + "Min" : 61, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 60, + "date" : "2024-11-03 22:56:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "Avg" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-11-03 23:01:00 +1000", + "Min" : 63, + "Avg" : 63 + }, + { + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Avg" : 63, + "date" : "2024-11-03 23:05:00 +1000" + }, + { + "Max" : 57, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 23:10:00 +1000", + "Min" : 57, + "Avg" : 57 + }, + { + "Avg" : 58, + "source" : "Madhava’s Apple Watch", + "Max" : 
58, + "Min" : 58, + "date" : "2024-11-03 23:17:00 +1000" + }, + { + "date" : "2024-11-03 23:20:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58, + "Avg" : 58 + }, + { + "date" : "2024-11-03 23:25:00 +1000", + "Max" : 52, + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "Avg" : 52 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 55, + "Max" : 55, + "date" : "2024-11-03 23:29:00 +1000", + "Min" : 55 + }, + { + "Min" : 55, + "source" : "Madhava’s Apple Watch", + "Avg" : 55, + "date" : "2024-11-03 23:31:00 +1000", + "Max" : 55 + }, + { + "Min" : 55, + "source" : "Madhava’s Apple Watch", + "Max" : 55, + "Avg" : 55, + "date" : "2024-11-03 23:39:00 +1000" + }, + { + "Min" : 54.459995269775391, + "source" : "Madhava’s Apple Watch", + "Max" : 54.459995269775391, + "Avg" : 54.459995269775391, + "date" : "2024-11-03 23:40:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 23:41:00 +1000", + "Avg" : 55, + "Max" : 55, + "Min" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 57, + "Min" : 57, + "date" : "2024-11-03 23:48:00 +1000", + "Avg" : 57 + }, + { + "Min" : 65, + "date" : "2024-11-03 23:50:00 +1000", + "Avg" : 65, + "Max" : 65, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 67, + "date" : "2024-11-03 23:55:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Avg" : 67 + }, + { + "Max" : 67, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-03 23:56:00 +1000", + "Avg" : 67, + "Min" : 67 + }, + { + "Max" : 62.000000000000007, + "Min" : 62.000000000000007, + "date" : "2024-11-04 00:02:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 62.000000000000007 + }, + { + "Avg" : 60, + "Max" : 60, + "Min" : 60, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 00:05:00 +1000" + }, + { + "Max" : 60, + "Avg" : 60, + "Min" : 60, + "date" : "2024-11-04 00:14:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 58, + "date" : "2024-11-04 00:19:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Avg" : 58 + }, + { + "Avg" : 63, + "Max" : 63, + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "date" : "2024-11-04 00:22:00 +1000" + }, + { + "Max" : 59, + "Min" : 59, + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 00:25:00 +1000" + }, + { + "Max" : 58, + "date" : "2024-11-04 00:29:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 58, + "Avg" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "date" : "2024-11-04 00:30:00 +1000", + "Min" : 58, + "Avg" : 58 + }, + { + "Min" : 63, + "date" : "2024-11-04 00:38:00 +1000", + "Avg" : 63, + "Max" : 63, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 66, + "Min" : 66, + "Avg" : 66, + "date" : "2024-11-04 00:41:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-11-04 00:49:00 +1000", + "Min" : 63, + "Avg" : 63 + }, + { + "date" : "2024-11-04 00:50:00 +1000", + "Min" : 63, + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Min" : 61, + "Avg" : 61.999999999999993, + "date" : "2024-11-04 00:56:00 +1000" + }, + { + "Max" : 63, + "date" : "2024-11-04 01:03:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Avg" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Avg" : 63, + "date" : "2024-11-04 01:08:00 +1000", + "Min" : 63 + }, + { + "Max" : 60, + "Min" : 60, + "Avg" : 60, + 
"date" : "2024-11-04 01:12:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "date" : "2024-11-04 01:15:00 +1000", + "Max" : 60, + "Avg" : 60 + }, + { + "date" : "2024-11-04 01:22:00 +1000", + "Max" : 63, + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "Min" : 63 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 01:26:00 +1000", + "Max" : 59, + "Min" : 59, + "Avg" : 59 + }, + { + "date" : "2024-11-04 01:29:00 +1000", + "Max" : 60, + "Avg" : 60, + "Min" : 60, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-04 01:35:00 +1000", + "Min" : 60, + "Avg" : 60, + "source" : "Madhava’s Apple Watch", + "Max" : 60 + }, + { + "date" : "2024-11-04 01:36:00 +1000", + "Max" : 59, + "Min" : 59, + "source" : "Madhava’s Apple Watch", + "Avg" : 59 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 60.598018646240234, + "Avg" : 60.598018646240234, + "date" : "2024-11-04 01:40:00 +1000", + "Max" : 60.598018646240234 + }, + { + "Avg" : 60, + "date" : "2024-11-04 01:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 60, + "Min" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 58, + "Avg" : 58, + "date" : "2024-11-04 01:48:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 58, + "date" : "2024-11-04 01:50:00 +1000", + "Avg" : 58, + "Max" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 58, + "date" : "2024-11-04 01:56:00 +1000", + "Min" : 57, + "Max" : 59 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 02:05:00 +1000", + "Min" : 53, + "Avg" : 53, + "Max" : 53 + }, + { + "Min" : 63, + "Avg" : 63, + "Max" : 63, + "date" : "2024-11-04 02:09:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-04 02:13:00 +1000", + "Avg" : 58, + "Min" : 58, + "source" : "Madhava’s Apple Watch", + "Max" : 58 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 59, + "date" : "2024-11-04 02:17:00 +1000", + "Min" : 59, + "Max" : 59 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "date" : "2024-11-04 02:21:00 +1000", + "Avg" : 55, + "Max" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "Avg" : 63, + "Min" : 63, + "date" : "2024-11-04 02:26:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 59, + "Avg" : 59, + "Min" : 59, + "date" : "2024-11-04 02:31:00 +1000" + }, + { + "Max" : 57, + "source" : "Madhava’s Apple Watch", + "Avg" : 57, + "date" : "2024-11-04 02:35:00 +1000", + "Min" : 57 + }, + { + "Max" : 56, + "date" : "2024-11-04 02:42:00 +1000", + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 54, + "date" : "2024-11-04 02:49:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 54, + "Min" : 54 + }, + { + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 02:53:00 +1000", + "Min" : 54, + "Avg" : 54 + }, + { + "Avg" : 54, + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "Min" : 54, + "date" : "2024-11-04 02:56:00 +1000" + }, + { + "Min" : 54, + "Avg" : 54, + "Max" : 54, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 02:57:00 +1000" + }, + { + "Min" : 52, + "Avg" : 52, + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "date" : "2024-11-04 03:03:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 03:05:00 +1000", + "Max" : 53, + "Min" : 53, + "Avg" : 53 + }, + { + "date" : "2024-11-04 03:13:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 52, + 
"Avg" : 52, + "Max" : 52 + }, + { + "Max" : 51, + "Min" : 51, + "Avg" : 51, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 03:16:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 52, + "Min" : 52, + "Avg" : 52, + "date" : "2024-11-04 03:23:00 +1000" + }, + { + "date" : "2024-11-04 03:26:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 56, + "Min" : 56, + "Avg" : 56 + }, + { + "Min" : 53, + "date" : "2024-11-04 03:28:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 53, + "Max" : 53 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 44, + "Max" : 44, + "Avg" : 44, + "date" : "2024-11-04 03:32:00 +1000" + }, + { + "Max" : 49, + "date" : "2024-11-04 03:38:00 +1000", + "Min" : 49, + "Avg" : 49, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 55.580982208251953, + "source" : "Madhava’s Apple Watch", + "Min" : 55.580982208251953, + "Avg" : 55.580982208251953, + "date" : "2024-11-04 03:40:00 +1000" + }, + { + "Avg" : 58, + "date" : "2024-11-04 03:44:00 +1000", + "Max" : 58, + "source" : "Madhava’s Apple Watch", + "Min" : 58 + }, + { + "Min" : 48, + "Max" : 48, + "date" : "2024-11-04 03:48:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 48 + }, + { + "date" : "2024-11-04 03:51:00 +1000", + "Avg" : 50, + "source" : "Madhava’s Apple Watch", + "Max" : 50, + "Min" : 50 + }, + { + "Avg" : 54, + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "Min" : 54, + "date" : "2024-11-04 03:56:00 +1000" + }, + { + "date" : "2024-11-04 04:00:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 50, + "Max" : 50, + "Avg" : 50 + }, + { + "Max" : 48, + "source" : "Madhava’s Apple Watch", + "Min" : 48, + "date" : "2024-11-04 04:02:00 +1000", + "Avg" : 48 + }, + { + "Avg" : 49, + "Max" : 49, + "date" : "2024-11-04 04:08:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 49 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 49, + "Max" : 49, + "date" : "2024-11-04 04:11:00 +1000", + "Avg" : 49 + }, + { + "Avg" : 49, + "Min" : 49, + "date" : "2024-11-04 04:18:00 +1000", + "Max" : 49, + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 55, + "Max" : 55, + "Min" : 55, + "date" : "2024-11-04 04:24:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 04:26:00 +1000", + "Avg" : 62.000000000000007, + "Max" : 66, + "Min" : 58 + }, + { + "Min" : 59, + "date" : "2024-11-04 04:30:00 +1000", + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "Max" : 59 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 52, + "date" : "2024-11-04 04:36:00 +1000", + "Min" : 52, + "Max" : 52 + }, + { + "Avg" : 54, + "date" : "2024-11-04 04:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 54, + "Min" : 54 + }, + { + "Min" : 55, + "date" : "2024-11-04 04:47:00 +1000", + "Max" : 55, + "source" : "Madhava’s Apple Watch", + "Avg" : 55 + }, + { + "date" : "2024-11-04 04:50:00 +1000", + "Min" : 54, + "Max" : 54, + "Avg" : 54, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-04 04:58:00 +1000", + "Max" : 49, + "source" : "Madhava’s Apple Watch", + "Min" : 49, + "Avg" : 49 + }, + { + "Max" : 55, + "Min" : 55, + "date" : "2024-11-04 05:04:00 +1000", + "Avg" : 55, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 53, + "date" : "2024-11-04 05:06:00 +1000", + "Max" : 53, + "Avg" : 53, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 46.000000000000007, + "Min" : 46.000000000000007, + "date" : "2024-11-04 
05:11:00 +1000", + "Avg" : 46.000000000000007 + }, + { + "date" : "2024-11-04 05:19:00 +1000", + "Max" : 71, + "Min" : 71, + "Avg" : 71, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Min" : 67, + "Avg" : 67, + "date" : "2024-11-04 05:24:00 +1000" + }, + { + "Min" : 57, + "Avg" : 57, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 05:28:00 +1000", + "Max" : 57 + }, + { + "Avg" : 64, + "date" : "2024-11-04 05:29:00 +1000", + "Min" : 64, + "source" : "Madhava’s Apple Watch", + "Max" : 64 + }, + { + "Max" : 55, + "date" : "2024-11-04 05:33:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 55, + "Min" : 55 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 05:39:00 +1000", + "Min" : 59, + "Avg" : 59, + "Max" : 59 + }, + { + "Avg" : 59.124446868896484, + "date" : "2024-11-04 05:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 59.124446868896484, + "Max" : 59.124446868896484 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 60, + "Avg" : 60, + "Max" : 60, + "date" : "2024-11-04 05:44:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 68, + "date" : "2024-11-04 05:46:00 +1000", + "Min" : 68, + "Avg" : 68 + }, + { + "Max" : 67, + "Avg" : 67, + "date" : "2024-11-04 05:50:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-11-04 05:55:00 +1000", + "Min" : 63, + "Avg" : 63 + }, + { + "date" : "2024-11-04 06:04:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Max" : 63, + "Avg" : 63 + }, + { + "Min" : 64, + "Max" : 64, + "date" : "2024-11-04 06:05:00 +1000", + "Avg" : 64, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-04 06:08:00 +1000", + "Min" : 69, + "Avg" : 69, + "source" : "Madhava’s Apple Watch", + "Max" : 69 + }, + { + "date" : "2024-11-04 06:14:00 +1000", + "Max" : 71, + "source" : "Madhava’s Apple Watch", + "Min" : 71, + "Avg" : 71 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 06:15:00 +1000", + "Min" : 63, + "Max" : 63, + "Avg" : 63 + }, + { + "Min" : 66, + "Max" : 66, + "Avg" : 66, + "date" : "2024-11-04 06:58:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Avg" : 59, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 07:04:00 +1000", + "Max" : 59, + "Min" : 59 + }, + { + "date" : "2024-11-04 07:07:00 +1000", + "Max" : 65, + "Min" : 65, + "source" : "Madhava’s Apple Watch", + "Avg" : 65 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 66, + "Avg" : 66, + "date" : "2024-11-04 07:13:00 +1000", + "Max" : 66 + }, + { + "Max" : 60, + "Avg" : 60, + "Min" : 60, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 07:17:00 +1000" + }, + { + "Avg" : 71, + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Min" : 71, + "date" : "2024-11-04 07:23:00 +1000" + }, + { + "date" : "2024-11-04 07:27:00 +1000", + "Avg" : 73, + "source" : "Madhava’s Apple Watch", + "Min" : 73, + "Max" : 73 + }, + { + "date" : "2024-11-04 07:34:00 +1000", + "Min" : 69, + "Avg" : 69, + "source" : "Madhava’s Apple Watch", + "Max" : 69 + }, + { + "Avg" : 72, + "date" : "2024-11-04 07:40:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 72, + "Max" : 72 + }, + { + "date" : "2024-11-04 07:42:00 +1000", + "Avg" : 68, + "Max" : 68, + "source" : "Madhava’s Apple Watch", + "Min" : 68 + }, + { + "Max" : 70, + "date" : "2024-11-04 07:47:00 +1000", + "Min" : 70, + "source" : "Madhava’s Apple Watch", + "Avg" : 70 + }, 
+ { + "Min" : 70, + "date" : "2024-11-04 07:52:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 70, + "Avg" : 70 + }, + { + "date" : "2024-11-04 07:57:00 +1000", + "Min" : 67, + "Avg" : 67, + "Max" : 67, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-04 08:01:00 +1000", + "Max" : 69, + "source" : "Madhava’s Apple Watch", + "Min" : 69, + "Avg" : 69 + }, + { + "Avg" : 87, + "date" : "2024-11-04 08:09:00 +1000", + "Max" : 87, + "Min" : 87, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-04 08:11:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 93, + "Max" : 93, + "Avg" : 93 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 85, + "Avg" : 84.365671641791053, + "Min" : 80, + "date" : "2024-11-04 08:13:00 +1000" + }, + { + "Avg" : 86, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 08:14:00 +1000", + "Max" : 86, + "Min" : 86 + }, + { + "date" : "2024-11-04 08:15:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 84, + "Max" : 84, + "Min" : 84 + }, + { + "Max" : 92.000000000000014, + "date" : "2024-11-04 08:16:00 +1000", + "Min" : 92.000000000000014, + "Avg" : 92.000000000000014, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-04 08:17:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 97, + "Avg" : 91.939999999999998, + "Min" : 87 + }, + { + "date" : "2024-11-04 08:20:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 82, + "Min" : 82, + "Avg" : 82 + }, + { + "Min" : 80, + "date" : "2024-11-04 08:22:00 +1000", + "Max" : 80, + "source" : "Madhava’s Apple Watch", + "Avg" : 80 + }, + { + "date" : "2024-11-04 08:25:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 97.178571428571431, + "Max" : 99, + "Min" : 94 + }, + { + "Min" : 87, + "Max" : 100, + "source" : "Madhava’s Apple Watch", + "Avg" : 93.443877551020407, + "date" : "2024-11-04 08:26:00 +1000" + }, + { + "Max" : 96, + "Min" : 87, + "Avg" : 93.108433734939766, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 08:27:00 +1000" + }, + { + "Max" : 99, + "Avg" : 92.990099009900987, + "date" : "2024-11-04 08:28:00 +1000", + "Min" : 88, + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 90, + "source" : "Madhava’s Apple Watch", + "Avg" : 91.769230769230759, + "date" : "2024-11-04 08:29:00 +1000", + "Max" : 93 + }, + { + "date" : "2024-11-04 08:30:00 +1000", + "Max" : 100, + "source" : "Madhava’s Apple Watch", + "Min" : 89, + "Avg" : 93.78125 + }, + { + "date" : "2024-11-04 08:31:00 +1000", + "Avg" : 94.010309278350505, + "source" : "Madhava’s Apple Watch", + "Max" : 95, + "Min" : 91 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 93, + "Avg" : 96.819767441860463, + "Max" : 99, + "date" : "2024-11-04 08:32:00 +1000" + }, + { + "date" : "2024-11-04 08:33:00 +1000", + "Avg" : 94, + "Min" : 94, + "Max" : 94, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-04 08:37:00 +1000", + "Max" : 91, + "Avg" : 91, + "source" : "Madhava’s Apple Watch", + "Min" : 91 + }, + { + "date" : "2024-11-04 08:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 84, + "Min" : 84, + "Avg" : 84 + }, + { + "Avg" : 71.301628112792969, + "date" : "2024-11-04 08:43:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 72, + "Min" : 70.603256225585938 + }, + { + "date" : "2024-11-04 08:49:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 68, + "Min" : 68, + "Avg" : 68 + }, + { + "Avg" : 63, + "source" : "Madhava’s Apple Watch", + "Max" : 63, + "date" : "2024-11-04 08:55:00 +1000", + "Min" : 63 
+ }, + { + "date" : "2024-11-04 09:00:00 +1000", + "Avg" : 64, + "source" : "Madhava’s Apple Watch", + "Max" : 64, + "Min" : 64 + }, + { + "Min" : 68, + "Avg" : 68, + "Max" : 68, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 09:03:00 +1000" + }, + { + "Max" : 65, + "Avg" : 65, + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "date" : "2024-11-04 09:08:00 +1000" + }, + { + "Max" : 67, + "source" : "Madhava’s Apple Watch", + "Avg" : 67, + "Min" : 67, + "date" : "2024-11-04 09:12:00 +1000" + }, + { + "date" : "2024-11-04 09:15:00 +1000", + "source" : "Madhava’s Apple Watch", + "Avg" : 68, + "Max" : 68, + "Min" : 68 + }, + { + "date" : "2024-11-04 09:25:00 +1000", + "Max" : 65, + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "Avg" : 65 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 60, + "Min" : 60, + "date" : "2024-11-04 09:30:00 +1000", + "Max" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Max" : 63, + "Avg" : 63, + "date" : "2024-11-04 09:35:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 71, + "Avg" : 71, + "date" : "2024-11-04 09:39:00 +1000", + "Min" : 71 + }, + { + "date" : "2024-11-04 09:45:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 67, + "Avg" : 67, + "Min" : 67 + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 76, + "Max" : 76, + "Avg" : 76, + "date" : "2024-11-04 09:50:00 +1000" + }, + { + "Min" : 75, + "Avg" : 75, + "date" : "2024-11-04 09:53:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 75 + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 09:56:00 +1000", + "Max" : 72, + "Avg" : 72, + "Min" : 72 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 81, + "Min" : 81, + "date" : "2024-11-04 10:01:00 +1000", + "Avg" : 81 + }, + { + "Max" : 65, + "Avg" : 65, + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "date" : "2024-11-04 10:09:00 +1000" + }, + { + "date" : "2024-11-04 10:14:00 +1000", + "Max" : 65, + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "Avg" : 65 + }, + { + "date" : "2024-11-04 10:16:00 +1000", + "Max" : 66, + "source" : "Madhava’s Apple Watch", + "Min" : 66, + "Avg" : 66 + }, + { + "Max" : 64, + "source" : "Madhava’s Apple Watch", + "Min" : 63.458126068115234, + "Avg" : 63.729063034057624, + "date" : "2024-11-04 10:21:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 63, + "Avg" : 63, + "date" : "2024-11-04 10:29:00 +1000", + "Max" : 63 + }, + { + "Max" : 57, + "source" : "Madhava’s Apple Watch", + "Min" : 57, + "Avg" : 57, + "date" : "2024-11-04 10:32:00 +1000" + }, + { + "Max" : 59, + "Avg" : 59, + "date" : "2024-11-04 10:38:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 59 + }, + { + "Avg" : 60, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 10:43:00 +1000", + "Min" : 60, + "Max" : 60 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 59, + "Max" : 59, + "Min" : 59, + "date" : "2024-11-04 10:45:00 +1000" + }, + { + "Max" : 66, + "Min" : 54, + "source" : "Madhava’s Apple Watch", + "Avg" : 58.866666666666674, + "date" : "2024-11-04 10:48:00 +1000" + }, + { + "Max" : 67, + "Min" : 55, + "Avg" : 59.982558146465657, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 10:49:00 +1000" + }, + { + "Max" : 61, + "date" : "2024-11-04 10:50:00 +1000", + "Avg" : 57.023255813953483, + "source" : "Madhava’s Apple Watch", + "Min" : 54 + }, + { + "Min" : 55, + "source" : "Madhava’s Apple Watch", + "Max" : 64, + "Avg" : 57.961529382419137, + "date" : "2024-11-04 10:51:00 +1000" + 
}, + { + "Max" : 61, + "Min" : 57, + "Avg" : 58.31240581269315, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 10:52:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 58, + "Min" : 55, + "Avg" : 56.610465116279066, + "date" : "2024-11-04 10:53:00 +1000" + }, + { + "Max" : 70, + "date" : "2024-11-04 10:54:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 59, + "Avg" : 64.137500005420293 + }, + { + "Avg" : 56, + "Max" : 56, + "Min" : 56, + "date" : "2024-11-04 10:55:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Min" : 69, + "date" : "2024-11-04 10:56:00 +1000", + "Max" : 83, + "source" : "Madhava’s Apple Watch", + "Avg" : 76 + }, + { + "Avg" : 72.396419437340157, + "date" : "2024-11-04 10:57:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 65, + "Max" : 81 + }, + { + "Min" : 59, + "Avg" : 66.77860696517412, + "date" : "2024-11-04 10:58:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 80 + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 61.884816752678439, + "date" : "2024-11-04 10:59:00 +1000", + "Min" : 58, + "Max" : 68 + }, + { + "Avg" : 55.314285714285717, + "Max" : 59, + "Min" : 52, + "date" : "2024-11-04 11:00:00 +1000", + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 76, + "date" : "2024-11-04 11:01:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 51, + "Avg" : 62.665990011691875 + }, + { + "Max" : 70, + "date" : "2024-11-04 11:02:00 +1000", + "Avg" : 64.58425584255842, + "source" : "Madhava’s Apple Watch", + "Min" : 61 + }, + { + "date" : "2024-11-04 11:03:00 +1000", + "Max" : 66, + "Avg" : 65.500000000000014, + "source" : "Madhava’s Apple Watch", + "Min" : 65 + }, + { + "date" : "2024-11-04 11:13:00 +1000", + "Max" : 52, + "source" : "Madhava’s Apple Watch", + "Avg" : 52, + "Min" : 52 + }, + { + "date" : "2024-11-04 11:17:00 +1000", + "Max" : 56, + "Min" : 56, + "Avg" : 56, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 52, + "source" : "Madhava’s Apple Watch", + "Min" : 52, + "Avg" : 52, + "date" : "2024-11-04 11:24:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 52, + "Max" : 52, + "date" : "2024-11-04 11:25:00 +1000", + "Min" : 52 + }, + { + "date" : "2024-11-04 11:30:00 +1000", + "Max" : 57, + "Avg" : 57, + "Min" : 57, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-04 11:35:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 55, + "Avg" : 55, + "Max" : 55 + }, + { + "Max" : 67, + "date" : "2024-11-04 11:49:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 67, + "Avg" : 67 + }, + { + "Max" : 78, + "Min" : 78, + "date" : "2024-11-04 11:55:00 +1000", + "Avg" : 78, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 74, + "date" : "2024-11-04 11:59:00 +1000", + "Avg" : 74, + "Min" : 74 + }, + { + "Max" : 82, + "Min" : 76, + "Avg" : 79, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 12:05:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 12:07:00 +1000", + "Max" : 78, + "Avg" : 78, + "Min" : 78 + }, + { + "date" : "2024-11-04 12:10:00 +1000", + "source" : "Madhava’s Apple Watch", + "Max" : 81, + "Min" : 81, + "Avg" : 81 + }, + { + "date" : "2024-11-04 12:14:00 +1000", + "Avg" : 80, + "Min" : 80, + "Max" : 80, + "source" : "Madhava’s Apple Watch" + }, + { + "source" : "Madhava’s Apple Watch", + "Min" : 91, + "Max" : 91, + "date" : "2024-11-04 12:18:00 +1000", + "Avg" : 91 + }, + { + "date" : "2024-11-04 12:22:00 +1000", + "Max" : 88, + "Avg" 
: 88, + "Min" : 88, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 66, + "Min" : 66, + "source" : "Madhava’s Apple Watch", + "Avg" : 66, + "date" : "2024-11-04 12:27:00 +1000" + }, + { + "source" : "Madhava’s Apple Watch", + "Avg" : 98.5, + "Min" : 98, + "Max" : 99, + "date" : "2024-11-04 12:29:00 +1000" + }, + { + "Min" : 99, + "Avg" : 100.98412698412697, + "date" : "2024-11-04 12:30:00 +1000", + "Max" : 103, + "source" : "Madhava’s Apple Watch" + }, + { + "Max" : 108, + "Min" : 104, + "Avg" : 105.9818181818182, + "source" : "Madhava’s Apple Watch", + "date" : "2024-11-04 12:31:00 +1000" + }, + { + "date" : "2024-11-04 12:34:00 +1000", + "Avg" : 93, + "source" : "Madhava’s Apple Watch", + "Min" : 93, + "Max" : 93 + }, + { + "source" : "Madhava’s Apple Watch", + "Max" : 90, + "Min" : 90, + "date" : "2024-11-04 12:35:00 +1000", + "Avg" : 90 + }, + { + "date" : "2024-11-04 12:41:00 +1000", + "source" : "Madhava’s Apple Watch", + "Min" : 93, + "Avg" : 93, + "Max" : 93 + }, + { + "Min" : 90, + "Max" : 90, + "date" : "2024-11-04 12:49:00 +1000", + "Avg" : 90, + "source" : "Madhava’s Apple Watch" + }, + { + "date" : "2024-11-04 12:52:00 +1000", + "Max" : 73, + "Min" : 73, + "Avg" : 73, + "source" : "Madhava’s Apple Watch" + } + ], + "name" : "heart_rate", + "units" : "count\/min" + } + ] + } +} + +-------------------------------------------------- +Timestamp: 2024-11-04 13:02:27.792411 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:02:50.655234 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:03:56.631002 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + 
sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:04:04.273194 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:04:49.182152 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:05:09.982510 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:06:11.354496 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + 
sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:06:18.796441 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:06:59.560019 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:07:44.618270 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:08:08.500296 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + 
x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:08:25.861522 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:08:30.374097 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:08:40.804604 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 
13:08:44.610602 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:08:48.424375 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:08:53.337369 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:09:05.384150 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: 
en-US,en;q=0.9 +Body: + + +-------------------------------------------------- +Timestamp: 2024-11-04 13:09:21.873584 +Request Type: GET +Headers: + host: madhava.syftbox.madhavajay.com + x-real-ip: 172.17.0.1 + x-forwarded-for: 172.17.0.1 + x-forwarded-proto: https + connection: close + cache-control: max-age=0 + sec-ch-ua: "Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99" + sec-ch-ua-mobile: ?0 + sec-ch-ua-platform: "macOS" + upgrade-insecure-requests: 1 + user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 + accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7 + sec-fetch-site: none + sec-fetch-mode: navigate + sec-fetch-user: ?1 + sec-fetch-dest: document + accept-encoding: gzip, deflate, br, zstd + accept-language: en-US,en;q=0.9 +Body: + + +-------------------------------------------------- diff --git a/packages/syft-extras/.archive/examples/fedhr/routes.yaml b/packages/syft-extras/.archive/examples/fedhr/routes.yaml new file mode 100644 index 00000000000..37ab3f53343 --- /dev/null +++ b/packages/syft-extras/.archive/examples/fedhr/routes.yaml @@ -0,0 +1,6 @@ +routes: + heart_rate: + file: healthkit_importer.py + methods: + POST: {} + GET: {} diff --git a/packages/syft-extras/.archive/examples/proxy/.gitignore b/packages/syft-extras/.archive/examples/proxy/.gitignore new file mode 100644 index 00000000000..44f4f119a4f --- /dev/null +++ b/packages/syft-extras/.archive/examples/proxy/.gitignore @@ -0,0 +1 @@ +certs/* \ No newline at end of file diff --git a/packages/syft-extras/.archive/examples/proxy/client_nginx.conf b/packages/syft-extras/.archive/examples/proxy/client_nginx.conf new file mode 100644 index 00000000000..a8d06484f57 --- /dev/null +++ b/packages/syft-extras/.archive/examples/proxy/client_nginx.conf @@ -0,0 +1,73 @@ +events { + worker_connections 1024; +} +http { + server { + listen 80; + server_name syftbox.madhavajay.com; + location / { + proxy_pass http://host.docker.internal:8083; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + } + # server { + # listen 80; + # server_name bigquery-openmined-org.syftbox.openmined.dev; + # location /bigquery { + # proxy_pass http://host.docker.internal:9081; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # location / { + # proxy_pass http://host.docker.internal:8082; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # location /chat { + # proxy_pass http://host.docker.internal:9082; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # } + # server { + # listen 443 ssl; + # server_name madhava.syftbox.madhavajay.com; + # client_max_body_size 256M; + # ssl_certificate tls_cert_madhava.pem; + # ssl_certificate_key tls_cert_madhava_key.pem; + # ssl_protocols TLSv1.2 TLSv1.3; + # ssl_prefer_server_ciphers on; + # ssl_ciphers HIGH:!aNULL:!MD5; + # location /chat { + # proxy_pass 
http://host.docker.internal:9082; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # location / { + # proxy_pass http://host.docker.internal:8082; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # } + # server { + # listen 80; + # server_name madhava.syftbox.madhavajay.com; + # location /.well-known/acme-challenge/ { + # root /usr/share/nginx/html; + # } + # return 301 https://$host$request_uri; + # } +} \ No newline at end of file diff --git a/packages/syft-extras/.archive/examples/proxy/proxy.dockerfile b/packages/syft-extras/.archive/examples/proxy/proxy.dockerfile new file mode 100644 index 00000000000..4e8c02ba6ec --- /dev/null +++ b/packages/syft-extras/.archive/examples/proxy/proxy.dockerfile @@ -0,0 +1,42 @@ +# docker build -f proxy/proxy.dockerfile -t syftbox-proxy proxy +# docker run -d -p 80:80 -p 443:443 -v $(pwd)/proxy/server_nginx.conf:/etc/nginx/nginx.conf --name server-syftbox-proxy syftbox-proxy + +# client +# docker run -d -p 9980:80 -p 9943:443 -v $(pwd)/proxy/client_nginx.conf:/etc/nginx/nginx.conf --name client-syftbox-proxy syftbox-proxy + + +# bore + tls +# ssh -i "./keys/test-madhava-dns_key.pem" "azureuser@20.38.32.165" +# bore local 9080 --to 20.38.32.165 -p 6000 +# bore local 9443 --to 20.38.32.165 -p 6001 + + +# openssl genrsa -out syftbox.localhost.key 2048 +# openssl req -new -key syftbox.localhost.key -out syftbox.localhost.csr +# openssl x509 -req -in syftbox.localhost.csr -signkey syftbox.localhost.key -out syftbox.localhost.crt -days 365 -extfile <(printf "subjectAltName=DNS:syftbox.localhost,DNS:*.syftbox.localhost") + +# Use an official Nginx base image +FROM nginx:alpine + +# Install inotify-tools to monitor file changes +RUN apk update && apk add inotify-tools + +# Copy your custom Nginx configuration (optional, if needed) +# COPY ./nginx.conf /etc/nginx/nginx.conf + +COPY ./certs/* /etc/nginx/ + +# Copy the hot-reload bash script to the container +COPY ./start.sh /usr/local/bin/nginx-reload.sh + +# Make the bash script executable +RUN chmod +x /usr/local/bin/nginx-reload.sh + +# Expose port 443 for HTTPS +EXPOSE 443 + +# Expose port 80 for HTTP traffic +EXPOSE 80 + +# Start the bash script in the background and Nginx in the foreground +CMD ["/bin/sh", "-c", "/usr/local/bin/nginx-reload.sh & nginx -g 'daemon off;'"] diff --git a/packages/syft-extras/.archive/examples/proxy/server_nginx.conf b/packages/syft-extras/.archive/examples/proxy/server_nginx.conf new file mode 100644 index 00000000000..f0fb9638641 --- /dev/null +++ b/packages/syft-extras/.archive/examples/proxy/server_nginx.conf @@ -0,0 +1,181 @@ +events { + worker_connections 1024; +} + +http { + resolver 127.0.0.1 valid=30s; + + # log_format custom_log '$remote_addr - $remote_user [$time_local] ' + # '"$request" $status $body_bytes_sent ' + # '"$http_referer" "$http_user_agent" ' + # 'subdomain=$subdomain domain=$domain tld=$tld suffix=$suffix'; + + # access_log /var/log/nginx/custom_access.log custom_log; + + # Increase server_names_hash_bucket_size to handle longer domain names + server_names_hash_bucket_size 128; + + server { + listen 80; + listen 443 ssl; + server_name bigquery-openmined-org.syftbox.openmined.dev; + + ssl_certificate syftbox.openmined.dev.crt; + ssl_certificate_key 
syftbox.openmined.dev.key; + + location / { + # Proxy the request to the FastAPI server + proxy_pass http://host.docker.internal:9080; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + } + + # server { + # listen 80; + # listen 443 ssl; + # server_name bigquery_openmined_org.syftbox.localhost; + + # ssl_certificate syftbox.localhost.crt; + # ssl_certificate_key syftbox.localhost.key; + + # location / { + # # Rewrite all URLs to the target path, keeping the remaining path and query string + # rewrite ^/(.*)$ /datasites/bigquery@openmined.org/$1 break; + + # # Proxy the request to the FastAPI server + # proxy_pass http://host.docker.internal:5001; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + + # # Special route for /submit_form + # location /submit_form { + # proxy_pass http://host.docker.internal:9081; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # } + + # server { + # listen 80; + # listen 443 ssl; + # # Use a regex in server_name to capture the different parts + # server_name ~^(?.+?)_(?.+?)_(?.+?)\.(?.+?)\.syftbox\.localhost$; + + # ssl_certificate syftbox.localhost.crt; + # ssl_certificate_key syftbox.localhost.key; + + # location / { + # # Use the captured variables from server_name in the rewrite + # rewrite ^/(.*)$ /datasites/$subdomain@$domain.$tld.$suffix/$1 break; + + # # Proxy the request to the FastAPI server + # proxy_pass http://host.docker.internal:8082; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # } + + server { + listen 80; + listen 443 ssl; + server_name syftbox.openmined.dev; + + ssl_certificate syftbox.openmined.dev.crt; + ssl_certificate_key syftbox.openmined.dev.key; + + location / { + proxy_pass http://host.docker.internal:5001; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + } + + # server { + # listen 80; + # listen 443 ssl; + # server_name bigquery_openmined_org.syftbox.localhost; + + # location / { + # # Rewrite all URLs to the target path, keeping the remaining path and query string + # rewrite ^/(.*)$ /datasites/bigquery@openmined.org/$1 break; + + # # Proxy the request to the FastAPI server + # proxy_pass http://host.docker.internal:5001; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + + # # Special route for /submit_form + # location /submit_form { + # proxy_pass http://host.docker.internal:9081; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # } + + # server { + # listen 80; + # listen 443 ssl; + # server_name madhava-openmined-org.syftbox.localhost; + + # ssl_certificate syftbox.localhost.crt; + # ssl_certificate_key 
syftbox.localhost.key; + + # location / { + # proxy_pass http://host.docker.internal:8082; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # } + + # server { + # listen 80; + # listen 443 ssl; + # server_name bigquery.madhava-openmined-org.syftbox.localhost; + + # ssl_certificate syftbox.localhost.crt; + # ssl_certificate_key syftbox.localhost.key; + + # location / { + # proxy_pass http://host.docker.internal:9081; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # } + + + # server { + # listen 80; + # server_name openmined.localhost; + + # location / { + # proxy_pass http://host.docker.internal:5001; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + # } + # } + + +} diff --git a/packages/syft-extras/.archive/examples/proxy/start.sh b/packages/syft-extras/.archive/examples/proxy/start.sh new file mode 100755 index 00000000000..02858904dcf --- /dev/null +++ b/packages/syft-extras/.archive/examples/proxy/start.sh @@ -0,0 +1,16 @@ +#!/bin/ash + +# Path to your Nginx configuration file or directory +CONFIG_PATH="/etc/nginx/nginx.conf" +# Optional: Monitor the entire directory for changes +# CONFIG_PATH="/etc/nginx/" + +# Run an infinite loop to monitor the configuration file +while true; do + # Monitor for modify, move, create, or delete events on the config file + inotifywait -e modify,move,create,delete $CONFIG_PATH + + # Reload Nginx configuration + echo "Nginx configuration changed. Reloading..." 
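+    # `nginx -s reload` signals the running nginx master process to re-read its configuration
+    # and gracefully replace worker processes, so config changes picked up by inotifywait
+    # apply without dropping in-flight connections.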
+ nginx -s reload +done diff --git a/packages/syft-extras/.archive/examples/rpc/app.py b/packages/syft-extras/.archive/examples/rpc/app.py new file mode 100644 index 00000000000..c22aebcd0ca --- /dev/null +++ b/packages/syft-extras/.archive/examples/rpc/app.py @@ -0,0 +1,30 @@ +from syft_event import Response, Server +import time +from utils import LoginResponse, body_to_obj +from syftbox.lib import Client + +client = Client.load() +print("> Client", client.email) +app = Server(app_name="test", client=client, message_timeout=120) + + +@app.get("/public/rpc/test/listen") +def login(request): + print("Request Headers", request.headers) + print("Request Body", request.decode()) + + user = body_to_obj(request) + + result = LoginResponse(username=user.name, token=1) + headers = {} + + headers["content-type"] = "application/json" + headers["x-syft-rpc-object-type"] = type(result).__name__ + + time.sleep(10) + + return Response(content=result, status_code=200, headers=headers) + + +if __name__ == "__main__": + app.run() diff --git a/packages/syft-extras/.archive/examples/rpc/client.ipynb b/packages/syft-extras/.archive/examples/rpc/client.ipynb new file mode 100644 index 00000000000..92eb3af7ff6 --- /dev/null +++ b/packages/syft-extras/.archive/examples/rpc/client.ipynb @@ -0,0 +1,150 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# !uv pip install -e ../../syft-rpc\n", + "# !uv pip install -e ../../syft-event" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "from utils import User, body_to_obj" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "from syft_rpc import Request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "user = User(id=1, name=\"Alice\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "request = Request()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# api = request.make_api(\"syft://madhava@openmined.org/public/rpc/test/rpc.service.yaml\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "headers = {}\n", + "headers[\"content-type\"] = \"application/json\"\n", + "headers[\"object-type\"] = type(user).__name__\n", + "response = request.get(\n", + " \"syft://madhava@openmined.org/public/rpc/test/listen\",\n", + " body=user.dump(),\n", + " headers=headers,\n", + ")\n", + "response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "result = response.wait()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "body_to_obj(response)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": 
"Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/syft-extras/.archive/examples/rpc/client.py b/packages/syft-extras/.archive/examples/rpc/client.py new file mode 100644 index 00000000000..0850ca2604f --- /dev/null +++ b/packages/syft-extras/.archive/examples/rpc/client.py @@ -0,0 +1,20 @@ +from utils import User, body_to_obj +from syft_rpc import Request + +user = User(id=1, name="Alice") + +request = Request() + +headers = {} +headers["content-type"] = "application/json" +headers["x-syft-rpc-object-type"] = type(user).__name__ +response = request.get( + "syft://madhava@openmined.org/public/rpc/test/listen", + body=user.dump(), + headers=headers, +) + +result = response.wait() + +result = body_to_obj(response) +print(result) diff --git a/packages/syft-extras/.archive/examples/rpc/requirements.txt b/packages/syft-extras/.archive/examples/rpc/requirements.txt new file mode 100644 index 00000000000..4f3f6135f16 --- /dev/null +++ b/packages/syft-extras/.archive/examples/rpc/requirements.txt @@ -0,0 +1,3 @@ +cbor2 +pydantic +watchdog diff --git a/packages/syft-extras/.archive/examples/rpc/serde.ipynb b/packages/syft-extras/.archive/examples/rpc/serde.ipynb new file mode 100644 index 00000000000..65d17249186 --- /dev/null +++ b/packages/syft-extras/.archive/examples/rpc/serde.ipynb @@ -0,0 +1,266 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "from app import User, to_obj\n", + "from ulid import ULID\n", + "\n", + "from rpc import Request, RequestMessage, SyftBoxURL\n", + "from syftbox.lib import Client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "user = User(id=1, name=\"Alice\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "x = user.dump()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "x" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "u = SyftBoxURL(\"syft://madhava@openmined.org/public/rpc/test/listen\")\n", + "u" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "x = user.dump()\n", + "type(x)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "m = RequestMessage(\n", + " ulid=ULID(),\n", + " url=u,\n", + " sender=\"a@b.com\",\n", + " path=\"/test/\",\n", + " type=type(user).__name__,\n", + " body=user.dump(),\n", + " headers={},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + 
"metadata": {}, + "outputs": [], + "source": [ + "y = m.dump()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "type(y)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "c = Client.load()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "request = Request()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "headers = {}\n", + "headers[\"content-type\"] = \"application/json\"\n", + "response = request.get(\n", + " \"syft://madhava@openmined.org/public/rpc/test/listen\",\n", + " body=user.dump(),\n", + " headers=headers,\n", + ")\n", + "response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# response.wait()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "r = response.resolve" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "r" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "x = to_obj(r.decode(), r.headers)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "x" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/syft-extras/.archive/examples/rpc/utils.py b/packages/syft-extras/.archive/examples/rpc/utils.py new file mode 100644 index 00000000000..1283f4a5851 --- /dev/null +++ b/packages/syft-extras/.archive/examples/rpc/utils.py @@ -0,0 +1,29 @@ +from syft_rpc import JSONModel +from syft_rpc import Future + +OBJECT_TYPE_HEADER = "x-syft-rpc-object-type" + + +class User(JSONModel): + id: int + name: str + + +class LoginResponse(JSONModel): + username: str + token: int = 123 + + +TypeRegistry = {"User": User, "LoginResponse": LoginResponse} + + +def to_obj(obj, headers): + if OBJECT_TYPE_HEADER in headers and headers[OBJECT_TYPE_HEADER] in TypeRegistry: + constructor = TypeRegistry[headers[OBJECT_TYPE_HEADER]] + return constructor(**obj) + + +def body_to_obj(message): + if isinstance(message, Future): + message = message.wait() + return to_obj(message.decode(), message.headers) diff --git a/packages/syft-extras/.archive/fetchers/netflix/.gitignore b/packages/syft-extras/.archive/fetchers/netflix/.gitignore new file mode 100644 index 00000000000..04bd1fda871 --- /dev/null +++ 
b/packages/syft-extras/.archive/fetchers/netflix/.gitignore @@ -0,0 +1,2 @@ +inputs/* +output \ No newline at end of file diff --git a/packages/syft-extras/.archive/fetchers/netflix/README.md b/packages/syft-extras/.archive/fetchers/netflix/README.md new file mode 100644 index 00000000000..9687c67c5ad --- /dev/null +++ b/packages/syft-extras/.archive/fetchers/netflix/README.md @@ -0,0 +1,22 @@ +# Netflix Fetcher + +This will run periodically and download your netflix data so you can keep your stats up to date. + +## Instructions + +Add email, password and profile to text files in ./inputs + +``` +├── inputs +│   ├── NETFLIX_EMAIL.txt +│   ├── NETFLIX_PASSWORD.txt +│   └── NETFLIX_PROFILE.txt +``` + +## Profile ID + +To get your profile ID go to the Profile Gate: +https://www.netflix.com/ProfilesGate + +Right click and copy the url for your profile and get the part after: +https://www.netflix.com/SwitchProfile?tkn= diff --git a/packages/syft-extras/.archive/fetchers/netflix/main.py b/packages/syft-extras/.archive/fetchers/netflix/main.py new file mode 100644 index 00000000000..4e8c892e1ab --- /dev/null +++ b/packages/syft-extras/.archive/fetchers/netflix/main.py @@ -0,0 +1,70 @@ +import os +import time + +from selenium import webdriver +from selenium.webdriver.chrome.options import Options +from selenium.webdriver.chrome.service import Service +from selenium.webdriver.common.by import By +from selenium.webdriver.common.keys import Keys + +chrome_driver_path = os.environ["CHROMEDRIVER_PATH"] +email = os.environ["NETFLIX_EMAIL"] +password = os.environ["NETFLIX_PASSWORD"] +profile = os.environ["NETFLIX_PROFILE"] +output_dir = os.environ["OUTPUT_DIR"] + +print(f"🍿 Downloading Netflix Activity for: {email} Profile {profile}") + +# Set up WebDriver (for Chrome) +chrome_options = Options() +prefs = { + "download.default_directory": output_dir, + "download.prompt_for_download": False, +} +chrome_options.add_experimental_option("prefs", prefs) +chrome_options.add_argument( + "--headless" +) # Run in headless mode, comment this if you want to see the browser window +chrome_service = Service(chrome_driver_path) # Set the path to your ChromeDriver + +driver = webdriver.Chrome(service=chrome_service, options=chrome_options) + +# get login page +driver.get("https://www.netflix.com/login") + + +# Find the email and password input fields +email_input = driver.find_element(By.NAME, "userLoginId") +password_input = driver.find_element(By.NAME, "password") +# Enter email and password +email_input.send_keys(email) +password_input.send_keys(password) + +# Submit the login form +print("Logging In") +password_input.send_keys(Keys.ENTER) + +# Wait for the login to complete +time.sleep(3) + +print("Switching Profiles") +# Navigate to Viewing Activity page +driver.get(f"https://www.netflix.com/SwitchProfile?tkn={profile}") + +# Wait for the login to complete +time.sleep(3) + +print("Getting Viewing Activity") +# Navigate to Viewing Activity page +driver.get("https://www.netflix.com/viewingactivity") + +time.sleep(3) + +print("Clicking Download all") +# Navigate to a page and download a file +element = driver.find_element(By.LINK_TEXT, "Download all").click() + +print("Sleeping just in case") +time.sleep(10) + +driver.quit() diff --git a/packages/syft-extras/.archive/fetchers/netflix/requirements.txt b/packages/syft-extras/.archive/fetchers/netflix/requirements.txt new file mode 100644 index 00000000000..7cb6656b279 --- /dev/null +++ b/packages/syft-extras/.archive/fetchers/netflix/requirements.txt @@ -0,0 +1 @@ 
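The archived fetcher above is driven entirely by environment variables (`CHROMEDRIVER_PATH`, `NETFLIX_EMAIL`, `NETFLIX_PASSWORD`, `NETFLIX_PROFILE`, `OUTPUT_DIR`), which the accompanying `run.sh` exports from the `inputs/` text files. A minimal sketch of invoking it directly from Python, with placeholder values rather than real credentials or paths:

```python
# Sketch only: drive the archived Netflix fetcher without run.sh.
# Every value below is a placeholder, not a real credential or path.
import os
import subprocess

os.environ.update(
    {
        "CHROMEDRIVER_PATH": "/usr/local/bin/chromedriver",  # wherever chromedriver lives
        "NETFLIX_EMAIL": "user@example.com",
        "NETFLIX_PASSWORD": "not-a-real-password",
        "NETFLIX_PROFILE": "PROFILE_TOKEN_FROM_SWITCHPROFILE_URL",
        "OUTPUT_DIR": os.path.abspath("./output"),
    }
)
subprocess.run(["python", "main.py"], check=True)
```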
+selenium diff --git a/packages/syft-extras/.archive/fetchers/netflix/run.sh b/packages/syft-extras/.archive/fetchers/netflix/run.sh new file mode 100755 index 00000000000..466a1510010 --- /dev/null +++ b/packages/syft-extras/.archive/fetchers/netflix/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# Check if chromedriver is in the PATH +if ! command -v chromedriver &> /dev/null +then + echo "chromedriver is not installed. Installing with brew..." + brew install chromedriver +else + echo "chromedriver is already installed." +fi + +export CHROMEDRIVER_PATH=$(which chromedriver) +echo $CHROMEDRIVER_PATH + +mkdir -p inputs +mkdir -p output + +export NETFLIX_EMAIL=$(cat inputs/NETFLIX_EMAIL.txt) +export NETFLIX_PASSWORD=$(cat inputs/NETFLIX_PASSWORD.txt) +export NETFLIX_PROFILE=$(cat inputs/NETFLIX_PROFILE.txt) +export OUTPUT_DIR=$(realpath ./output) + +uv pip install -r requirements.txt +uv run main.py diff --git a/packages/syft-extras/.archive/notebooks/client.ipynb b/packages/syft-extras/.archive/notebooks/client.ipynb new file mode 100644 index 00000000000..d1d2b2692b4 --- /dev/null +++ b/packages/syft-extras/.archive/notebooks/client.ipynb @@ -0,0 +1,301 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# !uv pip install -e ../../syft-rpc\n", + "# !uv pip install -e ../../syft-event" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# from utils import User" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "cecb0268-92f6-4a79-bd1d-b03cab798642", + "metadata": {}, + "outputs": [], + "source": [ + "from syftbox.lib import Client" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "from syft_rpc import Request, JSONModel, SyftBoxURL" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "211a10e4-3660-4c88-a362-13fd670a94ad", + "metadata": {}, + "outputs": [], + "source": [ + "url = SyftBoxURL(\"syft://a@openmined.org/public/rpc/fedreduce/listen\")" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "id": "9e6036ec-8547-48ef-b415-30b54bfe1226", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'a@openmined.org/public/rpc/fedreduce/listen'" + ] + }, + "execution_count": 43, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "url.host + url.path" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "class User(JSONModel):\n", + " id: int\n", + " name: str\n", + "\n", + "\n", + "class LoginResponse(JSONModel):\n", + " username: str\n", + " token: int = 123" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "user = User(id=1, name=\"Alice\")" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "ae9bdb15-bc01-43da-8961-5a5ce5a7ce53", + "metadata": {}, + "outputs": [], + "source": [ + "# export SYFTBOX_CLIENT_CONFIG_PATH=/Users/madhavajay/dev/syft/.clients/a@openmined.org/config.json" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "client = 
Client.load(filepath=\"/Users/madhavajay/dev/syft/.clients/a@openmined.org/config.json\")\n", + "request = Request(client=client)" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "# api = request.make_api(\"syft://madhava@openmined.org/public/rpc/test/rpc.service.yaml\")" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "id": "79feeacc-8460-49b9-a6f2-63718a592913", + "metadata": {}, + "outputs": [], + "source": [ + "yaml_path = \"/Users/madhavajay/dev/syft/.clients/a@openmined.org/datasites/a@openmined.org/public/a.yaml\"" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "id": "1048b38c-66e5-429b-a1b2-ed406ef9cacb", + "metadata": {}, + "outputs": [], + "source": [ + "# !touch yaml_path" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "be76bcb2-e20e-41b2-ae2f-032c7cea5672", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 50, + "id": "8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Future(local_path=PosixPath('/Users/madhavajay/dev/syft/.clients/a@openmined.org/datasites/a@openmined.org/public/rpc/fedreduce/listen/01JEQKJH9TH3ZDMHFQQ55061YV.response'), value=None)" + ] + }, + "execution_count": 50, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "headers = {}\n", + "headers[\"content-type\"] = \"application/json\"\n", + "headers[\"x-syft-rpc-object-type\"] = type(user).__name__\n", + "response = request.get(\n", + " \"syft://a@openmined.org/public/rpc/fedreduce/listen\",\n", + " body=user.dump(),\n", + " headers=headers,\n", + ")\n", + "response" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "id": "9", + "metadata": {}, + "outputs": [ + { + "ename": "TimeoutError", + "evalue": "Timeout reached waiting 5 for response", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mTimeoutError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[46], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwait\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/dev/syftbox-experimental/.venv/lib/python3.12/site-packages/syft_rpc/rpc.py:220\u001b[0m, in \u001b[0;36mFuture.wait\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 218\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvalue\n\u001b[1;32m 219\u001b[0m time\u001b[38;5;241m.\u001b[39msleep(\u001b[38;5;241m0.1\u001b[39m)\n\u001b[0;32m--> 220\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTimeoutError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTimeout reached waiting \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtimeout\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m for response\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "\u001b[0;31mTimeoutError\u001b[0m: Timeout reached waiting 5 for response" + ] + } + ], + "source": [ + "result = response.wait()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "msg = response.value" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "msg.body" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "msg.body" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "LoginResponse()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/syft-extras/.archive/notebooks/http.ipynb b/packages/syft-extras/.archive/notebooks/http.ipynb new file mode 100644 index 00000000000..6c1beadb37c --- /dev/null +++ b/packages/syft-extras/.archive/notebooks/http.ipynb @@ -0,0 +1,301 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "!uv pip install -e ../syft-rpc\n", + "!uv pip install -e ../syft-event\n", + "!uv pip install -e ../syft-requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# !uv pip install requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "from syft_rpc import JSONModel\n", + "from syft_requests import SyftBoxRPCSession" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "class User(JSONModel):\n", + " id: int\n", + " name: str\n", + "\n", + "\n", + "class LoginResponse(JSONModel):\n", + " username: str\n", + " token: int = 123" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "# non blocking" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# python requests library" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "user = User(id=1, name=\"Alice\")\n", + "data = user.dump()\n", + "print(data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "headers = {}\n", + "headers[\"content-type\"] = \"application/json\"\n", + "headers[\"x-syft-rpc-object-type\"] = type(user).__name__\n", + "headers[\"x-syft-blocking\"] = \"false\"\n", + "\n", + "session = SyftBoxRPCSession(syftbox_proxy=\"http://localhost:9081/rpc\")\n", + "result = session.get(\n", + " \"syft://madhava@openmined.org/public/rpc/test/listen\",\n", + " data=data,\n", + " headers=headers,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "result.status_code, result.headers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "result.url" + ] + }, + { + "cell_type": "code", + "execution_count": null, 
+ "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "result.decoded_content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "type(result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "result.wait()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "result.decoded_content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "user = User(id=1, name=\"Alice\")\n", + "data = user.dump()\n", + "print(data)\n", + "\n", + "headers = {}\n", + "headers[\"content-type\"] = \"application/json\"\n", + "headers[\"x-syft-rpc-object-type\"] = type(user).__name__\n", + "# headers[\"x-syft-blocking\"] = \"true\" # default\n", + "\n", + "session = SyftBoxRPCSession(syftbox_proxy=\"http://localhost:9081/rpc\")\n", + "result = session.get(\n", + " \"syft://madhava@openmined.org/public/rpc/test/listen\",\n", + " data=data,\n", + " headers=headers,\n", + " timeout=5,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "result.retry()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "result.retry()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "result.headers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "result.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "result.decoded_content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "o = LoginResponse(**result.decoded_content)\n", + "o" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/syft-extras/.archive/packages/syft-files/README.md b/packages/syft-extras/.archive/packages/syft-files/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/.archive/packages/syft-files/pyproject.toml b/packages/syft-extras/.archive/packages/syft-files/pyproject.toml new file mode 100644 index 00000000000..337588453b2 --- /dev/null +++ b/packages/syft-extras/.archive/packages/syft-files/pyproject.toml @@ -0,0 +1,11 @@ +[project] +name = "syft-files" +version = "0.1.0" +description = "Add your description here" +readme = 
"README.md" +requires-python = ">=3.9" +dependencies = [] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" diff --git a/packages/syft-extras/.archive/packages/syft-files/syft_files/__init__.py b/packages/syft-extras/.archive/packages/syft-files/syft_files/__init__.py new file mode 100644 index 00000000000..5b359ff4b39 --- /dev/null +++ b/packages/syft-extras/.archive/packages/syft-files/syft_files/__init__.py @@ -0,0 +1,3 @@ +from .utils import ensure_folder # noqa + +__version__ = "0.1.0" diff --git a/packages/syft-extras/.archive/packages/syft-files/syft_files/py.typed b/packages/syft-extras/.archive/packages/syft-files/syft_files/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/.archive/packages/syft-files/syft_files/utils.py b/packages/syft-extras/.archive/packages/syft-files/syft_files/utils.py new file mode 100644 index 00000000000..07f2db99865 --- /dev/null +++ b/packages/syft-extras/.archive/packages/syft-files/syft_files/utils.py @@ -0,0 +1,46 @@ +import hashlib + +from pathlib import Path +import os +import shutil + + +def calculate_file_hash(file_path, hash_func=hashlib.sha256): + """Calculate the hash of a file.""" + hash_obj = hash_func() + with open(file_path, "rb") as f: + while chunk := f.read(8192): + hash_obj.update(chunk) + return hash_obj.hexdigest() + + +def ensure_folder(files, destination_folder): + """Ensure that specified files are in the destination folder with the same + hashes. If the destination folder doesn't exist, create it. + Copy files if missing or hashes differ.""" + + # Ensure destination folder exists + Path(destination_folder).mkdir(parents=True, exist_ok=True) + + for src_file_path in files: + # Check if the source file exists + if not os.path.exists(src_file_path): + print(f"Source file '{src_file_path}' does not exist.") + continue + + file_name = os.path.basename(src_file_path) + dest_file_path = os.path.join(destination_folder, file_name) + + # Calculate the hash of the source file + src_hash = calculate_file_hash(src_file_path) + + # Check if destination file exists and has the same hash + if os.path.exists(dest_file_path): + dest_hash = calculate_file_hash(dest_file_path) + if src_hash == dest_hash: + print(f"File '{file_name}' is up-to-date.") + continue # Skip copying as the file is the same + + # Copy file from source to destination + shutil.copy2(src_file_path, dest_file_path) + print(f"Copied '{file_name}' to '{dest_file_path}'.") diff --git a/packages/syft-extras/.archive/packages/syft-requests/README.md b/packages/syft-extras/.archive/packages/syft-requests/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/.archive/packages/syft-requests/pyproject.toml b/packages/syft-extras/.archive/packages/syft-requests/pyproject.toml new file mode 100644 index 00000000000..2379e975fa0 --- /dev/null +++ b/packages/syft-extras/.archive/packages/syft-requests/pyproject.toml @@ -0,0 +1,14 @@ +[project] +name = "syft-requests" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.9" +dependencies = ["syft-rpc"] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.uv.sources] +syft-rpc = { workspace = true } diff --git a/packages/syft-extras/.archive/packages/syft-requests/syft_requests/__init__.py b/packages/syft-extras/.archive/packages/syft-requests/syft_requests/__init__.py new file mode 100644 index 00000000000..d049f46d3e1 --- /dev/null +++ 
b/packages/syft-extras/.archive/packages/syft-requests/syft_requests/__init__.py @@ -0,0 +1,3 @@ +from .session import SyftBoxRPCSession # noqa + +__version__ = "0.1.0" diff --git a/packages/syft-extras/.archive/packages/syft-requests/syft_requests/py.typed b/packages/syft-extras/.archive/packages/syft-requests/syft_requests/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/.archive/packages/syft-requests/syft_requests/session.py b/packages/syft-extras/.archive/packages/syft-requests/syft_requests/session.py new file mode 100644 index 00000000000..711e0ea397b --- /dev/null +++ b/packages/syft-extras/.archive/packages/syft-requests/syft_requests/session.py @@ -0,0 +1,149 @@ +import json +import time +import base64 +import requests +import cbor2 + +from syft_rpc import SyftBoxURL + + +class SyftBoxRPCSession(requests.Session): + def __init__(self, additional_headers=None, syftbox_proxy=None): + super().__init__() + self.additional_headers = additional_headers or {} + self.syftbox_proxy = syftbox_proxy + + def request(self, method, url, headers=None, *args, **kwargs): + print("kwargs", kwargs) + if self.syftbox_proxy is None: + raise Exception(f"{type(self)} requires a syftbox_proxy") + # Merge additional headers with existing headers + headers = {**self.additional_headers, **(headers or {})} + + if "syft://" in str(url): + syft_url = SyftBoxURL(url) + params = {"method": method} | syft_url.as_http_params() + if "timeout" in kwargs: + params["timeout"] = kwargs["timeout"] + else: + raise Exception(f"{type(self)} requires a syft:// url") + + print("calling", self.syftbox_proxy, params) + response = super().request( + "post", self.syftbox_proxy, params=params, headers=headers, *args, **kwargs + ) + response.decoded_content = self.decode(response) + response.wait = self._create_wait_method(response) + response.retry = self._create_retry_method( + response, method, url, headers, args, kwargs, self + ) + return response + + @staticmethod + def decode(response): + """Decode the response based on its Content-Type.""" + content_type = response.headers.get("content-type", "") + try: + if content_type == "application/json": + # JSON response, possibly base64 encoded + try: + decoded_content = base64.b64decode(response.content) + return json.loads(decoded_content) + except Exception: + # If decoding fails, fallback to direct JSON parsing + return response.json() + elif content_type == "application/cbor": + # CBOR response + return cbor2.loads(response.content) + else: + # Default to UTF-8 decoding + return response.content.decode("utf-8") + except Exception as e: + raise Exception(f"Failed to decode response: {e}") from e + + def _create_wait_method(self, response): + """Attach a wait method to the response object.""" + + def wait( + location_header="Location", + max_retries=10, + time_between_retries=5, + timeout=60, + ): + if response.status_code == 200: + return response + if response.status_code != 202: + raise Exception("You need a 202 status code to wait on.") + + location = response.headers.get(location_header) + if not location: + raise ValueError(f"Response missing '{location_header}' header") + + # Extract the base URL (host and scheme) from the original response URL + base_url = "/".join(response.url.split("/")[:3]) + full_location = ( + location if location.startswith("http") else f"{base_url}{location}" + ) + + print(f"202 received. 
Polling location: {full_location}") + start_time = time.time() + retries = 0 + + while retries < max_retries and (time.time() - start_time) < timeout: + # Use self.request to leverage custom logic + poll_response = requests.get(full_location) + if poll_response.status_code == 200: + print("200 OK received. Request successful.") + + # Mutate the original response object + response.status_code = poll_response.status_code + response.headers = poll_response.headers + response._content = poll_response.content + response.decoded_content = self.decode(poll_response) + + return response # Return the mutated response object + + print( + f"Polling attempt {retries + 1}: Received {poll_response.status_code}. Retrying in {time_between_retries}s..." + ) + time.sleep(time_between_retries) + retries += 1 + + raise TimeoutError("Timed out waiting for a 200 response.") + + return wait + + def _create_retry_method( + self, response, method, url, headers, args, kwargs, session + ): + """Attach a retry method to the response object.""" + + def retry(): + if response.status_code == 200: + return response + if response.status_code != 504: + raise Exception( + "Retry is only allowed for responses with a 504 status code." + ) + + print("Retrying the original request...") + # Resend the same request using the session's custom request method + retried_response = session.request( + method, url, headers=headers, *args, **kwargs + ) + + # Mutate the original response object with the new response's attributes + response.status_code = retried_response.status_code + response.headers = retried_response.headers + response._content = retried_response.content + response.decoded_content = retried_response.decoded_content + + # Update wait and retry methods on the mutated response + response.wait = self._create_wait_method(response) + response.retry = self._create_retry_method( + response, method, url, headers, args, kwargs, session + ) + + return response # Return the mutated original response + + return retry diff --git a/packages/syft-extras/.gitignore b/packages/syft-extras/.gitignore new file mode 100644 index 00000000000..7896264bb92 --- /dev/null +++ b/packages/syft-extras/.gitignore @@ -0,0 +1,172 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
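The archived `SyftBoxRPCSession` above routes `syft://` URLs through a local HTTP proxy and attaches `decoded_content`, `wait()` and `retry()` to every response: `wait()` polls the `Location` header after a 202 until a 200 arrives, and `retry()` re-sends the original request after a 504. A minimal sketch of the non-blocking flow, following the `http.ipynb` example; the proxy port and datasite URL are assumptions about a local setup:

```python
# Sketch of the non-blocking flow from the archived http.ipynb example.
# The proxy address and the syft:// URL are illustrative local values.
import json

from syft_requests import SyftBoxRPCSession

session = SyftBoxRPCSession(syftbox_proxy="http://localhost:9081/rpc")
response = session.get(
    "syft://madhava@openmined.org/public/rpc/test/listen",
    data=json.dumps({"id": 1, "name": "Alice"}),
    headers={"content-type": "application/json", "x-syft-blocking": "false"},
)
if response.status_code == 202:
    # Poll the Location header until the server writes a 200 response.
    response = response.wait(max_retries=10, time_between_retries=5, timeout=60)
print(response.status_code, response.decoded_content)
```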
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ + +# OS Garbage +.DS_Store +Thumbs.db + + +# JS dependency +node_modules/ + +**/certs diff --git a/packages/syft-extras/.pre-commit-config.yaml b/packages/syft-extras/.pre-commit-config.yaml new file mode 100644 index 00000000000..2a709943669 --- /dev/null +++ b/packages/syft-extras/.pre-commit-config.yaml @@ -0,0 +1,70 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-ast + always_run: true + - id: trailing-whitespace + always_run: true + exclude: .bumpversion.cfg + - id: check-docstring-first + always_run: true + - id: check-json + always_run: true + - id: check-yaml + always_run: true + - id: check-merge-conflict + always_run: true + args: ["--assume-in-merge"] + - id: check-executables-have-shebangs + always_run: true + - id: debug-statements + always_run: true + - id: name-tests-test + always_run: true + exclude: | + (?x)( + ^tests/.*/fixtures/.*| # Exclude all fixture directories under tests + ^tests/fixtures/.*| # Exclude root level fixtures + ^.*[/\\]fixture[/\\].*| # Exclude any directory named 'fixture' + ^.*[/\\]fixtures[/\\].* # Exclude any directory named 'fixtures' + ) + - id: requirements-txt-fixer + always_run: true + - id: mixed-line-ending + args: ["--fix=lf"] + + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: "v0.6.5" + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix, --show-fixes] + types_or: [python, pyi, jupyter] + - id: ruff-format + types_or: [python, pyi, jupyter] + + - repo: https://github.com/kynan/nbstripout + rev: 0.7.1 + hooks: + - id: nbstripout + + - repo: https://github.com/pre-commit/mirrors-prettier # This repository has been archived by the owner on Apr 11, 2024. It is now read-only. + rev: "v3.0.0-alpha.9-for-vscode" + hooks: + - id: prettier + + # todo - re-enable mypy & fixes in a separate PR + # - repo: https://github.com/pre-commit/mirrors-mypy + # rev: v1.10.0 + # hooks: + # - id: mypy + # name: "mypy" + # always_run: true + # files: ^packages/syft_rpc/ + + # - repo: meta + # hooks: + # - id: identity + # always_run: true + # files: "notebooks/api/*" diff --git a/packages/syft-extras/LICENSE b/packages/syft-extras/LICENSE new file mode 100644 index 00000000000..fb42337d47e --- /dev/null +++ b/packages/syft-extras/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Madhava Jay + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/packages/syft-extras/README.md b/packages/syft-extras/README.md new file mode 100644 index 00000000000..7ca09c6c39a --- /dev/null +++ b/packages/syft-extras/README.md @@ -0,0 +1,19 @@ +# SyftBox Experimental + +SyftBox Experimental + +## RPC Ping Pong + +Start the pong RPC server + + $ just run-pong + +Make a ping RPC request to the pong server + + $ just run-ping + +## HTTP Proxy + +Starts the HTTP proxy on https://syftbox.localhost + + $ just start-proxy diff --git a/packages/syft-extras/examples/pingpong/ping_request.py b/packages/syft-extras/examples/pingpong/ping_request.py new file mode 100644 index 00000000000..3d3688ae54c --- /dev/null +++ b/packages/syft-extras/examples/pingpong/ping_request.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +import time +from dataclasses import dataclass, field +from datetime import datetime, timezone + +from loguru import logger +from pydantic import BaseModel +from syft_core import Client +from syft_rpc import rpc + + +@dataclass +class PingRequest: + msg: str + ts: datetime = field(default_factory=lambda: datetime.now(timezone.utc)) + + +class PongResponse(BaseModel): + msg: str + ts: datetime + + +def send_ping(): + client = Client.load() + start = time.time() + future = rpc.send( + url=f"syft://{client.email}/api_data/pingpong/rpc/ping", + body=PingRequest(msg="hello!"), + expiry="5m", + cache=True, + ) + logger.debug(f"Request: {future.request}") + + try: + response = future.wait(timeout=300) + response.raise_for_status() + pong_response = response.model(PongResponse) + logger.info(f"Response: {pong_response}. Time taken: {time.time() - start}") + except Exception as e: + logger.error(f"Error: {e}") + raise + + +if __name__ == "__main__": + send_ping() diff --git a/packages/syft-extras/examples/pingpong/pong_server.py b/packages/syft-extras/examples/pingpong/pong_server.py new file mode 100644 index 00000000000..30c545da17a --- /dev/null +++ b/packages/syft-extras/examples/pingpong/pong_server.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from datetime import datetime, timezone + +from loguru import logger +from pydantic import BaseModel, Field +from syft_event import SyftEvents +from syft_event.types import Request + +box = SyftEvents("pingpong") + + +class PingRequest(BaseModel): + """Request to send a ping.""" + + msg: str = Field(description="Ping request string") + ts: datetime = Field(description="Timestamp of the ping request.") + + +class PongResponse(BaseModel): + """Response to a ping request.""" + + msg: str = Field(description="Ping response string") + ts: datetime = Field(description="Timestamp of the pong response.") + + +@box.on_request("/ping") +def pong(ping: PingRequest, ctx: Request) -> PongResponse: + """Respond to a ping request.""" + + logger.info(f"Got ping request - {ping}") + return PongResponse( + msg=f"Pong from {box.client.email}", + ts=datetime.now(timezone.utc), + ) + + +if __name__ == "__main__": + try: + print("Running rpc server for", box.app_rpc_dir) + box.run_forever() + except Exception as e: + print(e) diff --git a/packages/syft-extras/justfile b/packages/syft-extras/justfile new file mode 100644 index 00000000000..f3ce1c4273c --- /dev/null +++ b/packages/syft-extras/justfile @@ -0,0 +1,55 @@ +# Guidelines for new commands +# - Start with a verb +# - Keep it short (max. 3 words in a command) +# - Group commands by context. Include group name in the command name. 
+# - Mark things private that are util functions with [private] or _var +# - Don't over-engineer, keep it simple. +# - Don't break existing commands +# - Run just --fmt --unstable after adding new commands + +set dotenv-load := true + +# --------------------------------------------------------------------------------------------------------------------- +# Private vars + +_red := '\033[1;31m' +_cyan := '\033[1;36m' +_green := '\033[1;32m' +_yellow := '\033[1;33m' +_nc := '\033[0m' + +# --------------------------------------------------------------------------------------------------------------------- +# Aliases + +alias ba := build-all +alias rj := run-jupyter + +# --------------------------------------------------------------------------------------------------------------------- + +@default: + just --list + +[group('build')] +build-all: + uv build --all-packages + +[group('utils')] +run-jupyter jupyter_args="": + uv sync + + uv run --frozen --with "jupyterlab" \ + jupyter lab {{ jupyter_args }} + +start-proxy *args: + uv sync + rm -rf certs + sudo uv run syft_proxy bootstrap + uv run syft_proxy start {{ args }} + +run-pong: + uv sync + uv run examples/pingpong/pong_server.py + +run-ping: + uv sync + uv run examples/pingpong/ping_request.py diff --git a/packages/syft-extras/packages/syft-core/README.md b/packages/syft-extras/packages/syft-core/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/packages/syft-core/pyproject.toml b/packages/syft-extras/packages/syft-core/pyproject.toml new file mode 100644 index 00000000000..ad5e70f75c2 --- /dev/null +++ b/packages/syft-extras/packages/syft-core/pyproject.toml @@ -0,0 +1,11 @@ +[project] +name = "syft-core" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.9" +dependencies = ["pydantic[email]>=2.10.4", "typing-extensions>=4.12.2"] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" diff --git a/packages/syft-extras/packages/syft-core/syft_core/__init__.py b/packages/syft-extras/packages/syft-core/syft_core/__init__.py new file mode 100644 index 00000000000..1c7322afba1 --- /dev/null +++ b/packages/syft-extras/packages/syft-core/syft_core/__init__.py @@ -0,0 +1,7 @@ +from syft_core.client_shim import Client +from syft_core.config import SyftClientConfig +from syft_core.url import SyftBoxURL +from syft_core.workspace import SyftWorkspace + +__all__ = ["Client", "SyftClientConfig", "SyftWorkspace", "SyftBoxURL"] +__version__ = "0.1.0" diff --git a/packages/syft-extras/packages/syft-core/syft_core/client_shim.py b/packages/syft-extras/packages/syft-core/syft_core/client_shim.py new file mode 100644 index 00000000000..9b9f35fe185 --- /dev/null +++ b/packages/syft-extras/packages/syft-core/syft_core/client_shim.py @@ -0,0 +1,117 @@ +""" +SyftBox Client Shim for apps and external dependencies + + +NOTE: this will likely get refactored as it's own SDK. 
+But we need it to maintain compatibility with apps +""" + +from pathlib import Path + +from pydantic import EmailStr +from typing_extensions import Optional, Self + +from syft_core.config import SyftClientConfig +from syft_core.types import PathLike, to_path +from syft_core.url import SyftBoxURL +from syft_core.workspace import SyftWorkspace + +# this just makes it a bit clear what the default is for the api_data() method +CURRENT_API_REQUEST_NAME = None +MY_DATASITE = None + + +class Client: + """ + Client shim for SyftBox Apps + + Minimal set of properties and methods exposed to the apps. + """ + + def __init__(self, conf: SyftClientConfig): + self.config = conf + self.workspace = SyftWorkspace(self.config.data_dir) + + @property + def email(self) -> EmailStr: + """Email of the current user""" + return self.config.email + + @property + def config_path(self) -> Path: + """Path to the config of the current user""" + return self.config.path + + @property + def my_datasite(self) -> Path: + """Path to the datasite of the current user""" + return self.workspace.datasites / self.config.email + + @property + def datasites(self) -> Path: + """Path to the datasites folder""" + return self.workspace.datasites + + @property + def sync_folder(self) -> Path: + """Deprecated property use `client.datasites` instead""" + return self.workspace.datasites + + @property + def datasite_path(self) -> Path: + """Deprecated property. Use `client.my_datasite` instead""" + return self.workspace.datasites / self.config.email + + @classmethod + def load(cls, filepath: Optional[PathLike] = None) -> Self: + """ + Load the client configuration from the given file path or env var or default location + Raises: ClientConfigException + """ + return cls(conf=SyftClientConfig.load(filepath)) + + @property + def api_request_name(self) -> str: + """Returns the name of root directory of the API request calling this property. + + Use this property instead of hardcoding your API request's directory name, + as SyftBox may dynamically change it to prevent conflicts. + """ + # The below works coz we set the cwd to the app's path before executing run.sh (see find_and_run_script method) + api_path = Path.cwd() + api_name = api_path.name + return api_name + + def api_data( + self, + api_request_name: Optional[str] = CURRENT_API_REQUEST_NAME, + datasite: Optional[str] = MY_DATASITE, + ) -> Path: + """ + Gets the filesystem path to an application's API data directory for a specific datasite. + + Args: + api_request_name (Optional[str], default=CURRENT_API_REQUEST_NAME): The name of the API request + whose API data path is needed. + If None, defaults to the name of the API request from which this method is being called. + datasite (Optional[str], default=MY_DATASITE): The datasite's email. + If None, defaults to the current user's configured email. + + Returns: + Path: A filesystem path pointing to '/datasites//api_data/'. 
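To make the `api_data()` contract above concrete, here is a minimal sketch; the `"pingpong"` name matches the example app elsewhere in this change, and the resolved path depends on the configured `data_dir` and email:

```python
# Illustrative use of Client.api_data(); actual paths depend on local config.
from syft_core import Client

client = Client.load()  # reads SYFTBOX_CLIENT_CONFIG_PATH or ~/.syftbox/config.json
pingpong_dir = client.api_data("pingpong")
client.makedirs(pingpong_dir)
# pingpong_dir -> <data_dir>/datasites/<client.email>/api_data/pingpong
```

Passing the name explicitly avoids relying on the working-directory heuristic behind `api_request_name`.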
+ """ + api_request_name = api_request_name or self.api_request_name + datasite = datasite or self.config.email + return self.workspace.datasites / datasite / "api_data" / api_request_name + + def makedirs(self, *paths: PathLike) -> None: + """Create directories""" + + for path in paths: + to_path(path).mkdir(parents=True, exist_ok=True) + + def to_syft_url(self, path: PathLike) -> SyftBoxURL: + return SyftBoxURL.from_path(path, self.workspace) + + def __hash__(self) -> int: + return hash(self.config.data_dir) diff --git a/packages/syft-extras/packages/syft-core/syft_core/config.py b/packages/syft-extras/packages/syft-core/syft_core/config.py new file mode 100644 index 00000000000..63685f0264b --- /dev/null +++ b/packages/syft-extras/packages/syft-core/syft_core/config.py @@ -0,0 +1,159 @@ +import json +import os +import shutil +from pathlib import Path +from typing import Optional, Union + +from pydantic import ( + AliasChoices, + AnyHttpUrl, + BaseModel, + ConfigDict, + EmailStr, + Field, + field_validator, +) +from pydantic.main import IncEx +from pydantic_core import Url +from typing_extensions import Self + +from syft_core.constants import ( + DEFAULT_CONFIG_PATH, + DEFAULT_DATA_DIR, + DEFAULT_SERVER_URL, +) +from syft_core.exceptions import ClientConfigException +from syft_core.types import PathLike, to_path + +__all__ = ["SyftClientConfig"] + +# env or default +CONFIG_PATH_ENV = "SYFTBOX_CLIENT_CONFIG_PATH" + +# Old configuration file path for the client +LEGACY_CONFIG_NAME = "client_config.json" + + +class SyftClientConfig(BaseModel): + """SyftBox client configuration""" + + # model config + model_config = ConfigDict( + extra="ignore", json_encoders={AnyHttpUrl: lambda v: str(v)} + ) + + data_dir: Path = Field( + validation_alias=AliasChoices("data_dir", "sync_folder"), + default=DEFAULT_DATA_DIR, + description="Local directory where client data is stored", + ) + """Local directory where client data is stored""" + + server_url: AnyHttpUrl = Field( + default=DEFAULT_SERVER_URL, + description="URL of the remote SyftBox server", + ) + """URL of the remote SyftBox server""" + + client_url: AnyHttpUrl = Field( + validation_alias=AliasChoices("client_url", "port"), + description="URL where the client is running", + ) + """URL where the client is running""" + + email: EmailStr = Field(description="Email address of the user") + """Email address of the user""" + + token: Optional[str] = Field( + default=None, + description="Depracated: Use access_token instead. API token for the user", + deprecated=True, + ) + """Depracated: Use access_token instead. 
API token for the user""" + + access_token: Optional[str] = Field( + default=None, description="Access token for the user" + ) + """Access token for the user""" + + # WARN: we don't need `path` to be serialized, hence exclude=True + path: Path = Field(exclude=True, description="Path to the config file") + """Path to the config file""" + + @field_validator("client_url", mode="before") + def port_to_url(cls, val: Union[int, str]) -> Optional[str]: + if isinstance(val, int): + return f"http://127.0.0.1:{val}" + return val + + @field_validator("token", mode="before") + def token_to_str(cls, v: Union[int, str, None]) -> Optional[str]: + if not v: + return None + elif isinstance(v, int): + return str(v) + return v + + def set_server_url(self, server: str) -> None: + self.server_url = Url(server) + + def set_port(self, port: int) -> None: + self.client_url = Url(f"http://127.0.0.1:{port}") + + @classmethod + def load(cls, conf_path: Optional[PathLike] = None) -> Self: + try: + # args or env or default + path = conf_path or os.getenv(CONFIG_PATH_ENV, DEFAULT_CONFIG_PATH) + if path is None: + raise ClientConfigException( + f"Config file path not provided or set in env '{CONFIG_PATH_ENV}'" + ) + path = to_path(path) + data = {} + + # todo migration stuff we can remove later + legacy_path = Path(path.parent, LEGACY_CONFIG_NAME) + # prefer to load config.json instead of client_config.json + # initially config.json WILL NOT exist, so we fallback to client_config.json + if path.exists(): + data = json.loads(path.read_text()) + elif legacy_path.exists(): + data = json.loads(legacy_path.read_text()) + path = legacy_path + else: + raise FileNotFoundError(f"Config file not found at '{conf_path}'") + # todo end + + return cls(path=path, **data) + except Exception as e: + raise ClientConfigException( + f"Failed to load config from '{conf_path}' - {e}" + ) + + @classmethod + def exists(cls, path: PathLike) -> bool: + return to_path(path).exists() + + def migrate(self) -> Self: + """Explicit call to migrate the config file""" + + # if we loaded the legacy config, we need to move it to new config + if self.path.name == LEGACY_CONFIG_NAME: + new_path = Path(self.path.parent, DEFAULT_CONFIG_PATH.name) + shutil.move(str(self.path), str(new_path)) + self.path = new_path + self.save() + + return self + + def as_dict(self, exclude: Optional[IncEx] = None) -> dict: + return self.model_dump(exclude=exclude, exclude_none=True, warnings="none") + + def as_json(self, indent: int = 4) -> str: + return self.model_dump_json(indent=indent, exclude_none=True, warnings="none") + + def save(self) -> Self: + self.path.parent.mkdir(parents=True, exist_ok=True) + self.path.write_text(self.as_json()) + return self diff --git a/packages/syft-extras/packages/syft-core/syft_core/constants.py b/packages/syft-extras/packages/syft-core/syft_core/constants.py new file mode 100644 index 00000000000..69eb3427859 --- /dev/null +++ b/packages/syft-extras/packages/syft-core/syft_core/constants.py @@ -0,0 +1,30 @@ +from pathlib import Path + +# Default port for the SyftBox client +DEFAULT_PORT = 8080 + +# Default SyftBox cache server URL for the client +DEFAULT_SERVER_URL = "https://syftbox.openmined.org" + +# Default configuration directory for the client +DEFAULT_CONFIG_DIR = Path(Path.home(), ".syftbox") + +# Default configuration file path for the client +DEFAULT_CONFIG_PATH = Path(DEFAULT_CONFIG_DIR, "config.json") + +# Default logs directory for the client +DEFAULT_LOGS_DIR = Path(DEFAULT_CONFIG_DIR, "logs") + +# Default data directory for 
the client +DEFAULT_DATA_DIR = Path(Path.home(), "SyftBox") + +# Permissions file name +PERM_FILE = "syftperm.yaml" + +# Rejected files client-side +REJECTED_FILE_SUFFIX = ".syftrejected" + +SENDGRID_API_URL = "https://api.sendgrid.com/v3/mail/send" + +# Default benchmark runs +DEFAULT_BENCHMARK_RUNS = 5 diff --git a/packages/syft-extras/packages/syft-core/syft_core/exceptions.py b/packages/syft-extras/packages/syft-core/syft_core/exceptions.py new file mode 100644 index 00000000000..08bb2fb74b0 --- /dev/null +++ b/packages/syft-extras/packages/syft-core/syft_core/exceptions.py @@ -0,0 +1,6 @@ +class SyftBoxException(Exception): + pass + + +class ClientConfigException(SyftBoxException): + pass diff --git a/packages/syft-extras/packages/syft-core/syft_core/py.typed b/packages/syft-extras/packages/syft-core/syft_core/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/packages/syft-core/syft_core/types.py b/packages/syft-extras/packages/syft-core/syft_core/types.py new file mode 100644 index 00000000000..3dffa36f18d --- /dev/null +++ b/packages/syft-extras/packages/syft-core/syft_core/types.py @@ -0,0 +1,13 @@ +from pathlib import Path +from typing import Iterable, Union + +from typing_extensions import TypeAlias + +__all__ = ["PathLike", "UserLike", "to_path"] + +PathLike: TypeAlias = Union[str, Path] +UserLike: TypeAlias = Union[str, Iterable[str]] + + +def to_path(path: PathLike) -> Path: + return Path(path).expanduser().resolve() diff --git a/packages/syft-extras/packages/syft-core/syft_core/url.py b/packages/syft-extras/packages/syft-core/syft_core/url.py new file mode 100644 index 00000000000..fcc3c938c80 --- /dev/null +++ b/packages/syft-extras/packages/syft-core/syft_core/url.py @@ -0,0 +1,84 @@ +import re +from pathlib import Path +from urllib.parse import urlencode, urlparse + +from typing_extensions import Self + +from syft_core.types import PathLike, to_path +from syft_core.workspace import SyftWorkspace + + +class SyftBoxURL(str): + def __new__(cls, url: str): + instance = super().__new__(cls, url) + if not cls.is_valid(url): + raise ValueError(f"Invalid SyftBoxURL: {url}") + instance.parsed = urlparse(url) + return instance + + @classmethod + def is_valid(cls, url: str) -> bool: + """Validates the given URL matches the syft:// protocol and email-based schema.""" + pattern = r"^syft://([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)(/.*)?$" + return bool(re.match(pattern, url)) + + @property + def protocol(self) -> str: + """Returns the protocol (syft://).""" + return self.parsed.scheme + "://" + + @property + def host(self) -> str: + """Returns the host, which is the email part.""" + return self.parsed.netloc + + @property + def path(self) -> str: + """Returns the path component after the email.""" + return self.parsed.path + + def to_local_path(self, datasites_path: PathLike) -> Path: + """ + Converts the SyftBoxURL to a local file system path. + + Args: + datasites_path (Path): Base directory for datasites. + + Returns: + Path: Local file system path. 
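# Illustrative sketch, not part of the patch: how a syft:// URL is expected to map
# onto the local datasites tree via the host/path properties above. The email and
# datasites directory below are made-up values.
from pathlib import Path
from syft_core.url import SyftBoxURL

url = SyftBoxURL("syft://alice@example.org/public/data.csv")
print(url.host)   # alice@example.org
print(url.path)   # /public/data.csv
print(url.to_local_path(Path("~/SyftBox/datasites")))
# -> <home>/SyftBox/datasites/alice@example.org/public/data.csv (expanded and resolved)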
+ """ + # Remove the protocol and prepend the datasites_path + local_path = to_path(datasites_path) / self.host / self.path.lstrip("/") + return local_path.resolve() + + def as_http_params(self) -> dict[str, str]: + return { + "method": "get", + "datasite": self.host, + "path": self.path, + } + + def to_http_get(self, rpc_url: str) -> str: + rpc_url = rpc_url.split("//")[-1] + params = self.as_http_params() + url_params = urlencode(params) + http_url = f"http://{rpc_url}?{url_params}" + return http_url + + @classmethod + def from_path(cls, path: PathLike, workspace: SyftWorkspace) -> Self: + rel_path = to_path(path).relative_to(workspace.datasites) + return cls(f"syft://{rel_path}") + + +if __name__ == "__main__": + syftbox_url = SyftBoxURL("syft://info@domain.com/datasite1") + print(syftbox_url.parsed) + print(syftbox_url.to_local_path(Path("~/SyftBox/datasites"))) + print(syftbox_url.as_http_params()) + print( + SyftBoxURL.from_path( + "~/SyftBox/datasites/test@openmined.org/public/some/path", + SyftWorkspace(Path("~/SyftBox")), + ) + ) diff --git a/packages/syft-extras/packages/syft-core/syft_core/workspace.py b/packages/syft-extras/packages/syft-core/syft_core/workspace.py new file mode 100644 index 00000000000..91bd1710cbf --- /dev/null +++ b/packages/syft-extras/packages/syft-core/syft_core/workspace.py @@ -0,0 +1,40 @@ +from syft_core.types import PathLike, to_path + + +class SyftWorkspace: + """ + A Syft workspace is a directory structure for everything stored by the client. + Each workspace is expected to be unique for a client. + + ```txt + data_dir/ + ├── apis/ <-- installed apis + ├── plugins/ <-- plugins data + └── datasites/ <-- synced datasites + ├── user1@openmined.org/ + │ └── api_data/ + └── user2@openmined.org/ + └── api_data/ + ``` + """ + + def __init__(self, data_dir: PathLike): + self.data_dir = to_path(data_dir) + """Path to the root directory of the workspace.""" + + # datasites dir + self.datasites = self.data_dir / "datasites" + """Path to the directory containing datasites.""" + + # plugins dir + """Path to the directory containing plugins.""" + self.plugins = self.data_dir / "plugins" + + # apps/apis dir + self.apps = self.data_dir / "apis" + """Path to the directory containing apps.""" + + def mkdirs(self) -> None: + self.datasites.mkdir(parents=True, exist_ok=True) + self.plugins.mkdir(parents=True, exist_ok=True) + self.apps.mkdir(parents=True, exist_ok=True) diff --git a/packages/syft-extras/packages/syft-event/README.md b/packages/syft-extras/packages/syft-event/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/packages/syft-event/pyproject.toml b/packages/syft-extras/packages/syft-event/pyproject.toml new file mode 100644 index 00000000000..0e8a2dc4b4e --- /dev/null +++ b/packages/syft-extras/packages/syft-event/pyproject.toml @@ -0,0 +1,20 @@ +[project] +name = "syft-event" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.9" +dependencies = [ + "syft-rpc", + "pathspec>=0.12.1", + "pydantic>=2.10.4", + "watchdog>=6.0.0", + "loguru>=0.7.3", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.uv.sources] +syft-rpc = { workspace = true } diff --git a/packages/syft-extras/packages/syft-event/syft_event/__init__.py b/packages/syft-extras/packages/syft-event/syft_event/__init__.py new file mode 100644 index 00000000000..8a6e4c36d41 --- /dev/null +++ b/packages/syft-extras/packages/syft-event/syft_event/__init__.py @@ 
-0,0 +1,5 @@ +from .server2 import SyftEvents +from .types import Request, Response + +__version__ = "0.1.0" +__all__ = ["SyftEvents", "Request", "Response"] diff --git a/packages/syft-extras/packages/syft-event/syft_event/deps.py b/packages/syft-extras/packages/syft-event/syft_event/deps.py new file mode 100644 index 00000000000..5b831db0583 --- /dev/null +++ b/packages/syft-extras/packages/syft-event/syft_event/deps.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +import inspect +import json +from dataclasses import is_dataclass + +from pydantic import BaseModel +from syft_rpc.protocol import SyftRequest +from typing_extensions import Any, Callable, Dict, get_type_hints + +from syft_event.types import Request + + +def func_args_from_request(func: Callable, request: SyftRequest) -> Dict[str, Any]: + """Extract dependencies based on function type hints""" + + type_hints = get_type_hints(func) + sig = inspect.signature(func) + kwargs = {} + + for pname, _ in sig.parameters.items(): + ptype = type_hints.get(pname, Any) + + if inspect.isclass(ptype) and ptype is Request: + kwargs[pname] = Request( + id=str(request.id), + sender=request.sender, + url=request.url, + headers=request.headers, + body=request.body, + ) + elif is_dataclass(ptype): + kwargs[pname] = ptype(**request.json()) + elif inspect.isclass(ptype) and issubclass(ptype, BaseModel): + kwargs[pname] = request.model(ptype) + elif ptype is dict: + val = json.loads(request.body.decode()) if request.body else None + kwargs[pname] = val + elif ptype is str: + # Default to injecting body for unknown types + kwargs[pname] = request.text() + else: + raise ValueError(f"Unknown type {ptype} for parameter {pname}") + + return kwargs diff --git a/packages/syft-extras/packages/syft-event/syft_event/handlers.py b/packages/syft-extras/packages/syft-event/syft_event/handlers.py new file mode 100644 index 00000000000..20f09f34863 --- /dev/null +++ b/packages/syft-extras/packages/syft-event/syft_event/handlers.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from loguru import logger +from pathspec import PathSpec +from pathspec.patterns.gitwildmatch import GitWildMatchPattern +from typing_extensions import Callable, List +from watchdog.events import FileSystemEvent, FileSystemEventHandler + +__all__ = ["RpcRequestHandler", "AnyPatternHandler"] + + +class PatternMatchingHandler(FileSystemEventHandler): + def __init__(self, patterns: List[str], ignore_directory: bool = True): + self.spec = PathSpec.from_lines(GitWildMatchPattern, patterns) + self.patterns = patterns + self.ignore_directory = ignore_directory + + def dispatch(self, event: FileSystemEvent) -> None: + if self.ignore_directory and event.is_directory: + return + if self.spec.match_file(event.src_path): + super().dispatch(event) + + +class RpcRequestHandler(PatternMatchingHandler): + def __init__(self, handler: Callable[[FileSystemEvent], None]): + super().__init__(patterns=["**/*.request"]) + self.handler = handler + + def on_any_event(self, event: FileSystemEvent): + logger.debug(f"FSEvent - {event.event_type} - {event.src_path}") + self.handler(event) + + +class AnyPatternHandler(PatternMatchingHandler): + def __init__(self, patterns: List[str], handler: Callable[[FileSystemEvent], None]): + super().__init__(patterns) + self.handler = handler + + def on_any_event(self, event: FileSystemEvent): + self.handler(event) diff --git a/packages/syft-extras/packages/syft-event/syft_event/py.typed b/packages/syft-extras/packages/syft-event/syft_event/py.typed new file mode 
100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/packages/syft-event/syft_event/schema.py b/packages/syft-extras/packages/syft-event/syft_event/schema.py new file mode 100644 index 00000000000..92fe0e1acd5 --- /dev/null +++ b/packages/syft-extras/packages/syft-event/syft_event/schema.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import inspect +from inspect import signature + +from pydantic import BaseModel +from typing_extensions import Any, Callable, Dict, Union, get_type_hints + +from syft_event.types import Request, Response + + +def get_type_schema(type_hint: Any) -> Union[str, Dict[str, Any]]: + """Get a schema representation of a type.""" + # Handle None + if type_hint is None: + return "null" + + # Handle Pydantic models + if isinstance(type_hint, type) and issubclass(type_hint, BaseModel): + return { + "type": "model", + "name": type_hint.__name__, + "schema": type_hint.model_json_schema(), + } + + # Handle Lists + if getattr(type_hint, "__origin__", None) is list: + return {"type": "array", "items": get_type_schema(type_hint.__args__[0])} + + # Handle Optional + if getattr(type_hint, "__origin__", None) is Union: + types = [t for t in type_hint.__args__ if t is not type(None)] + if len(types) == 1: # Optional[T] case + return get_type_schema(types[0]) + return "union" # General Union case + + # Handle basic types + if isinstance(type_hint, type): + return type_hint.__name__.lower() + + return "any" + + +def generate_schema(func: Callable) -> Dict[str, Any]: + """Generate RPC schema from a function.""" + sig = signature(func) + hints = get_type_hints(func) + + # Process parameters + params = {} + for name, param in sig.parameters.items(): + ptype = hints.get(name, Any) + if inspect.isclass(ptype) and ptype is Request: + continue + params[name] = { + "type": get_type_schema(ptype), + "required": param.default is param.empty, + } + + # Process return type + ret_ptype = hints.get("return", Any) + if inspect.isclass(ret_ptype) and ret_ptype is Response: + # could be anything what the dev wants + ret_ptype = Any + + return { + "description": inspect.getdoc(func), + "args": params, + "returns": get_type_schema(ret_ptype), + } diff --git a/packages/syft-extras/packages/syft-event/syft_event/server2.py b/packages/syft-extras/packages/syft-event/syft_event/server2.py new file mode 100644 index 00000000000..745ccb19525 --- /dev/null +++ b/packages/syft-extras/packages/syft-event/syft_event/server2.py @@ -0,0 +1,314 @@ +from __future__ import annotations + +import json +from pathlib import Path +from threading import Event + +from loguru import logger +from pydantic import BaseModel +from syft_core import Client +from syft_rpc import rpc +from syft_rpc.protocol import SyftRequest, SyftStatus +from typing_extensions import Callable, List, Optional, Type, Union +from watchdog.events import FileCreatedEvent, FileModifiedEvent, FileSystemEvent +from watchdog.observers import Observer + +from syft_event.deps import func_args_from_request +from syft_event.handlers import AnyPatternHandler, RpcRequestHandler +from syft_event.schema import generate_schema +from syft_event.types import Response + +DEFAULT_WATCH_EVENTS: List[Type[FileSystemEvent]] = [ + FileCreatedEvent, + FileModifiedEvent, +] +PERMS = """ +- path: 'syftperm.yaml' + user: '*' + permissions: + - read +- path: 'rpc.schema.json' + user: '*' + permissions: + - read +- path: '**/*.request' + user: '*' + permissions: + - admin +- path: '**/*.response' + user: '*' + permissions: + - admin +""" + + +class 
SyftEvents: + def __init__( + self, + app_name: str, + publish_schema: bool = True, + client: Optional[Client] = None, + ): + self.app_name = app_name + self.schema = publish_schema + self.client = client or Client.load() + self.app_dir = self.client.api_data(self.app_name) + self.app_rpc_dir = self.app_dir / "rpc" + self.obs = Observer() + self.__rpc: dict[Path, Callable] = {} + self._stop_event = Event() + + def start(self) -> None: + # setup dirs + self.app_dir.mkdir(exist_ok=True, parents=True) + self.app_rpc_dir.mkdir(exist_ok=True, parents=True) + + # write perms + perms = self.app_rpc_dir / "syftperm.yaml" + perms.write_text(PERMS) + + # publish schema + if self.schema: + self.publish_schema() + + # process pending requests + try: + self.process_pending_requests() + except Exception as e: + print("Error processing pending requests", e) + raise + + # start Observer + self.obs.start() + + def publish_schema(self) -> None: + schema = {} + for endpoint, handler in self.__rpc.items(): + handler_schema = generate_schema(handler) + ep_name = endpoint.relative_to(self.app_rpc_dir) + ep_name = "/" + str(ep_name).replace("\\", "/") + schema[ep_name] = handler_schema + + schema_path = self.app_rpc_dir / "rpc.schema.json" + schema_path.write_text(json.dumps(schema, indent=2)) + logger.info(f"Published schema to {schema_path}") + + def process_pending_requests(self) -> None: + # process all pending requests + for path in self.app_rpc_dir.glob("**/*.request"): + if path.with_suffix(".response").exists(): + continue + if path.parent in self.__rpc: + handler = self.__rpc[path.parent] + logger.debug(f"Processing pending request {path.name}") + self.__handle_rpc(path, handler) + + def run_forever(self) -> None: + logger.info(f"Started watching for files. RPC Directory = {self.app_rpc_dir}") + self.start() + try: + while not self._stop_event.is_set(): + self._stop_event.wait(timeout=5) + except KeyboardInterrupt: + pass + except Exception as e: + logger.error(f"Error in event loop: {e}") + raise + finally: + self.stop() + + def stop(self) -> None: + logger.debug("Stopping event loop") + self._stop_event.set() + self.obs.stop() + self.obs.join() + + def on_request(self, endpoint: str) -> Callable: + """Bind function to RPC requests at an endpoint""" + + def register_rpc(func): + epath = self.__to_endpoint_path(endpoint) + self.__register_rpc(epath, func) + logger.info(f"Register RPC: {endpoint}") + return func + + return register_rpc + + def watch( + self, + glob_path: Union[str, List[str]], + event_filter: List[Type[FileSystemEvent]] = DEFAULT_WATCH_EVENTS, + ): + """Invoke the handler if any file changes in the glob path""" + + if not isinstance(glob_path, list): + glob_path = [glob_path] + + globs = [self.__format_glob(path) for path in glob_path] + + def register_watch(func): + def watch_cb(event): + return func(event) + + self.obs.schedule( + # use raw path for glob which will be convert to path/*.request + AnyPatternHandler(globs, watch_cb), + path=str(self.client.datasites), + recursive=True, + event_filter=event_filter, + ) + logger.info(f"Register Watch: {globs}") + return watch_cb + + return register_watch + + def __handle_rpc(self, path: Path, func: Callable): + try: + # may happen =) + if not path.exists(): + return + try: + req = SyftRequest.load(path) + except Exception as e: + logger.error(f"Error loading request {path}", e) + rpc.write_response( + path, + body=f"Error loading request: {repr(e)}", + status_code=SyftStatus.SYFT_400_BAD_REQUEST, + client=self.client, + ) + return + + if 
req.is_expired: + logger.debug(f"Request expired: {req}") + rpc.reply_to( + req, + body="Request expired", + status_code=SyftStatus.SYFT_419_EXPIRED, + client=self.client, + ) + return + + try: + kwargs = func_args_from_request(func, req) + except Exception as e: + logger.warning(f"Invalid request body schema {req.url}: {e}") + rpc.reply_to( + req, + body=f"Invalid request schema: {str(e)}", + status_code=SyftStatus.SYFT_400_BAD_REQUEST, + client=self.client, + ) + return + + # call the function + resp = func(**kwargs) + + if isinstance(resp, Response): + resp_data = resp.body + resp_code = SyftStatus(resp.status_code) + resp_headers = resp.headers + else: + resp_data = resp + resp_code = SyftStatus.SYFT_200_OK + resp_headers = {} + + rpc.reply_to( + req, + body=resp_data, + headers=resp_headers, + status_code=resp_code, + client=self.client, + ) + except Exception as e: + logger.error(f"Error handling request {path}: {e}") + raise + + def __register_rpc(self, endpoint: Path, handler: Callable) -> Callable: + def rpc_callback(event: FileSystemEvent): + return self.__handle_rpc(Path(event.src_path), handler) + + self.obs.schedule( + RpcRequestHandler(rpc_callback), + path=str(endpoint), + recursive=True, + event_filter=[FileCreatedEvent], + ) + # this is used for processing pending requests + generating schema + self.__rpc[endpoint] = handler + return rpc_callback + + def __to_endpoint_path(self, endpoint: str) -> Path: + if "*" in endpoint or "?" in endpoint: + raise ValueError("wildcards are not allowed in path") + + # this path must exist so that watch can emit events + epath = self.app_rpc_dir / endpoint.lstrip("/").rstrip("/") + epath.mkdir(exist_ok=True, parents=True) + return epath + + def __format_glob(self, path: str) -> str: + # replace placeholders with actual values + path = path.format( + email=self.client.email, + datasite=self.client.email, + api_data=self.client.api_data(self.app_name), + ) + if not path.startswith("**/"): + path = f"**/{path}" + return path + + +if __name__ == "__main__": + box = SyftEvents("test_app") + + # requests are always bound to the app + # root path = {datasite}/api_data/{app_name}/rpc + @box.on_request("/endpoint") + def endpoint_request(req): + print("rpc /endpoint:", req) + + # requests are always bound to the app + # root path = {datasite}/api_data/{app_name}/rpc + @box.on_request("/another") + def another_request(req): + print("rpc /another: ", req) + + # root path = ~/SyftBox/datasites/ + @box.watch("{datasite}/**/*.json") + def all_json_on_my_datasite(event): + print("watch {datasite}/**/*.json:".format(datasite=box.client.email), event) + + # root path = ~/SyftBox/datasites/ + @box.watch("test@openined.org/*.json") + def jsons_in_some_datasite(event): + print("watch test@openined.org/*.json:", event) + + # root path = ~/SyftBox/datasites/ + @box.watch("**/*.json") + def all_jsons_everywhere(event): + print("watch **/*.json:", event) + + print("Running rpc server for", box.app_rpc_dir) + box.publish_schema() + box.run_forever() + + +# if __name__ == "__main__": +# box = SyftEvents("vector_store") + +# # requests are always bound to the app +# # root path = {datasite}/api_data/{app_name}/rpc +# @box.on_request("/doc_query") +# def query(query: str) -> list[str]: +# """Return similar documents for a given query""" +# return [] + +# @box.on_request("/doc_similarity") +# def query_embedding(embedding: np.array) -> np.array: +# """Return similar documents for a given embedding""" +# return [] + +# print("Running rpc server for", box.app_rpc_dir) 
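# Illustrative sketch, not part of the patch: a minimal app built on the SyftEvents
# API defined above. The app name, endpoint and echo behaviour are made up; a handler
# may return a plain value or a syft_event Response, as handled in __handle_rpc.
from syft_event import SyftEvents, Request, Response

box = SyftEvents("echo_app")

@box.on_request("/echo")
def echo(req: Request) -> Response:
    # req is injected from the incoming SyftRequest based on the type hint (see deps.py)
    return Response(body=req.body, status_code=200)

box.run_forever()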
+# box.publish_schema() +# box.run_forever() diff --git a/packages/syft-extras/packages/syft-event/syft_event/types.py b/packages/syft-extras/packages/syft-event/syft_event/types.py new file mode 100644 index 00000000000..7c2045c0a22 --- /dev/null +++ b/packages/syft-extras/packages/syft-event/syft_event/types.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +from pydantic import BaseModel, Field +from typing_extensions import Any, Dict, Optional + + +class Request(BaseModel): + id: str + sender: str + url: str + headers: Dict[str, str] = Field(default_factory=dict) + body: Optional[bytes] + + +class Response(BaseModel): + body: Any = None + status_code: int = 200 + headers: Optional[Dict[str, str]] = None diff --git a/packages/syft-extras/packages/syft-proxy/README.md b/packages/syft-extras/packages/syft-proxy/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/packages/syft-proxy/pyproject.toml b/packages/syft-extras/packages/syft-proxy/pyproject.toml new file mode 100644 index 00000000000..66a9c8b61b8 --- /dev/null +++ b/packages/syft-extras/packages/syft-proxy/pyproject.toml @@ -0,0 +1,31 @@ +[project] +name = "syft-proxy" +version = "0.1.0" +description = "A local HTTP proxy for executing Syft RPC methods, facilitating seamless communication and computation in distributed machine learning environments." +requires-python = ">=3.9" # same with SyftBox +dependencies = [ + "fastapi>=0.115.8", + "uvicorn>=0.34.0", + "loguru>=0.7.3", + "typing-extensions>=4.12.2", + "syft-rpc", + "pytest>=8.3.4", + "httpx>=0.28.1", + "typer>=0.9.0", +] + +[project.scripts] +syft_proxy = "syft_proxy.cli:app" + +[tool.uv.sources] +syft-rpc = { workspace = true } + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.ruff.lint] +extend-select = ["I"] + +[tool.ruff.lint.per-file-ignores] +"**/__init__.py" = ["F401"] diff --git a/packages/syft-extras/packages/syft-proxy/syft_proxy/__init__.py b/packages/syft-extras/packages/syft-proxy/syft_proxy/__init__.py new file mode 100644 index 00000000000..3dc1f76bc69 --- /dev/null +++ b/packages/syft-extras/packages/syft-proxy/syft_proxy/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/packages/syft-extras/packages/syft-proxy/syft_proxy/cli.py b/packages/syft-extras/packages/syft-proxy/syft_proxy/cli.py new file mode 100644 index 00000000000..7b4c3a7ee0e --- /dev/null +++ b/packages/syft-extras/packages/syft-proxy/syft_proxy/cli.py @@ -0,0 +1,128 @@ +import os +import shutil +import subprocess +from typing import Tuple + +import typer +import uvicorn + +__version__ = "0.1.0" + +# Constants +DEFAULT_HOST = "127.0.0.1" +DEFAULT_PORT = 9081 +CERT_DIR = "./certs" +PROXY_DOMAIN = "syftbox.localhost" + +app = typer.Typer( + help="Syft Proxy Server CLI", + add_completion=False, + no_args_is_help=True, +) + + +@app.command() +def start(reload: bool = False) -> None: + """Start the Syft Proxy Server.""" + + cert_path, key_path = setup_https_certs() + + typer.echo(f"Starting Syft Proxy Server on https://{PROXY_DOMAIN}:{DEFAULT_PORT}") + uvicorn.run( + "syft_proxy.server:app", + host=DEFAULT_HOST, + port=DEFAULT_PORT, + ssl_certfile=cert_path, + ssl_keyfile=key_path, + timeout_graceful_shutdown=5, + timeout_keep_alive=10, + workers=1, + reload=reload, + ) + + +@app.command(name="bootstrap") +def bootstrap() -> None: + """Initialize certificate chain and hosts file entries.""" + try: + setup_cert_chain() + typer.echo("✅ Setup self-signed cert chain") + update_hosts_file() + 
typer.echo("✅ Updated hosts file") + typer.echo("✅ Bootstrap completed successfully") + except Exception as e: + typer.echo(f"❌ Bootstrap failed: {e}", err=True) + raise typer.Exit(1) + + +def setup_cert_chain(): + try: + subprocess.run( + ["mkcert", "-install"], + check=True, + capture_output=True, + text=True, + ) + except subprocess.CalledProcessError as e: + typer.echo(f"❌ Failed to generate certificates: {e.stderr}") + raise typer.Exit(1) + + +def setup_https_certs() -> Tuple[str, str]: + """Generate HTTPS certificates using mkcert. + + Returns: + Tuple[str, str]: Paths to the certificate and key files + + Raises: + RuntimeError: If certificate generation fails + """ + + shutil.rmtree(CERT_DIR, ignore_errors=True) + os.makedirs(CERT_DIR, exist_ok=True) + cert_path = f"{CERT_DIR}/cert.pem" + key_path = f"{CERT_DIR}/cert.key" + + try: + subprocess.run( + [ + "mkcert", + "-install", + "-cert-file", + cert_path, + "-key-file", + key_path, + PROXY_DOMAIN, + ], + check=True, + capture_output=True, + text=True, + ) + return (cert_path, key_path) + except subprocess.CalledProcessError as e: + raise RuntimeError("Failed to generate HTTPS certificates") from e + + +def update_hosts_file() -> None: + """Add syftbox.localhost entry to the hosts file. + + Raises: + RuntimeError: If unable to modify the hosts file + """ + hosts_path = ( + "/etc/hosts" if os.name != "nt" else r"C:\Windows\System32\drivers\etc\hosts" + ) + entry = f"{DEFAULT_HOST} {PROXY_DOMAIN}" + + try: + with open(hosts_path, "r") as f: + content = f.read() + if entry in content: + return + + with open(hosts_path, "a") as f: + f.write(f"\n{entry}\n") + except PermissionError: + raise RuntimeError("Insufficient permissions to modify hosts file") + except Exception as e: + raise RuntimeError("Failed to update hosts file") from e diff --git a/packages/syft-extras/packages/syft-proxy/syft_proxy/models.py b/packages/syft-extras/packages/syft-proxy/syft_proxy/models.py new file mode 100644 index 00000000000..afbfd882f1b --- /dev/null +++ b/packages/syft-extras/packages/syft-proxy/syft_proxy/models.py @@ -0,0 +1,82 @@ +from enum import Enum + +from pydantic import BaseModel, Field +from syft_rpc import SyftRequest, SyftResponse +from syft_rpc.rpc import DEFAULT_EXPIRY +from typing_extensions import Any, List, Optional, Union + + +class RPCRequestBase(BaseModel): + body: Any + headers: dict[str, str] = Field(default_factory=dict) + expiry: str = Field(default=DEFAULT_EXPIRY) + cache: bool = Field(default=False) + + +class RPCSendRequest(RPCRequestBase): + app_name: str = Field(..., min_length=3) + url: str + + # @field_validator("url", mode="after") + # def validate_url(cls, v): + # """ + # Validates the URL to ensure it starts with the required scheme. + + # Args: + # cls: The class that this method belongs to. + # v: The URL string to validate. + + # Raises: + # ValueError: If the URL does not start with "syft://". + + # Returns: + # The validated URL string if it is valid. + # """ + # if not v.startswith("syft://"): + # raise ValueError('URL must start with "syft://"') + # return v + + +class RPCBroadcastRequest(RPCRequestBase): + urls: List[str] + + # @field_validator("urls", mode="after") + # def validate_urls(cls, v): + # """ + # Validates the list of URLs to ensure it is not empty and that each URL starts with the required scheme. + + # Args: + # cls: The class that this method belongs to. + # v: The list of URL strings to validate. 
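# Illustrative sketch, not part of the patch: building the request model the proxy
# accepts on POST /rpc. Field names come from RPCRequestBase/RPCSendRequest above;
# the app name and syft:// URL are made-up values.
from syft_proxy.models import RPCSendRequest

req = RPCSendRequest(
    app_name="demo_app",
    url="syft://alice@example.org/api_data/demo_app/rpc/endpoint",
    body={"query": "hello"},
    headers={"content-type": "application/json"},
    expiry="30s",
)
payload = req.model_dump()  # JSON-serialisable dict a client would POST to /rpc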
+ + # Raises: + # ValueError: If the list of URLs is empty or if any URL does not start with "syft://". + + # Returns: + # The validated list of URL strings if all are valid. + # """ + # if not v: + # raise ValueError("The list of URLs must not be empty") + # for url in v: + # if not url.startswith("syft://"): + # raise ValueError(f'URL "{url}" must start with "syft://"') + # return v + + +class RPCBroadcastResult(BaseModel): + id: str + requests: List[SyftRequest] + + +class RPCStatusCode(Enum): + NOT_FOUND = "RPC_NOT_FOUND" + PENDING = "RPC_PENDING" + COMPLETED = "RPC_COMPLETED" + ERROR = "RPC_ERROR" + + +class RPCStatus(BaseModel): + id: str + status: RPCStatusCode + request: Optional[Union[SyftRequest, List[SyftRequest]]] + response: Optional[Union[SyftResponse, List[SyftResponse]]] diff --git a/packages/syft-extras/packages/syft-proxy/syft_proxy/server.py b/packages/syft-extras/packages/syft-proxy/syft_proxy/server.py new file mode 100644 index 00000000000..0ad567e7b2e --- /dev/null +++ b/packages/syft-extras/packages/syft-proxy/syft_proxy/server.py @@ -0,0 +1,177 @@ +import json +from typing import Optional + +from fastapi import FastAPI, HTTPException +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import JSONResponse, PlainTextResponse +from loguru import logger +from syft_core import Client +from syft_rpc import rpc, rpc_db +from syft_rpc.protocol import SyftFuture, SyftResponse + +from syft_proxy.cli import __version__ +from syft_proxy.models import ( + RPCSendRequest, + RPCStatus, + RPCStatusCode, +) + +HEADER_APP_NAME = "x-app-name" +HEADER_SYFTBOX_URL = "x-syftbox-url" +HEADER_SYFTBOX_URLS = "x-syftbox-urls" +RPC_REQUEST_EXPIRY = "30s" + +client = Client.load() + +app = FastAPI() + +app.add_middleware( + CORSMiddleware, + allow_credentials=True, + allow_origins=["*"], # Allows all origins + allow_methods=["*"], # Allows all methods + allow_headers=["*"], # Allows all headers +) + + +ascii_art = rf""" + ____ __ _ ____ +/ ___| _ _ / _| |_| __ ) _____ __ +\___ \| | | | |_| __| _ \ / _ \ \/ / + ___) | |_| | _| |_| |_) | (_) > < +|____/ \__, |_| \__|____/ \___/_/\_\ + |___/ {__version__:>17} + +SyftBox HTTP Proxy +""" + + +@app.get("/", response_class=PlainTextResponse) +async def index(): + return ascii_art + + +@app.get("/info") +async def info(): + return {"version": __version__} + + +@app.post("/rpc") +async def rpc_send(rpc_req: RPCSendRequest, blocking: bool = False): + try: + future: SyftFuture = rpc.send( + client=client, + url=rpc_req.url, + headers=rpc_req.headers, + body=rpc_req.body, + expiry=rpc_req.expiry, + ) + + if not blocking: + logger.info( + f"Non-blocking RPC request {future.id} sent to {future.request.url}" + ) + app_name = f"proxy-{rpc_req.app_name}" + rpc_db.save_future(future, app_name) + return RPCStatus( + id=str(future.id), + status=RPCStatusCode.PENDING, + request=future.request, + response=None, + ).model_dump(mode="json") + else: + logger.info( + f"Blocking RPC request {future.id} sent to {future.request.url}" + ) + result: SyftResponse = future.wait() + return JSONResponse( + status_code=int(result.status_code), + content=RPCStatus( + id=str(result.id), + status=RPCStatusCode.COMPLETED, + request=future.request, + response=result, + ).model_dump(mode="json"), + ) + except Exception as ex: + logger.error(f"Error sending RPC request: {ex}") + raise HTTPException(status_code=500, detail=str(ex)) + + +@app.get("/rpc/schema/{app_name}") +async def rpc_schema(app_name: str): + try: + app_path = client.api_data(app_name) + 
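# Illustrative sketch, not part of the patch: exercising the POST /rpc endpoint above
# over plain HTTP(S). Host, port and scheme follow the defaults in syft_proxy.cli
# (https://syftbox.localhost:9081); the payload values are made up. Assumes the proxy
# is running (`syft_proxy start`) and the mkcert CA from `syft_proxy bootstrap` is trusted.
import httpx

payload = {
    "app_name": "demo_app",
    "url": "syft://alice@example.org/api_data/demo_app/rpc/endpoint",
    "body": {"query": "hello"},
    "expiry": "30s",
}
# non-blocking: returns immediately with status RPC_PENDING and the future id
pending = httpx.post(
    "https://syftbox.localhost:9081/rpc", params={"blocking": False}, json=payload
)
future_id = pending.json()["id"]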
app_schema = app_path / "rpc" / "rpc.schema.json" + return json.loads(app_schema.read_text()) + except Exception as ex: + logger.error(f"Error sending RPC request: {ex}") + raise HTTPException(status_code=500, detail=str(ex)) + + +@app.get("/rpc/status/{id}") +async def rpc_status(id: str): + # try to get future from the db + try: + future = rpc_db.get_future(id) + except Exception as ex: + logger.error(f"RPC future {id}: EXCEPTION {ex}") + raise HTTPException(status_code=500, detail=str(ex)) + + if not future: + logger.info(f"RPC future {id}: NOT FOUND") + return JSONResponse( + status_code=404, + content=RPCStatus( + id=id, + status=RPCStatusCode.NOT_FOUND, + request=None, + response=None, + ).model_dump(mode="json"), + ) + + logger.info(f"RPC future {id}: FOUND") + result: Optional[SyftResponse] = future.resolve() + + if result is None: + logger.info(f"RPC future {id}: PENDING") + return JSONResponse( + content=RPCStatus( + id=id, + status=RPCStatusCode.PENDING, + request=future.request, + response=None, + ).model_dump(mode="json") + ) + elif not result.is_success: + logger.info(f"RPC future {id}: ERROR") + rpc_db.delete_future(id) + return JSONResponse( + status_code=int(result.status_code), + headers=result.headers, + content=RPCStatus( + id=id, + status=RPCStatusCode.ERROR, + request=future.request, + response=result, + ).model_dump(mode="json"), + ) + else: + logger.info(f"RPC future {id}: COMPLETED") + rpc_db.delete_future(id) + logger.debug(result.json()) + return JSONResponse( + status_code=int(result.status_code), + headers=result.headers, + content=RPCStatus( + id=id, + status=RPCStatusCode.COMPLETED, + request=future.request, + response=result, + ).model_dump(mode="json"), + ) + + +# @app.post("/llm/chat") +# async def chat(request: Request): +# return JSONResponse(result) diff --git a/packages/syft-extras/packages/syft-proxy/tests/conftest.py b/packages/syft-extras/packages/syft-proxy/tests/conftest.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/packages/syft-proxy/tests/test_models.py b/packages/syft-extras/packages/syft-proxy/tests/test_models.py new file mode 100644 index 00000000000..9bf26a81e9b --- /dev/null +++ b/packages/syft-extras/packages/syft-proxy/tests/test_models.py @@ -0,0 +1,120 @@ +import pytest +from syft_rpc import SyftRequest + +from syft_proxy.models import ( + RPCBroadcastRequest, + RPCSendRequest, + RPCStatus, + RPCStatusCode, +) + + +def test_rpc_send_request_valid(): + """Test the creation of a valid RPCSendRequest.""" + request = RPCSendRequest( + app_name="test_app", + url="syft://user@openmined.org/datasite/public", + body={"data": "test"}, + ) + assert request.app_name == "test_app" + assert request.url == "syft://user@openmined.org/datasite/public" + assert request.body == {"data": "test"} + + +def test_rpc_send_request_missing_app_name(): + """Test that RPCSendRequest raises a ValueError when the app_name is missing.""" + with pytest.raises(ValueError): + RPCSendRequest( + app_name="", + url="syft://user@openmined.org/datasite/public", + body={"data": "test"}, + ) + + +def test_rpc_send_request_invalid_headers(): + """Test that RPCSendRequest raises a ValueError when headers are invalid. + + This test checks the behavior of the RPCSendRequest class when + the headers parameter is provided with invalid data types. + Specifically, it ensures that a ValueError is raised when + the headers dictionary contains non-string keys. 
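# Illustrative sketch, not part of the patch: polling GET /rpc/status/{id} until the
# future resolves, using the RPCStatusCode values from syft_proxy.models. The future id
# is a hypothetical placeholder; host and port are the defaults from syft_proxy.cli.
import time
import httpx

future_id = "00000000-0000-0000-0000-000000000000"  # hypothetical id returned by POST /rpc
while True:
    r = httpx.get(f"https://syftbox.localhost:9081/rpc/status/{future_id}")
    if r.status_code == 404 or r.json()["status"] != "RPC_PENDING":
        break  # RPC_COMPLETED, RPC_ERROR, or RPC_NOT_FOUND
    time.sleep(1)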
+ """ + with pytest.raises(ValueError): + RPCSendRequest( + app_name="test_app", + url="syft://user@openmined.org/datasite/public", + body={"data": "test"}, + headers={123: "value"}, + ) + + +def test_rpc_broadcast_request_valid(): + """Test the creation of a valid RPCBroadcastRequest. + + This test verifies that an RPCBroadcastRequest can be created with + a list of valid URLs and a body. It asserts that the length of the + URLs in the broadcast request matches the expected number of URLs. + """ + broadcast_request = RPCBroadcastRequest( + urls=[ + "syft://user1@openmined.org/datasite/public", + "syft://user2@openmined.org/datasite/public", + ], + body={"data": "test"}, + ) + assert len(broadcast_request.urls) == 2 + + +def test_rpc_broadcast_request_empty_urls(): + """Test that RPCBroadcastRequest raises a ValueError when the URLs list is empty. + + This test checks the behavior of the RPCBroadcastRequest class when + an empty list is provided for the URLs parameter. It ensures that + a ValueError is raised, indicating that at least one URL must be + provided for the broadcast request to be valid. + """ + with pytest.raises(ValueError): + RPCBroadcastRequest(urls=[], body={"data": "test"}) + + +def test_rpc_status_valid(): + """Test the creation of a valid RPCStatus. + + This test verifies that an RPCStatus can be created with a valid + ID, status code, and request. It asserts that the ID and status + of the created RPCStatus match the expected values. + """ + status = RPCStatus( + id="1", + status=RPCStatusCode.PENDING, + request=SyftRequest( + sender="user@openmined.org", + url="syft://user@openmined.org/datasite/public", + ), + response=None, + ) + assert status.id == "1" + assert status.status == RPCStatusCode.PENDING + + +def test_rpc_status_invalid_code(): + """Test that RPCStatus raises a ValueError when an invalid status code is provided. + + This test checks the behavior of the RPCStatus class when an + invalid status code is passed. It ensures that a ValueError is + raised, indicating that the status code must be valid. + """ + with pytest.raises(ValueError): + RPCStatus(id="1", status="INVALID_CODE", request=None, response=None) + + +def test_rpc_status_missing_fields(): + """Test that RPCStatus raises a ValueError when required fields are missing. + + This test verifies that the RPCStatus class raises a ValueError + when the ID is empty and a valid status code is provided. It + ensures that all required fields must be present for the RPCStatus + to be valid. 
+ """ + with pytest.raises(ValueError): + RPCStatus(id="", status=RPCStatusCode.NOT_FOUND, request=None, response=None) diff --git a/packages/syft-extras/packages/syft-proxy/tests/test_server.py b/packages/syft-extras/packages/syft-proxy/tests/test_server.py new file mode 100644 index 00000000000..4d6068e0311 --- /dev/null +++ b/packages/syft-extras/packages/syft-proxy/tests/test_server.py @@ -0,0 +1,115 @@ +import json +import os + +from fastapi.testclient import TestClient +from syft_core import Client + +from syft_proxy.models import RPCSendRequest +from syft_proxy.server import app + +syft_client = Client.load() + +client = TestClient(app) + + +# Workflow Tests +def test_index_endpoint(): + """Test the index endpoint to ensure it returns a 200 status code and contains the expected text.""" + response = client.get("/") + assert response.status_code == 200 + assert "SyftBox HTTP Proxy" in response.text + + +def test_rpc_send_non_blocking(): + """Test sending a non-blocking RPC request and verify the response status and status message.""" + rpc_req = RPCSendRequest( + url="syft://user@openmined.org", + headers={}, + body={}, + expiry="30s", + app_name="test_app", + ) + response = client.post( + "/rpc", json=rpc_req.model_dump(), params={"blocking": False} + ) + assert response.status_code == 200 + assert response.json()["status"] == "RPC_PENDING" + + +def test_rpc_send_blocking(): + """Test sending a blocking RPC request and verify the response status and ID presence.""" + rpc_req = RPCSendRequest( + url="syft://user@openmined.org", + headers={}, + body={}, + expiry="1s", + app_name="test_app", + ) + response = client.post("/rpc", json=rpc_req.model_dump(), params={"blocking": True}) + assert response.status_code in [200, 419] + assert isinstance(response.json(), dict) + assert response.json().get("id", None) is not None + + +def test_rpc_schema(): + """Test the RPC schema endpoint to ensure it returns the correct schema for the specified app.""" + app_path = syft_client.api_data("test_app") + app_schema = app_path / "rpc" / "rpc.schema.json" + + os.makedirs(app_path / "rpc", exist_ok=True) + schema = { + "sender": "user@openmined.org", + "method": "GET", + "url": "syft://user1@openmined.org", + } + if not os.path.isfile(app_schema): + with open(app_schema, "w") as f: + f.write(json.dumps(schema)) + + response = client.get("/rpc/schema/test_app") + assert response.status_code == 200 + assert isinstance(response.json(), dict) + assert response.json() == schema + + +def test_rpc_status_found(): + """Test the RPC status endpoint to ensure it returns a 200 status code for a valid request ID.""" + rpc_req = RPCSendRequest( + url="syft://test@openmined.org/public/rpc", + headers={"Content-Type": "application/json", "User-Agent": "MyApp/1.0"}, + body={}, + expiry="30s", + app_name="test_app", + ) + response = client.post( + "/rpc", json=rpc_req.model_dump(), params={"blocking": False} + ) + + rpc_request_id = response.json()["id"] + response = client.get(f"/rpc/status/{rpc_request_id}") + assert response.status_code == 200 + + +def test_rpc_status_not_found(): + """Test the RPC status endpoint to ensure it returns a 404 status code for a non-existent request ID.""" + response = client.get("/rpc/status/non_existent_id") + assert response.status_code == 404 + + +# Edge Case Tests +def test_rpc_send_invalid_request(): + """Test sending an invalid RPC request to ensure it returns a 422 status code due to missing required fields.""" + response = client.post("/rpc", json={}) # Missing required fields 
+ assert response.status_code == 422 + + +def test_rpc_schema_non_existent_app(): + """Test the RPC schema endpoint to ensure it returns a 500 status code for a non-existent app.""" + response = client.get("/rpc/schema/non_existent_app") + assert response.status_code == 500 + + +def test_rpc_status_non_existent_id(): + """Test the RPC status endpoint to ensure it returns a 404 status code for a non-existent request ID.""" + response = client.get("/rpc/status/non_existent_id") + assert response.status_code == 404 diff --git a/packages/syft-extras/packages/syft-proxy/uv.lock b/packages/syft-extras/packages/syft-proxy/uv.lock new file mode 100644 index 00000000000..2dfd4f6eb54 --- /dev/null +++ b/packages/syft-extras/packages/syft-proxy/uv.lock @@ -0,0 +1,457 @@ +version = 1 +requires-python = ">=3.9" + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 }, +] + +[[package]] +name = "anyio" +version = "4.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041 }, +] + +[[package]] +name = "certifi" +version = "2025.1.31" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 }, +] + +[[package]] +name = "click" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = 
"sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "dnspython" +version = "2.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632 }, +] + +[[package]] +name = "email-validator" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dnspython" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/ce/13508a1ec3f8bb981ae4ca79ea40384becc868bfae97fd1c942bb3a001b1/email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7", size = 48967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/ee/bf0adb559ad3c786f12bcbc9296b3f5675f529199bef03e2df281fa1fadb/email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631", size = 33521 }, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453 }, +] + +[[package]] +name = "fastapi" +version = "0.115.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a2/b2/5a5dc4affdb6661dea100324e19a7721d5dc524b464fe8e366c093fd7d87/fastapi-0.115.8.tar.gz", hash = "sha256:0ce9111231720190473e222cdf0f07f7206ad7e53ea02beb1d2dc36e2f0741e9", size = 295403 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/7d/2d6ce181d7a5f51dedb8c06206cbf0ec026a99bf145edd309f9e17c3282f/fastapi-0.115.8-py3-none-any.whl", hash = "sha256:753a96dd7e036b34eeef8babdfcfe3f28ff79648f86551eb36bfc1b0bf4a8cbf", size = 94814 }, +] + +[[package]] +name = "h11" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 }, +] + +[[package]] +name = "httpcore" +version = "1.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + +[[package]] +name = "loguru" +version = "0.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "win32-setctime", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = 
"sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595 }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + +[[package]] +name = "pydantic" +version = "2.10.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696 }, +] + +[package.optional-dependencies] +email = [ + { name = "email-validator" }, +] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/bc/fed5f74b5d802cf9a03e83f60f18864e90e3aed7223adaca5ffb7a8d8d64/pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa", size = 1895938 }, + { url = "https://files.pythonhosted.org/packages/71/2a/185aff24ce844e39abb8dd680f4e959f0006944f4a8a0ea372d9f9ae2e53/pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c", size = 1815684 }, + { url = 
"https://files.pythonhosted.org/packages/c3/43/fafabd3d94d159d4f1ed62e383e264f146a17dd4d48453319fd782e7979e/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a", size = 1829169 }, + { url = "https://files.pythonhosted.org/packages/a2/d1/f2dfe1a2a637ce6800b799aa086d079998959f6f1215eb4497966efd2274/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5", size = 1867227 }, + { url = "https://files.pythonhosted.org/packages/7d/39/e06fcbcc1c785daa3160ccf6c1c38fea31f5754b756e34b65f74e99780b5/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c", size = 2037695 }, + { url = "https://files.pythonhosted.org/packages/7a/67/61291ee98e07f0650eb756d44998214231f50751ba7e13f4f325d95249ab/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7", size = 2741662 }, + { url = "https://files.pythonhosted.org/packages/32/90/3b15e31b88ca39e9e626630b4c4a1f5a0dfd09076366f4219429e6786076/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a", size = 1993370 }, + { url = "https://files.pythonhosted.org/packages/ff/83/c06d333ee3a67e2e13e07794995c1535565132940715931c1c43bfc85b11/pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236", size = 1996813 }, + { url = "https://files.pythonhosted.org/packages/7c/f7/89be1c8deb6e22618a74f0ca0d933fdcb8baa254753b26b25ad3acff8f74/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962", size = 2005287 }, + { url = "https://files.pythonhosted.org/packages/b7/7d/8eb3e23206c00ef7feee17b83a4ffa0a623eb1a9d382e56e4aa46fd15ff2/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9", size = 2128414 }, + { url = "https://files.pythonhosted.org/packages/4e/99/fe80f3ff8dd71a3ea15763878d464476e6cb0a2db95ff1c5c554133b6b83/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af", size = 2155301 }, + { url = "https://files.pythonhosted.org/packages/2b/a3/e50460b9a5789ca1451b70d4f52546fa9e2b420ba3bfa6100105c0559238/pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4", size = 1816685 }, + { url = "https://files.pythonhosted.org/packages/57/4c/a8838731cb0f2c2a39d3535376466de6049034d7b239c0202a64aaa05533/pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31", size = 1982876 }, + { url = "https://files.pythonhosted.org/packages/c2/89/f3450af9d09d44eea1f2c369f49e8f181d742f28220f88cc4dfaae91ea6e/pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc", size = 1893421 }, + { url = 
"https://files.pythonhosted.org/packages/9e/e3/71fe85af2021f3f386da42d291412e5baf6ce7716bd7101ea49c810eda90/pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7", size = 1814998 }, + { url = "https://files.pythonhosted.org/packages/a6/3c/724039e0d848fd69dbf5806894e26479577316c6f0f112bacaf67aa889ac/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15", size = 1826167 }, + { url = "https://files.pythonhosted.org/packages/2b/5b/1b29e8c1fb5f3199a9a57c1452004ff39f494bbe9bdbe9a81e18172e40d3/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306", size = 1865071 }, + { url = "https://files.pythonhosted.org/packages/89/6c/3985203863d76bb7d7266e36970d7e3b6385148c18a68cc8915fd8c84d57/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99", size = 2036244 }, + { url = "https://files.pythonhosted.org/packages/0e/41/f15316858a246b5d723f7d7f599f79e37493b2e84bfc789e58d88c209f8a/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459", size = 2737470 }, + { url = "https://files.pythonhosted.org/packages/a8/7c/b860618c25678bbd6d1d99dbdfdf0510ccb50790099b963ff78a124b754f/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048", size = 1992291 }, + { url = "https://files.pythonhosted.org/packages/bf/73/42c3742a391eccbeab39f15213ecda3104ae8682ba3c0c28069fbcb8c10d/pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d", size = 1994613 }, + { url = "https://files.pythonhosted.org/packages/94/7a/941e89096d1175d56f59340f3a8ebaf20762fef222c298ea96d36a6328c5/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b", size = 2002355 }, + { url = "https://files.pythonhosted.org/packages/6e/95/2359937a73d49e336a5a19848713555605d4d8d6940c3ec6c6c0ca4dcf25/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474", size = 2126661 }, + { url = "https://files.pythonhosted.org/packages/2b/4c/ca02b7bdb6012a1adef21a50625b14f43ed4d11f1fc237f9d7490aa5078c/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6", size = 2153261 }, + { url = "https://files.pythonhosted.org/packages/72/9d/a241db83f973049a1092a079272ffe2e3e82e98561ef6214ab53fe53b1c7/pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c", size = 1812361 }, + { url = "https://files.pythonhosted.org/packages/e8/ef/013f07248041b74abd48a385e2110aa3a9bbfef0fbd97d4e6d07d2f5b89a/pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc", size = 1982484 }, + { url = 
"https://files.pythonhosted.org/packages/10/1c/16b3a3e3398fd29dca77cea0a1d998d6bde3902fa2706985191e2313cc76/pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4", size = 1867102 }, + { url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127 }, + { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340 }, + { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900 }, + { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177 }, + { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046 }, + { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386 }, + { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060 }, + { url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870 }, + { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822 }, + { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364 }, + { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303 }, + { url = 
"https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064 }, + { url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046 }, + { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092 }, + { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709 }, + { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273 }, + { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027 }, + { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888 }, + { url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738 }, + { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138 }, + { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025 }, + { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633 }, + { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404 }, + { url = 
"https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130 }, + { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946 }, + { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387 }, + { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453 }, + { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186 }, + { url = "https://files.pythonhosted.org/packages/27/97/3aef1ddb65c5ccd6eda9050036c956ff6ecbfe66cb7eb40f280f121a5bb0/pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993", size = 1896475 }, + { url = "https://files.pythonhosted.org/packages/ad/d3/5668da70e373c9904ed2f372cb52c0b996426f302e0dee2e65634c92007d/pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308", size = 1772279 }, + { url = "https://files.pythonhosted.org/packages/8a/9e/e44b8cb0edf04a2f0a1f6425a65ee089c1d6f9c4c2dcab0209127b6fdfc2/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4", size = 1829112 }, + { url = "https://files.pythonhosted.org/packages/1c/90/1160d7ac700102effe11616e8119e268770f2a2aa5afb935f3ee6832987d/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf", size = 1866780 }, + { url = "https://files.pythonhosted.org/packages/ee/33/13983426df09a36d22c15980008f8d9c77674fc319351813b5a2739b70f3/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76", size = 2037943 }, + { url = "https://files.pythonhosted.org/packages/01/d7/ced164e376f6747e9158c89988c293cd524ab8d215ae4e185e9929655d5c/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118", size = 2740492 }, + { url = "https://files.pythonhosted.org/packages/8b/1f/3dc6e769d5b7461040778816aab2b00422427bcaa4b56cc89e9c653b2605/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630", size = 1995714 }, + { url = 
"https://files.pythonhosted.org/packages/07/d7/a0bd09bc39283530b3f7c27033a814ef254ba3bd0b5cfd040b7abf1fe5da/pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54", size = 1997163 }, + { url = "https://files.pythonhosted.org/packages/2d/bb/2db4ad1762e1c5699d9b857eeb41959191980de6feb054e70f93085e1bcd/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f", size = 2005217 }, + { url = "https://files.pythonhosted.org/packages/53/5f/23a5a3e7b8403f8dd8fc8a6f8b49f6b55c7d715b77dcf1f8ae919eeb5628/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362", size = 2127899 }, + { url = "https://files.pythonhosted.org/packages/c2/ae/aa38bb8dd3d89c2f1d8362dd890ee8f3b967330821d03bbe08fa01ce3766/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96", size = 2155726 }, + { url = "https://files.pythonhosted.org/packages/98/61/4f784608cc9e98f70839187117ce840480f768fed5d386f924074bf6213c/pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e", size = 1817219 }, + { url = "https://files.pythonhosted.org/packages/57/82/bb16a68e4a1a858bb3768c2c8f1ff8d8978014e16598f001ea29a25bf1d1/pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67", size = 1985382 }, + { url = "https://files.pythonhosted.org/packages/46/72/af70981a341500419e67d5cb45abe552a7c74b66326ac8877588488da1ac/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e", size = 1891159 }, + { url = "https://files.pythonhosted.org/packages/ad/3d/c5913cccdef93e0a6a95c2d057d2c2cba347815c845cda79ddd3c0f5e17d/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8", size = 1768331 }, + { url = "https://files.pythonhosted.org/packages/f6/f0/a3ae8fbee269e4934f14e2e0e00928f9346c5943174f2811193113e58252/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3", size = 1822467 }, + { url = "https://files.pythonhosted.org/packages/d7/7a/7bbf241a04e9f9ea24cd5874354a83526d639b02674648af3f350554276c/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f", size = 1979797 }, + { url = "https://files.pythonhosted.org/packages/4f/5f/4784c6107731f89e0005a92ecb8a2efeafdb55eb992b8e9d0a2be5199335/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133", size = 1987839 }, + { url = "https://files.pythonhosted.org/packages/6d/a7/61246562b651dff00de86a5f01b6e4befb518df314c54dec187a78d81c84/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc", size = 1998861 }, + { url = 
"https://files.pythonhosted.org/packages/86/aa/837821ecf0c022bbb74ca132e117c358321e72e7f9702d1b6a03758545e2/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50", size = 2116582 }, + { url = "https://files.pythonhosted.org/packages/81/b0/5e74656e95623cbaa0a6278d16cf15e10a51f6002e3ec126541e95c29ea3/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9", size = 2151985 }, + { url = "https://files.pythonhosted.org/packages/63/37/3e32eeb2a451fddaa3898e2163746b0cffbbdbb4740d38372db0490d67f3/pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151", size = 2004715 }, + { url = "https://files.pythonhosted.org/packages/29/0e/dcaea00c9dbd0348b723cae82b0e0c122e0fa2b43fa933e1622fd237a3ee/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656", size = 1891733 }, + { url = "https://files.pythonhosted.org/packages/86/d3/e797bba8860ce650272bda6383a9d8cad1d1c9a75a640c9d0e848076f85e/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278", size = 1768375 }, + { url = "https://files.pythonhosted.org/packages/41/f7/f847b15fb14978ca2b30262548f5fc4872b2724e90f116393eb69008299d/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb", size = 1822307 }, + { url = "https://files.pythonhosted.org/packages/9c/63/ed80ec8255b587b2f108e514dc03eed1546cd00f0af281e699797f373f38/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd", size = 1979971 }, + { url = "https://files.pythonhosted.org/packages/a9/6d/6d18308a45454a0de0e975d70171cadaf454bc7a0bf86b9c7688e313f0bb/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc", size = 1987616 }, + { url = "https://files.pythonhosted.org/packages/82/8a/05f8780f2c1081b800a7ca54c1971e291c2d07d1a50fb23c7e4aef4ed403/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b", size = 1998943 }, + { url = "https://files.pythonhosted.org/packages/5e/3e/fe5b6613d9e4c0038434396b46c5303f5ade871166900b357ada4766c5b7/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b", size = 2116654 }, + { url = "https://files.pythonhosted.org/packages/db/ad/28869f58938fad8cc84739c4e592989730bfb69b7c90a8fff138dff18e1e/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2", size = 2152292 }, + { url = "https://files.pythonhosted.org/packages/a1/0c/c5c5cd3689c32ed1fe8c5d234b079c12c281c051759770c05b8bed6412b5/pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35", size = 2004961 }, +] + +[[package]] +name = "pytest" +version = "8.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { 
name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, +] + +[[package]] +name = "starlette" +version = "0.45.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ff/fb/2984a686808b89a6781526129a4b51266f678b2d2b97ab2d325e56116df8/starlette-0.45.3.tar.gz", hash = "sha256:2cbcba2a75806f8a41c722141486f37c28e30a0921c5f6fe4346cb0dcee1302f", size = 2574076 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/61/f2b52e107b1fc8944b33ef56bf6ac4ebbe16d91b94d2b87ce013bf63fb84/starlette-0.45.3-py3-none-any.whl", hash = "sha256:dfb6d332576f136ec740296c7e8bb8c8a7125044e7c6da30744718880cdd059d", size = 71507 }, +] + +[[package]] +name = "syft-core" +version = "0.1.0" +source = { git = "https://github.com/OpenMined/syft-extras.git?subdirectory=packages%2Fsyft-core&branch=main#371ac341959ef58f655ef667689b790487cb9082" } +dependencies = [ + { name = "pydantic", extra = ["email"] }, + { name = "typing-extensions" }, +] + +[[package]] +name = "syft-proxy" +version = "0.1.0" +source = { editable = "." 
} +dependencies = [ + { name = "fastapi" }, + { name = "httpx" }, + { name = "loguru" }, + { name = "pytest" }, + { name = "syft-rpc" }, + { name = "typing-extensions" }, + { name = "uvicorn" }, +] + +[package.metadata] +requires-dist = [ + { name = "fastapi", specifier = ">=0.115.8" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "loguru", specifier = ">=0.7.3" }, + { name = "pytest", specifier = ">=8.3.4" }, + { name = "syft-rpc", git = "https://github.com/OpenMined/syft-extras.git?subdirectory=packages%2Fsyft-rpc&branch=main" }, + { name = "typing-extensions", specifier = ">=4.12.2" }, + { name = "uvicorn", specifier = ">=0.34.0" }, +] + +[[package]] +name = "syft-rpc" +version = "0.1.0" +source = { git = "https://github.com/OpenMined/syft-extras.git?subdirectory=packages%2Fsyft-rpc&branch=main#371ac341959ef58f655ef667689b790487cb9082" } +dependencies = [ + { name = "pydantic" }, + { name = "syft-core" }, + { name = "typing-extensions" }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077 }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429 }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067 }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030 }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898 }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894 }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319 }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 
238273 }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310 }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309 }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762 }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453 }, + { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486 }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349 }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159 }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243 }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645 }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584 }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875 }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418 }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, + { url = 
"https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, +] + +[[package]] +name = "uvicorn" +version = "0.34.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, + { name = 
"typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/4d/938bd85e5bf2edeec766267a5015ad969730bb91e31b44021dfe8b22df6c/uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9", size = 76568 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 }, +] + +[[package]] +name = "win32-setctime" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083 }, +] diff --git a/packages/syft-extras/packages/syft-rpc/README.md b/packages/syft-extras/packages/syft-rpc/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/packages/syft-rpc/pyproject.toml b/packages/syft-extras/packages/syft-rpc/pyproject.toml new file mode 100644 index 00000000000..d528eae73be --- /dev/null +++ b/packages/syft-extras/packages/syft-rpc/pyproject.toml @@ -0,0 +1,18 @@ +[project] +name = "syft-rpc" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.9" +dependencies = [ + "pydantic>=2.9.2", + "syft-core==0.1.0", + "typing-extensions>=4.12.2", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.uv.sources] +syft-core = { workspace = true } diff --git a/packages/syft-extras/packages/syft-rpc/syft_rpc/__init__.py b/packages/syft-extras/packages/syft-rpc/syft_rpc/__init__.py new file mode 100644 index 00000000000..9884633bad6 --- /dev/null +++ b/packages/syft-extras/packages/syft-rpc/syft_rpc/__init__.py @@ -0,0 +1,14 @@ +from .protocol import SyftBulkFuture, SyftFuture, SyftRequest, SyftResponse +from .rpc import broadcast, reply_to, send + +__version__ = "0.1.0" + +__all__ = [ + "broadcast", + "reply_to", + "send", + "SyftRequest", + "SyftResponse", + "SyftFuture", + "SyftBulkFuture", +] diff --git a/packages/syft-extras/packages/syft-rpc/syft_rpc/protocol.py b/packages/syft-extras/packages/syft-rpc/syft_rpc/protocol.py new file mode 100644 index 00000000000..034b1f08443 --- /dev/null +++ b/packages/syft-extras/packages/syft-rpc/syft_rpc/protocol.py @@ -0,0 +1,561 @@ +from __future__ import annotations + +import hashlib +import json +import logging +import time +from datetime import datetime, timedelta, timezone +from enum import Enum, IntEnum +from pathlib import Path +from uuid import UUID, uuid4 + +from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, field_validator +from pydantic import ValidationError as PydanticValidationError +from syft_core.types import PathLike, to_path +from syft_core.url import SyftBoxURL +from typing_extensions import ( + ClassVar, + Dict, + List, + Optional, + Self, + Type, + TypeAlias, + TypeVar, + Union, +) + +logger = logging.getLogger(__name__) + +# Type aliases for better readability +JSONPrimitive: TypeAlias 
= Union[str, int, float, bool, None] +JSONValue: TypeAlias = Union[Dict[str, "JSONValue"], List["JSONValue"], JSONPrimitive] +JSON: TypeAlias = Union[str, bytes, bytearray] +Headers: TypeAlias = dict[str, str] +PYDANTIC = TypeVar("PYDANTIC", bound=BaseModel) + + +# Constants +DEFAULT_MESSAGE_EXPIRY: int = 60 * 60 * 24 # 1 days in seconds +DEFAULT_POLL_INTERVAL: float = 0.1 +DEFAULT_TIMEOUT: float = 300 # 5 minutes in seconds + + +def validate_syftbox_url(url: Union[SyftBoxURL, str]) -> SyftBoxURL: + if isinstance(url, str): + return SyftBoxURL(url) + if isinstance(url, SyftBoxURL): + return url + raise ValueError(f"Invalid type for url: {type(url)}. Expected str or SyftBoxURL.") + + +class SyftMethod(str, Enum): + """HTTP methods supported by the Syft protocol.""" + + GET = "GET" + HEAD = "HEAD" + POST = "POST" + PUT = "PUT" + PATCH = "PATCH" + DELETE = "DELETE" + + +class SyftStatus(IntEnum): + """Standard HTTP-like status codes for Syft responses.""" + + SYFT_200_OK = 200 + SYFT_400_BAD_REQUEST = 400 + SYFT_403_FORBIDDEN = 403 + SYFT_404_NOT_FOUND = 404 + SYFT_419_EXPIRED = 419 + SYFT_500_SERVER_ERROR = 500 + + @property + def is_success(self) -> bool: + """Check if the status code indicates success.""" + return 200 <= self.value < 300 + + @property + def is_error(self) -> bool: + """Check if the status code indicates an error.""" + return self.value >= 400 + + +class Base(BaseModel): + """Base model with enhanced serialization capabilities.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + json_encoders={ + datetime: lambda dt: dt.isoformat(), + }, + ser_json_bytes="base64", + val_json_bytes="base64", + ) + + def dumps(self) -> str: + """Serialize the model instance to JSON formatted str. + + Returns: + JSON string representation of the model instance. + + Raises: + pydantic.ValidationError: If the model contains invalid data. + TypeError: If the model contains types that cannot be JSON serialized. + """ + return self.model_dump_json() + + def dump(self, path: PathLike) -> None: + """Serialize the model instance as JSON to a file. + + Args: + path: The file path where the JSON data will be written. + + Raises: + pydantic.ValidationError: If the model contains invalid data. + TypeError: If the model contains types that cannot be JSON serialized. + PermissionError: If lacking permission to write to the path. + OSError: If there are I/O related errors. + FileNotFoundError: If the parent directory doesn't exist. + """ + to_path(path).write_text(self.dumps()) + + @classmethod + def loads(cls, data: JSON) -> Self: + """Load a model instance from a JSON string or bytes. + + Args: + data: JSON data to parse. Can be string or binary data. + + Returns: + A new instance of the model class. + + Raises: + pydantic.ValidationError: If JSON doesn't match the model's schema. + ValueError: If the input is not valid JSON. + TypeError: If input type is not str, bytes, or bytearray. + UnicodeDecodeError: If binary input cannot be decoded as UTF-8. + """ + return cls.model_validate_json(data) + + @classmethod + def load(cls, path: PathLike) -> Self: + """Load a model instance from a JSON file. + + Args: + path: Path to the JSON file to read. + + Returns: + A new instance of the model class. + + Raises: + pydantic.ValidationError: If JSON doesn't match the model's schema. + ValueError: If file content is not valid JSON. + FileNotFoundError: If the file doesn't exist. + PermissionError: If lacking permission to read the file. + OSError: If there are I/O related errors. 
+ UnicodeDecodeError: If content cannot be decoded as UTF-8. + """ + return cls.loads(to_path(path).read_text()) + + +class SyftMessage(Base): + """Base message class for Syft protocol communication.""" + + VERSION: ClassVar[int] = 1 + + id: UUID = Field(default_factory=uuid4) + """Unique identifier of the message.""" + + sender: str + """The sender of the message.""" + + url: SyftBoxURL + """The URL of the message.""" + + body: Optional[bytes] = None + """The body of the message in bytes.""" + + headers: Headers = Field(default_factory=dict) + """Additional headers for the message.""" + + created: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) + """Timestamp when the message was created.""" + + expires: datetime = Field( + default_factory=lambda: datetime.now(timezone.utc) + + timedelta(seconds=DEFAULT_MESSAGE_EXPIRY) + ) + """Timestamp when the message expires.""" + + @property + def age(self) -> float: + """Return the age of the message in seconds.""" + return (datetime.now(timezone.utc) - self.created).total_seconds() + + @property + def is_expired(self) -> bool: + """Check if the message has expired.""" + return datetime.now(timezone.utc) > self.expires + + @field_validator("url", mode="before") + @classmethod + def validate_url(cls, value) -> SyftBoxURL: + return validate_syftbox_url(value) + + def get_message_id(self) -> UUID: + """Generate a deterministic UUID from the message contents.""" + return UUID(bytes=self.__msg_hash().digest()[:16], version=4) + + def get_message_hash(self) -> str: + """Generate a hash of the message contents.""" + return self.__msg_hash().hexdigest() + + def __msg_hash(self): + """Generate a hash of the message contents.""" + m = self.model_dump_json(include={"url", "method", "sender", "headers", "body"}) + return hashlib.sha256(m.encode()) + + def text(self) -> str: + """Decode the body as a string. + + Args: + encoding: Character encoding to use for decoding bytes. Defaults to "utf-8". + + Returns: + Decoded string representation of the body. + + Raises: + UnicodeDecodeError: If bytes cannot be decoded with specified encoding + """ + if not self.body: + return "" + return self.body.decode() + + def json(self, **kwargs) -> JSONValue: + """Parse bytes body into JSON data. + + Args: + encoding: Character encoding to use for decoding bytes. Defaults to "utf-8". + + Returns: + Parsed JSON data as dict, list, or primitive value. + + Raises: + json.JSONDecodeError: If body contains invalid JSON + UnicodeDecodeError: If bytes cannot be decoded with specified encoding + """ + return json.loads(self.text()) + + def model(self, model_cls: Type[PYDANTIC]) -> PYDANTIC: + """Parse JSON body into a Pydantic model instance. 
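+
+        A minimal sketch (``UserProfile`` is an illustrative model, not part of this
+        package): for a message whose body is ``b'{"name": "alice"}'``, calling
+        ``msg.model(UserProfile)`` is equivalent to
+        ``UserProfile.model_validate_json(msg.body)`` and returns a validated instance.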
+ + Args: + model_cls: A Pydantic model class to parse the JSON into + + Returns: + An instance of the provided model class + + Raises: + ValidationError: If JSON data doesn't match model schema + """ + + return model_cls.model_validate_json(self.body) + + +class SyftError(Exception): + """Base exception for Syft-related errors.""" + + pass + + +class SyftTimeoutError(SyftError): + """Raised when a request times out.""" + + pass + + +class SyftRequest(SyftMessage): + """Request message in the Syft protocol.""" + + method: SyftMethod = SyftMethod.GET + + +class SyftResponse(SyftMessage): + """Response message in the Syft protocol.""" + + status_code: SyftStatus = SyftStatus.SYFT_200_OK + + @property + def is_success(self) -> bool: + """Check if the response indicates success.""" + return self.status_code.is_success + + def raise_for_status(self): + if self.status_code.is_error: + raise SyftError( + f"Request failed with status code {self.status_code}. Reason: {self.body}" + ) + + @classmethod + def system_response(cls, status_code: SyftStatus, message: str) -> Self: + return cls( + status_code=status_code, + body=message.encode(), + url=SyftBoxURL("syft://system@syftbox.localhost"), + sender="system@syftbox.localhost", + ) + + +class SyftFuture(Base): + """Represents an asynchronous Syft RPC operation on a file system transport. + + Attributes: + id: Identifier of the corresponding request and response. + path: Path where request and response files are stored. + expires: Timestamp when the request expires. + """ + + id: UUID + """Identifier of the corresponding request and response.""" + + path: Path + """Path where request and response files are stored""" + + expires: datetime + """Timestamp when the request expires""" + + _request: Optional[SyftRequest] = PrivateAttr() + + def __init__(self, **data): + super().__init__(**data) + self._request = data.get("request") + if not self._request: + self._request = SyftRequest.load(self.request_path) + + @property + def request_path(self) -> Path: + """Path to the request file.""" + return to_path(self.path) / f"{self.id}.request" + + @property + def response_path(self) -> Path: + """Path to the response file.""" + return to_path(self.path) / f"{self.id}.response" + + @property + def rejected_path(self) -> Path: + """Path to the rejected request marker file.""" + return self.request_path.with_suffix(f".syftrejected{self.request_path.suffix}") + + @property + def is_rejected(self) -> bool: + """Check if the request has been rejected.""" + return self.rejected_path.exists() + + @property + def is_expired(self) -> bool: + """Check if the future has expired.""" + return datetime.now(timezone.utc) > self.expires + + @property + def request(self) -> SyftRequest: + """Get the underlying request object.""" + + if not self._request: + self._request = SyftRequest.load(self.request_path) + return self._request + + def wait( + self, + timeout: float = DEFAULT_TIMEOUT, + poll_interval: float = DEFAULT_POLL_INTERVAL, + ) -> SyftResponse: + """Wait for the future to complete and return the Response. + + Args: + timeout: Maximum time to wait in seconds. None means wait until the request expires. + poll_interval: Time in seconds between polling attempts. + + Returns: + The response object. + + Raises: + SyftTimeoutError: If timeout is reached before receiving a response. + ValueError: If timeout or poll_interval is negative. 
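+
+        Example (illustrative; assumes ``future`` was returned by ``syft_rpc.send``):
+            >>> response = future.wait(timeout=60, poll_interval=0.5)
+            >>> if response.is_success:
+            ...     print(response.text())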
+ """ + if timeout is not None and timeout <= 0: + raise ValueError("Timeout must be greater than 0") + if poll_interval <= 0: + raise ValueError("Poll interval must be greater than 0") + + deadline = time.monotonic() + (timeout or float("inf")) + + while time.monotonic() < deadline: + try: + response = self.resolve() + if response is not None: + return response + time.sleep(poll_interval) + except Exception as e: + logger.error(f"Error while resolving future: {str(e)}") + raise + + raise SyftTimeoutError( + f"Timeout reached after waiting {timeout} seconds for response" + ) + + def resolve(self) -> Optional[SyftResponse]: + """Attempt to resolve the future to a response. + + Returns: + The response if available, None if still pending. + """ + + # Check for rejection first + if self.is_rejected: + self.request_path.unlink(missing_ok=True) + self.rejected_path.unlink(missing_ok=True) + return SyftResponse.system_response( + status_code=SyftStatus.SYFT_403_FORBIDDEN, + message="Request was rejected by the SyftBox cache server due to permissions issue", + ) + + # Check for existing response + if self.response_path.exists(): + return self._handle_existing_response() + + # If both request and response are missing, the request has expired + # and they got cleaned up by the server. + if not self.request_path.exists(): + return SyftResponse.system_response( + status_code=SyftStatus.SYFT_404_NOT_FOUND, + message=f"Request with {self.id} not found", + ) + + # Check for expired request + request = SyftRequest.load(self.request_path) + if request.is_expired: + self.request_path.unlink(missing_ok=True) + self.response_path.unlink(missing_ok=True) + return SyftResponse.system_response( + status_code=SyftStatus.SYFT_419_EXPIRED, + message=f"Request with {self.id} expired on {request.expires}", + ) + + # No response yet + return None + + def _handle_existing_response(self) -> SyftResponse: + """Process an existing response file. + + Returns: + The loaded response object. + + Note: + If the response file exists but is invalid or expired, + returns an appropriate error response instead of raising an exception. + """ + try: + response = SyftResponse.load(self.response_path) + # preserve results, but change status code to 419 + if response.is_expired: + response.status_code = SyftStatus.SYFT_419_EXPIRED + return response + except (PydanticValidationError, ValueError, UnicodeDecodeError) as e: + logger.error(f"Error loading response: {str(e)}") + return SyftResponse.system_response( + status_code=SyftStatus.SYFT_500_SERVER_ERROR, + message=f"Error loading response: {str(e)}", + ) + finally: + self.request_path.unlink(missing_ok=True) + self.response_path.unlink(missing_ok=True) + + def __hash__(self): + return hash(self.id) + + def __eq__(self, other): + if not isinstance(other, SyftFuture): + return False + return self.id == other.id + + +class SyftBulkFuture(Base): + futures: List[SyftFuture] + responses: List[SyftResponse] = [] + + def resolve(self) -> None: + """Resolve all futures and store the responses.""" + for future in self.pending: + if response := future.resolve(): + self.responses.append(response) + + def gather_completed( + self, + timeout: float = DEFAULT_TIMEOUT, + poll_interval: float = DEFAULT_POLL_INTERVAL, + ) -> List[SyftResponse]: + """Wait for all futures to complete and return a list of responses. + + Returns a list of responses in the order of the futures list. If a future + times out, it will be omitted from the list. 
If the timeout is reached before + all futures complete, the function will return the responses received so far. + + Args: + timeout: Maximum time to wait in seconds. + poll_interval: Time in seconds between polling attempts. + Returns: + A list of response objects. + Raises: + ValueError: If timeout or poll_interval is negative. + """ + if timeout is not None and timeout <= 0: + raise ValueError("Timeout must be greater than 0") + if poll_interval <= 0: + raise ValueError("Poll interval must be greater than 0") + + deadline = time.monotonic() + (timeout or float("inf")) + + while time.monotonic() < deadline: + self.resolve() + if not self.pending: + logger.debug("All futures have resolved") + break + time.sleep(poll_interval) + + return self.responses + + @property + def id(self) -> UUID: + """Generate a deterministic UUID from all future IDs. + + Returns: + A single UUID derived from hashing all future IDs. + """ + # Combine all UUIDs and hash them + combined = ",".join(str(f.id) for f in self.futures) + hash_bytes = hashlib.sha256(combined.encode()).digest()[:16] + # Use first 16 bytes of hash to create a new UUID + return UUID(bytes=hash_bytes, version=4) + + @property + def pending(self) -> List[SyftFuture]: + """Return a list of futures that have not yet resolved.""" + completed = {r.id for r in self.responses} + return [f for f in self.futures if f.id not in completed] + + @property + def failures(self) -> List[SyftResponse]: + """Return a list of failed responses.""" + return [r for r in self.responses if not r.is_success] + + @property + def successes(self) -> List[SyftResponse]: + """Return a list of successful responses.""" + return [r for r in self.responses if r.is_success] + + @property + def all_failed(self) -> bool: + """Check if all futures have failed.""" + return len(self.failures) == len(self.futures) diff --git a/packages/syft-extras/packages/syft-rpc/syft_rpc/py.typed b/packages/syft-extras/packages/syft-rpc/syft_rpc/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/packages/syft-rpc/syft_rpc/rpc.py b/packages/syft-extras/packages/syft-rpc/syft_rpc/rpc.py new file mode 100644 index 00000000000..3c3c67b7047 --- /dev/null +++ b/packages/syft-extras/packages/syft-rpc/syft_rpc/rpc.py @@ -0,0 +1,283 @@ +from __future__ import annotations + +import json +from dataclasses import asdict, is_dataclass +from datetime import datetime, timezone +from pathlib import Path +from uuid import UUID + +from pydantic import BaseModel +from syft_core.client_shim import Client +from syft_core.url import SyftBoxURL +from typing_extensions import Any, Dict, List, Optional, Union + +from syft_rpc.protocol import ( + SyftBulkFuture, + SyftError, + SyftFuture, + SyftMethod, + SyftRequest, + SyftResponse, + SyftStatus, +) +from syft_rpc.util import parse_duration + +DEFAULT_EXPIRY = "15m" + +BodyType = Union[str, bytes, dict, list, tuple, float, int, BaseModel, None] +HeaderType = Optional[Dict[str, str]] + + +def make_url(datasite: str, app_name: str, endpoint: str) -> SyftBoxURL: + """Create a Syft Box URL from a datasite, app name, and RPC endpoint.""" + + return SyftBoxURL( + f"syft://{datasite}/api_data/{app_name}/rpc/" + endpoint.lstrip("/") + ) + + +def serialize(obj: Any) -> Optional[bytes]: + if obj is None: + return None + elif isinstance(obj, BaseModel): + return obj.model_dump_json().encode() + elif is_dataclass(obj) and not isinstance(obj, type): + return json.dumps(asdict(obj), default=str, ensure_ascii=False).encode() + elif 
isinstance(obj, str): + return obj.encode() + else: + # dict, list, tuple, float, int, str + return json.dumps(obj, ensure_ascii=False).encode() + + +def send( + url: Union[SyftBoxURL, str], + body: Optional[BodyType] = None, + headers: Optional[HeaderType] = None, + expiry: str = DEFAULT_EXPIRY, + cache: bool = False, + client: Optional[Client] = None, +) -> SyftFuture: + """Send an asynchronous request to a Syft Box endpoint and return a future for tracking the response. + + This function creates a SyftRequest, writes it to the local filesystem under the client's workspace, + and returns a SyftFuture object that can be used to track and retrieve the response. + + Args: + method: The HTTP method to use. Can be a SyftMethod enum or a string + (e.g., 'GET', 'POST'). + url: The destination URL. Can be a SyftBoxURL instance or a string in the + format 'syft://user@domain.com/path'. + headers: Optional dictionary of HTTP headers to include with the request. + Defaults to None. + body: Optional request body. Can be either a string (will be encoded to bytes) + or raw bytes. Defaults to None. + client: A Syft Client instance used to send the request. If not provided, + the default client will be loaded. + expiry: Duration string specifying how long the request is valid for. + Defaults to '24h' (24 hours). + cache: If True, cache the request on the local filesystem for future use. + + Returns: + SyftFuture: A future object that can be used to track and retrieve the response. + + Example: + >>> future = send( + ... url="syft://data@domain.com/dataset1", + ... expiry_secs="30s" + ... ) + >>> response = future.result() # Wait for response + """ + + # If client is not provided, load the default client + client = Client.load() if client is None else client + + syft_request = SyftRequest( + sender=client.email, + method=SyftMethod.GET, + url=url if isinstance(url, SyftBoxURL) else SyftBoxURL(url), + headers=headers or {}, + body=serialize(body), + expires=datetime.now(timezone.utc) + parse_duration(expiry), + ) + local_path = syft_request.url.to_local_path(client.workspace.datasites) + local_path.mkdir(parents=True, exist_ok=True) + + # caching is enabled, generate a new request + if cache: + # generate a predictable id from message components + id = syft_request.get_message_id() + syft_request.id = id + + req_path = local_path / f"{syft_request.id}.request" + + # Handle cached request scenario + if cache and req_path.exists(): + cached_request = SyftRequest.load(req_path) + if cached_request.is_expired: + print(f"Cached request expired, removing: {req_path}") + req_path.unlink() + else: + return SyftFuture( + id=cached_request.id, + path=local_path, + expires=cached_request.expires, + request=cached_request, + ) + + # Create new request file if needed + if not req_path.exists(): + try: + syft_request.dump(req_path) + except OSError as e: + raise SyftError(f"Request persistence failed: {req_path} - {e}") + + return SyftFuture( + id=syft_request.id, + path=local_path, + expires=syft_request.expires, + request=syft_request, + ) + + +def broadcast( + urls: Union[List[SyftBoxURL], List[str]], + body: Optional[BodyType] = None, + headers: Optional[HeaderType] = None, + expiry: str = DEFAULT_EXPIRY, + cache: bool = False, + client: Optional[Client] = None, +) -> SyftBulkFuture: + """Broadcast an asynchronous request to multiple Syft Box endpoints and return a bulk future. 
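+
+    Roughly equivalent to the following sketch (a fan-out over :func:`send`, with the
+    same defaults):
+        ``SyftBulkFuture(futures=[send(url=u, body=body, headers=headers,
+        expiry=expiry, cache=cache, client=client) for u in urls])``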
+ + +def broadcast( + urls: Union[List[SyftBoxURL], List[str]], + body: Optional[BodyType] = None, + headers: Optional[HeaderType] = None, + expiry: str = DEFAULT_EXPIRY, + cache: bool = False, + client: Optional[Client] = None, +) -> SyftBulkFuture: + """Broadcast an asynchronous request to multiple Syft Box endpoints and return a bulk future. + + This function creates a SyftRequest for each URL in the list, + writes them to the local filesystem under the client's workspace, and + returns a SyftBulkFuture object that can be used to track and retrieve multiple responses. + + Args: + urls: List of destination URLs. Each can be a SyftBoxURL instance or a string in + the format 'syft://user@domain.com/path'. + body: Optional request body. Can be either a string (will be encoded to bytes) + or raw bytes. Defaults to None. + headers: Optional dictionary of HTTP headers to include with the requests. + Defaults to None. + expiry: Duration string specifying how long the requests are valid for. + Defaults to '15m' (15 minutes). + cache: If True, cache the requests on the local filesystem for future use. + client: A Syft Client instance used to send the requests. If not provided, + the default client will be loaded. + + Returns: + SyftBulkFuture: A bulk future object that can be used to track and retrieve multiple responses. + + Example: + >>> future = broadcast( + ... urls=["syft://user1@domain.com/api_data/app_name/rpc/endpoint", + ... "syft://user2@domain.com/api_data/app_name/rpc/endpoint"], + ... expiry="1d", + ... ) + >>> responses = future.gather_completed() # Wait for all responses + """ + + # If client is not provided, load the default client + client = Client.load() if client is None else client + + bulk_future = SyftBulkFuture( + futures=[ + send( + url=url, + headers=headers, + body=body, + client=client, + expiry=expiry, + cache=cache, + ) + for url in urls + ] + ) + return bulk_future
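reply_to() below is the responder-side counterpart of send()/broadcast(). A rough sketch of answering pending requests by hand, using only helpers that appear in this diff (the app and endpoint names are made up; a real responder would more likely rely on the syft-event watcher that is part of this workspace):

from syft_core.client_shim import Client
from syft_rpc.protocol import SyftRequest, SyftStatus
from syft_rpc.rpc import make_url, reply_to

client = Client.load()
# Hypothetical app/endpoint; to_local_path() is used the same way inside send().
rpc_dir = make_url(client.email, "pingpong", "ping").to_local_path(client.workspace.datasites)

for req_file in rpc_dir.glob("*.request"):
    request = SyftRequest.load(req_file)
    if request.is_expired:
        continue  # skip stale requests instead of answering them
    reply_to(request, body={"pong": True}, status_code=SyftStatus.SYFT_200_OK, client=client)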
+ + +def reply_to( + request: SyftRequest, + body: Optional[BodyType] = None, + headers: Optional[HeaderType] = None, + status_code: SyftStatus = SyftStatus.SYFT_200_OK, + client: Optional[Client] = None, +) -> SyftResponse: + """Create and store a response to a Syft request. + + This function creates a SyftResponse object corresponding to a given SyftRequest, + writes it to the local filesystem in the client's workspace, and returns the response object. + + Args: + request: The original SyftRequest to respond to. + body: Optional response body. Can be either a string (will be encoded to bytes) + or raw bytes. Defaults to None. + headers: Optional dictionary of HTTP headers to include with the response. + Defaults to None. + status_code: HTTP status code for the response. Should be a SyftStatus enum value. + Defaults to SyftStatus.SYFT_200_OK. + client: A Syft Client instance used to send the response. If not provided, + the default client will be loaded. + + Returns: + SyftResponse: The created response object containing all response details. + + Example: + >>> # Assuming we have a request + >>> response = reply_to( + ... request=incoming_request, + ... body="Request processed successfully", + ... status_code=SyftStatus.SYFT_200_OK + ... ) + """ + + # If client is not provided, load the default client + client = Client.load() if client is None else client + + response = SyftResponse( + id=request.id, + sender=client.email, + url=request.url, + headers=headers or {}, + body=serialize(body), + expires=request.expires, + status_code=status_code, + ) + + local_path = response.url.to_local_path(client.workspace.datasites) + file_path = local_path / f"{response.id}.response" + local_path.mkdir(parents=True, exist_ok=True) + response.dump(file_path) + + return response + + +def write_response( + request_path: Union[Path, str], + body: Optional[BodyType] = None, + headers: Optional[HeaderType] = None, + status_code: SyftStatus = SyftStatus.SYFT_200_OK, + client: Optional[Client] = None, +): + """Write a response to a request file on the local filesystem. + Useful when the request could not be parsed.""" + + request_path = Path(request_path) + + client = client or Client.load() + + _id = request_path.stem + response = SyftResponse( + id=UUID(_id), + sender=client.email, + url=client.to_syft_url(request_path.parent), + headers=headers or {}, + body=serialize(body), + status_code=status_code, + ) + response.dump(request_path.with_suffix(".response")) diff --git a/packages/syft-extras/packages/syft-rpc/syft_rpc/rpc_db.py b/packages/syft-extras/packages/syft-rpc/syft_rpc/rpc_db.py new file mode 100644 index 00000000000..7252cd56415 --- /dev/null +++ b/packages/syft-extras/packages/syft-rpc/syft_rpc/rpc_db.py @@ -0,0 +1,153 @@ +from __future__ import annotations + +import sqlite3 +import threading +from functools import cache +from uuid import UUID + +from syft_core.client_shim import Client +from typing_extensions import Optional, Union + +from syft_rpc.protocol import SyftBulkFuture, SyftFuture + +__Q_CREATE_TABLE = """ +CREATE TABLE IF NOT EXISTS futures ( + id TEXT PRIMARY KEY, + bid TEXT DEFAULT NULL, + path TEXT NOT NULL, + expires TIMESTAMP NOT NULL, + namespace TEXT NOT NULL +) WITHOUT ROWID +""" + +__Q_INSERT_FUTURE = """ +INSERT OR REPLACE INTO futures (id, path, expires, namespace, bid) +VALUES (:id, :path, :expires, :namespace, :bid) +""" + + +thread_local = threading.local() + + +@cache +def get_default_client(): + return Client.load() + + +def __get_connection(client: Client) -> sqlite3.Connection: + if not hasattr(thread_local, "conn"): + db_dir = client.workspace.plugins + db_dir.mkdir(exist_ok=True, parents=True) + db_path = db_dir / "rpc.futures.db" + conn = sqlite3.connect(str(db_path)) + + # Multi-process optimizations for small writes + conn.execute("PRAGMA journal_mode=WAL") # Better concurrency + conn.execute("PRAGMA synchronous=NORMAL") # Balance between safety and speed + conn.execute("PRAGMA cache_size=-2000") # 2MB cache + conn.execute("PRAGMA busy_timeout=5000") # Wait up to 5s on locks + conn.execute("PRAGMA temp_store=MEMORY") + conn.execute("PRAGMA foreign_keys=OFF") + + conn.row_factory = sqlite3.Row + + conn.execute(__Q_CREATE_TABLE) + conn.commit() + thread_local.conn = conn + + return thread_local.conn + + +def save_future( + future: SyftFuture, + namespace: str, + client: Optional[Client] = None, + bulk_id: Optional[str] = None, +) -> str: + client = client or get_default_client() + conn = __get_connection(client) + data = future.model_dump(mode="json") + + conn.execute(__Q_INSERT_FUTURE, {**data, "namespace": namespace, "bid": bulk_id}) + conn.commit() + + return data["id"] + + +def get_future( + future_id: Union[UUID, str], client: Optional[Client] = None +) -> Optional[SyftFuture]: + client = client or
get_default_client() + conn = __get_connection(client) + row = conn.execute( + "SELECT id, path, expires FROM futures WHERE id = ?", (str(future_id),) + ).fetchone() + + if not row: + return None + + return SyftFuture(**dict(row)) + + +def delete_future(future_id: Union[UUID, str], client: Optional[Client] = None) -> None: + client = client or get_default_client() + conn = __get_connection(client) + conn.execute("DELETE FROM futures WHERE id = ?", (str(future_id),)) + conn.commit() + + +def cleanup_expired_futures(client: Optional[Client] = None) -> None: + client = client or Client.load() + conn = __get_connection(client) + conn.execute("DELETE FROM futures WHERE expires < datetime('now')") + conn.commit() + + +def list_futures(namespace: Optional[str] = None, client: Optional[Client] = None): + client = client or Client.load() + conn = __get_connection(client) + query_all = "SELECT id, path, expires FROM futures" + query_app = "SELECT id, path, expires FROM futures WHERE namespace = ?" + + if namespace: + rows = conn.execute(query_app, (namespace,)).fetchall() + else: + rows = conn.execute(query_all).fetchall() + return [SyftFuture(**dict(row)) for row in rows] + + +def save_bulk_future( + bulk_future: SyftBulkFuture, + namespace: str, + client: Optional[Client] = None, +) -> str: + bid = str(bulk_future.id) + for future in bulk_future.futures: + save_future(future, namespace, client, bid) + return bid + + +def get_bulk_future( + bulk_id: Union[str, UUID], client: Optional[Client] = None +) -> Optional[SyftBulkFuture]: + client = client or get_default_client() + conn = __get_connection(client) + rows = conn.execute( + "SELECT id, path, expires FROM futures WHERE bid = ? ORDER BY expires", + (str(bulk_id),), + ).fetchall() + + if not rows: + return None + + futures = [SyftFuture(**dict(row)) for row in rows] + return SyftBulkFuture(futures=futures) + + +def delete_bulk_future( + bulk_id: Union[str, UUID], client: Optional[Client] = None +) -> None: + client = client or get_default_client() + conn = __get_connection(client) + conn.execute("DELETE FROM futures WHERE bid = ?", (str(bulk_id),)) + conn.commit() diff --git a/packages/syft-extras/packages/syft-rpc/syft_rpc/util.py b/packages/syft-extras/packages/syft-rpc/syft_rpc/util.py new file mode 100644 index 00000000000..562701532a0 --- /dev/null +++ b/packages/syft-extras/packages/syft-rpc/syft_rpc/util.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +import re +from datetime import timedelta + + +def parse_duration(duration: str) -> timedelta: + """Convert duration strings like '1h', '3d', '24h', '30s' into timedelta.""" + pattern = r"(\d+)([dhms])" # Matches number + unit (d, h, m, s) + match = re.fullmatch(pattern, duration.strip().lower()) + + if not match: + raise ValueError("Invalid duration format. 
Use 'Nd', 'Nh', 'Nm', or 'Ns'.") + + value, unit = int(match.group(1)), match.group(2) + + if unit == "d": + return timedelta(days=value) + elif unit == "h": + return timedelta(hours=value) + elif unit == "m": + return timedelta(minutes=value) + elif unit == "s": + return timedelta(seconds=value) + + return timedelta() # Default case (should never reach) diff --git a/packages/syft-extras/pyproject.toml b/packages/syft-extras/pyproject.toml new file mode 100644 index 00000000000..319d2c8dc10 --- /dev/null +++ b/packages/syft-extras/pyproject.toml @@ -0,0 +1,32 @@ +[project] +name = "syft-extras" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.9" +dependencies = ["syft-core", "syft-event", "syft-rpc", "syft-proxy"] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.uv.sources] +syft-core = { workspace = true } +syft-event = { workspace = true } +syft-rpc = { workspace = true } +syft-proxy = { workspace = true } + +[tool.uv.workspace] +members = ["packages/*"] + +[tool.ruff] +exclude = [".archive"] + +[tool.ruff.lint] +extend-select = ["I"] + +[tool.ruff.lint.per-file-ignores] +"**/__init__.py" = ["F401"] + +[dependency-groups] +dev = ["ruff>=0.9.3"] diff --git a/packages/syft-extras/syft_extras/__init__.py b/packages/syft-extras/syft_extras/__init__.py new file mode 100644 index 00000000000..794bda9b2f1 --- /dev/null +++ b/packages/syft-extras/syft_extras/__init__.py @@ -0,0 +1,5 @@ +import syft_core +import syft_rpc +import syft_event + +__all__ = ["syft_core", "syft_rpc", "syft_event"] \ No newline at end of file diff --git a/packages/syft-extras/syft_extras/py.typed b/packages/syft-extras/syft_extras/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft-extras/uv.lock b/packages/syft-extras/uv.lock new file mode 100644 index 00000000000..b8ea2354bad --- /dev/null +++ b/packages/syft-extras/uv.lock @@ -0,0 +1,674 @@ +version = 1 +requires-python = ">=3.9" +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version == '3.10.*'", + "python_full_version < '3.10'", +] + +[manifest] +members = [ + "syft-core", + "syft-event", + "syft-extras", + "syft-proxy", + "syft-rpc", +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 }, +] + +[[package]] +name = "anyio" +version = "4.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041 }, +] + +[[package]] +name = "certifi" +version = "2025.1.31" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 }, +] + +[[package]] +name = "click" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "dnspython" +version = "2.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632 }, +] + +[[package]] +name = "email-validator" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dnspython" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/ce/13508a1ec3f8bb981ae4ca79ea40384becc868bfae97fd1c942bb3a001b1/email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7", size = 48967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/ee/bf0adb559ad3c786f12bcbc9296b3f5675f529199bef03e2df281fa1fadb/email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631", size = 33521 }, +] + +[[package]] +name = "exceptiongroup" +version = 
"1.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453 }, +] + +[[package]] +name = "fastapi" +version = "0.115.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a2/b2/5a5dc4affdb6661dea100324e19a7721d5dc524b464fe8e366c093fd7d87/fastapi-0.115.8.tar.gz", hash = "sha256:0ce9111231720190473e222cdf0f07f7206ad7e53ea02beb1d2dc36e2f0741e9", size = 295403 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/7d/2d6ce181d7a5f51dedb8c06206cbf0ec026a99bf145edd309f9e17c3282f/fastapi-0.115.8-py3-none-any.whl", hash = "sha256:753a96dd7e036b34eeef8babdfcfe3f28ff79648f86551eb36bfc1b0bf4a8cbf", size = 94814 }, +] + +[[package]] +name = "h11" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 }, +] + +[[package]] +name = "httpcore" +version = "1.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + +[[package]] +name = "loguru" +version = "0.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "win32-setctime", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595 }, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191 }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + +[[package]] +name = "pydantic" +version = "2.10.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/7e/fb60e6fee04d0ef8f15e4e01ff187a196fa976eb0f0ab524af4599e5754c/pydantic-2.10.4.tar.gz", hash = "sha256:82f12e9723da6de4fe2ba888b5971157b3be7ad914267dea8f05f82b28254f06", size = 762094 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/26/3e1bbe954fde7ee22a6e7d31582c642aad9e84ffe4b5fb61e63b87cd326f/pydantic-2.10.4-py3-none-any.whl", hash = "sha256:597e135ea68be3a37552fb524bc7d0d66dcf93d395acd93a00682f1efcb8ee3d", size = 431765 }, +] + +[package.optional-dependencies] +email = [ + { name = "email-validator" }, +] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/bc/fed5f74b5d802cf9a03e83f60f18864e90e3aed7223adaca5ffb7a8d8d64/pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa", size = 1895938 }, + { url = "https://files.pythonhosted.org/packages/71/2a/185aff24ce844e39abb8dd680f4e959f0006944f4a8a0ea372d9f9ae2e53/pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c", size = 1815684 }, + { url = "https://files.pythonhosted.org/packages/c3/43/fafabd3d94d159d4f1ed62e383e264f146a17dd4d48453319fd782e7979e/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a", 
size = 1829169 }, + { url = "https://files.pythonhosted.org/packages/a2/d1/f2dfe1a2a637ce6800b799aa086d079998959f6f1215eb4497966efd2274/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5", size = 1867227 }, + { url = "https://files.pythonhosted.org/packages/7d/39/e06fcbcc1c785daa3160ccf6c1c38fea31f5754b756e34b65f74e99780b5/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c", size = 2037695 }, + { url = "https://files.pythonhosted.org/packages/7a/67/61291ee98e07f0650eb756d44998214231f50751ba7e13f4f325d95249ab/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7", size = 2741662 }, + { url = "https://files.pythonhosted.org/packages/32/90/3b15e31b88ca39e9e626630b4c4a1f5a0dfd09076366f4219429e6786076/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a", size = 1993370 }, + { url = "https://files.pythonhosted.org/packages/ff/83/c06d333ee3a67e2e13e07794995c1535565132940715931c1c43bfc85b11/pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236", size = 1996813 }, + { url = "https://files.pythonhosted.org/packages/7c/f7/89be1c8deb6e22618a74f0ca0d933fdcb8baa254753b26b25ad3acff8f74/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962", size = 2005287 }, + { url = "https://files.pythonhosted.org/packages/b7/7d/8eb3e23206c00ef7feee17b83a4ffa0a623eb1a9d382e56e4aa46fd15ff2/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9", size = 2128414 }, + { url = "https://files.pythonhosted.org/packages/4e/99/fe80f3ff8dd71a3ea15763878d464476e6cb0a2db95ff1c5c554133b6b83/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af", size = 2155301 }, + { url = "https://files.pythonhosted.org/packages/2b/a3/e50460b9a5789ca1451b70d4f52546fa9e2b420ba3bfa6100105c0559238/pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4", size = 1816685 }, + { url = "https://files.pythonhosted.org/packages/57/4c/a8838731cb0f2c2a39d3535376466de6049034d7b239c0202a64aaa05533/pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31", size = 1982876 }, + { url = "https://files.pythonhosted.org/packages/c2/89/f3450af9d09d44eea1f2c369f49e8f181d742f28220f88cc4dfaae91ea6e/pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc", size = 1893421 }, + { url = "https://files.pythonhosted.org/packages/9e/e3/71fe85af2021f3f386da42d291412e5baf6ce7716bd7101ea49c810eda90/pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7", size = 1814998 }, + { url = 
"https://files.pythonhosted.org/packages/a6/3c/724039e0d848fd69dbf5806894e26479577316c6f0f112bacaf67aa889ac/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15", size = 1826167 }, + { url = "https://files.pythonhosted.org/packages/2b/5b/1b29e8c1fb5f3199a9a57c1452004ff39f494bbe9bdbe9a81e18172e40d3/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306", size = 1865071 }, + { url = "https://files.pythonhosted.org/packages/89/6c/3985203863d76bb7d7266e36970d7e3b6385148c18a68cc8915fd8c84d57/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99", size = 2036244 }, + { url = "https://files.pythonhosted.org/packages/0e/41/f15316858a246b5d723f7d7f599f79e37493b2e84bfc789e58d88c209f8a/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459", size = 2737470 }, + { url = "https://files.pythonhosted.org/packages/a8/7c/b860618c25678bbd6d1d99dbdfdf0510ccb50790099b963ff78a124b754f/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048", size = 1992291 }, + { url = "https://files.pythonhosted.org/packages/bf/73/42c3742a391eccbeab39f15213ecda3104ae8682ba3c0c28069fbcb8c10d/pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d", size = 1994613 }, + { url = "https://files.pythonhosted.org/packages/94/7a/941e89096d1175d56f59340f3a8ebaf20762fef222c298ea96d36a6328c5/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b", size = 2002355 }, + { url = "https://files.pythonhosted.org/packages/6e/95/2359937a73d49e336a5a19848713555605d4d8d6940c3ec6c6c0ca4dcf25/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474", size = 2126661 }, + { url = "https://files.pythonhosted.org/packages/2b/4c/ca02b7bdb6012a1adef21a50625b14f43ed4d11f1fc237f9d7490aa5078c/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6", size = 2153261 }, + { url = "https://files.pythonhosted.org/packages/72/9d/a241db83f973049a1092a079272ffe2e3e82e98561ef6214ab53fe53b1c7/pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c", size = 1812361 }, + { url = "https://files.pythonhosted.org/packages/e8/ef/013f07248041b74abd48a385e2110aa3a9bbfef0fbd97d4e6d07d2f5b89a/pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc", size = 1982484 }, + { url = "https://files.pythonhosted.org/packages/10/1c/16b3a3e3398fd29dca77cea0a1d998d6bde3902fa2706985191e2313cc76/pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4", size = 1867102 }, + { url = 
"https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127 }, + { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340 }, + { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900 }, + { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177 }, + { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046 }, + { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386 }, + { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060 }, + { url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870 }, + { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822 }, + { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364 }, + { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303 }, + { url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064 }, + { url = 
"https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046 }, + { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092 }, + { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709 }, + { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273 }, + { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027 }, + { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888 }, + { url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738 }, + { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138 }, + { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025 }, + { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633 }, + { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404 }, + { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130 }, + { url = 
"https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946 }, + { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387 }, + { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453 }, + { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186 }, + { url = "https://files.pythonhosted.org/packages/27/97/3aef1ddb65c5ccd6eda9050036c956ff6ecbfe66cb7eb40f280f121a5bb0/pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993", size = 1896475 }, + { url = "https://files.pythonhosted.org/packages/ad/d3/5668da70e373c9904ed2f372cb52c0b996426f302e0dee2e65634c92007d/pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308", size = 1772279 }, + { url = "https://files.pythonhosted.org/packages/8a/9e/e44b8cb0edf04a2f0a1f6425a65ee089c1d6f9c4c2dcab0209127b6fdfc2/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4", size = 1829112 }, + { url = "https://files.pythonhosted.org/packages/1c/90/1160d7ac700102effe11616e8119e268770f2a2aa5afb935f3ee6832987d/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf", size = 1866780 }, + { url = "https://files.pythonhosted.org/packages/ee/33/13983426df09a36d22c15980008f8d9c77674fc319351813b5a2739b70f3/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76", size = 2037943 }, + { url = "https://files.pythonhosted.org/packages/01/d7/ced164e376f6747e9158c89988c293cd524ab8d215ae4e185e9929655d5c/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118", size = 2740492 }, + { url = "https://files.pythonhosted.org/packages/8b/1f/3dc6e769d5b7461040778816aab2b00422427bcaa4b56cc89e9c653b2605/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630", size = 1995714 }, + { url = "https://files.pythonhosted.org/packages/07/d7/a0bd09bc39283530b3f7c27033a814ef254ba3bd0b5cfd040b7abf1fe5da/pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54", size = 1997163 }, + { url = 
"https://files.pythonhosted.org/packages/2d/bb/2db4ad1762e1c5699d9b857eeb41959191980de6feb054e70f93085e1bcd/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f", size = 2005217 }, + { url = "https://files.pythonhosted.org/packages/53/5f/23a5a3e7b8403f8dd8fc8a6f8b49f6b55c7d715b77dcf1f8ae919eeb5628/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362", size = 2127899 }, + { url = "https://files.pythonhosted.org/packages/c2/ae/aa38bb8dd3d89c2f1d8362dd890ee8f3b967330821d03bbe08fa01ce3766/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96", size = 2155726 }, + { url = "https://files.pythonhosted.org/packages/98/61/4f784608cc9e98f70839187117ce840480f768fed5d386f924074bf6213c/pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e", size = 1817219 }, + { url = "https://files.pythonhosted.org/packages/57/82/bb16a68e4a1a858bb3768c2c8f1ff8d8978014e16598f001ea29a25bf1d1/pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67", size = 1985382 }, + { url = "https://files.pythonhosted.org/packages/46/72/af70981a341500419e67d5cb45abe552a7c74b66326ac8877588488da1ac/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e", size = 1891159 }, + { url = "https://files.pythonhosted.org/packages/ad/3d/c5913cccdef93e0a6a95c2d057d2c2cba347815c845cda79ddd3c0f5e17d/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8", size = 1768331 }, + { url = "https://files.pythonhosted.org/packages/f6/f0/a3ae8fbee269e4934f14e2e0e00928f9346c5943174f2811193113e58252/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3", size = 1822467 }, + { url = "https://files.pythonhosted.org/packages/d7/7a/7bbf241a04e9f9ea24cd5874354a83526d639b02674648af3f350554276c/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f", size = 1979797 }, + { url = "https://files.pythonhosted.org/packages/4f/5f/4784c6107731f89e0005a92ecb8a2efeafdb55eb992b8e9d0a2be5199335/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133", size = 1987839 }, + { url = "https://files.pythonhosted.org/packages/6d/a7/61246562b651dff00de86a5f01b6e4befb518df314c54dec187a78d81c84/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc", size = 1998861 }, + { url = "https://files.pythonhosted.org/packages/86/aa/837821ecf0c022bbb74ca132e117c358321e72e7f9702d1b6a03758545e2/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50", size = 2116582 }, + { url = 
"https://files.pythonhosted.org/packages/81/b0/5e74656e95623cbaa0a6278d16cf15e10a51f6002e3ec126541e95c29ea3/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9", size = 2151985 }, + { url = "https://files.pythonhosted.org/packages/63/37/3e32eeb2a451fddaa3898e2163746b0cffbbdbb4740d38372db0490d67f3/pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151", size = 2004715 }, + { url = "https://files.pythonhosted.org/packages/29/0e/dcaea00c9dbd0348b723cae82b0e0c122e0fa2b43fa933e1622fd237a3ee/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656", size = 1891733 }, + { url = "https://files.pythonhosted.org/packages/86/d3/e797bba8860ce650272bda6383a9d8cad1d1c9a75a640c9d0e848076f85e/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278", size = 1768375 }, + { url = "https://files.pythonhosted.org/packages/41/f7/f847b15fb14978ca2b30262548f5fc4872b2724e90f116393eb69008299d/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb", size = 1822307 }, + { url = "https://files.pythonhosted.org/packages/9c/63/ed80ec8255b587b2f108e514dc03eed1546cd00f0af281e699797f373f38/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd", size = 1979971 }, + { url = "https://files.pythonhosted.org/packages/a9/6d/6d18308a45454a0de0e975d70171cadaf454bc7a0bf86b9c7688e313f0bb/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc", size = 1987616 }, + { url = "https://files.pythonhosted.org/packages/82/8a/05f8780f2c1081b800a7ca54c1971e291c2d07d1a50fb23c7e4aef4ed403/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b", size = 1998943 }, + { url = "https://files.pythonhosted.org/packages/5e/3e/fe5b6613d9e4c0038434396b46c5303f5ade871166900b357ada4766c5b7/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b", size = 2116654 }, + { url = "https://files.pythonhosted.org/packages/db/ad/28869f58938fad8cc84739c4e592989730bfb69b7c90a8fff138dff18e1e/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2", size = 2152292 }, + { url = "https://files.pythonhosted.org/packages/a1/0c/c5c5cd3689c32ed1fe8c5d234b079c12c281c051759770c05b8bed6412b5/pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35", size = 2004961 }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, +] + +[[package]] +name = "pytest" +version = "8.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, +] + +[[package]] +name = "rich" +version = "13.9.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424 }, +] + +[[package]] +name = "ruff" +version = "0.9.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/7f/60fda2eec81f23f8aa7cbbfdf6ec2ca11eb11c273827933fb2541c2ce9d8/ruff-0.9.3.tar.gz", hash = "sha256:8293f89985a090ebc3ed1064df31f3b4b56320cdfcec8b60d3295bddb955c22a", size = 3586740 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/77/4fb790596d5d52c87fd55b7160c557c400e90f6116a56d82d76e95d9374a/ruff-0.9.3-py3-none-linux_armv6l.whl", hash = "sha256:7f39b879064c7d9670197d91124a75d118d00b0990586549949aae80cdc16624", size = 11656815 }, + { url = "https://files.pythonhosted.org/packages/a2/a8/3338ecb97573eafe74505f28431df3842c1933c5f8eae615427c1de32858/ruff-0.9.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:a187171e7c09efa4b4cc30ee5d0d55a8d6c5311b3e1b74ac5cb96cc89bafc43c", size = 11594821 }, + { url = "https://files.pythonhosted.org/packages/8e/89/320223c3421962762531a6b2dd58579b858ca9916fb2674874df5e97d628/ruff-0.9.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c59ab92f8e92d6725b7ded9d4a31be3ef42688a115c6d3da9457a5bda140e2b4", size = 11040475 }, + { url = "https://files.pythonhosted.org/packages/b2/bd/1d775eac5e51409535804a3a888a9623e87a8f4b53e2491580858a083692/ruff-0.9.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dc153c25e715be41bb228bc651c1e9b1a88d5c6e5ed0194fa0dfea02b026439", size = 11856207 }, + { url = "https://files.pythonhosted.org/packages/7f/c6/3e14e09be29587393d188454064a4aa85174910d16644051a80444e4fd88/ruff-0.9.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:646909a1e25e0dc28fbc529eab8eb7bb583079628e8cbe738192853dbbe43af5", size = 11420460 }, + { url = "https://files.pythonhosted.org/packages/ef/42/b7ca38ffd568ae9b128a2fa76353e9a9a3c80ef19746408d4ce99217ecc1/ruff-0.9.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a5a46e09355695fbdbb30ed9889d6cf1c61b77b700a9fafc21b41f097bfbba4", size = 12605472 }, + { url = "https://files.pythonhosted.org/packages/a6/a1/3167023f23e3530fde899497ccfe239e4523854cb874458ac082992d206c/ruff-0.9.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c4bb09d2bbb394e3730d0918c00276e79b2de70ec2a5231cd4ebb51a57df9ba1", size = 13243123 }, + { url = "https://files.pythonhosted.org/packages/d0/b4/3c600758e320f5bf7de16858502e849f4216cb0151f819fa0d1154874802/ruff-0.9.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96a87ec31dc1044d8c2da2ebbed1c456d9b561e7d087734336518181b26b3aa5", size = 12744650 }, + { url = "https://files.pythonhosted.org/packages/be/38/266fbcbb3d0088862c9bafa8b1b99486691d2945a90b9a7316336a0d9a1b/ruff-0.9.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bb7554aca6f842645022fe2d301c264e6925baa708b392867b7a62645304df4", size = 14458585 }, + { url = "https://files.pythonhosted.org/packages/63/a6/47fd0e96990ee9b7a4abda62de26d291bd3f7647218d05b7d6d38af47c30/ruff-0.9.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cabc332b7075a914ecea912cd1f3d4370489c8018f2c945a30bcc934e3bc06a6", size = 12419624 }, + { url = "https://files.pythonhosted.org/packages/84/5d/de0b7652e09f7dda49e1a3825a164a65f4998175b6486603c7601279baad/ruff-0.9.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:33866c3cc2a575cbd546f2cd02bdd466fed65118e4365ee538a3deffd6fcb730", size = 11843238 }, + { url = "https://files.pythonhosted.org/packages/9e/be/3f341ceb1c62b565ec1fb6fd2139cc40b60ae6eff4b6fb8f94b1bb37c7a9/ruff-0.9.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:006e5de2621304c8810bcd2ee101587712fa93b4f955ed0985907a36c427e0c2", size = 11484012 }, + { url = "https://files.pythonhosted.org/packages/a3/c8/ff8acbd33addc7e797e702cf00bfde352ab469723720c5607b964491d5cf/ruff-0.9.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ba6eea4459dbd6b1be4e6bfc766079fb9b8dd2e5a35aff6baee4d9b1514ea519", size = 12038494 }, + { url = "https://files.pythonhosted.org/packages/73/b1/8d9a2c0efbbabe848b55f877bc10c5001a37ab10aca13c711431673414e5/ruff-0.9.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:90230a6b8055ad47d3325e9ee8f8a9ae7e273078a66401ac66df68943ced029b", size = 12473639 }, + { url = "https://files.pythonhosted.org/packages/cb/44/a673647105b1ba6da9824a928634fe23186ab19f9d526d7bdf278cd27bc3/ruff-0.9.3-py3-none-win32.whl", hash = "sha256:eabe5eb2c19a42f4808c03b82bd313fc84d4e395133fb3fc1b1516170a31213c", size = 9834353 }, + { url = "https://files.pythonhosted.org/packages/c3/01/65cadb59bf8d4fbe33d1a750103e6883d9ef302f60c28b73b773092fbde5/ruff-0.9.3-py3-none-win_amd64.whl", hash = "sha256:040ceb7f20791dfa0e78b4230ee9dce23da3b64dd5848e40e3bf3ab76468dcf4", size = 10821444 }, + { url = "https://files.pythonhosted.org/packages/69/cb/b3fe58a136a27d981911cba2f18e4b29f15010623b79f0f2510fd0d31fd3/ruff-0.9.3-py3-none-win_arm64.whl", hash = "sha256:800d773f6d4d33b0a3c60e2c6ae8f4c202ea2de056365acfa519aa48acf28e0b", size = 10038168 }, +] + +[[package]] +name = "shellingham" +version = "1.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755 }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, +] + +[[package]] +name = "starlette" +version = "0.45.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ff/fb/2984a686808b89a6781526129a4b51266f678b2d2b97ab2d325e56116df8/starlette-0.45.3.tar.gz", hash = "sha256:2cbcba2a75806f8a41c722141486f37c28e30a0921c5f6fe4346cb0dcee1302f", size = 2574076 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/61/f2b52e107b1fc8944b33ef56bf6ac4ebbe16d91b94d2b87ce013bf63fb84/starlette-0.45.3-py3-none-any.whl", hash = "sha256:dfb6d332576f136ec740296c7e8bb8c8a7125044e7c6da30744718880cdd059d", size = 71507 }, +] + +[[package]] +name = "syft-core" +version = "0.1.0" +source = { editable = "packages/syft-core" } +dependencies = [ + { name = "pydantic", extra = ["email"] }, + { name = "typing-extensions" }, +] + +[package.metadata] +requires-dist = [ + { name = "pydantic", extras = ["email"], specifier = ">=2.10.4" }, + { name = "typing-extensions", specifier = ">=4.12.2" }, +] + +[[package]] +name = "syft-event" +version = "0.1.0" +source = { editable = "packages/syft-event" } +dependencies = [ + { name = "loguru" }, + { name = "pathspec" }, + { name = "pydantic" }, + { name = "syft-rpc" }, + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "loguru", specifier = ">=0.7.3" }, + { name = "pathspec", specifier = ">=0.12.1" }, + { name = "pydantic", specifier = ">=2.10.4" }, + { name = "syft-rpc", editable = "packages/syft-rpc" }, + { name = "watchdog", specifier = ">=6.0.0" }, +] + +[[package]] +name = "syft-extras" +version = "0.1.0" +source = { editable = "." 
} +dependencies = [ + { name = "syft-core" }, + { name = "syft-event" }, + { name = "syft-proxy" }, + { name = "syft-rpc" }, +] + +[package.dev-dependencies] +dev = [ + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "syft-core", editable = "packages/syft-core" }, + { name = "syft-event", editable = "packages/syft-event" }, + { name = "syft-proxy", editable = "packages/syft-proxy" }, + { name = "syft-rpc", editable = "packages/syft-rpc" }, +] + +[package.metadata.requires-dev] +dev = [{ name = "ruff", specifier = ">=0.9.3" }] + +[[package]] +name = "syft-proxy" +version = "0.1.0" +source = { editable = "packages/syft-proxy" } +dependencies = [ + { name = "fastapi" }, + { name = "httpx" }, + { name = "loguru" }, + { name = "pytest" }, + { name = "syft-rpc" }, + { name = "typer" }, + { name = "typing-extensions" }, + { name = "uvicorn" }, +] + +[package.metadata] +requires-dist = [ + { name = "fastapi", specifier = ">=0.115.8" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "loguru", specifier = ">=0.7.3" }, + { name = "pytest", specifier = ">=8.3.4" }, + { name = "syft-rpc", editable = "packages/syft-rpc" }, + { name = "typer", specifier = ">=0.9.0" }, + { name = "typing-extensions", specifier = ">=4.12.2" }, + { name = "uvicorn", specifier = ">=0.34.0" }, +] + +[[package]] +name = "syft-rpc" +version = "0.1.0" +source = { editable = "packages/syft-rpc" } +dependencies = [ + { name = "pydantic" }, + { name = "syft-core" }, + { name = "typing-extensions" }, +] + +[package.metadata] +requires-dist = [ + { name = "pydantic", specifier = ">=2.9.2" }, + { name = "syft-core", editable = "packages/syft-core" }, + { name = "typing-extensions", specifier = ">=4.12.2" }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077 }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429 }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067 }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030 }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898 }, + { url = 
"https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894 }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319 }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273 }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310 }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309 }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762 }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453 }, + { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486 }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349 }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159 }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243 }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645 }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584 }, + { url = 
"https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875 }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418 }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, +] + +[[package]] +name = "typer" +version = "0.15.1" +source 
= { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "shellingham" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cb/ce/dca7b219718afd37a0068f4f2530a727c2b74a8b6e8e0c0080a4c0de4fcd/typer-0.15.1.tar.gz", hash = "sha256:a0588c0a7fa68a1978a069818657778f86abe6ff5ea6abf472f940a08bfe4f0a", size = 99789 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/cc/0a838ba5ca64dc832aa43f727bd586309846b0ffb2ce52422543e6075e8a/typer-0.15.1-py3-none-any.whl", hash = "sha256:7994fb7b8155b64d3402518560648446072864beefd44aa2dc36972a5972e847", size = 44908 }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, +] + +[[package]] +name = "uvicorn" +version = "0.34.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/4d/938bd85e5bf2edeec766267a5015ad969730bb91e31b44021dfe8b22df6c/uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9", size = 76568 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 }, +] + +[[package]] +name = "watchdog" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/56/90994d789c61df619bfc5ce2ecdabd5eeff564e1eb47512bd01b5e019569/watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26", size = 96390 }, + { url = "https://files.pythonhosted.org/packages/55/46/9a67ee697342ddf3c6daa97e3a587a56d6c4052f881ed926a849fcf7371c/watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112", size = 88389 }, + { url = "https://files.pythonhosted.org/packages/44/65/91b0985747c52064d8701e1075eb96f8c40a79df889e59a399453adfb882/watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3", size = 89020 }, + { url = "https://files.pythonhosted.org/packages/e0/24/d9be5cd6642a6aa68352ded4b4b10fb0d7889cb7f45814fb92cecd35f101/watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c", size = 96393 }, + { url = 
"https://files.pythonhosted.org/packages/63/7a/6013b0d8dbc56adca7fdd4f0beed381c59f6752341b12fa0886fa7afc78b/watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2", size = 88392 }, + { url = "https://files.pythonhosted.org/packages/d1/40/b75381494851556de56281e053700e46bff5b37bf4c7267e858640af5a7f/watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c", size = 89019 }, + { url = "https://files.pythonhosted.org/packages/39/ea/3930d07dafc9e286ed356a679aa02d777c06e9bfd1164fa7c19c288a5483/watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", size = 96471 }, + { url = "https://files.pythonhosted.org/packages/12/87/48361531f70b1f87928b045df868a9fd4e253d9ae087fa4cf3f7113be363/watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", size = 88449 }, + { url = "https://files.pythonhosted.org/packages/5b/7e/8f322f5e600812e6f9a31b75d242631068ca8f4ef0582dd3ae6e72daecc8/watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", size = 89054 }, + { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480 }, + { url = "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451 }, + { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057 }, + { url = "https://files.pythonhosted.org/packages/05/52/7223011bb760fce8ddc53416beb65b83a3ea6d7d13738dde75eeb2c89679/watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8", size = 96390 }, + { url = "https://files.pythonhosted.org/packages/9c/62/d2b21bc4e706d3a9d467561f487c2938cbd881c69f3808c43ac1ec242391/watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a", size = 88386 }, + { url = "https://files.pythonhosted.org/packages/ea/22/1c90b20eda9f4132e4603a26296108728a8bfe9584b006bd05dd94548853/watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c", size = 89017 }, + { url = "https://files.pythonhosted.org/packages/30/ad/d17b5d42e28a8b91f8ed01cb949da092827afb9995d4559fd448d0472763/watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881", size = 87902 }, + { url = "https://files.pythonhosted.org/packages/5c/ca/c3649991d140ff6ab67bfc85ab42b165ead119c9e12211e08089d763ece5/watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11", size = 88380 }, + { url = 
"https://files.pythonhosted.org/packages/5b/79/69f2b0e8d3f2afd462029031baafb1b75d11bb62703f0e1022b2e54d49ee/watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa", size = 87903 }, + { url = "https://files.pythonhosted.org/packages/e2/2b/dc048dd71c2e5f0f7ebc04dd7912981ec45793a03c0dc462438e0591ba5d/watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e", size = 88381 }, + { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079 }, + { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078 }, + { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076 }, + { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077 }, + { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078 }, + { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077 }, + { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078 }, + { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065 }, + { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070 }, + { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067 }, +] + +[[package]] +name = "win32-setctime" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083 }, +] diff --git a/packages/syft/.dockerignore b/packages/syft/.dockerignore deleted file mode 100644 index fcac49cb125..00000000000 --- a/packages/syft/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -.mypy_cache -**/.mypy_cache diff --git a/packages/syft/MANIFEST.in b/packages/syft/MANIFEST.in index 0458665c8df..cd6afbbdbc4 100644 --- a/packages/syft/MANIFEST.in +++ b/packages/syft/MANIFEST.in @@ -1,7 +1,8 @@ include src/syft/VERSION include src/syft/capnp/* include src/syft/cache/* -include src/syft/img/* +graft src/syft/assets include src/syft/protocol/protocol_version.json include src/syft/protocol/releases/* - +include packages/syft/src/syft/util/api_snapshot/syft_api_spec_beta.json +include packages/syft/src/syft/util/api_snapshot/syft_api_spec_stable.json diff --git a/packages/syft/PYPI.md b/packages/syft/PYPI.md index dbecf765207..0220c9aa9b6 100644 --- a/packages/syft/PYPI.md +++ b/packages/syft/PYPI.md @@ -1,352 +1,170 @@ -
    +


    -Syft Logo +Syft Logo -Perform data science on `data` that remains in `someone else's` server +

    Data Science on data you are not allowed to see

    -# Quickstart +PySyft enables a new way to do data science, where you can use non-public information, without seeing nor obtaining a copy of the data itself. All you need is to connect to a Datasite! -✅ `Linux` ✅ `macOS` ✅ `Windows` ✅ `Docker` ✅ `Podman` ✅ `Kubernetes` +Datasites are like websites, but for data. Designed with the principles of structured transparency, they enable data owners to control how their data is protected and data scientists to use data without obtaining a copy. -## Install Client +PySyft supports any statistical analysis or machine learning, offering support for directly running Python code - even using third-party Python libraries. -```bash -$ pip install -U syft[data_science] -``` +

    Supported on:

    -## Launch Server +✅ Linux +✅ macOS +✅ Windows +✅ Docker +✅ Kubernetes -```python -# from Jupyter / Python -import syft as sy -sy.requires(">=0.8.5,<0.8.6") -node = sy.orchestra.launch(name="my-domain", port=8080, dev_mode=True, reset=True) -``` - -```bash -# or from the command line -$ syft launch --name=my-domain --port=8080 --reset=True +# Quickstart -Starting syft-node server on 0.0.0.0:8080 -``` +Try out your first query against a live demo Datasite! -## Launch Client +## Install Client -```python -import syft as sy -sy.requires(">=0.8.5,<0.8.6") -domain_client = sy.login(port=8080, email="info@openmined.org", password="changethis") +```bash +pip install -U "syft[data_science]" ``` -## PySyft in 10 minutes +More instructions are available here. -📝 API Example Notebooks - -- 00-load-data.ipynb -- 01-submit-code.ipynb -- 02-review-code-and-approve.ipynb -- 03-data-scientist-download-result.ipynb -- 04-jax-example.ipynb -- 05-custom-policy.ipynb -- 06-multiple-code-requests.ipynb -- 07-domain-register-control-flow.ipynb -- 08-code-version.ipynb -- 09-blob-storage.ipynb -- 10-container-images.ipynb -- 11-container-images-k8s.ipynb +## Launch Server -## Deploy Kubernetes Helm Chart +Launch a development server directly in your Jupyter Notebook: -**Note**: Assuming we have a Kubernetes cluster already setup. +```python +import syft as sy -#### 1. Add and update Helm repo for Syft +sy.requires(">=0.9.5,<0.9.6") -```sh -helm repo add openmined https://openmined.github.io/PySyft/helm -helm repo update openmined +server = sy.orchestra.launch( + name="my-datasite", + port=8080, + create_producer=True, + n_consumers=1, + dev_mode=False, + reset=True, # resets database +) ``` -#### 2. Search for available Syft versions - -```sh -helm search repo openmined/syft --versions --devel -``` +or from the command line: -#### 3. Set your preferred Syft Chart version +```bash +$ syft launch --name=my-datasite --port=8080 --reset=True -```sh -SYFT_VERSION="" +Starting syft-datasite server on 0.0.0.0:8080 ``` -#### 4. Provisioning Helm Charts - -```sh -helm install my-domain openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className="traefik" -``` +Datasite servers can be deployed as a single container using Docker or directly in Kubernetes. Check out our deployment guide. -### Ingress Controllers +## Launch Client -For Azure AKS +Main way to use a Datasite is via our Syft client, in a Jupyter Notebook. Check out our PySyft client guide: -```sh -helm install ... --set ingress.className="azure-application-gateway" -``` +```python +import syft as sy -For AWS EKS +sy.requires(">=0.9.5,<0.9.6") -```sh -helm install ... --set ingress.className="alb" +datasite_client = sy.login( + port=8080, + email="info@openmined.org", + password="changethis" +) ``` -For Google GKE we need the [`gce` annotation](https://cloud.google.com/kubernetes-engine/docs/how-to/load-balance-ingress#create-ingress) annotation. +## PySyft - Getting started 📝 -```sh -helm install ... --set ingress.class="gce" -``` +Learn about PySyft via our getting started guide: -## Deploy to a Container Engine or Cloud +- PySyft from the ground up +- Part 1: Datasets & Assets +- Part 2: Client and Datasite Access +- Part 3: Propose the research study +- Part 4: Review Code Requests +- Part 5: Retrieving Results -1. Install our handy 🛵 cli tool which makes deploying a Domain or Gateway server to Docker or VM a one-liner: - `pip install -U hagrid` +# PySyft In-depth -2. 
Then run our interactive jupyter Install 🧙🏽‍♂️ WizardBETA: - `hagrid quickstart` +📚 Check out our docs website. -3. In the tutorial you will learn how to install and deploy: - `PySyft` = our `numpy`-like 🐍 Python library for computing on `private data` in someone else's `Domain` +Quick PySyft components links: - `PyGrid` = our 🐳 `docker` / 🐧 `vm` `Domain` & `Gateway` Servers where `private data` lives +- DataSite Server -## Docs and Support +- Syft Client -- 📚 Docs -- `#support` on Slack +- Datasets API (`.datasets`) -# Install Notes +- Users API (`.users`) -- HAGrid 0.3 Requires: 🐍 `python` 🐙 `git` - Run: `pip install -U hagrid` -- Interactive Install 🧙🏽‍♂️ WizardBETA Requires 🛵 `hagrid`: - Run: `hagrid quickstart` -- PySyft 0.8.1 Requires: 🐍 `python 3.10 - 3.12` - Run: `pip install -U syft` -- PyGrid Requires: 🐳 `docker`, 🦦 `podman` or ☸️ `kubernetes` - Run: `hagrid launch ...` + -# Versions +- Request API (`.requests`) -`0.9.0` - Coming soon... -`0.8.6` (Beta) - `dev` branch 👈🏽 API - Coming soon... -`0.8.5` (Stable) - API +- Code API (`.code`) -Deprecated: +- Syft Policies API (`.policy`) -- `0.8.4` - API -- `0.8.3` - API -- `0.8.2` - API -- `0.8.1` - API -- `0.8.0` - API -- `0.7.0` - Course 3 Updated -- `0.6.0` - Course 3 -- `0.5.1` - Course 2 + M1 Hotfix -- `0.2.0` - `0.5.0` +- Settings API (`.settings`) -PySyft and PyGrid use the same `version` and its best to match them up where possible. We release weekly betas which can be used in each context: +- Notifications API (`.notifications`) -PySyft (Stable): `pip install -U syft` -PyGrid (Stable) `hagrid launch ... tag=latest` +- Sync API (`.sync`) -PySyft (Beta): `pip install -U syft --pre` -PyGrid (Beta): `hagrid launch ... tag=beta` +## Why use PySyft? -HAGrid is a cli / deployment tool so the latest version of `hagrid` is usually the best. +In a variety of domains across society, data owners have **valid concerns about the risks associated with sharing their data**, such as legal risks, privacy invasion (_misuing the data_), or intellectual property (_copying and redistributing it_). -# What is Syft? +Datasites enable data scientists to **answer questions** without even seeing or acquiring a copy of the data, **within the data owners's definition of acceptable use**. We call this process Remote Data Science. -Syft +This means that the **current risks** of sharing information with someone will **no longer prevent** the vast benefits such as innovation, insights and scientific discovery. With each Datasite, data owners are able to enable `1000x more accesible data` in each scientific field and lead, together with data scientists, breakthrough innovation. -`Syft` is OpenMined's `open source` stack that provides `secure` and `private` Data Science in Python. Syft decouples `private data` from model training, using techniques like [Federated Learning](https://ai.googleblog.com/2017/04/federated-learning-collaborative.html), [Differential Privacy](https://en.wikipedia.org/wiki/Differential_privacy), and [Encrypted Computation](https://en.wikipedia.org/wiki/Homomorphic_encryption). This is done with a `numpy`-like interface and integration with `Deep Learning` frameworks, so that you as a `Data Scientist` can maintain your current workflow while using these new `privacy-enhancing techniques`. +Learn more about our work on our website. -### Why should I use Syft? 
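The component accessors listed above (`.datasets`, `.code`, `.requests`, and the rest) are what a data scientist works against after logging in. Below is a minimal, illustrative sketch of that flow reusing the quickstart credentials; the dataset/asset indexing and the `query` function are hypothetical, and the exact code-submission call is documented in the Code API guide rather than shown here.

```python
import syft as sy

sy.requires(">=0.9.5,<0.9.6")

# Log in exactly as in the quickstart above.
datasite_client = sy.login(
    port=8080,
    email="info@openmined.org",
    password="changethis",
)

# .datasets: browse what the Datasite makes available (Part 1 of the guide).
dataset = datasite_client.datasets[0]  # hypothetical: assumes at least one dataset
asset = dataset.assets[0]              # hypothetical: assumes at least one asset

# .code: mark ordinary Python for single-use remote execution against that asset.
@sy.syft_function_single_use(data=asset)
def query(data):
    return data.mean()  # hypothetical computation, run where the data lives

# .requests: the data owner reviews pending submissions (Part 4 of the guide).
for pending in datasite_client.requests:
    print(pending)
```

Nothing in this sketch pulls raw data to the client; the decorated function is what travels to the Datasite for review and execution.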
+## Support -`Syft` allows a `Data Scientist` to ask `questions` about a `dataset` and, within `privacy limits` set by the `data owner`, get `answers` to those `questions`, all without obtaining a `copy` of the data itself. We call this process `Remote Data Science`. It means in a wide variety of `domains` across society, the current `risks` of sharing information (`copying` data) with someone such as, privacy invasion, IP theft and blackmail will no longer prevent the vast `benefits` such as innovation, insights and scientific discovery which secure access will provide. +For questions about PySyft, reach out via `#support` on Slack. -No more cold calls to get `access` to a dataset. No more weeks of `wait times` to get a `result` on your `query`. It also means `1000x more data` in every domain. PySyft opens the doors to a streamlined Data Scientist `workflow`, all with the individual's `privacy` at its heart. +## Syft Versions - +- `0.9.5` (Stable) - Docs +- Install PySyft (Stable): `pip install -U syft` -# Terminology - - - - - - - - - - - - - - - - - - - - -
-👨🏻‍💼 **Data Owners**: Provide `datasets` which they would like to make available for `study` by an `outside party` they may or may not `fully trust` has good intentions.
-
-👩🏽‍🔬 **Data Scientists**: Are end `users` who desire to perform `computations` or `answer` a specific `question` using one or more data owners' `datasets`.
-
-🏰 **Domain Server**: Manages the `remote study` of the data by a `Data Scientist` and allows the `Data Owner` to manage the `data` and control the `privacy guarantees` of the subjects under study. It also acts as a `gatekeeper` for the `Data Scientist's` access to the data to compute and experiment with the results.
-
-🔗 **Gateway Server**: Provides services to a group of `Data Owners` and `Data Scientists`, such as dataset `search` and bulk `project approval` (legal / technical) to participate in a project. A gateway server acts as a bridge between it's members (`Domains`) and their subscribers (`Data Scientists`) and can provide access to a collection of `domains` at once.
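The two server roles above map onto client-side entry points that appear elsewhere in this diff: the `sy.datasites` registry (exposed via the `_datasites()` module property added to `__init__.py`) and per-server logins. A hypothetical sketch; the port is illustrative and assumes a server is already running.

```python
import syft as sy

# Discover publicly registered Datasites (the servers formerly called Domains).
print(sy.datasites)

# Connect to a single Datasite as a guest and inspect its public dataset listing.
guest_client = sy.login_as_guest(port=8080)  # illustrative port
print(guest_client.datasets)
```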
+Find more about previous releases here.

# Community
+Supported by the OpenMined Foundation, the OpenMined Community is an online network of over 17,000 technologists, researchers, and industry professionals keen to _unlock 1000x more data in every scientific field and industry_.
    + # Courses @@ -354,68 +172,65 @@ Provides services to a group of `Data Owners` and `Data Scientists`, such as dat # Contributors -OpenMined and Syft appreciates all contributors, if you would like to fix a bug or suggest a new feature, please see our [guidelines](https://openmined.github.io/PySyft/developer_guide/index.html).
+OpenMined and Syft appreciate all contributors. If you would like to fix a bug or suggest a new feature, please reach out via GitHub or Slack! + +Contributors + +# About OpenMined -Contributors +OpenMined is a non-profit foundation creating technology infrastructure that helps researchers get answers from data without needing a copy or direct access. Our community of technologists is building Syft. + + # Supporters
    -# Open Collective - -`OpenMined` is a fiscally sponsored `501(c)(3)` in the USA. We are funded by our generous supporters on Open Collective.

    - -Contributors - -# Disclaimer - -Syft is under active development and is not yet ready for pilots on private data without our assistance. As early access participants, please contact us via [Slack](https://slack.openmined.org/) or email if you would like to ask a question or have a use case that you would like to discuss. - # License [Apache License 2.0](LICENSE)
    diff --git a/packages/syft/setup.cfg b/packages/syft/setup.cfg index eb4d7e14537..bc255f56add 100644 --- a/packages/syft/setup.cfg +++ b/packages/syft/setup.cfg @@ -1,13 +1,13 @@ [metadata] name = syft -version = attr: "0.8.6-beta.1" +version = attr: "0.9.6-beta.6" description = Perform numpy-like analysis on data that remains in someone elses server author = OpenMined author_email = info@openmined.org license = Apache-2.0 long_description = file: PYPI.md long_description_content_type = text/markdown; charset=UTF-8; variant=GFM -url = https://openmined.github.io/PySyft/ +url = https://docs.openmined.org project_urls = Source=https://github.com/OpenMined/PySyft Tracker=https://github.com/OpenMined/PySyft/issues @@ -30,43 +30,51 @@ syft = bcrypt==4.1.2 boto3==1.34.56 forbiddenfruit==0.1.4 - gevent==23.9.1 - loguru==0.7.2 - networkx==3.2.1 packaging>=23.0 - pyarrow==15.0.0 - # pycapnp is beta version, update to stable version when available - pycapnp==2.0.0b2 - pydantic[email]==2.6.0 - pydantic-settings==2.2.1 - pymongo==4.6.2 + pyarrow==17.0.0 + pycapnp==2.0.0 + pydantic[email]>=2.6.0 + pydantic-settings>=2.2.1 pynacl==1.5.0 pyzmq>=23.2.1,<=25.1.1 - requests==2.31.0 + requests==2.32.3 RestrictedPython==7.0 - result==0.16.1 - tqdm==4.66.2 + tqdm>=4.66.4 typeguard==4.1.5 - typing_extensions==4.10.0 + typing_extensions>=4.12.0 sherlock[filelock]==0.4.1 - uvicorn[standard]==0.27.1 - fastapi==0.110.0 - psutil==5.9.8 - hagrid>=0.3 + uvicorn[standard]>=0.30.0 + markdown==3.5.2 + fastapi>=0.111.0 + psutil>=6.0.0 itables==1.7.1 argon2-cffi==23.1.0 - matplotlib==3.8.3 - # jaxlib is a DL library but we are needing it for serialization - jaxlib==0.4.20 - jax==0.4.20 + matplotlib>=3.7.1,<3.9.1 # numpy and pandas are ML packages but are needed in many places througout the codebase numpy>=1.23.5,<=1.24.4; python_version<"3.12" numpy>=1.26.4,<1.27; python_version>="3.12" - pandas==2.2.1 - docker==6.1.3 + pandas==2.2.2 + docker==7.1.0 kr8s==0.13.5 - PyYAML==6.0.1 + PyYAML>=6.0.1 azure-storage-blob==12.19.1 + ipywidgets==8.1.2 + tomli==2.0.1 # Later for python 3.11 > we can just use tomlib that comes with python + tomli_w==1.0.0 + rich>=13.7.1 + jinja2>=3.1.4 + tenacity==8.3.0 + nh3==0.2.17 + psycopg[binary]==3.1.19 + psycopg[pool]==3.1.19 + ipython<8.27.0 + dynaconf==3.2.6 + sqlalchemy==2.0.32 + psycopg2-binary==2.9.9 + syft-core==0.1.0 + syft-event==0.1.0 + syft-rpc==0.1.0 + syftbox==0.3.5 install_requires = %(syft)s @@ -83,31 +91,41 @@ exclude = [options.extras_require] data_science = - transformers==4.38.2 + transformers==4.41.2 opendp==0.9.2 - evaluate==0.4.1 + evaluate==0.4.2 recordlinkage==0.16 - dm-haiku==0.0.10 - torch[cpu]==2.2.1 + # backend.dockerfile installs torch separately, so update the version over there as well! 
+ torch==2.2.2 dev = %(test_plugins)s %(telemetry)s - bandit==1.7.7 - ruff==0.3.0 - importlib-metadata==6.8.0 + bandit==1.7.8 + debugpy==1.8.2 + importlib-metadata==7.1.0 isort==5.13.2 - mypy==1.7.1 - pre-commit==3.6.2 + mypy==1.10.0 + pre-commit==3.7.1 + ruff==0.4.7 safety>=2.4.0b2 + aiosmtpd==1.4.6 telemetry = - opentelemetry-api==1.14.0 - opentelemetry-sdk==1.14.0 - opentelemetry-exporter-jaeger==1.14.0 - opentelemetry-instrumentation==0.35b0 - opentelemetry-instrumentation-requests==0.35b0 - ; opentelemetry-instrumentation-digma==0.9.0 + opentelemetry-api>=1.27.0 + opentelemetry-sdk>=1.27.0 + opentelemetry-exporter-otlp>=1.27.0 + opentelemetry-instrumentation>=0.48b0 + opentelemetry-instrumentation-requests>=0.48b0 + opentelemetry-instrumentation-fastapi>=0.48b0 + opentelemetry-instrumentation-botocore>=0.48b0 + opentelemetry-instrumentation-logging>=0.48b0 + opentelemetry-instrumentation-sqlalchemy>=0.48b0 + opentelemetry-instrumentation-threading>=0.48b0 + ; opentelemetry-instrumentation-asyncio==0.48b0 + ; opentelemetry-instrumentation-sqlite3==0.48b0 + ; opentelemetry-instrumentation-jinja2==0.48b0 + ; opentelemetry-instrumentation-system-metrics==0.48b0 # pytest>=8.0 broke pytest-lazy-fixture which doesn't seem to be actively maintained # temporarily pin to pytest<8 @@ -124,10 +142,14 @@ test_plugins = coverage faker distro + dynaconf + pytest-asyncio + pytest-timeout + anyio [options.entry_points] console_scripts = - syft=syft.node.run:run + syft=syft.server.run:run [test] addopts = --verbose @@ -229,9 +251,7 @@ extensions = # Add here 'data_files', 'packages' or 'namespace_packages'. # Additional data files are defined as key value pairs of source and target: packages = - syft -data_files = - img = img/* + syft [mypy] python_version = 3.12 diff --git a/packages/syft/src/syft/VERSION b/packages/syft/src/syft/VERSION index 7e08b6f2c0c..368606c3066 100644 --- a/packages/syft/src/syft/VERSION +++ b/packages/syft/src/syft/VERSION @@ -1,5 +1,5 @@ # Mono Repo Global Version -__version__ = "0.8.6-beta.1" +__version__ = "0.9.6-beta.6" # elsewhere we can call this file: `python VERSION` and simply take the stdout # stdlib diff --git a/packages/syft/src/syft/__init__.py b/packages/syft/src/syft/__init__.py index 770ccbb6256..0875d509f95 100644 --- a/packages/syft/src/syft/__init__.py +++ b/packages/syft/src/syft/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.8.6-beta.1" +__version__ = "0.9.6-beta.6" # stdlib from collections.abc import Callable @@ -8,78 +8,93 @@ from typing import Any # relative -from . 
import gevent_patch # noqa: F401 -from .abstract_node import NodeSideType # noqa: F401 -from .abstract_node import NodeType # noqa: F401 -from .client.client import connect # noqa: F401 -from .client.client import login # noqa: F401 -from .client.client import login_as_guest # noqa: F401 -from .client.client import register # noqa: F401 -from .client.deploy import Orchestra # noqa: F401 -from .client.domain_client import DomainClient # noqa: F401 -from .client.gateway_client import GatewayClient # noqa: F401 -from .client.registry import DomainRegistry # noqa: F401 -from .client.registry import EnclaveRegistry # noqa: F401 -from .client.registry import NetworkRegistry # noqa: F401 -from .client.search import Search # noqa: F401 -from .client.search import SearchResults # noqa: F401 -from .client.user_settings import UserSettings # noqa: F401 -from .client.user_settings import settings # noqa: F401 -from .custom_worker.config import DockerWorkerConfig # noqa: F401 -from .external import OBLV_ENABLED # noqa: F401 -from .external import enable_external_lib # noqa: F401 -from .node.credentials import SyftSigningKey # noqa: F401 -from .node.domain import Domain # noqa: F401 -from .node.enclave import Enclave # noqa: F401 -from .node.gateway import Gateway # noqa: F401 -from .node.server import serve_node # noqa: F401 -from .node.server import serve_node as bind_worker # noqa: F401 -from .node.worker import Worker # noqa: F401 -from .protocol.data_protocol import bump_protocol_version # noqa: F401 -from .protocol.data_protocol import check_or_stage_protocol # noqa: F401 -from .protocol.data_protocol import get_data_protocol # noqa: F401 -from .protocol.data_protocol import stage_protocol_changes # noqa: F401 -from .serde import NOTHING # noqa: F401 -from .serde.deserialize import _deserialize as deserialize # noqa: F401 -from .serde.serializable import serializable # noqa: F401 -from .serde.serialize import _serialize as serialize # noqa: F401 -from .service.action.action_data_empty import ActionDataEmpty # noqa: F401 -from .service.action.action_object import ActionObject # noqa: F401 -from .service.action.plan import Plan # noqa: F401 -from .service.action.plan import planify # noqa: F401 -from .service.code.user_code import UserCodeStatus # noqa: F401; noqa: F401 -from .service.code.user_code import syft_function # noqa: F401; noqa: F401 -from .service.code.user_code import syft_function_single_use # noqa: F401; noqa: F401 -from .service.data_subject import DataSubjectCreate as DataSubject # noqa: F401 -from .service.dataset.dataset import Contributor # noqa: F401 -from .service.dataset.dataset import CreateAsset as Asset # noqa: F401 -from .service.dataset.dataset import CreateDataset as Dataset # noqa: F401 -from .service.notification.notifications import NotificationStatus # noqa: F401 -from .service.policy.policy import CustomInputPolicy # noqa: F401 -from .service.policy.policy import CustomOutputPolicy # noqa: F401 -from .service.policy.policy import ExactMatch # noqa: F401 -from .service.policy.policy import SingleExecutionExactOutput # noqa: F401 -from .service.policy.policy import UserInputPolicy # noqa: F401 -from .service.policy.policy import UserOutputPolicy # noqa: F401 -from .service.project.project import ProjectSubmit as Project # noqa: F401 -from .service.request.request import SubmitRequest as Request # noqa: F401 -from .service.response import SyftError # noqa: F401 -from .service.response import SyftNotReady # noqa: F401 -from .service.response import SyftSuccess # noqa: 
F401 -from .service.user.roles import Roles as roles # noqa: F401 -from .service.user.user_service import UserService # noqa: F401 +from .abstract_server import ServerSideType +from .abstract_server import ServerType +from .client.client import connect +from .client.client import login +from .client.client import login_as_guest +from .client.client import register +from .client.datasite_client import DatasiteClient +from .client.gateway_client import GatewayClient +from .client.registry import DatasiteRegistry +from .client.registry import EnclaveRegistry +from .client.registry import NetworkRegistry + +# from .client.search import Search +# from .client.search import SearchResults +from .client.syncing import compare_clients +from .client.syncing import compare_states +from .client.syncing import sync +from .client.user_settings import UserSettings +from .client.user_settings import settings +from .custom_worker.config import DockerWorkerConfig +from .custom_worker.config import PrebuiltWorkerConfig +from .custom_worker.workerpool_upgrade_utils import upgrade_custom_workerpools +from .orchestra import Orchestra as orchestra +from .protocol.data_protocol import bump_protocol_version +from .protocol.data_protocol import check_or_stage_protocol +from .protocol.data_protocol import get_data_protocol +from .protocol.data_protocol import stage_protocol_changes +from .serde import NOTHING +from .serde.deserialize import _deserialize as deserialize +from .serde.serializable import serializable +from .serde.serialize import _serialize as serialize +from .server.credentials import SyftSigningKey +from .server.datasite import Datasite +from .server.enclave import Enclave +from .server.gateway import Gateway +from .server.uvicorn import serve_server +from .server.uvicorn import serve_server as bind_worker +from .server.worker import Worker +from .service.action.action_data_empty import ActionDataEmpty +from .service.action.action_object import ActionObject +from .service.action.plan import Plan +from .service.action.plan import planify +from .service.api.api import api_endpoint +from .service.api.api import api_endpoint_method +from .service.api.api import create_new_api_endpoint as TwinAPIEndpoint +from .service.code.user_code import UserCodeStatus +from .service.code.user_code import syft_function +from .service.code.user_code import syft_function_single_use +from .service.data_subject import DataSubjectCreate as DataSubject +from .service.dataset.dataset import Contributor +from .service.dataset.dataset import CreateAsset as Asset +from .service.dataset.dataset import CreateDataset as Dataset +from .service.notification.notifications import NotificationStatus +from .service.policy.policy import CreatePolicyRuleConstant as Constant +from .service.policy.policy import CustomInputPolicy +from .service.policy.policy import CustomOutputPolicy +from .service.policy.policy import ExactMatch +from .service.policy.policy import MixedInputPolicy +from .service.policy.policy import SingleExecutionExactOutput +from .service.policy.policy import UserInputPolicy +from .service.policy.policy import UserOutputPolicy +from .service.project.project import ProjectSubmit as Project +from .service.request.request import SubmitRequest as Request +from .service.response import SyftError +from .service.response import SyftNotReady +from .service.response import SyftSuccess +from .service.user.roles import Roles as roles +from .service.user.user_service import UserService from .stable_version import LATEST_STABLE_SYFT 
-from .types.twin_object import TwinObject # noqa: F401 -from .types.uid import UID # noqa: F401 -from .util import filterwarnings # noqa: F401 -from .util import jax_settings # noqa: F401 -from .util import logger # noqa: F401 -from .util import options # noqa: F401 -from .util.autoreload import disable_autoreload # noqa: F401 -from .util.autoreload import enable_autoreload # noqa: F401 -from .util.telemetry import instrument # noqa: F401 -from .util.util import autocache # noqa: F401 -from .util.util import get_root_data_path # noqa: F401 +from .types.errors import SyftException +from .types.errors import raises +from .types.result import as_result +from .types.twin_object import TwinObject +from .types.uid import UID +from .util import filterwarnings +from .util.api_snapshot.api_snapshot import show_api_diff +from .util.api_snapshot.api_snapshot import take_api_snapshot +from .util.autoreload import disable_autoreload +from .util.autoreload import enable_autoreload +from .util.commit import __commit__ +from .util.patch_ipython import patch_ipython +from .util.reset_server import make_copy +from .util.reset_server import restore_copy +from .util.telemetry import instrument +from .util.telemetry import instrument_threads +from .util.util import autocache +from .util.util import get_root_data_path from .util.version_compare import make_requires requires = make_requires(LATEST_STABLE_SYFT, __version__) @@ -90,26 +105,10 @@ sys.path.append(str(Path(__file__))) -logger.start() -try: - # third party - from IPython import get_ipython +instrument_threads() - get_ipython() # noqa: F821 - # TODO: add back later or auto detect - # display( - # Markdown( - # "\nWarning: syft is imported in light mode by default. \ - # \nTo switch to dark mode, please run `sy.options.color_theme = 'dark'`" - # ) - # ) -except: # noqa: E722 - pass # nosec - -# For server-side, to enable by environment variable -if OBLV_ENABLED: - enable_external_lib("oblv") +patch_ipython() def module_property(func: Any) -> Callable: @@ -143,8 +142,8 @@ def _enclaves() -> EnclaveRegistry: @module_property -def _domains() -> DomainRegistry: - return DomainRegistry() +def _datasites() -> DatasiteRegistry: + return DatasiteRegistry() @module_property @@ -153,9 +152,18 @@ def _settings() -> UserSettings: @module_property -def _orchestra() -> Orchestra: - return Orchestra +def _test_settings() -> Any: + # relative + from .util.util import test_settings + + return test_settings() + + +@module_property +def hello_baby() -> None: + print("Hello baby!") + print("Welcome to the world. 
\u2764\ufe0f") -def search(name: str) -> SearchResults: - return Search(_domains()).search(name=name) +# def search(name: str) -> SearchResults: +# return Search(_datasites()).search(name=name) diff --git a/packages/syft/src/syft/__main__.py b/packages/syft/src/syft/__main__.py index 5763fbf00af..c37df473008 100644 --- a/packages/syft/src/syft/__main__.py +++ b/packages/syft/src/syft/__main__.py @@ -2,6 +2,6 @@ if __name__ == "__main__": # relative - from .node.run import run + from .server.run import run run() diff --git a/packages/syft/src/syft/abstract_node.py b/packages/syft/src/syft/abstract_node.py deleted file mode 100644 index c3e54c85159..00000000000 --- a/packages/syft/src/syft/abstract_node.py +++ /dev/null @@ -1,44 +0,0 @@ -# stdlib -from collections.abc import Callable -from enum import Enum -from typing import TYPE_CHECKING - -# relative -from .serde.serializable import serializable -from .types.uid import UID - -if TYPE_CHECKING: - # relative - from .service.service import AbstractService - - -@serializable() -class NodeType(str, Enum): - DOMAIN = "domain" - NETWORK = "network" - ENCLAVE = "enclave" - GATEWAY = "gateway" - - def __str__(self) -> str: - # Use values when transforming NodeType to str - return self.value - - -@serializable() -class NodeSideType(str, Enum): - LOW_SIDE = "low" - HIGH_SIDE = "high" - - def __str__(self) -> str: - return self.value - - -class AbstractNode: - id: UID | None - name: str | None - node_type: NodeType | None - node_side_type: NodeSideType | None - in_memory_workers: bool - - def get_service(self, path_or_func: str | Callable) -> "AbstractService": - raise NotImplementedError diff --git a/packages/syft/src/syft/abstract_server.py b/packages/syft/src/syft/abstract_server.py new file mode 100644 index 00000000000..8c945f9abbb --- /dev/null +++ b/packages/syft/src/syft/abstract_server.py @@ -0,0 +1,50 @@ +# stdlib +from collections.abc import Callable +from enum import Enum +from typing import TYPE_CHECKING + +# relative +from .serde.serializable import serializable +from .store.db.db import DBConfig +from .store.db.db import DBManager +from .types.uid import UID + +if TYPE_CHECKING: + # relative + from .server.service_registry import ServiceRegistry + from .service.service import AbstractService + + +@serializable(canonical_name="ServerType", version=1) +class ServerType(str, Enum): + DATASITE = "datasite" + NETWORK = "network" + ENCLAVE = "enclave" + GATEWAY = "gateway" + + def __str__(self) -> str: + # Use values when transforming ServerType to str + return self.value + + +@serializable(canonical_name="ServerSideType", version=1) +class ServerSideType(str, Enum): + LOW_SIDE = "low" + HIGH_SIDE = "high" + + def __str__(self) -> str: + return self.value + + +class AbstractServer: + id: UID | None + name: str | None + server_type: ServerType | None + server_side_type: ServerSideType | None + in_memory_workers: bool + services: "ServiceRegistry" + db_config: DBConfig + db: DBManager[DBConfig] + + def get_service(self, path_or_func: str | Callable) -> "AbstractService": + raise NotImplementedError diff --git a/packages/syft/src/syft/assets/css/fonts.css b/packages/syft/src/syft/assets/css/fonts.css new file mode 100644 index 00000000000..50acce0539b --- /dev/null +++ b/packages/syft/src/syft/assets/css/fonts.css @@ -0,0 +1,93 @@ +/* cyrillic-ext */ +@font-face { + font-family: "Open Sans"; + font-style: normal; + font-weight: 300 800; + font-stretch: 100%; + src: 
url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSKmu0SC55K5gw.woff2) + format("woff2"); + unicode-range: U+0460-052F, U+1C80-1C88, U+20B4, U+2DE0-2DFF, U+A640-A69F, + U+FE2E-FE2F; +} + +/* cyrillic */ +@font-face { + font-family: "Open Sans"; + font-style: normal; + font-weight: 300 800; + font-stretch: 100%; + src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSumu0SC55K5gw.woff2) + format("woff2"); + unicode-range: U+0301, U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116; +} + +/* greek-ext */ +@font-face { + font-family: "Open Sans"; + font-style: normal; + font-weight: 300 800; + font-stretch: 100%; + src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSOmu0SC55K5gw.woff2) + format("woff2"); + unicode-range: U+1F00-1FFF; +} + +/* greek */ +@font-face { + font-family: "Open Sans"; + font-style: normal; + font-weight: 300 800; + font-stretch: 100%; + src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSymu0SC55K5gw.woff2) + format("woff2"); + unicode-range: U+0370-03FF; +} + +/* hebrew */ +@font-face { + font-family: "Open Sans"; + font-style: normal; + font-weight: 300 800; + font-stretch: 100%; + src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTS2mu0SC55K5gw.woff2) + format("woff2"); + unicode-range: U+0590-05FF, U+200C-2010, U+20AA, U+25CC, U+FB1D-FB4F; +} + +/* vietnamese */ +@font-face { + font-family: "Open Sans"; + font-style: normal; + font-weight: 300 800; + font-stretch: 100%; + src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSCmu0SC55K5gw.woff2) + format("woff2"); + unicode-range: U+0102-0103, U+0110-0111, U+0128-0129, U+0168-0169, U+01A0-01A1, + U+01AF-01B0, U+0300-0301, U+0303-0304, U+0308-0309, U+0323, U+0329, + U+1EA0-1EF9, U+20AB; +} + +/* latin-ext */ +@font-face { + font-family: "Open Sans"; + font-style: normal; + font-weight: 300 800; + font-stretch: 100%; + src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSGmu0SC55K5gw.woff2) + format("woff2"); + unicode-range: U+0100-02AF, U+0304, U+0308, U+0329, U+1E00-1E9F, U+1EF2-1EFF, + U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; +} + +/* latin */ +@font-face { + font-family: "Open Sans"; + font-style: normal; + font-weight: 300 800; + font-stretch: 100%; + src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTS-mu0SC55I.woff2) + format("woff2"); + unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, + U+02DC, U+0304, U+0308, U+0329, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, + U+2193, U+2212, U+2215, U+FEFF, U+FFFD; +} diff --git a/packages/syft/src/syft/assets/css/style.css b/packages/syft/src/syft/assets/css/style.css new file mode 100644 index 00000000000..fe35e772b85 --- /dev/null +++ b/packages/syft/src/syft/assets/css/style.css @@ -0,0 +1,660 @@ +body[data-jp-theme-light="false"], +body.vscode-dark { + --primary-color: #111111; + --secondary-color: #212121; + --tertiary-color: #cfcdd6; + --button-color: #111111; + --colors-black: #ffffff; + --surface-color: #fff; + --text-color: #ffffff; + --surface-highest-color: #ffffff; +} + +body { + --primary-color: #ffffff; + --secondary-color: #f5f5f5; + --tertiary-color: #000000de; + --button-color: #d1d5db; + --colors-black: #17161d; + --surface-color: #464158; + --text-color: #2e2b3b; + --surface-highest-color: #534f64; +} + +.header-1 { + 
font-style: normal; + font-weight: 600; + font-size: 2.0736em; + line-height: 100%; + leading-trim: both; + text-edge: cap; + color: var(--colors-black); +} + +.header-2 { + font-style: normal; + font-weight: 600; + font-size: 1.728em; + line-height: 100%; + leading-trim: both; + text-edge: cap; + color: var(--colors-black); +} + +.header-3 { + font-style: normal; + font-weight: 600; + font-size: 1.44em; + line-height: 100%; + leading-trim: both; + text-edge: cap; + color: var(--tertiary-color); +} + +.header-4 { + font-style: normal; + font-weight: 600; + font-size: 1.2em; + line-height: 100%; + leading-trim: both; + text-edge: cap; + color: var(--colors-black); +} + +.paragraph { + font-style: normal; + font-weight: 400; + font-size: 14px; + line-height: 100%; + leading-trim: both; + text-edge: cap; + color: var(--text-color); +} + +.paragraph-sm { + font-family: "Roboto"; + font-style: normal; + font-weight: 400; + font-size: 11.62px; + line-height: 100%; + leading-trim: both; + text-edge: cap; + color: var(--text-color); +} + +.code-text { + font-family: "Consolas"; + font-style: normal; + font-weight: 400; + font-size: 13px; + line-height: 130%; + leading-trim: both; + text-edge: cap; + color: var(--text-color); +} + +.numbering-entry { + display: none; +} + +/* Tooltip container */ +.tooltip { + position: relative; + display: inline-block; + border-bottom: 1px dotted black; + /* If you want dots under the hoverable text */ +} + +/* Tooltip text */ +.tooltip .tooltiptext { + visibility: hidden; + width: 120px; + background-color: black; + color: #fff; + text-align: center; + padding: 5px 0; + border-radius: 6px; + + /* Position the tooltip text - see examples below! */ + position: absolute; + z-index: 1; +} + +.repr-cell { + padding-top: 20px; +} + +.text-bold { + font-weight: bold; +} + +.pr-8 { + padding-right: 8px; +} + +.pt-8 { + padding-top: 8px; +} + +.pl-8 { + padding-left: 8px; +} + +.pb-8 { + padding-bottom: 8px; +} + +.py-25 { + padding-top: 25px; + padding-bottom: 25px; +} + +.flex { + display: flex; +} + +.gap-10 { + gap: 10px; +} + +.items-center { + align-items: center; +} + +.folder-icon { + color: var(--tertiary-color); +} + +.search-input { + display: flex; + flex-direction: row; + align-items: center; + padding: 8px 12px; + width: 343px; + height: 24px; + /* Lt On Surface/Low */ + background-color: var(--secondary-color); + border-radius: 30px; + + /* Lt On Surface/Highest */ + color: var(--tertiary-color); + border: none; + /* Inside auto layout */ + flex: none; + order: 0; + flex-grow: 0; +} + +.search-input:focus { + outline: none; +} + +.search-input:focus::placeholder, +.search-input::placeholder { + /* Chrome, Firefox, Opera, Safari 10.1+ */ + color: var(--tertiary-color); + opacity: 1; + /* Firefox */ +} + +.search-button { + /* Search */ + leading-trim: both; + text-edge: cap; + display: flex; + align-items: center; + text-align: center; + + /* Primary/On Light */ + background-color: var(--button-color); + color: var(--tertiary-color); + + border-radius: 30px; + border-color: var(--secondary-color); + border-style: solid; + box-shadow: + rgba(60, 64, 67, 0.3) 0px 1px 2px 0px, + rgba(60, 64, 67, 0.15) 0px 1px 3px 1px; + cursor: pointer; + /* Inside auto layout */ + flex: none; + order: 1; + flex-grow: 0; +} + +.grid-index-cells { + grid-column: span 1; + /* tmp fix to make left col stand out (fix with font-family) */ + font-weight: 600; + background-color: var(--secondary-color) !important; + color: var(--tertiary-color); +} + +.center-content-cell { + margin: 
auto; +} + +.grid-header { + /* Auto layout */ + display: flex; + flex-direction: column; + align-items: center; + padding: 6px 4px; + + resize: horizontal; + /* Lt On Surface/Surface */ + /* Lt On Surface/High */ + border: 1px solid #cfcdd6; + /* tmp fix to make header stand out (fix with font-family) */ + font-weight: 600; + background-color: var(--secondary-color); + color: var(--tertiary-color); +} + +.grid-row { + display: flex; + flex-direction: column; + align-items: flex-start; + padding: 6px 4px; + overflow: hidden; + border: 1px solid #cfcdd6; + background-color: var(--primary-color); + color: var(--tertiary-color); +} + +.syncstate-col-footer { + font-family: "DejaVu Sans Mono", "Open Sans"; + font-size: 12px; + font-weight: 400; + line-height: 16.8px; + text-align: left; + color: #5e5a72; +} + +.syncstate-description { + font-family: Open Sans; + font-size: 14px; + font-weight: 600; + line-height: 19.6px; + text-align: left; + white-space: nowrap; + flex-grow: 1; +} + +.widget-header2 { + display: flex; + gap: 8px; + justify-content: start; + width: 100%; + overflow: hidden; + align-items: center; +} + +.widget-header2-2 { + display: flex; + gap: 8px; + justify-content: start; + align-items: center; +} + +.jobs-title { + font-family: + Open Sans, + sans-serif; + font-size: 18px; + font-weight: 600; + line-height: 25.2px; + text-align: left; + color: #1f567a; +} + +.diff-state-orange-text { + color: #b8520a; +} + +.diff-state-no-obj { + font-family: "DejaVu Sans Mono", "Open Sans"; + font-size: 12px; + font-weight: 400; + line-height: 16.8px; + text-align: left; + color: #5e5a72; +} + +.diff-state-intro { + font-family: Open Sans; + font-size: 14px; + font-weight: 400; + line-height: 19.6px; + text-align: left; + color: #b4b0bf; +} + +.diff-state-header { + font-family: Open Sans; + font-size: 22px; + font-weight: 600; + line-height: 30.8px; + text-align: left; + color: #353243; + display: flex; + gap: 8px; +} + +.diff-state-sub-header { + font-family: Open Sans; + font-size: 14px; + font-weight: 400; + line-height: 19.6px; + text-align: left; + color: #5e5a72; +} + +.badge { + /* code-text; */ + border-radius: 30px; + padding: 6px; + white-space: nowrap; + overflow: hidden; + line-height: 1.2; + font-family: monospace; +} + +.label { + /* code-text; */ + border-radius: 4px; + padding: 6px 4px; + white-space: nowrap; + overflow: hidden; + line-height: 1.2; + font-family: monospace; +} + +.label-light-purple { + /* label; */ + background-color: #c9cfe8; + color: #373b7b; +} + +.label-light-blue { + /* label; */ + background-color: #c2def0; + color: #1f567a; +} + +.label-orange { + /* badge; */ + background-color: #fee9cd; + color: #b8520a; +} + +.label-gray { + /* badge; */ + background-color: #ecebef; + color: #353243; +} + +.label-green { + /* badge; */ + background-color: #d5f1d5; + color: #256b24; +} + +.label-red { + /* label; */ + background-color: #f2d9de; + color: #9b2737; +} + +.badge-blue { + /* badge; */ + background-color: #c2def0; + color: #1f567a; +} + +.badge-purple { + /* badge; */ + background-color: #c9cfe8; + color: #373b7b; +} + +.badge-green { + background-color: #d5f1d5; + color: #256b24; +} + +.badge-red { + /* badge; */ + background-color: #f2d9de; + color: #9b2737; +} + +.badge-gray { + /* badge; */ + background-color: #ecebef; + color: #2e2b3b; +} + +.paginationContainer { + width: 100%; + /*height: 30px;*/ + display: flex; + justify-content: center; + gap: 8px; + padding: 5px; + color: var(--tertiary-color); +} + +.widget-label-basic { + display: flex; 
+} + +.widget-label-basic input[type="checkbox"][disabled] { + filter: sepia(0.3) hue-rotate(67deg) saturate(3); +} + +.page { + color: black; + font-weight: bold; + color: var(--tertiary-color); +} + +.page:hover { + color: #38bdf8; + cursor: pointer; +} + +.clipboard:hover { + cursor: pointer; + color: var(--tertiary-color); +} + +.rendered_html tbody tr:nth-child(odd) { + background: transparent; +} + +.search-field { + display: flex; + align-items: center; + border-radius: 30px; + background-color: var(--secondary-color); +} + +.syft-dropdown { + margin: 5px; + margin-left: 5px; + position: relative; + display: inline-block; + text-align: center; + background-color: var(--button-color); + min-width: 100px; + padding: 2px; + border-radius: 30px; +} + +.syft-dropdown:hover { + cursor: pointer; +} + +.syft-dropdown-content { + margin-top: 26px; + display: none; + position: absolute; + min-width: 100px; + box-shadow: 0px 8px 16px 0px rgba(0, 0, 0, 0.2); + padding: 12px 6px; + z-index: 1; + background-color: var(--primary-color); + color: var(--tertiary-color); +} + +.dd-options { + padding-top: 4px; +} + +.dd-options:first-of-type { + padding-top: 0px; +} + +.dd-options:hover { + cursor: pointer; + background: #d1d5db; +} + +.arrow { + border: solid black; + border-width: 0 3px 3px 0; + display: inline-block; + padding: 3px; +} + +.down { + transform: rotate(45deg); + -webkit-transform: rotate(45deg); +} + +.syft-widget ul { + list-style-type: none; + margin: 0; + padding: 0; + overflow: hidden; +} + +.syft-widget li { + float: left; + border-bottom: solid; + border-bottom-color: #cfcdd6; +} + +.syft-widget li a { + display: block; + text-align: center; + padding: 14px 16px; + color: #cfcdd6; +} + +.log-tab-header { + border-bottom: solid 2px #ecebef; + padding: 4px 16px; +} + +.active-border { + border-bottom: solid 2px #1f567a; + font-weight: 700; +} + +.active { + color: #1f567a; +} + +.syft-widget li a:hover { + background-color: #c2def0; +} + +.syft-code, +.syft-user_code, +.syft-project, +.syft-project-create, +.syft-settings, +.syft-dataset, +.syft-asset, +.syft-contributor, +.syft-request, +.syft-syncstate, +.job-info { + color: var(--surface-color); +} + +.syft-dataset h1, +.syft-dataset h2, +.syft-dataset h3, +.syft-dataset p, +.syft-asset h3, +.syft-asset p, +.syft-syncstate h3, +.syft-syncstate p, +.syft-code p, +.syft-code h3 { + font-family: "Open Sans"; +} + +.diff-container { + border: 0.5px solid #b4b0bf; +} + +.syft-container { + padding: 5px; + font-family: "Open Sans"; +} + +.syft-alert-info { + color: #1f567a; + background-color: #c2def0; + border-radius: 4px; + padding: 5px; + padding: 13px 10px; +} + +.syft-code-block { + background-color: #f7f7f7; + border: 1px solid #cfcfcf; + padding: 0px 2px; +} + +.syft-space { + margin-top: 1em; +} + +.itables { + font-family: "Consolas", monospace, sans-serif; +} + +.itables table { + margin: 0 auto; + float: left; + color: var(--surface-highest-color); +} + +.itables table th { + color: var(--text-color); +} + +.syft-request { + line-height: 1; +} + +div.alert-warning, +div.alert-info, +div.alert-danger, +div.alert-success { + padding: 5px; +} + +pre.alert-warning, +pre.alert-info, +pre.alert-danger, +pre.alert-success { + display: inline; + font-family: inherit; + color: var(--text-color); +} diff --git a/packages/syft/src/syft/assets/css/tabulator_pysyft.min.css b/packages/syft/src/syft/assets/css/tabulator_pysyft.min.css new file mode 100644 index 00000000000..fde1ee7edc8 --- /dev/null +++ 
b/packages/syft/src/syft/assets/css/tabulator_pysyft.min.css @@ -0,0 +1,1651 @@ +:root { + --tabulator-background-color: #fff; + --tabulator-border-color: rgba(0, 0, 0, .12); + --tabulator-text-size: 16px; + --tabulator-header-background-color: #f5f5f5; + --tabulator-header-text-color: #555; + --tabulator-header-border-color: rgba(0, 0, 0, .12); + --tabulator-header-separator-color: rgba(0, 0, 0, .12); + --tabulator-header-margin: 4px; + --tabulator-sort-arrow-hover: #555; + --tabulator-sort-arrow-active: #666; + --tabulator-sort-arrow-inactive: #bbb; + --tabulator-column-resize-guide-color: #999; + --tabulator-row-background-color: #fff; + --tabulator-row-alt-background-color: #f8f8f8; + --tabulator-row-border-color: rgba(0, 0, 0, .12); + --tabulator-row-text-color: #333; + --tabulator-row-hover-background: #e1f5fe; + --tabulator-row-selected-background: #ace5ff; + --tabulator-row-selected-background-hover: #9bcfe8; + --tabulator-edit-box-color: #17161d; + --tabulator-error-color: #d00; + --tabulator-footer-background-color: transparent; + --tabulator-footer-text-color: #555; + --tabulator-footer-border-color: rgba(0, 0, 0, .12); + --tabulator-footer-separator-color: rgba(0, 0, 0, .12); + --tabulator-footer-active-color: #17161d; + --tabulator-spreadsheet-active-tab-color: #fff; + --tabulator-range-border-color: #17161d; + --tabulator-range-handle-color: #17161d; + --tabulator-range-header-selected-background: var(--tabulator-range-border-color); + --tabulator-range-header-selected-text-color: #fff; + --tabulator-range-header-highlight-background: colors-gray-timberwolf; + --tabulator-range-header-text-highlight-background: #fff; + --tabulator-pagination-button-background: #fff; + --tabulator-pagination-button-background-hover: #06c; + --tabulator-pagination-button-color: #999; + --tabulator-pagination-button-color-hover: #fff; + --tabulator-pagination-button-color-active: #000; + --tabulator-cell-padding: 15px +} + +body.vscode-dark, +body[data-jp-theme-light=false] { + --tabulator-background-color: #080808; + --tabulator-border-color: #666; + --tabulator-text-size: 16px; + --tabulator-header-background-color: #212121; + --tabulator-header-text-color: #555; + --tabulator-header-border-color: #666; + --tabulator-header-separator-color: #666; + --tabulator-header-margin: 4px; + --tabulator-sort-arrow-hover: #fff; + --tabulator-sort-arrow-active: #e6e6e6; + --tabulator-sort-arrow-inactive: #666; + --tabulator-column-resize-guide-color: #999; + --tabulator-row-background-color: #080808; + --tabulator-row-alt-background-color: #212121; + --tabulator-row-border-color: #666; + --tabulator-row-text-color: #f8f8f8; + --tabulator-row-hover-background: #333; + --tabulator-row-selected-background: #3d355d; + --tabulator-row-selected-background-hover: #483f69; + --tabulator-edit-box-color: #333; + --tabulator-error-color: #d00; + --tabulator-footer-background-color: transparent; + --tabulator-footer-text-color: #555; + --tabulator-footer-border-color: rgba(0, 0, 0, .12); + --tabulator-footer-separator-color: rgba(0, 0, 0, .12); + --tabulator-footer-active-color: #17161d; + --tabulator-spreadsheet-active-tab-color: #fff; + --tabulator-range-border-color: #17161d; + --tabulator-range-handle-color: var(--tabulator-range-border-color); + --tabulator-range-header-selected-background: var(--tabulator-range-border-color); + --tabulator-range-header-selected-text-color: #fff; + --tabulator-range-header-highlight-background: #d6d6d6; + --tabulator-range-header-text-highlight-background: #fff; + 
--tabulator-pagination-button-background: #212121; + --tabulator-pagination-button-background-hover: #555; + --tabulator-pagination-button-color: #999; + --tabulator-pagination-button-color-hover: #fff; + --tabulator-pagination-button-color-active: #fff; + --tabulator-cell-padding: 15px +} + +.tabulator { + border: 1px solid var(--tabulator-border-color); + font-size: var(--tabulator-text-size); + overflow: hidden; + position: relative; + text-align: left; + -webkit-transform: translateZ(0); + -moz-transform: translateZ(0); + -ms-transform: translateZ(0); + -o-transform: translateZ(0); + transform: translateZ(0) +} + +.tabulator[tabulator-layout=fitDataFill] .tabulator-tableholder .tabulator-table { + min-width: 100% +} + +.tabulator[tabulator-layout=fitDataTable] { + display: inline-block +} + +.tabulator.tabulator-block-select, +.tabulator.tabulator-ranges .tabulator-cell:not(.tabulator-editing) { + user-select: none +} + +.tabulator .tabulator-header { + background-color: var(--tabulator-header-background-color); + border-bottom: 1px solid var(--tabulator-header-separator-color); + box-sizing: border-box; + color: var(--tabulator-header-text-color); + font-weight: 700; + outline: none; + overflow: hidden; + position: relative; + -moz-user-select: none; + -khtml-user-select: none; + -webkit-user-select: none; + -o-user-select: none; + white-space: nowrap; + width: 100% +} + +.tabulator .tabulator-header.tabulator-header-hidden { + display: none +} + +.tabulator .tabulator-header .tabulator-header-contents { + overflow: hidden; + position: relative +} + +.tabulator .tabulator-header .tabulator-header-contents .tabulator-headers { + display: inline-block +} + +.tabulator .tabulator-header .tabulator-col { + background: var(--tabulator-header-background-color); + border-right: 1px solid var(--tabulator-header-border-color); + box-sizing: border-box; + display: inline-flex; + flex-direction: column; + justify-content: flex-start; + overflow: hidden; + position: relative; + text-align: left; + vertical-align: bottom +} + +.tabulator .tabulator-header .tabulator-col.tabulator-moving { + background: hsl(var(--tabulator-header-background-color), calc(var(--tabulator-header-background-color) - 5%)) !important; + border: 1px solid var(--tabulator-header-separator-color); + pointer-events: none; + position: absolute +} + +.tabulator .tabulator-header .tabulator-col.tabulator-range-highlight { + background-color: var(--tabulator-range-header-highlight-background); + color: var(--tabulator-range-header-text-highlight-background) +} + +.tabulator .tabulator-header .tabulator-col.tabulator-range-selected { + background-color: var(--tabulator-range-header-selected-background); + color: var(--tabulator-range-header-selected-text-color) +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content { + box-sizing: border-box; + padding: 4px; + position: relative +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-header-popup-button { + padding: 0 8px +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-header-popup-button:hover { + cursor: pointer; + opacity: .6 +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-title-holder { + position: relative +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-title { + box-sizing: border-box; + overflow: hidden; + text-overflow: ellipsis; + vertical-align: bottom; + white-space: nowrap; + width: 100% +} + +.tabulator 
.tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-title.tabulator-col-title-wrap { + text-overflow: clip; + white-space: normal +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-title .tabulator-title-editor { + background: #fff; + border: 1px solid #999; + box-sizing: border-box; + padding: 1px; + width: 100% +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-title .tabulator-header-popup-button+.tabulator-title-editor { + width: calc(100% - 22px) +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-sorter { + align-items: center; + bottom: 0; + display: flex; + position: absolute; + right: 4px; + top: 0 +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-sorter .tabulator-arrow { + border-bottom: 6px solid var(--tabulator-sort-arrow-inactive); + border-left: 6px solid transparent; + border-right: 6px solid transparent; + height: 0; + width: 0 +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-group .tabulator-col-group-cols { + border-top: 1px solid var(--tabulator-header-border-color); + display: flex; + margin-right: -1px; + overflow: hidden; + position: relative +} + +.tabulator .tabulator-header .tabulator-col .tabulator-header-filter { + box-sizing: border-box; + margin-top: 2px; + position: relative; + text-align: center; + width: 100% +} + +.tabulator .tabulator-header .tabulator-col .tabulator-header-filter textarea { + height: auto !important +} + +.tabulator .tabulator-header .tabulator-col .tabulator-header-filter svg { + margin-top: 3px +} + +.tabulator .tabulator-header .tabulator-col .tabulator-header-filter input::-ms-clear { + height: 0; + width: 0 +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable .tabulator-col-title { + padding-right: 25px +} + +@media (hover:hover) and (pointer:fine) { + .tabulator .tabulator-header .tabulator-col.tabulator-sortable.tabulator-col-sorter-element:hover { + background-color: hsl(var(--tabulator-header-background-color), calc(var(--tabulator-header-background-color) - 10%)) !important; + cursor: pointer + } +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort=none] .tabulator-col-content .tabulator-col-sorter { + color: var(--tabulator-sort-arrow-inactive) +} + +@media (hover:hover) and (pointer:fine) { + .tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort=none] .tabulator-col-content .tabulator-col-sorter.tabulator-col-sorter-element .tabulator-arrow:hover { + border-bottom: 6px solid var(--tabulator-sort-arrow-hover); + cursor: pointer + } +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort=none] .tabulator-col-content .tabulator-col-sorter .tabulator-arrow { + border-bottom: 6px solid var(--tabulator-sort-arrow-inactive); + border-top: none +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort=ascending] .tabulator-col-content .tabulator-col-sorter { + color: var(--tabulator-sort-arrow-active) +} + +@media (hover:hover) and (pointer:fine) { + .tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort=ascending] .tabulator-col-content .tabulator-col-sorter.tabulator-col-sorter-element .tabulator-arrow:hover { + border-bottom: 6px solid var(--tabulator-sort-arrow-hover); + cursor: pointer + } +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort=ascending] .tabulator-col-content .tabulator-col-sorter 
.tabulator-arrow { + border-bottom: 6px solid var(--tabulator-sort-arrow-active); + border-top: none +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort=descending] .tabulator-col-content .tabulator-col-sorter { + color: var(--tabulator-sort-arrow-active) +} + +@media (hover:hover) and (pointer:fine) { + .tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort=descending] .tabulator-col-content .tabulator-col-sorter.tabulator-col-sorter-element .tabulator-arrow:hover { + border-top: 6px solid var(--tabulator-sort-arrow-hover); + cursor: pointer + } +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable[aria-sort=descending] .tabulator-col-content .tabulator-col-sorter .tabulator-arrow { + border-bottom: none; + border-top: 6px solid var(--tabulator-sort-arrow-active); + color: var(--tabulator-sort-arrow-active) +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-vertical .tabulator-col-content .tabulator-col-title { + align-items: center; + display: flex; + justify-content: center; + text-orientation: mixed; + writing-mode: vertical-rl +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-vertical.tabulator-col-vertical-flip .tabulator-col-title { + transform: rotate(180deg) +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-vertical.tabulator-sortable .tabulator-col-title { + padding-right: 0; + padding-top: 20px +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-vertical.tabulator-sortable.tabulator-col-vertical-flip .tabulator-col-title { + padding-bottom: 20px; + padding-right: 0 +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-vertical.tabulator-sortable .tabulator-col-sorter { + bottom: auto; + justify-content: center; + left: 0; + right: 0; + top: 4px +} + +.tabulator .tabulator-header .tabulator-frozen { + left: 0; + position: sticky; + z-index: 11 +} + +.tabulator .tabulator-header .tabulator-frozen.tabulator-frozen-left { + border-right: 2px solid var(--tabulator-row-border-color) +} + +.tabulator .tabulator-header .tabulator-frozen.tabulator-frozen-right { + border-left: 2px solid var(--tabulator-row-border-color) +} + +.tabulator .tabulator-header .tabulator-calcs-holder { + border-bottom: 1px solid var(--tabulator-header-border-color); + border-top: 1px solid var(--tabulator-row-border-color); + box-sizing: border-box; + display: inline-block +} + +.tabulator .tabulator-header .tabulator-calcs-holder, +.tabulator .tabulator-header .tabulator-calcs-holder .tabulator-row { + background: hsl(var(--tabulator-header-background-color), calc(var(--tabulator-header-background-color) + 5%)) !important +} + +.tabulator .tabulator-header .tabulator-calcs-holder .tabulator-row .tabulator-col-resize-handle { + display: none +} + +.tabulator .tabulator-header .tabulator-frozen-rows-holder { + display: inline-block +} + +.tabulator .tabulator-tableholder { + -webkit-overflow-scrolling: touch; + overflow: auto; + position: relative; + white-space: nowrap; + width: 100% +} + +.tabulator .tabulator-tableholder:focus { + outline: none +} + +.tabulator .tabulator-tableholder .tabulator-placeholder { + align-items: center; + box-sizing: border-box; + display: flex; + justify-content: center; + min-width: 100%; + width: 100% +} + +.tabulator .tabulator-tableholder .tabulator-placeholder[tabulator-render-mode=virtual] { + min-height: 100% +} + +.tabulator .tabulator-tableholder .tabulator-placeholder .tabulator-placeholder-contents { + color: #ccc; + display: inline-block; + 
font-size: 20px; + font-weight: 700; + padding: 10px; + text-align: center; + white-space: normal +} + +.tabulator .tabulator-tableholder .tabulator-table { + background-color: var(--tabulator-row-background-color); + color: var(--tabulator-row-text-color); + display: inline-block; + overflow: visible; + position: relative; + white-space: nowrap +} + +.tabulator .tabulator-tableholder .tabulator-table .tabulator-row.tabulator-calcs { + background: hsl(var(--tabulator-row-atl-background-color), calc(var(--tabulator-row-alt-background-color) - 5%)) !important; + font-weight: 700 +} + +.tabulator .tabulator-tableholder .tabulator-table .tabulator-row.tabulator-calcs.tabulator-calcs-top { + border-bottom: 2px solid var(--tabulator-row-border-color) +} + +.tabulator .tabulator-tableholder .tabulator-table .tabulator-row.tabulator-calcs.tabulator-calcs-bottom { + border-top: 2px solid var(--tabulator-row-border-color) +} + +.tabulator .tabulator-tableholder .tabulator-range-overlay { + inset: 0; + pointer-events: none; + position: absolute; + z-index: 10 +} + +.tabulator .tabulator-tableholder .tabulator-range-overlay .tabulator-range { + border: 1px solid var(--tabulator-range-border-color); + box-sizing: border-box; + position: absolute +} + +.tabulator .tabulator-tableholder .tabulator-range-overlay .tabulator-range.tabulator-range-active:after { + background-color: var(--tabulator-range-handle-color); + border-radius: 999px; + bottom: -3px; + content: ""; + height: 6px; + position: absolute; + right: -3px; + width: 6px +} + +.tabulator .tabulator-tableholder .tabulator-range-overlay .tabulator-range-cell-active { + border: 2px solid var(--tabulator-range-border-color); + box-sizing: border-box; + position: absolute +} + +.tabulator .tabulator-footer { + color: var(--tabulator-footer-text-color); + font-weight: 700; + user-select: none; + -moz-user-select: none; + -khtml-user-select: none; + -webkit-user-select: none; + -o-user-select: none; + white-space: nowrap +} + +.tabulator .tabulator-footer .tabulator-footer-contents { + align-items: center; + display: flex; + flex-direction: row; + justify-content: space-between; + padding: 5px 10px +} + +.tabulator .tabulator-footer .tabulator-footer-contents:empty { + display: none +} + +.tabulator .tabulator-footer .tabulator-spreadsheet-tabs { + margin-top: -5px; + overflow-x: auto +} + +.tabulator .tabulator-footer .tabulator-spreadsheet-tabs .tabulator-spreadsheet-tab { + border: 1px solid var(--tabulator-border-color); + border-bottom-left-radius: 5px; + border-bottom-right-radius: 5px; + border-top: none; + display: inline-block; + font-size: .9em; + padding: 5px +} + +.tabulator .tabulator-footer .tabulator-spreadsheet-tabs .tabulator-spreadsheet-tab:hover { + cursor: pointer; + opacity: .7 +} + +.tabulator .tabulator-footer .tabulator-spreadsheet-tabs .tabulator-spreadsheet-tab.tabulator-spreadsheet-tab-active { + background: var(--tabulator-spreadsheet-active-tab-color) +} + +.tabulator .tabulator-footer .tabulator-calcs-holder { + border-bottom: 1px solid var(--tabulator-row-border-color); + border-top: 1px solid var(--tabulator-row-border-color); + box-sizing: border-box; + overflow: hidden; + text-align: left; + width: 100% +} + +.tabulator .tabulator-footer .tabulator-calcs-holder .tabulator-row { + display: inline-block +} + +.tabulator .tabulator-footer .tabulator-calcs-holder .tabulator-row .tabulator-col-resize-handle { + display: none +} + +.tabulator .tabulator-footer .tabulator-calcs-holder:only-child { + border-bottom: none; + 
margin-bottom: -5px +} + +.tabulator .tabulator-footer>*+.tabulator-page-counter { + margin-left: 10px +} + +.tabulator .tabulator-footer .tabulator-page-counter { + font-weight: 400 +} + +.tabulator .tabulator-footer .tabulator-paginator { + color: var(--tabulator-footer-text-color); + flex: 1; + font-family: inherit; + font-size: inherit; + font-weight: inherit; + text-align: right +} + +.tabulator .tabulator-footer .tabulator-page-size { + border: 1px solid var(--tabulator-footer-border-color); + border-radius: 3px; + display: inline-block; + margin: 0 5px; + padding: 2px 5px +} + +.tabulator .tabulator-footer .tabulator-pages { + margin: 0 7px +} + +.tabulator .tabulator-footer .tabulator-page { + background: hsla(0, 0%, 100%, .2); + border: 1px solid var(--tabulator-footer-border-color); + border-radius: 3px; + display: inline-block; + margin: 0 2px; + padding: 2px 5px +} + +.tabulator .tabulator-footer .tabulator-page.active { + color: var(--tabulator-footer-active-color) +} + +.tabulator .tabulator-footer .tabulator-page:disabled { + opacity: .5 +} + +@media (hover:hover) and (pointer:fine) { + .tabulator .tabulator-footer .tabulator-page:not(disabled):hover { + background: rgba(0, 0, 0, .2); + color: #fff; + cursor: pointer + } +} + +.tabulator .tabulator-col-resize-handle { + display: inline-block; + margin-left: -3px; + margin-right: -3px; + position: relative; + vertical-align: middle; + width: 6px; + z-index: 11 +} + +@media (hover:hover) and (pointer:fine) { + .tabulator .tabulator-col-resize-handle:hover { + cursor: ew-resize + } +} + +.tabulator .tabulator-col-resize-handle:last-of-type { + margin-right: 0; + width: 3px +} + +.tabulator .tabulator-col-resize-guide { + height: 100%; + margin-left: -.5px; + top: 0; + width: 4px +} + +.tabulator .tabulator-col-resize-guide, +.tabulator .tabulator-row-resize-guide { + background-color: var(--tabulator-column-resize-guide-color); + opacity: .5; + position: absolute +} + +.tabulator .tabulator-row-resize-guide { + height: 4px; + left: 0; + margin-top: -.5px; + width: 100% +} + +.tabulator .tabulator-alert { + align-items: center; + background: rgba(0, 0, 0, .4); + display: flex; + height: 100%; + left: 0; + position: absolute; + text-align: center; + top: 0; + width: 100%; + z-index: 100 +} + +.tabulator .tabulator-alert .tabulator-alert-msg { + background: #fff; + border-radius: 10px; + display: inline-block; + font-size: 16px; + font-weight: 700; + margin: 0 auto; + padding: 10px 20px +} + +.tabulator .tabulator-alert .tabulator-alert-msg.tabulator-alert-state-msg { + border: 4px solid #333; + color: #000 +} + +.tabulator .tabulator-alert .tabulator-alert-msg.tabulator-alert-state-error { + border: 4px solid #d00; + color: #590000 +} + +.tabulator-row { + background-color: var(--tabulator-row-background-color); + box-sizing: border-box; + min-height: calc(var(--tabulator-text-size) + var(--tabulator-header-margin)*2); + position: relative +} + +.tabulator-row.tabulator-row-even { + background-color: var(--tabulator-row-alt-background-color) +} + +@media (hover:hover) and (pointer:fine) { + .tabulator-row.tabulator-selectable:hover { + background-color: var(--tabulator-row-hover-background); + cursor: pointer + } +} + +.tabulator-row.tabulator-selected { + background-color: var(--tabulator-row-selected-background) !important; +} + +@media (hover:hover) and (pointer:fine) { + .tabulator-row.tabulator-selected:hover { + background-color: var(--tabulator-row-selected-background-hover) !important; + cursor: pointer + } +} + 
+.tabulator-row.tabulator-row-moving { + background: #fff; + border: 1px solid #000 +} + +.tabulator-row.tabulator-moving { + border-bottom: 1px solid var(--tabulator-row-border-color); + border-top: 1px solid var(--tabulator-row-border-color); + pointer-events: none; + position: absolute; + z-index: 15 +} + +.tabulator-row.tabulator-range-highlight .tabulator-cell.tabulator-range-row-header { + background-color: var(--tabulator-range-header-highlight-background); + color: var(--tabulator-range-header-text-highlight-background) +} + +.tabulator-row.tabulator-range-highlight.tabulator-range-selected .tabulator-cell.tabulator-range-row-header, +.tabulator-row.tabulator-range-selected .tabulator-cell.tabulator-range-row-header { + background-color: var(--tabulator-range-header-selected-background); + color: var(--tabulator-range-header-selected-text-color) +} + +.tabulator-row .tabulator-row-resize-handle { + bottom: 0; + height: 5px; + left: 0; + position: absolute; + right: 0 +} + +.tabulator-row .tabulator-row-resize-handle.prev { + bottom: auto; + top: 0 +} + +@media (hover:hover) and (pointer:fine) { + .tabulator-row .tabulator-row-resize-handle:hover { + cursor: ns-resize + } +} + +.tabulator-row .tabulator-responsive-collapse { + border-bottom: 1px solid var(--tabulator-row-border-color); + border-top: 1px solid var(--tabulator-row-border-color); + box-sizing: border-box; + padding: 5px +} + +.tabulator-row .tabulator-responsive-collapse:empty { + display: none +} + +.tabulator-row .tabulator-responsive-collapse table { + font-size: var(--tabulator-text-size) +} + +.tabulator-row .tabulator-responsive-collapse table tr td { + position: relative +} + +.tabulator-row .tabulator-responsive-collapse table tr td:first-of-type { + padding-right: 10px +} + +.tabulator-row .tabulator-cell { + border-right: 1px solid var(--tabulator-row-border-color); + box-sizing: border-box; + display: inline-block; + outline: none; + overflow: hidden; + padding: 4px; + position: relative; + text-overflow: ellipsis; + vertical-align: middle; + white-space: nowrap +} + +.tabulator-row .tabulator-cell.tabulator-row-header { + border-bottom: 1px solid var(--tabulator-row-border-color) +} + +.tabulator-row .tabulator-cell.tabulator-frozen { + background-color: inherit; + display: inline-block; + left: 0; + position: sticky; + z-index: 11 +} + +.tabulator-row .tabulator-cell.tabulator-frozen.tabulator-frozen-left { + border-right: 2px solid var(--tabulator-row-border-color) +} + +.tabulator-row .tabulator-cell.tabulator-frozen.tabulator-frozen-right { + border-left: 2px solid var(--tabulator-row-border-color) +} + +.tabulator-row .tabulator-cell.tabulator-editing { + border: 1px solid var(--tabulator-edit-box-color); + outline: none; + padding: 0 +} + +.tabulator-row .tabulator-cell.tabulator-editing input, +.tabulator-row .tabulator-cell.tabulator-editing select { + background: transparent; + border: 1px; + outline: none +} + +.tabulator-row .tabulator-cell.tabulator-validation-fail { + border: 1px solid var(--tabulator-error-color) +} + +.tabulator-row .tabulator-cell.tabulator-validation-fail input, +.tabulator-row .tabulator-cell.tabulator-validation-fail select { + background: transparent; + border: 1px; + color: var(--tabulator-error-color) +} + +.tabulator-row .tabulator-cell.tabulator-row-handle { + align-items: center; + display: inline-flex; + justify-content: center; + -moz-user-select: none; + -khtml-user-select: none; + -webkit-user-select: none; + -o-user-select: none +} + +.tabulator-row 
.tabulator-cell.tabulator-row-handle .tabulator-row-handle-box { + width: 80% +} + +.tabulator-row .tabulator-cell.tabulator-row-handle .tabulator-row-handle-box .tabulator-row-handle-bar { + background: #666; + height: 3px; + margin-top: 2px; + width: 100% +} + +.tabulator-row .tabulator-cell.tabulator-range-selected:not(.tabulator-range-only-cell-selected):not(.tabulator-range-row-header) { + background-color: var(--tabulator-row-selected-background) +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-branch-empty { + display: inline-block; + width: 7px +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-branch { + border-bottom: 2px solid var(--tabulator-row-border-color); + border-bottom-left-radius: 1px; + border-left: 2px solid var(--tabulator-row-border-color); + display: inline-block; + height: 9px; + margin-right: 5px; + margin-top: -9px; + vertical-align: middle; + width: 7px +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control { + align-items: center; + background: rgba(0, 0, 0, .1); + border: 1px solid var(--tabulator-row-text-color); + border-radius: 2px; + display: inline-flex; + height: 11px; + justify-content: center; + margin-right: 5px; + overflow: hidden; + vertical-align: middle; + width: 11px +} + +@media (hover:hover) and (pointer:fine) { + .tabulator-row .tabulator-cell .tabulator-data-tree-control:hover { + background: rgba(0, 0, 0, .2); + cursor: pointer + } +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-collapse { + background: transparent; + display: inline-block; + height: 7px; + position: relative; + width: 1px +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-collapse:after { + background: var(--tabulator-row-text-color); + content: ""; + height: 1px; + left: -3px; + position: absolute; + top: 3px; + width: 7px +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-expand { + background: var(--tabulator-row-text-color); + display: inline-block; + height: 7px; + position: relative; + width: 1px +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-expand:after { + background: var(--tabulator-row-text-color); + content: ""; + height: 1px; + left: -3px; + position: absolute; + top: 3px; + width: 7px +} + +.tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle { + align-items: center; + background: #666; + border-radius: 20px; + color: var(--tabulator-row-background-color); + display: inline-flex; + font-size: 1.1em; + font-weight: 700; + height: 15px; + justify-content: center; + -moz-user-select: none; + -khtml-user-select: none; + -webkit-user-select: none; + -o-user-select: none; + width: 15px +} + +@media (hover:hover) and (pointer:fine) { + .tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle:hover { + cursor: pointer; + opacity: .7 + } +} + +.tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle.open .tabulator-responsive-collapse-toggle-close { + display: initial +} + +.tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle.open .tabulator-responsive-collapse-toggle-open { + display: none +} + +.tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle svg { + stroke: var(--tabulator-row-background-color) +} + +.tabulator-row .tabulator-cell .tabulator-responsive-collapse-toggle .tabulator-responsive-collapse-toggle-close { + display: none +} + +.tabulator-row .tabulator-cell 
.tabulator-traffic-light { + border-radius: 14px; + display: inline-block; + height: 14px; + width: 14px +} + +.tabulator-row.tabulator-group { + background: #ccc; + border-bottom: 1px solid #999; + border-right: 1px solid var(--tabulator-row-border-color); + border-top: 1px solid #999; + box-sizing: border-box; + font-weight: 700; + min-width: 100%; + padding: 5px 5px 5px 10px +} + +@media (hover:hover) and (pointer:fine) { + .tabulator-row.tabulator-group:hover { + background-color: rgba(0, 0, 0, .1); + cursor: pointer + } +} + +.tabulator-row.tabulator-group.tabulator-group-visible .tabulator-arrow { + border-bottom: 0; + border-left: 6px solid transparent; + border-right: 6px solid transparent; + border-top: 6px solid var(--tabulator-sort-arrow-active); + margin-right: 10px +} + +.tabulator-row.tabulator-group.tabulator-group-level-1 { + padding-left: 30px +} + +.tabulator-row.tabulator-group.tabulator-group-level-2 { + padding-left: 50px +} + +.tabulator-row.tabulator-group.tabulator-group-level-3 { + padding-left: 70px +} + +.tabulator-row.tabulator-group.tabulator-group-level-4 { + padding-left: 90px +} + +.tabulator-row.tabulator-group.tabulator-group-level-5 { + padding-left: 110px +} + +.tabulator-row.tabulator-group .tabulator-group-toggle { + display: inline-block +} + +.tabulator-row.tabulator-group .tabulator-arrow { + border-bottom: 6px solid transparent; + border-left: 6px solid var(--tabulator-sort-arrow-active); + border-right: 0; + border-top: 6px solid transparent; + display: inline-block; + height: 0; + margin-right: 16px; + vertical-align: middle; + width: 0 +} + +.tabulator-row.tabulator-group span { + color: #d00 +} + +.tabulator-toggle { + background: #dcdcdc; + border: 1px solid #ccc; + box-sizing: border-box; + display: flex; + flex-direction: row +} + +.tabulator-toggle.tabulator-toggle-on { + background: #1c6cc2 +} + +.tabulator-toggle .tabulator-toggle-switch { + background: #fff; + border: 1px solid #ccc; + box-sizing: border-box +} + +.tabulator-popup-container { + -webkit-overflow-scrolling: touch; + background: var(--tabulator-row-background-color); + border: 1px solid var(--tabulator-row-border-color); + box-shadow: 0 0 5px 0 rgba(0, 0, 0, .2); + box-sizing: border-box; + display: inline-block; + font-size: var(--tabulator-text-size); + overflow-y: auto; + position: absolute; + z-index: 10000 +} + +.tabulator-popup { + border-radius: 3px; + padding: 5px +} + +.tabulator-tooltip { + border-radius: 2px; + box-shadow: none; + font-size: 12px; + max-width: min(500px, 100%); + padding: 3px 5px; + pointer-events: none +} + +.tabulator-menu .tabulator-menu-item { + box-sizing: border-box; + padding: 5px 10px; + position: relative; + user-select: none +} + +.tabulator-menu .tabulator-menu-item.tabulator-menu-item-disabled { + opacity: .5 +} + +@media (hover:hover) and (pointer:fine) { + .tabulator-menu .tabulator-menu-item:not(.tabulator-menu-item-disabled):hover { + background: var(--tabulator-row-alt-background-color); + cursor: pointer + } +} + +.tabulator-menu .tabulator-menu-item.tabulator-menu-item-submenu { + padding-right: 25px +} + +.tabulator-menu .tabulator-menu-item.tabulator-menu-item-submenu:after { + border-color: var(--tabulator-row-border-color); + border-style: solid; + border-width: 1px 1px 0 0; + content: ""; + display: inline-block; + height: 7px; + position: absolute; + right: 10px; + top: calc(5px + .4em); + transform: rotate(45deg); + vertical-align: top; + width: 7px +} + +.tabulator-menu .tabulator-menu-separator { + border-top: 1px 
solid var(--tabulator-row-border-color) +} + +.tabulator-edit-list { + -webkit-overflow-scrolling: touch; + font-size: var(--tabulator-text-size); + max-height: 200px; + overflow-y: auto +} + +.tabulator-edit-list .tabulator-edit-list-item { + color: var(--tabulator-row-text-color); + outline: none; + padding: 4px +} + +.tabulator-edit-list .tabulator-edit-list-item.active { + background: var(--tabulator-edit-box-color); + color: var(--tabulator-row-background-color) +} + +.tabulator-edit-list .tabulator-edit-list-item.active.focused { + outline: 1px solid rgba(var(--tabulator-row-background-color), .5) +} + +.tabulator-edit-list .tabulator-edit-list-item.focused { + outline: 1px solid var(--tabulator-edit-box-color) +} + +@media (hover:hover) and (pointer:fine) { + .tabulator-edit-list .tabulator-edit-list-item:hover { + background: var(--tabulator-edit-box-color); + color: var(--tabulator-row-background-color); + cursor: pointer + } +} + +.tabulator-edit-list .tabulator-edit-list-placeholder { + color: var(--tabulator-row-text-color); + padding: 4px; + text-align: center +} + +.tabulator-edit-list .tabulator-edit-list-group { + border-bottom: 1px solid var(--tabulator-row-border-color); + color: var(--tabulator-row-text-color); + font-weight: 700; + padding: 6px 4px 4px +} + +.tabulator-edit-list .tabulator-edit-list-group.tabulator-edit-list-group-level-2, +.tabulator-edit-list .tabulator-edit-list-item.tabulator-edit-list-group-level-2 { + padding-left: 12px +} + +.tabulator-edit-list .tabulator-edit-list-group.tabulator-edit-list-group-level-3, +.tabulator-edit-list .tabulator-edit-list-item.tabulator-edit-list-group-level-3 { + padding-left: 20px +} + +.tabulator-edit-list .tabulator-edit-list-group.tabulator-edit-list-group-level-4, +.tabulator-edit-list .tabulator-edit-list-item.tabulator-edit-list-group-level-4 { + padding-left: 28px +} + +.tabulator-edit-list .tabulator-edit-list-group.tabulator-edit-list-group-level-5, +.tabulator-edit-list .tabulator-edit-list-item.tabulator-edit-list-group-level-5 { + padding-left: 36px +} + +.tabulator.tabulator-ltr { + direction: ltr +} + +.tabulator.tabulator-rtl { + direction: rtl; + text-align: initial +} + +.tabulator.tabulator-rtl .tabulator-header .tabulator-col { + border-left: 1px solid var(--tabulator-header-border-color); + border-right: initial; + text-align: initial +} + +.tabulator.tabulator-rtl .tabulator-header .tabulator-col.tabulator-col-group .tabulator-col-group-cols { + margin-left: -1px; + margin-right: 0 +} + +.tabulator.tabulator-rtl .tabulator-header .tabulator-col.tabulator-sortable .tabulator-col-title { + padding-left: 25px; + padding-right: 0 +} + +.tabulator.tabulator-rtl .tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-sorter { + left: 8px; + right: auto +} + +.tabulator.tabulator-rtl .tabulator-tableholder .tabulator-range-overlay .tabulator-range.tabulator-range-active:after { + background-color: var(--tabulator-range-handle-color); + border-radius: 999px; + bottom: -3px; + content: ""; + height: 6px; + left: -3px; + position: absolute; + right: auto; + width: 6px +} + +.tabulator.tabulator-rtl .tabulator-row .tabulator-cell { + border-left: 1px solid var(--tabulator-row-border-color); + border-right: initial +} + +.tabulator.tabulator-rtl .tabulator-row .tabulator-cell .tabulator-data-tree-branch { + border-bottom-left-radius: 0; + border-bottom-right-radius: 1px; + border-left: initial; + border-right: 2px solid var(--tabulator-row-border-color); + margin-left: 5px; + margin-right: 0 +} 
+ +.tabulator.tabulator-rtl .tabulator-row .tabulator-cell .tabulator-data-tree-control { + margin-left: 5px; + margin-right: 0 +} + +.tabulator.tabulator-rtl .tabulator-row .tabulator-cell.tabulator-frozen.tabulator-frozen-left { + border-left: 2px solid var(--tabulator-row-border-color) +} + +.tabulator.tabulator-rtl .tabulator-row .tabulator-cell.tabulator-frozen.tabulator-frozen-right { + border-right: 2px solid var(--tabulator-row-border-color) +} + +.tabulator.tabulator-rtl .tabulator-row .tabulator-col-resize-handle:last-of-type { + margin-left: 0; + margin-right: -3px; + width: 3px +} + +.tabulator.tabulator-rtl .tabulator-footer .tabulator-calcs-holder { + text-align: initial +} + +.tabulator-print-fullscreen { + bottom: 0; + left: 0; + position: absolute; + right: 0; + top: 0; + z-index: 10000 +} + +body.tabulator-print-fullscreen-hide>:not(.tabulator-print-fullscreen) { + display: none !important +} + +.tabulator-print-table { + border-collapse: collapse +} + +.tabulator-print-table .tabulator-data-tree-branch { + border-bottom: 2px solid var(--tabulator-row-border-color); + border-bottom-left-radius: 1px; + border-left: 2px solid var(--tabulator-row-border-color); + display: inline-block; + height: 9px; + margin-right: 5px; + margin-top: -9px; + vertical-align: middle; + width: 7px +} + +.tabulator-print-table .tabulator-print-table-group { + background: #ccc; + border-bottom: 1px solid #999; + border-right: 1px solid var(--tabulator-row-border-color); + border-top: 1px solid #999; + box-sizing: border-box; + font-weight: 700; + min-width: 100%; + padding: 5px 5px 5px 10px +} + +@media (hover:hover) and (pointer:fine) { + .tabulator-print-table .tabulator-print-table-group:hover { + background-color: rgba(0, 0, 0, .1); + cursor: pointer + } +} + +.tabulator-print-table .tabulator-print-table-group.tabulator-group-visible .tabulator-arrow { + border-bottom: 0; + border-left: 6px solid transparent; + border-right: 6px solid transparent; + border-top: 6px solid var(--tabulator-sort-arrow-active); + margin-right: 10px +} + +.tabulator-print-table .tabulator-print-table-group.tabulator-group-level-1 td { + padding-left: 30px !important +} + +.tabulator-print-table .tabulator-print-table-group.tabulator-group-level-2 td { + padding-left: 50px !important +} + +.tabulator-print-table .tabulator-print-table-group.tabulator-group-level-3 td { + padding-left: 70px !important +} + +.tabulator-print-table .tabulator-print-table-group.tabulator-group-level-4 td { + padding-left: 90px !important +} + +.tabulator-print-table .tabulator-print-table-group.tabulator-group-level-5 td { + padding-left: 110px !important +} + +.tabulator-print-table .tabulator-print-table-group .tabulator-group-toggle { + display: inline-block +} + +.tabulator-print-table .tabulator-print-table-group .tabulator-arrow { + border-bottom: 6px solid transparent; + border-left: 6px solid var(--tabulator-sort-arrow-active); + border-right: 0; + border-top: 6px solid transparent; + display: inline-block; + height: 0; + margin-right: 16px; + vertical-align: middle; + width: 0 +} + +.tabulator-print-table .tabulator-print-table-group span { + color: #d00 +} + +.tabulator-print-table .tabulator-data-tree-control { + align-items: center; + background: rgba(0, 0, 0, .1); + border: 1px solid var(--tabulator-row-text-color); + border-radius: 2px; + display: inline-flex; + height: 11px; + justify-content: center; + margin-right: 5px; + overflow: hidden; + vertical-align: middle; + width: 11px +} + +@media (hover:hover) and 
(pointer:fine) { + .tabulator-print-table .tabulator-data-tree-control:hover { + background: rgba(0, 0, 0, .2); + cursor: pointer + } +} + +.tabulator-print-table .tabulator-data-tree-control .tabulator-data-tree-control-collapse { + background: transparent; + display: inline-block; + height: 7px; + position: relative; + width: 1px +} + +.tabulator-print-table .tabulator-data-tree-control .tabulator-data-tree-control-collapse:after { + background: var(--tabulator-row-text-color); + content: ""; + height: 1px; + left: -3px; + position: absolute; + top: 3px; + width: 7px +} + +.tabulator-print-table .tabulator-data-tree-control .tabulator-data-tree-control-expand { + background: var(--tabulator-row-text-color); + display: inline-block; + height: 7px; + position: relative; + width: 1px +} + +.tabulator-print-table .tabulator-data-tree-control .tabulator-data-tree-control-expand:after { + background: var(--tabulator-row-text-color); + content: ""; + height: 1px; + left: -3px; + position: absolute; + top: 3px; + width: 7px +} + +.tabulator { + background-color: var(--tabulator-background-color); + max-width: 100%; + width: 100% +} + +.tabulator .tabulator-header { + color: inherit +} + +.tabulator .tabulator-header .tabulator-col { + border-top: none +} + +.tabulator .tabulator-header .tabulator-col:first-of-type { + border-left: none +} + +.tabulator .tabulator-header .tabulator-col:last-of-type { + border-right: none +} + +.tabulator .tabulator-header .tabulator-col:not(first-of-type), +.tabulator .tabulator-header .tabulator-col:not(last-of-type) { + border-right: 1px solid var(--tabulator-header-border-color) +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content { + padding: var(--tabulator-cell-padding) +} + +.tabulator .tabulator-header .tabulator-col .tabulator-col-content .tabulator-col-sorter { + right: -10px +} + +.tabulator .tabulator-header .tabulator-col.tabulator-col-group .tabulator-col-group-cols { + border-top: 1px solid var(--tabulator-border-color) +} + +.tabulator .tabulator-header .tabulator-col.tabulator-sortable .tabulator-col-title { + padding-right: 10px +} + +.tabulator .tabulator-header .tabulator-calcs-holder { + border-bottom: 1px solid var(--tabulator-header-separator-color); + width: 100% +} + +.tabulator .tabulator-header .tabulator-frozen-rows-holder { + min-width: 600% +} + +.tabulator .tabulator-header .tabulator-frozen-rows-holder:empty { + display: none +} + +.tabulator .tabulator-header .tabulator-frozen .tabulator-frozen-left, +.tabulator .tabulator-header .tabulator-frozen .tabulator-frozen-right { + background: inherit +} + +.tabulator .tabulator-tableholder .tabulator-table { + color: inherit +} + +.tabulator .tabulator-footer { + background-color: var(--tabulator-footer-background-color); + color: inherit +} + +.tabulator .tabulator-footer .tabulator-spreadsheet-tabs .tabulator-spreadsheet-tab { + font-weight: 400; + padding: 8px 12px +} + +.tabulator .tabulator-footer .tabulator-spreadsheet-tabs .tabulator-spreadsheet-tab.tabulator-spreadsheet-tab-active { + color: var(--tabulator-footer-active-color) +} + +.tabulator .tabulator-footer .tabulator-paginator { + color: inherit +} + +.tabulator .tabulator-footer .tabulator-page { + background: var(--tabulator-pagination-button-background); + border-radius: 0; + border-right: none; + color: var(--tabulator-pagination-button-color); + margin: 5px 0 0; + padding: 8px 12px +} + +.tabulator .tabulator-footer .tabulator-page:first-of-type, +.tabulator .tabulator-footer 
.tabulator-page[data-page=next] { + border-bottom-left-radius: 4px; + border-top-left-radius: 4px +} + +.tabulator .tabulator-footer .tabulator-page:last-of-type, +.tabulator .tabulator-footer .tabulator-page[data-page=prev] { + border: 1px solid var(--tabulator-footer-border-color); + border-bottom-right-radius: 4px; + border-top-right-radius: 4px +} + +.tabulator .tabulator-footer .tabulator-page:not(disabled):hover { + background: var(--tabulator-pagination-button-background-hover); + color: var(--tabulator-pagination-button-color-hover) +} + +.tabulator .tabulator-footer .tabulator-page.active, +.tabulator .tabulator-footer .tabulator-page[data-page=first] :not(disabled):not(:hover), +.tabulator .tabulator-footer .tabulator-page[data-page=last] :not(disabled):not(:hover), +.tabulator .tabulator-footer .tabulator-page[data-page=next] :not(disabled):not(:hover), +.tabulator .tabulator-footer .tabulator-page[data-page=prev] :not(disabled):not(:hover) { + color: var(--tabulator-pagination-button-color-active) +} + +.tabulator.striped .tabulator-row:nth-child(2n) { + background-color: var(--tabulator-row-alt-background-color) +} + +.tabulator.striped .tabulator-row:nth-child(2n).tabulator-selected { + background-color: var(--tabulator-row-selected-background) !important +} + +@media (hover:hover) and (pointer:fine) { + .tabulator.striped .tabulator-row:nth-child(2n).tabulator-selectable:hover { + background-color: var(--tabulator-row-hover-background); + cursor: pointer + } + + .tabulator.striped .tabulator-row:nth-child(2n).tabulator-selected:hover { + background-color: var(--tabulator-row-selected-background-hover) !important; + cursor: pointer + } +} + +.tabulator-row { + border-bottom: 1px solid var(--tabulator-row-border-color); + min-height: calc(var(--tabulator-text-size) + var(--tabulator-cell-padding)*2) +} + +.tabulator-row.tabulator-row-even { + background-color: var(--tabulator-row-background-color) +} + +.tabulator-row .tabulator-cell { + padding: var(--tabulator-cell-padding) +} + +.tabulator-row .tabulator-cell:last-of-type { + border-right: none +} + +.tabulator-row .tabulator-cell.tabulator-row-header { + background: var(--tabulator-header-background-color); + border-bottom: none; + border-right: 1px solid var(--tabulator-border-color) +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control { + border: 1px solid #ccc +} + +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-collapse:after, +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-expand, +.tabulator-row .tabulator-cell .tabulator-data-tree-control .tabulator-data-tree-control-expand:after { + background: #ccc +} + +.tabulator-row.tabulator-group { + background: #fafafa +} + +.tabulator-row.tabulator-group span { + color: #666; + margin-left: 10px +} + +.tabulator-edit-select-list { + background: var(--tabulator-header-background-color) +} + +.tabulator-edit-select-list .tabulator-edit-select-list-item { + color: inherit +} + +.tabulator-edit-select-list .tabulator-edit-select-list-item.active { + color: var(--tabulator-header-background-color) +} + +.tabulator-edit-select-list .tabulator-edit-select-list-item.active.focused { + outline: 1px solid rgba(var(--tabulator-header-background-color), .5) +} + +@media (hover:hover) and (pointer:fine) { + .tabulator-edit-select-list .tabulator-edit-select-list-item:hover { + color: var(--tabulator-header-background-color) + } +} + +.tabulator-edit-select-list 
.tabulator-edit-select-list-group, +.tabulator-edit-select-list .tabulator-edit-select-list-notice { + color: inherit +} + +.tabulator.tabulator-rtl .tabulator-header .tabulator-col { + border-left: none; + border-right: none +} + +.tabulator-print-table .tabulator-print-table-group { + background: #fafafa +} + +.tabulator-print-table .tabulator-print-table-group span { + color: #666; + margin-left: 10px +} + +.tabulator-print-table .tabulator-data-tree-control { + border: 1px solid #ccc +} + +.tabulator-print-table .tabulator-data-tree-control .tabulator-data-tree-control-collapse:after, +.tabulator-print-table .tabulator-data-tree-control .tabulator-data-tree-control-expand, +.tabulator-print-table .tabulator-data-tree-control .tabulator-data-tree-control-expand:after { + background: #ccc +} + +/*# sourceMappingURL=tabulator_pysyft.min.css.map */ \ No newline at end of file diff --git a/packages/syft/src/syft/img/logo.png b/packages/syft/src/syft/assets/img/logo.png similarity index 100% rename from packages/syft/src/syft/img/logo.png rename to packages/syft/src/syft/assets/img/logo.png diff --git a/packages/syft/src/syft/assets/img/small-syft-symbol-logo.png b/packages/syft/src/syft/assets/img/small-syft-symbol-logo.png new file mode 100644 index 00000000000..08086eac8e0 Binary files /dev/null and b/packages/syft/src/syft/assets/img/small-syft-symbol-logo.png differ diff --git a/packages/syft/src/syft/assets/jinja/syft_exception.jinja2 b/packages/syft/src/syft/assets/jinja/syft_exception.jinja2 new file mode 100644 index 00000000000..eab1977edb4 --- /dev/null +++ b/packages/syft/src/syft/assets/jinja/syft_exception.jinja2 @@ -0,0 +1,52 @@ +
+ {{name}}: {{message | escape}}
+ {% if server_trace %}
+ Server Trace:
+      {% if dev_mode %}
+        {{ server_trace | make_links | safe }}
+      {% else %}
+        {{ server_trace | escape }}
+      {% endif %}
+ {% endif %}
+ Client Trace:
+      {% if dev_mode %}
+        {{ traceback_str | make_links | safe }}
+      {% else %}
+        {{ traceback_str | escape }}
+      {% endif %}
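The variables referenced in this template (name, message, server_trace, traceback_str, dev_mode) are supplied when the exception view is rendered; in dev mode the traces pass through a make_links filter unescaped, otherwise they are HTML-escaped. A minimal, hypothetical rendering sketch follows (plain text only, not the template's real HTML; the make_links stand-in and the sample values are assumptions, not syft's actual code):

# A minimal, hypothetical sketch (not the actual syft template or its HTML):
# it only exercises the variables and filters visible above. The make_links
# stand-in mimics syft's custom filter, assumed to linkify paths in dev mode.
from jinja2 import Environment

env = Environment()
env.filters["make_links"] = lambda text: text  # placeholder for the real filter

template = env.from_string(
    "{{ name }}: {{ message | escape }}\n"
    "{% if server_trace %}"
    "Server Trace:\n"
    "{% if dev_mode %}{{ server_trace | make_links | safe }}"
    "{% else %}{{ server_trace | escape }}{% endif %}\n"
    "{% endif %}"
    "Client Trace:\n"
    "{% if dev_mode %}{{ traceback_str | make_links | safe }}"
    "{% else %}{{ traceback_str | escape }}{% endif %}\n"
)

print(template.render(
    name="SyftException",
    message="<request failed>",
    server_trace="",  # empty -> the Server Trace block is skipped
    traceback_str="Traceback (most recent call last): ...",
    dev_mode=False,
))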
\ No newline at end of file diff --git a/packages/syft/src/syft/assets/jinja/table.jinja2 b/packages/syft/src/syft/assets/jinja/table.jinja2 new file mode 100644 index 00000000000..7a44d798540 --- /dev/null +++ b/packages/syft/src/syft/assets/jinja/table.jinja2 @@ -0,0 +1,54 @@
+ {{ icon | safe}}
+ {{ name }}
+ Total: 0
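This container only carries an icon, a title, and a row counter; the grid itself is built client-side by the table.js helpers in the next file, whose buildTable(columns, rowHeader, data, uid, ...) call indexes rows on "_table_repr_index". A hedged sketch of the payload shape the Python side could hand to it (the helper below is illustrative; it is not the serializer syft actually uses):

# Illustrative only: builds a columns/data payload shaped like what buildTable()
# in table.js consumes (field/title column definitions, rows keyed by
# "_table_repr_index"). This is not the serializer syft itself uses.
import json
import secrets

def build_table_payload(rows: list[dict]) -> dict:
    fields = sorted({key for row in rows for key in row})
    return {
        "uid": secrets.token_hex(8),  # ends up in the table-<uid> / search-<uid> element ids
        "columns": [{"field": field, "title": field} for field in fields],
        "rowHeader": {"field": "_table_repr_index", "title": ""},  # assumed shape
        "data": [{"_table_repr_index": i, **row} for i, row in enumerate(rows)],
    }

print(json.dumps(build_table_payload([{"name": "ds_1", "status": "ready"}]), indent=2))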
    + + + + diff --git a/packages/syft/src/syft/assets/js/table.js b/packages/syft/src/syft/assets/js/table.js new file mode 100644 index 00000000000..35fee482bd9 --- /dev/null +++ b/packages/syft/src/syft/assets/js/table.js @@ -0,0 +1,226 @@ +TABULATOR_SRC = + "https://unpkg.com/tabulator-tables@6.2.1/dist/js/tabulator.min"; +TABULATOR_CSS = + "https://cdn.jsdelivr.net/gh/openmined/pysyft/packages/syft/src/syft/assets/css/tabulator_pysyft.min.css"; + +document.querySelectorAll(".escape-unfocus").forEach((input) => { + input.addEventListener("keydown", (event) => { + if (event.key === "Escape") { + event.stopPropagation(); + input.blur(); + } + }); +}); + +function load_script(scriptPath, elementId, on_load, on_error) { + var element = document.getElementById(elementId); + var script = document.createElement("script"); + script.type = "application/javascript"; + script.src = scriptPath; + script.onload = on_load; + script.onerror = on_error; + console.debug("Injecting script:", scriptPath); + element.appendChild(script); +} + +function load_css(cssPath, elementId, on_load, on_error) { + var element = document.getElementById(elementId); + var css = document.createElement("link"); + css.onload = on_load; + css.onerror = on_error; + css.rel = "stylesheet"; + css.type = "text/css"; + css.href = cssPath; + console.debug("Injecting css:", cssPath); + element.appendChild(css); +} + +function fix_url_for_require(url) { + return url.endsWith(".js") ? url.replace(/(\.js)(?!.*\1)/, "") : url; +} + +function load_tabulator(elementId) { + load_css(TABULATOR_CSS, elementId); + + return new Promise((resolve, reject) => { + if (typeof require !== "undefined") { + url = fix_url_for_require(TABULATOR_SRC); + return require([url], function (module) { + window.Tabulator = module; + resolve(); + }, reject); + } else if (typeof window.Tabulator === "undefined") { + load_script(TABULATOR_SRC, elementId, resolve, reject); + } else { + resolve(); + } + }); +} + +function buildTable( + columns, + rowHeader, + data, + uid, + pagination = true, + maxHeight = null, + headerSort = true, +) { + const tableId = `table-${uid}`; + const searchBarId = `search-${uid}`; + const numrowsId = `numrows-${uid}`; + + const tableElement = document.querySelector(`#${tableId}`); + if (!tableElement) { + console.error(`Element with id "${tableId}" not found.`); + return; + } + + const table = new Tabulator(`#${tableId}`, { + data: data, + columns: columns, + rowHeader: rowHeader, + index: "_table_repr_index", + layout: "fitDataStretch", + resizableColumnFit: true, + resizableColumnGuide: true, + pagination: pagination, + paginationSize: 5, + maxHeight: maxHeight, + headerSort: headerSort, + }); + + // Events needed for cell overflow: + // fixes incomplete border + cells too much height for overflowing cells + table.on("pageLoaded", function (_pageno) { + // paginate + table.redraw(); + }); + table.on("columnResized", function (_column) { + // drag resize + table.redraw(); + }); + table.on("tableBuilt", function () { + // first build + table.redraw(); + }); + + const numrowsElement = document.querySelector(`#${numrowsId}`); + if (numrowsElement) { + numrowsElement.innerHTML = data.length; + } + + configureHighlightSingleRow(table, uid); + configureSearch(table, searchBarId, columns); + + return table; +} + +function configureSearch(table, searchBarId, columns) { + // https://stackoverflow.com/questions/76208880/tabulator-global-search-across-multiple-columns + const searchBar = document.getElementById(searchBarId); + if (!searchBar) { + 
console.error(`Element with id "${searchBarId}" not found.`); + return; + } + + const columnFields = columns.map((column) => column.field); + const ignoreColumns = []; + const searchFields = columnFields.filter( + (field) => !ignoreColumns.includes(field), + ); + + searchBar.addEventListener("input", function () { + let searchValue = searchBar.value.trim(); + + let filterArray = searchFields.map((field) => { + return { field: field, type: "like", value: searchValue }; + }); + + table.setFilter([filterArray]); + }); +} + +function configureHighlightSingleRow(table, uid) { + // Listener for rowHighlight events, with fields: + // uid: string, table uid + // index: number | string, row index to highlight + // jumpToRow: bool, if true, jumps to page where the row is located + document.addEventListener("rowHighlight", function (e) { + if (e.detail.uid === uid) { + let row_idx = e.detail.index; + let rows = table.getRows(); + for (let row of rows) { + if (row.getIndex() == row_idx) { + row.select(); + if (e.detail.jumpToRow) { + // catch promise in case the table does not have pagination + table.setPageToRow(row_idx).catch((_) => {}); + table.scrollToRow(row_idx, "top", false); + } + } else { + row.deselect(); + } + } + } + }); +} + +function waitForTable(uid, timeout = 1000) { + return new Promise((resolve, reject) => { + // Check if the table is ready immediately + if (window["table_" + uid]) { + resolve(); + } else { + // Otherwise, poll until the table is ready or timeout + var startTime = Date.now(); + var checkTableInterval = setInterval(function () { + if (window["table_" + uid]) { + clearInterval(checkTableInterval); + resolve(); + } else if (Date.now() - startTime > timeout) { + clearInterval(checkTableInterval); + reject(`Timeout: table_"${uid}" not found.`); + } + }, 100); + } + }); +} + +function highlightSingleRow(uid, index = null, jumpToRow = false) { + // Highlight a single row in the table with the given uid + // If index is not provided or doesn't exist, all rows are deselected + waitForTable(uid) + .then(() => { + document.dispatchEvent( + new CustomEvent("rowHighlight", { + detail: { uid, index, jumpToRow }, + }), + ); + }) + .catch((error) => { + console.log(error); + }); +} + +function updateTableCell(uid, index, field, value) { + // Update the value of a cell in the table with the given uid + waitForTable(uid) + .then(() => { + const table = window["table_" + uid]; + if (!table) { + throw new Error(`Table with uid ${uid} not found.`); + } + + const row = table.getRow(index); + if (!row) { + throw new Error(`Row with index ${index} not found.`); + } + + // Update the cell value + row.update({ [field]: value }); + }) + .catch((error) => { + console.error(error); + }); +} diff --git a/packages/syft/src/syft/assets/js/tabulator.min.js b/packages/syft/src/syft/assets/js/tabulator.min.js new file mode 100644 index 00000000000..61d077c0c3b --- /dev/null +++ b/packages/syft/src/syft/assets/js/tabulator.min.js @@ -0,0 +1,3 @@ +/* Tabulator v6.2.1 (c) Oliver Folkerd 2024 */ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).Tabulator=t()}(this,(function(){"use strict";var 
e={debugEventsExternal:!1,debugEventsInternal:!1,debugInvalidOptions:!0,debugInvalidComponentFuncs:!0,debugInitialization:!0,debugDeprecation:!0,height:!1,minHeight:!1,maxHeight:!1,columnHeaderVertAlign:"top",popupContainer:!1,columns:[],columnDefaults:{},rowHeader:!1,data:!1,autoColumns:!1,autoColumnsDefinitions:!1,nestedFieldSeparator:".",footerElement:!1,index:"id",textDirection:"auto",addRowPos:"bottom",headerVisible:!0,renderVertical:"virtual",renderHorizontal:"basic",renderVerticalBuffer:0,scrollToRowPosition:"top",scrollToRowIfVisible:!0,scrollToColumnPosition:"left",scrollToColumnIfVisible:!0,rowFormatter:!1,rowFormatterPrint:null,rowFormatterClipboard:null,rowFormatterHtmlOutput:null,rowHeight:null,placeholder:!1,dataLoader:!0,dataLoaderLoading:!1,dataLoaderError:!1,dataLoaderErrorTimeout:3e3,dataSendParams:{},dataReceiveParams:{}};class t{constructor(e){this.table=e}reloadData(e,t,i){return this.table.dataLoader.load(e,void 0,void 0,void 0,t,i)}langText(){return this.table.modules.localize.getText(...arguments)}langBind(){return this.table.modules.localize.bind(...arguments)}langLocale(){return this.table.modules.localize.getLocale(...arguments)}commsConnections(){return this.table.modules.comms.getConnections(...arguments)}commsSend(){return this.table.modules.comms.send(...arguments)}layoutMode(){return this.table.modules.layout.getMode()}layoutRefresh(e){return this.table.modules.layout.layout(e)}subscribe(){return this.table.eventBus.subscribe(...arguments)}unsubscribe(){return this.table.eventBus.unsubscribe(...arguments)}subscribed(e){return this.table.eventBus.subscribed(e)}subscriptionChange(){return this.table.eventBus.subscriptionChange(...arguments)}dispatch(){return this.table.eventBus.dispatch(...arguments)}chain(){return this.table.eventBus.chain(...arguments)}confirm(){return this.table.eventBus.confirm(...arguments)}dispatchExternal(){return this.table.externalEvents.dispatch(...arguments)}subscribedExternal(e){return this.table.externalEvents.subscribed(e)}subscriptionChangeExternal(){return this.table.externalEvents.subscriptionChange(...arguments)}options(e){return this.table.options[e]}setOption(e,t){return void 0!==t&&(this.table.options[e]=t),this.table.options[e]}deprecationCheck(e,t,i){return this.table.deprecationAdvisor.check(e,t,i)}deprecationCheckMsg(e,t){return this.table.deprecationAdvisor.checkMsg(e,t)}deprecationMsg(e){return this.table.deprecationAdvisor.msg(e)}module(e){return this.table.module(e)}}class i{constructor(e){return this._column=e,this.type="ColumnComponent",new Proxy(this,{get:function(e,t,i){return void 0!==e[t]?e[t]:e._column.table.componentFunctionBinder.handle("column",e._column,t)}})}getElement(){return this._column.getElement()}getDefinition(){return this._column.getDefinition()}getField(){return this._column.getField()}getTitleDownload(){return this._column.getTitleDownload()}getCells(){var e=[];return this._column.cells.forEach((function(t){e.push(t.getComponent())})),e}isVisible(){return this._column.visible}show(){this._column.isGroup?this._column.columns.forEach((function(e){e.show()})):this._column.show()}hide(){this._column.isGroup?this._column.columns.forEach((function(e){e.hide()})):this._column.hide()}toggle(){this._column.visible?this.hide():this.show()}delete(){return this._column.delete()}getSubColumns(){var e=[];return this._column.columns.length&&this._column.columns.forEach((function(t){e.push(t.getComponent())})),e}getParentColumn(){return this._column.getParentComponent()}_getSelf(){return 
this._column}scrollTo(e,t){return this._column.table.columnManager.scrollToColumn(this._column,e,t)}getTable(){return this._column.table}move(e,t){var i=this._column.table.columnManager.findColumn(e);i?this._column.table.columnManager.moveColumn(this._column,i,t):console.warn("Move Error - No matching column found:",i)}getNextColumn(){var e=this._column.nextColumn();return!!e&&e.getComponent()}getPrevColumn(){var e=this._column.prevColumn();return!!e&&e.getComponent()}updateDefinition(e){return this._column.updateDefinition(e)}getWidth(){return this._column.getWidth()}setWidth(e){var t;return t=!0===e?this._column.reinitializeWidth(!0):this._column.setWidth(e),this._column.table.columnManager.rerenderColumns(!0),t}}var s={title:void 0,field:void 0,columns:void 0,visible:void 0,hozAlign:void 0,vertAlign:void 0,width:void 0,minWidth:40,maxWidth:void 0,maxInitialWidth:void 0,cssClass:void 0,variableHeight:void 0,headerVertical:void 0,headerHozAlign:void 0,headerWordWrap:!1,editableTitle:void 0};class o{constructor(e){return this._cell=e,new Proxy(this,{get:function(e,t,i){return void 0!==e[t]?e[t]:e._cell.table.componentFunctionBinder.handle("cell",e._cell,t)}})}getValue(){return this._cell.getValue()}getOldValue(){return this._cell.getOldValue()}getInitialValue(){return this._cell.initialValue}getElement(){return this._cell.getElement()}getRow(){return this._cell.row.getComponent()}getData(e){return this._cell.row.getData(e)}getType(){return"cell"}getField(){return this._cell.column.getField()}getColumn(){return this._cell.column.getComponent()}setValue(e,t){void 0===t&&(t=!0),this._cell.setValue(e,t)}restoreOldValue(){this._cell.setValueActual(this._cell.getOldValue())}restoreInitialValue(){this._cell.setValueActual(this._cell.initialValue)}checkHeight(){this._cell.checkHeight()}getTable(){return this._cell.table}_getSelf(){return this._cell}}class n extends t{constructor(e,t){super(e.table),this.table=e.table,this.column=e,this.row=t,this.element=null,this.value=null,this.initialValue,this.oldValue=null,this.modules={},this.height=null,this.width=null,this.minWidth=null,this.component=null,this.loaded=!1,this.build()}build(){this.generateElement(),this.setWidth(),this._configureCell(),this.setValueActual(this.column.getFieldValue(this.row.data)),this.initialValue=this.value}generateElement(){this.element=document.createElement("div"),this.element.className="tabulator-cell",this.element.setAttribute("role","gridcell"),this.column.isRowHeader&&this.element.classList.add("tabulator-row-header")}_configureCell(){var e=this.element,t=this.column.getField();(e.style.textAlign=this.column.hozAlign,this.column.vertAlign&&(e.style.display="inline-flex",e.style.alignItems={top:"flex-start",bottom:"flex-end",middle:"center"}[this.column.vertAlign]||"",this.column.hozAlign&&(e.style.justifyContent={left:"flex-start",right:"flex-end",center:"center"}[this.column.hozAlign]||"")),t&&e.setAttribute("tabulator-field",t),this.column.definition.cssClass)&&this.column.definition.cssClass.split(" ").forEach((t=>{e.classList.add(t)}));this.dispatch("cell-init",this),this.column.visible||this.hide()}_generateContents(){var e;switch(typeof(e=this.chain("cell-format",this,null,(()=>this.element.innerHTML=this.value)))){case"object":if(e instanceof Node){for(;this.element.firstChild;)this.element.removeChild(this.element.firstChild);this.element.appendChild(e)}else this.element.innerHTML="",null!=e&&console.warn("Format Error - Formatter has returned a type of object, the only valid formatter object return is an 
instance of Node, the formatter returned:",e);break;case"undefined":this.element.innerHTML="";break;default:this.element.innerHTML=e}}cellRendered(){this.dispatch("cell-rendered",this)}getElement(e){return this.loaded||(this.loaded=!0,e||this.layoutElement()),this.element}getValue(){return this.value}getOldValue(){return this.oldValue}setValue(e,t,i){this.setValueProcessData(e,t,i)&&(this.dispatch("cell-value-updated",this),this.cellRendered(),this.column.definition.cellEdited&&this.column.definition.cellEdited.call(this.table,this.getComponent()),this.dispatchExternal("cellEdited",this.getComponent()),this.subscribedExternal("dataChanged")&&this.dispatchExternal("dataChanged",this.table.rowManager.getData()))}setValueProcessData(e,t,i){var s=!1;return(this.value!==e||i)&&(s=!0,t&&(e=this.chain("cell-value-changing",[this,e],null,e))),this.setValueActual(e),s&&this.dispatch("cell-value-changed",this),s}setValueActual(e){this.oldValue=this.value,this.value=e,this.dispatch("cell-value-save-before",this),this.column.setFieldValue(this.row.data,e),this.dispatch("cell-value-save-after",this),this.loaded&&this.layoutElement()}layoutElement(){this._generateContents(),this.dispatch("cell-layout",this)}setWidth(){this.width=this.column.width,this.element.style.width=this.column.widthStyled}clearWidth(){this.width="",this.element.style.width=""}getWidth(){return this.width||this.element.offsetWidth}setMinWidth(){this.minWidth=this.column.minWidth,this.element.style.minWidth=this.column.minWidthStyled}setMaxWidth(){this.maxWidth=this.column.maxWidth,this.element.style.maxWidth=this.column.maxWidthStyled}checkHeight(){this.row.reinitializeHeight()}clearHeight(){this.element.style.height="",this.height=null,this.dispatch("cell-height",this,"")}setHeight(){this.height=this.row.height,this.element.style.height=this.row.heightStyled,this.dispatch("cell-height",this,this.row.heightStyled)}getHeight(){return this.height||this.element.offsetHeight}show(){this.element.style.display=this.column.vertAlign?"inline-flex":""}hide(){this.element.style.display="none"}delete(){this.dispatch("cell-delete",this),!this.table.rowManager.redrawBlock&&this.element.parentNode&&this.element.parentNode.removeChild(this.element),this.element=!1,this.column.deleteCell(this),this.row.deleteCell(this),this.calcs={}}getIndex(){return this.row.getCellIndex(this)}getComponent(){return this.component||(this.component=new o(this)),this.component}}class r extends t{static defaultOptionList=s;constructor(e,t,i){super(t.table),this.definition=e,this.parent=t,this.type="column",this.columns=[],this.cells=[],this.isGroup=!1,this.isRowHeader=i,this.element=this.createElement(),this.contentElement=!1,this.titleHolderElement=!1,this.titleElement=!1,this.groupElement=this.createGroupElement(),this.hozAlign="",this.vertAlign="",this.field="",this.fieldStructure="",this.getFieldValue="",this.setFieldValue="",this.titleDownload=null,this.titleFormatterRendered=!1,this.mapDefinitions(),this.setField(this.definition.field),this.modules={},this.width=null,this.widthStyled="",this.maxWidth=null,this.maxWidthStyled="",this.maxInitialWidth=null,this.minWidth=null,this.minWidthStyled="",this.widthFixed=!1,this.visible=!0,this.component=null,this.definition.columns?(this.isGroup=!0,this.definition.columns.forEach(((e,t)=>{var i=new r(e,this);this.attachColumn(i)})),this.checkColumnVisibility()):t.registerColumnField(this),this._initialize()}createElement(){var 
e=document.createElement("div");switch(e.classList.add("tabulator-col"),e.setAttribute("role","columnheader"),e.setAttribute("aria-sort","none"),this.isRowHeader&&e.classList.add("tabulator-row-header"),this.table.options.columnHeaderVertAlign){case"middle":e.style.justifyContent="center";break;case"bottom":e.style.justifyContent="flex-end"}return e}createGroupElement(){var e=document.createElement("div");return e.classList.add("tabulator-col-group-cols"),e}mapDefinitions(){var e=this.table.options.columnDefaults;if(e)for(let t in e)void 0===this.definition[t]&&(this.definition[t]=e[t]);this.definition=this.table.columnManager.optionsList.generate(r.defaultOptionList,this.definition)}checkDefinition(){Object.keys(this.definition).forEach((e=>{-1===r.defaultOptionList.indexOf(e)&&console.warn("Invalid column definition option in '"+(this.field||this.definition.title)+"' column:",e)}))}setField(e){this.field=e,this.fieldStructure=e?this.table.options.nestedFieldSeparator?e.split(this.table.options.nestedFieldSeparator):[e]:[],this.getFieldValue=this.fieldStructure.length>1?this._getNestedData:this._getFlatData,this.setFieldValue=this.fieldStructure.length>1?this._setNestedData:this._setFlatData}registerColumnPosition(e){this.parent.registerColumnPosition(e)}registerColumnField(e){this.parent.registerColumnField(e)}reRegisterPosition(){this.isGroup?this.columns.forEach((function(e){e.reRegisterPosition()})):this.registerColumnPosition(this)}_initialize(){for(var e=this.definition;this.element.firstChild;)this.element.removeChild(this.element.firstChild);e.headerVertical&&(this.element.classList.add("tabulator-col-vertical"),"flip"===e.headerVertical&&this.element.classList.add("tabulator-col-vertical-flip")),this.contentElement=this._buildColumnHeaderContent(),this.element.appendChild(this.contentElement),this.isGroup?this._buildGroupHeader():this._buildColumnHeader(),this.dispatch("column-init",this)}_buildColumnHeader(){var e=this.definition;(this.dispatch("column-layout",this),void 0!==e.visible&&(e.visible?this.show(!0):this.hide(!0)),e.cssClass)&&e.cssClass.split(" ").forEach((e=>{this.element.classList.add(e)}));e.field&&this.element.setAttribute("tabulator-field",e.field),this.setMinWidth(parseInt(e.minWidth)),e.maxInitialWidth&&(this.maxInitialWidth=parseInt(e.maxInitialWidth)),e.maxWidth&&this.setMaxWidth(parseInt(e.maxWidth)),this.reinitializeWidth(),this.hozAlign=this.definition.hozAlign,this.vertAlign=this.definition.vertAlign,this.titleElement.style.textAlign=this.definition.headerHozAlign}_buildColumnHeaderContent(){var e=document.createElement("div");return e.classList.add("tabulator-col-content"),this.titleHolderElement=document.createElement("div"),this.titleHolderElement.classList.add("tabulator-col-title-holder"),e.appendChild(this.titleHolderElement),this.titleElement=this._buildColumnHeaderTitle(),this.titleHolderElement.appendChild(this.titleElement),e}_buildColumnHeaderTitle(){var e=this.definition,t=document.createElement("div");if(t.classList.add("tabulator-col-title"),e.headerWordWrap&&t.classList.add("tabulator-col-title-wrap"),e.editableTitle){var i=document.createElement("input");i.classList.add("tabulator-title-editor"),i.addEventListener("click",(e=>{e.stopPropagation(),i.focus()})),i.addEventListener("mousedown",(e=>{e.stopPropagation()})),i.addEventListener("change",(()=>{e.title=i.value,this.dispatchExternal("columnTitleChanged",this.getComponent())})),t.appendChild(i),e.field?this.langBind("columns|"+e.field,(t=>{i.value=t||e.title||" "})):i.value=e.title||" 
"}else e.field?this.langBind("columns|"+e.field,(i=>{this._formatColumnHeaderTitle(t,i||e.title||" ")})):this._formatColumnHeaderTitle(t,e.title||" ");return t}_formatColumnHeaderTitle(e,t){var i=this.chain("column-format",[this,t,e],null,(()=>t));switch(typeof i){case"object":i instanceof Node?e.appendChild(i):(e.innerHTML="",console.warn("Format Error - Title formatter has returned a type of object, the only valid formatter object return is an instance of Node, the formatter returned:",i));break;case"undefined":e.innerHTML="";break;default:e.innerHTML=i}}_buildGroupHeader(){(this.element.classList.add("tabulator-col-group"),this.element.setAttribute("role","columngroup"),this.element.setAttribute("aria-title",this.definition.title),this.definition.cssClass)&&this.definition.cssClass.split(" ").forEach((e=>{this.element.classList.add(e)}));this.titleElement.style.textAlign=this.definition.headerHozAlign,this.element.appendChild(this.groupElement)}_getFlatData(e){return e[this.field]}_getNestedData(e){var t,i=e,s=this.fieldStructure,o=s.length;for(let e=0;e{t.push(e),t=t.concat(e.getColumns(!0))})):t=this.columns,t}getCells(){return this.cells}getTopColumn(){return this.parent.isGroup?this.parent.getTopColumn():this}getDefinition(e){var t=[];return this.isGroup&&e&&(this.columns.forEach((function(e){t.push(e.getDefinition(!0))})),this.definition.columns=t),this.definition}checkColumnVisibility(){var e=!1;this.columns.forEach((function(t){t.visible&&(e=!0)})),e?(this.show(),this.dispatchExternal("columnVisibilityChanged",this.getComponent(),!1)):this.hide()}show(e,t){this.visible||(this.visible=!0,this.element.style.display="",this.parent.isGroup&&this.parent.checkColumnVisibility(),this.cells.forEach((function(e){e.show()})),this.isGroup||null!==this.width||this.reinitializeWidth(),this.table.columnManager.verticalAlignHeaders(),this.dispatch("column-show",this,t),e||this.dispatchExternal("columnVisibilityChanged",this.getComponent(),!0),this.parent.isGroup&&this.parent.matchChildWidths(),this.silent||this.table.columnManager.rerenderColumns())}hide(e,t){this.visible&&(this.visible=!1,this.element.style.display="none",this.table.columnManager.verticalAlignHeaders(),this.parent.isGroup&&this.parent.checkColumnVisibility(),this.cells.forEach((function(e){e.hide()})),this.dispatch("column-hide",this,t),e||this.dispatchExternal("columnVisibilityChanged",this.getComponent(),!1),this.parent.isGroup&&this.parent.matchChildWidths(),this.silent||this.table.columnManager.rerenderColumns())}matchChildWidths(){var e=0;this.contentElement&&this.columns.length&&(this.columns.forEach((function(t){t.visible&&(e+=t.getWidth())})),this.contentElement.style.maxWidth=e-1+"px",this.parent.isGroup&&this.parent.matchChildWidths())}removeChild(e){var t=this.columns.indexOf(e);t>-1&&this.columns.splice(t,1),this.columns.length||this.delete()}setWidth(e){this.widthFixed=!0,this.setWidthActual(e)}setWidthActual(e){isNaN(e)&&(e=Math.floor(this.table.element.clientWidth/100*parseInt(e))),e=Math.max(this.minWidth,e),this.maxWidth&&(e=Math.min(this.maxWidth,e)),this.width=e,this.widthStyled=e?e+"px":"",this.element.style.width=this.widthStyled,this.isGroup||this.cells.forEach((function(e){e.setWidth()})),this.parent.isGroup&&this.parent.matchChildWidths(),this.dispatch("column-width",this),this.subscribedExternal("columnWidth")&&this.dispatchExternal("columnWidth",this.getComponent())}checkCellHeights(){var 
e=[];this.cells.forEach((function(t){t.row.heightInitialized&&(null!==t.row.getElement().offsetParent?(e.push(t.row),t.row.clearCellHeight()):t.row.heightInitialized=!1)})),e.forEach((function(e){e.calcHeight()})),e.forEach((function(e){e.setCellHeight()}))}getWidth(){var e=0;return this.isGroup?this.columns.forEach((function(t){t.visible&&(e+=t.getWidth())})):e=this.width,e}getLeftOffset(){var e=this.element.offsetLeft;return this.parent.isGroup&&(e+=this.parent.getLeftOffset()),e}getHeight(){return Math.ceil(this.element.getBoundingClientRect().height)}setMinWidth(e){this.maxWidth&&e>this.maxWidth&&(e=this.maxWidth,console.warn("the minWidth ("+e+"px) for column '"+this.field+"' cannot be bigger that its maxWidth ("+this.maxWidthStyled+")")),this.minWidth=e,this.minWidthStyled=e?e+"px":"",this.element.style.minWidth=this.minWidthStyled,this.cells.forEach((function(e){e.setMinWidth()}))}setMaxWidth(e){this.minWidth&&e{this.isGroup&&this.columns.forEach((function(e){e.delete()})),this.dispatch("column-delete",this);var i=this.cells.length;for(let e=0;e-1&&this._nextVisibleColumn(e+1)}_nextVisibleColumn(e){var t=this.table.columnManager.getColumnByIndex(e);return!t||t.visible?t:this._nextVisibleColumn(e+1)}prevColumn(){var e=this.table.columnManager.findColumnIndex(this);return e>-1&&this._prevVisibleColumn(e-1)}_prevVisibleColumn(e){var t=this.table.columnManager.getColumnByIndex(e);return!t||t.visible?t:this._prevVisibleColumn(e-1)}reinitializeWidth(e){this.widthFixed=!1,void 0===this.definition.width||e||this.setWidth(this.definition.width),this.dispatch("column-width-fit-before",this),this.fitToData(e),this.dispatch("column-width-fit-after",this)}fitToData(e){if(!this.isGroup){this.widthFixed||(this.element.style.width="",this.cells.forEach((e=>{e.clearWidth()})));var t=this.element.offsetWidth;if((!this.width||!this.widthFixed)&&(this.cells.forEach((e=>{var i=e.getWidth();i>t&&(t=i)})),t)){var i=t+1;this.maxInitialWidth&&!e&&(i=Math.min(i,this.maxInitialWidth)),this.setWidthActual(i)}}}updateDefinition(e){var t;return this.isGroup||this.parent.isGroup?(console.error("Column Update Error - The updateDefinition function is only available on ungrouped columns"),Promise.reject("Column Update Error - The updateDefinition function is only available on columns, not column groups")):(t=Object.assign({},this.getDefinition()),t=Object.assign(t,e),this.table.columnManager.addColumn(t,!1,this).then((e=>(t.field==this.field&&(this.field=!1),this.delete().then((()=>e.getComponent()))))))}deleteCell(e){var t=this.cells.indexOf(e);t>-1&&this.cells.splice(t,1)}getComponent(){return this.component||(this.component=new i(this)),this.component}getPosition(){return this.table.columnManager.getVisibleColumnsByIndex().indexOf(this)+1}getParentComponent(){return this.parent instanceof r&&this.parent.getComponent()}}class a{static elVisible(e){return!(e.offsetWidth<=0&&e.offsetHeight<=0)}static elOffset(e){var t=e.getBoundingClientRect();return{top:t.top+window.pageYOffset-document.documentElement.clientTop,left:t.left+window.pageXOffset-document.documentElement.clientLeft}}static retrieveNestedData(e,t,i){var s,o=e?t.split(e):[t],n=o.length;for(let e=0;ee.subject===l)),r>-1?t[n]=i[r].copy:(a=Object.assign(Array.isArray(l)?[]:{},l),i.unshift({subject:l,copy:a}),t[n]=this.deepClone(l,a,i)))}return t}}class l{constructor(e,t,i={}){this.table=e,this.msgType=t,this.registeredDefaults=Object.assign({},i)}register(e,t){this.registeredDefaults[e]=t}generate(e,t={}){var 
i=Object.assign({},this.registeredDefaults),s=this.table.options.debugInvalidOptions||!0===t.debugInvalidOptions;Object.assign(i,e);for(let e in t)i.hasOwnProperty(e)||(s&&console.warn("Invalid "+this.msgType+" option:",e),i[e]=t.key);for(let e in i)e in t?i[e]=t[e]:Array.isArray(i[e])?i[e]=Object.assign([],i[e]):"object"==typeof i[e]&&null!==i[e]?i[e]=Object.assign({},i[e]):void 0===i[e]&&delete i[e];return i}}class h extends t{constructor(e){super(e),this.elementVertical=e.rowManager.element,this.elementHorizontal=e.columnManager.element,this.tableElement=e.rowManager.tableElement,this.verticalFillMode="fit"}initialize(){}clearRows(){}clearColumns(){}reinitializeColumnWidths(e){}renderRows(){}renderColumns(){}rerenderRows(e){e&&e()}rerenderColumns(e,t){}renderRowCells(e){}rerenderRowCells(e,t){}scrollColumns(e,t){}scrollRows(e,t){}resize(){}scrollToRow(e){}scrollToRowNearestTop(e){}visibleRows(e){return[]}rows(){return this.table.rowManager.getDisplayRows()}styleRow(e,t){var i=e.getElement();t%2?(i.classList.add("tabulator-row-even"),i.classList.remove("tabulator-row-odd")):(i.classList.add("tabulator-row-odd"),i.classList.remove("tabulator-row-even"))}clear(){this.clearRows(),this.clearColumns()}render(){this.renderRows(),this.renderColumns()}rerender(e){this.rerenderRows(),this.rerenderColumns()}scrollToRowPosition(e,t,i){var s=this.rows().indexOf(e),o=e.getElement(),n=0;return new Promise(((r,l)=>{if(s>-1){if(void 0===i&&(i=this.table.options.scrollToRowIfVisible),!i&&a.elVisible(o)&&(n=a.elOffset(o).top-a.elOffset(this.elementVertical).top)>0&&n{i.appendChild(e.getElement())})),e.element.appendChild(i),t||e.cells.forEach((e=>{e.cellRendered()}))}reinitializeColumnWidths(e){e.forEach((function(e){e.reinitializeWidth()}))}}class c extends h{constructor(e){super(e),this.leftCol=0,this.rightCol=0,this.scrollLeft=0,this.vDomScrollPosLeft=0,this.vDomScrollPosRight=0,this.vDomPadLeft=0,this.vDomPadRight=0,this.fitDataColAvg=0,this.windowBuffer=200,this.visibleRows=null,this.initialized=!1,this.isFitData=!1,this.columns=[]}initialize(){this.compatibilityCheck(),this.layoutCheck(),this.vertScrollListen()}compatibilityCheck(){"fitDataTable"==this.options("layout")&&console.warn("Horizontal Virtual DOM is not compatible with fitDataTable layout mode"),this.options("responsiveLayout")&&console.warn("Horizontal Virtual DOM is not compatible with responsive columns"),this.options("rtl")&&console.warn("Horizontal Virtual DOM is not currently compatible with RTL text direction")}layoutCheck(){this.isFitData=this.options("layout").startsWith("fitData")}vertScrollListen(){this.subscribe("scroll-vertical",this.clearVisRowCache.bind(this)),this.subscribe("data-refreshed",this.clearVisRowCache.bind(this))}clearVisRowCache(){this.visibleRows=null}renderColumns(e,t){this.dataChange()}scrollColumns(e,t){this.scrollLeft!=e&&(this.scrollLeft=e,this.scroll(e-(this.vDomScrollPosLeft+this.windowBuffer)))}calcWindowBuffer(){var e=this.elementVertical.clientWidth;this.table.columnManager.columnsByIndex.forEach((t=>{if(t.visible){var i=t.getWidth();i>e&&(e=i)}})),this.windowBuffer=2*e}rerenderColumns(e,t){var i={cols:this.columns,leftCol:this.leftCol,rightCol:this.rightCol},s=0;e&&!this.initialized||(this.clear(),this.calcWindowBuffer(),this.scrollLeft=this.elementVertical.scrollLeft,this.vDomScrollPosLeft=this.scrollLeft-this.windowBuffer,this.vDomScrollPosRight=this.scrollLeft+this.elementVertical.clientWidth+this.windowBuffer,this.table.columnManager.columnsByIndex.forEach((e=>{var 
t,i={};e.visible&&(e.modules.frozen||(t=e.getWidth(),i.leftPos=s,i.rightPos=s+t,i.width=t,this.isFitData&&(i.fitDataCheck=!e.modules.vdomHoz||e.modules.vdomHoz.fitDataCheck),s+t>this.vDomScrollPosLeft&&s{t.appendChild(e.getElement())})),e.element.appendChild(t),e.cells.forEach((e=>{e.cellRendered()}))}}rerenderRowCells(e,t){this.reinitializeRow(e,t)}reinitializeColumnWidths(e){for(let e=this.leftCol;e<=this.rightCol;e++)this.columns[e].reinitializeWidth()}deinitialize(){this.initialized=!1}clear(){this.columns=[],this.leftCol=-1,this.rightCol=0,this.vDomScrollPosLeft=0,this.vDomScrollPosRight=0,this.vDomPadLeft=0,this.vDomPadRight=0}dataChange(){var e,t,i=!1;if(this.isFitData){if(this.table.columnManager.columnsByIndex.forEach((e=>{!e.definition.width&&e.visible&&(i=!0)})),i&&this.table.rowManager.getDisplayRows().length&&(this.vDomScrollPosRight=this.scrollLeft+this.elementVertical.clientWidth+this.windowBuffer,e=this.chain("rows-sample",[1],[],(()=>this.table.rowManager.getDisplayRows()))[0])){t=e.getElement(),e.generateCells(),this.tableElement.appendChild(t);for(let i=0;i{e!==this.columns[i]&&(t=!1)})),!t)}reinitializeRows(){var e=this.getVisibleRows(),t=this.table.rowManager.getRows().filter((t=>!e.includes(t)));e.forEach((e=>{this.reinitializeRow(e,!0)})),t.forEach((e=>{e.deinitialize()}))}getVisibleRows(){return this.visibleRows||(this.visibleRows=this.table.rowManager.getVisibleRows()),this.visibleRows}scroll(e){this.vDomScrollPosLeft+=e,this.vDomScrollPosRight+=e,Math.abs(e)>this.windowBuffer/2?this.rerenderColumns():e>0?(this.addColRight(),this.removeColLeft()):(this.addColLeft(),this.removeColRight())}colPositionAdjust(e,t,i){for(let s=e;s{if("group"!==e.type){var t=e.getCell(i);e.getElement().insertBefore(t.getElement(),e.getCell(this.columns[this.rightCol]).getElement().nextSibling),t.cellRendered()}})),this.fitDataColActualWidthCheck(i),this.rightCol++,this.getVisibleRows().forEach((e=>{"group"!==e.type&&(e.modules.vdomHoz.rightCol=this.rightCol)})),this.rightCol>=this.columns.length-1?this.vDomPadRight=0:this.vDomPadRight-=i.getWidth()):t=!1}e&&(this.tableElement.style.paddingRight=this.vDomPadRight+"px")}addColLeft(){for(var e=!1,t=!0;t;){let i=this.columns[this.leftCol-1];if(i)if(i.modules.vdomHoz.rightPos>=this.vDomScrollPosLeft){e=!0,this.getVisibleRows().forEach((e=>{if("group"!==e.type){var t=e.getCell(i);e.getElement().insertBefore(t.getElement(),e.getCell(this.columns[this.leftCol]).getElement()),t.cellRendered()}})),this.leftCol--,this.getVisibleRows().forEach((e=>{"group"!==e.type&&(e.modules.vdomHoz.leftCol=this.leftCol)})),this.leftCol<=0?this.vDomPadLeft=0:this.vDomPadLeft-=i.getWidth();let t=this.fitDataColActualWidthCheck(i);t&&(this.scrollLeft=this.elementVertical.scrollLeft=this.elementVertical.scrollLeft+t,this.vDomPadRight-=t)}else t=!1;else t=!1}e&&(this.tableElement.style.paddingLeft=this.vDomPadLeft+"px")}removeColRight(){for(var e=!1,t=!0;t;){let i=this.columns[this.rightCol];i&&i.modules.vdomHoz.leftPos>this.vDomScrollPosRight?(e=!0,this.getVisibleRows().forEach((e=>{if("group"!==e.type){var t=e.getCell(i);try{e.getElement().removeChild(t.getElement())}catch(e){console.warn("Could not removeColRight",e.message)}}})),this.vDomPadRight+=i.getWidth(),this.rightCol--,this.getVisibleRows().forEach((e=>{"group"!==e.type&&(e.modules.vdomHoz.rightCol=this.rightCol)}))):t=!1}e&&(this.tableElement.style.paddingRight=this.vDomPadRight+"px")}removeColLeft(){for(var e=!1,t=!0;t;){let 
i=this.columns[this.leftCol];i&&i.modules.vdomHoz.rightPos{if("group"!==e.type){var t=e.getCell(i);try{e.getElement().removeChild(t.getElement())}catch(e){console.warn("Could not removeColLeft",e.message)}}})),this.vDomPadLeft+=i.getWidth(),this.leftCol++,this.getVisibleRows().forEach((e=>{"group"!==e.type&&(e.modules.vdomHoz.leftCol=this.leftCol)}))):t=!1}e&&(this.tableElement.style.paddingLeft=this.vDomPadLeft+"px")}fitDataColActualWidthCheck(e){var t,i;return e.modules.vdomHoz.fitDataCheck&&(e.reinitializeWidth(),(i=(t=e.getWidth())-e.modules.vdomHoz.width)&&(e.modules.vdomHoz.rightPos+=i,e.modules.vdomHoz.width=t,this.colPositionAdjust(this.columns.indexOf(e)+1,this.columns.length,i)),e.modules.vdomHoz.fitDataCheck=!1),i}initializeRow(e){if("group"!==e.type){e.modules.vdomHoz={leftCol:this.leftCol,rightCol:this.rightCol},this.table.modules.frozenColumns&&this.table.modules.frozenColumns.leftColumns.forEach((t=>{this.appendCell(e,t)}));for(let t=this.leftCol;t<=this.rightCol;t++)this.appendCell(e,this.columns[t]);this.table.modules.frozenColumns&&this.table.modules.frozenColumns.rightColumns.forEach((t=>{this.appendCell(e,t)}))}}appendCell(e,t){if(t&&t.visible){let i=e.getCell(t);e.getElement().appendChild(i.getElement()),i.cellRendered()}}reinitializeRow(e,t){if("group"!==e.type&&(t||!e.modules.vdomHoz||e.modules.vdomHoz.leftCol!==this.leftCol||e.modules.vdomHoz.rightCol!==this.rightCol)){for(var i=e.getElement();i.firstChild;)i.removeChild(i.firstChild);this.initializeRow(e)}}}class u extends t{constructor(e){super(e),this.blockHozScrollEvent=!1,this.headersElement=null,this.contentsElement=null,this.rowHeader=null,this.element=null,this.columns=[],this.columnsByIndex=[],this.columnsByField={},this.scrollLeft=0,this.optionsList=new l(this.table,"column definition",s),this.redrawBlock=!1,this.redrawBlockUpdate=null,this.renderer=null}initialize(){this.initializeRenderer(),this.headersElement=this.createHeadersElement(),this.contentsElement=this.createHeaderContentsElement(),this.element=this.createHeaderElement(),this.contentsElement.insertBefore(this.headersElement,this.contentsElement.firstChild),this.element.insertBefore(this.contentsElement,this.element.firstChild),this.initializeScrollWheelWatcher(),this.subscribe("scroll-horizontal",this.scrollHorizontal.bind(this)),this.subscribe("scrollbar-vertical",this.padVerticalScrollbar.bind(this))}padVerticalScrollbar(e){this.table.rtl?this.headersElement.style.marginLeft=e+"px":this.headersElement.style.marginRight=e+"px"}initializeRenderer(){var e,t={virtual:c,basic:d};(e="string"==typeof this.table.options.renderHorizontal?t[this.table.options.renderHorizontal]:this.table.options.renderHorizontal)?(this.renderer=new e(this.table,this.element,this.tableElement),this.renderer.initialize()):console.error("Unable to find matching renderer:",this.table.options.renderHorizontal)}createHeadersElement(){var e=document.createElement("div");return e.classList.add("tabulator-headers"),e.setAttribute("role","row"),e}createHeaderContentsElement(){var e=document.createElement("div");return e.classList.add("tabulator-header-contents"),e.setAttribute("role","rowgroup"),e}createHeaderElement(){var e=document.createElement("div");return e.classList.add("tabulator-header"),e.setAttribute("role","rowgroup"),this.table.options.headerVisible||e.classList.add("tabulator-header-hidden"),e}getElement(){return this.element}getContentsElement(){return this.contentsElement}getHeadersElement(){return 
this.headersElement}scrollHorizontal(e){this.contentsElement.scrollLeft=e,this.scrollLeft=e,this.renderer.scrollColumns(e)}initializeScrollWheelWatcher(){this.contentsElement.addEventListener("wheel",(e=>{var t;e.deltaX&&(t=this.contentsElement.scrollLeft+e.deltaX,this.table.rowManager.scrollHorizontal(t),this.table.columnManager.scrollHorizontal(t))}))}generateColumnsFromRowData(e){var t=[],i={},s="full"===this.table.options.autoColumns?e:[e[0]],o=this.table.options.autoColumnsDefinitions;if(e&&e.length){if(s.forEach((e=>{Object.keys(e).forEach(((s,o)=>{let n,r=e[s];i[s]?!0!==i[s]&&void 0!==r&&(i[s].sorter=this.calculateSorterFromValue(r),i[s]=!0):(n={field:s,title:s,sorter:this.calculateSorterFromValue(r)},t.splice(o,0,n),i[s]=void 0!==r||n)}))})),o)switch(typeof o){case"function":this.table.options.columns=o.call(this.table,t);break;case"object":Array.isArray(o)?t.forEach((e=>{var t=o.find((t=>t.field===e.field));t&&Object.assign(e,t)})):t.forEach((e=>{o[e.field]&&Object.assign(e,o[e.field])})),this.table.options.columns=t}else this.table.options.columns=t;this.setColumns(this.table.options.columns)}}calculateSorterFromValue(e){var t;switch(typeof e){case"undefined":t="string";break;case"boolean":t="boolean";break;case"number":t="number";break;case"object":t=Array.isArray(e)?"array":"string";break;default:t=isNaN(e)||""===e?e.match(/((^[0-9]+[a-z]+)|(^[a-z]+[0-9]+))+$/i)?"alphanum":"string":"number"}return t}setColumns(e,t){for(;this.headersElement.firstChild;)this.headersElement.removeChild(this.headersElement.firstChild);this.columns=[],this.columnsByIndex=[],this.columnsByField={},this.dispatch("columns-loading"),this.dispatchExternal("columnsLoading"),this.table.options.rowHeader&&(this.rowHeader=new r(!0===this.table.options.rowHeader?{}:this.table.options.rowHeader,this,!0),this.columns.push(this.rowHeader),this.headersElement.appendChild(this.rowHeader.getElement()),this.rowHeader.columnRendered()),e.forEach(((e,t)=>{this._addColumn(e)})),this._reIndexColumns(),this.dispatch("columns-loaded"),this.subscribedExternal("columnsLoaded")&&this.dispatchExternal("columnsLoaded",this.getComponents()),this.rerenderColumns(!1,!0),this.redraw(!0)}_addColumn(e,t,i){var s=new r(e,this),o=s.getElement(),n=i?this.findColumnIndex(i):i;if(!t||!this.rowHeader||i&&i!==this.rowHeader||(t=!1,i=this.rowHeader,n=0),i&&n>-1){var a=i.getTopColumn(),l=this.columns.indexOf(a),h=a.getElement();t?(this.columns.splice(l,0,s),h.parentNode.insertBefore(o,h)):(this.columns.splice(l+1,0,s),h.parentNode.insertBefore(o,h.nextSibling))}else t?(this.columns.unshift(s),this.headersElement.insertBefore(s.getElement(),this.headersElement.firstChild)):(this.columns.push(s),this.headersElement.appendChild(s.getElement()));return s.columnRendered(),s}registerColumnField(e){e.definition.field&&(this.columnsByField[e.definition.field]=e)}registerColumnPosition(e){this.columnsByIndex.push(e)}_reIndexColumns(){this.columnsByIndex=[],this.columns.forEach((function(e){e.reRegisterPosition()}))}verticalAlignHeaders(){var e=0;this.redrawBlock||(this.headersElement.style.height="",this.columns.forEach((e=>{e.clearVerticalAlign()})),this.columns.forEach((t=>{var i=t.getHeight();i>e&&(e=i)})),this.headersElement.style.height=e+"px",this.columns.forEach((t=>{t.verticalAlign(this.table.options.columnHeaderVertAlign,e)})),this.table.rowManager.adjustTableSize())}findColumn(e){var t;if("object"!=typeof e)return this.columnsByField[e]||!1;if(e instanceof r)return e;if(e instanceof i)return e._getSelf()||!1;if("undefined"!=typeof 
HTMLElement&&e instanceof HTMLElement){return t=[],this.columns.forEach((e=>{t.push(e),t=t.concat(e.getColumns(!0))})),t.find((t=>t.element===e))||!1}return!1}getColumnByField(e){return this.columnsByField[e]}getColumnsByFieldRoot(e){var t=[];return Object.keys(this.columnsByField).forEach((i=>{(this.table.options.nestedFieldSeparator?i.split(this.table.options.nestedFieldSeparator)[0]:i)===e&&t.push(this.columnsByField[i])})),t}getColumnByIndex(e){return this.columnsByIndex[e]}getFirstVisibleColumn(){var e=this.columnsByIndex.findIndex((e=>e.visible));return e>-1&&this.columnsByIndex[e]}getVisibleColumnsByIndex(){return this.columnsByIndex.filter((e=>e.visible))}getColumns(){return this.columns}findColumnIndex(e){return this.columnsByIndex.findIndex((t=>e===t))}getRealColumns(){return this.columnsByIndex}traverse(e){this.columnsByIndex.forEach(((t,i)=>{e(t,i)}))}getDefinitions(e){var t=[];return this.columnsByIndex.forEach((i=>{(!e||e&&i.visible)&&t.push(i.getDefinition())})),t}getDefinitionTree(){var e=[];return this.columns.forEach((t=>{e.push(t.getDefinition(!0))})),e}getComponents(e){var t=[];return(e?this.columns:this.columnsByIndex).forEach((e=>{t.push(e.getComponent())})),t}getWidth(){var e=0;return this.columnsByIndex.forEach((t=>{t.visible&&(e+=t.getWidth())})),e}moveColumn(e,t,i){t.element.parentNode.insertBefore(e.element,t.element),i&&t.element.parentNode.insertBefore(t.element,e.element),this.moveColumnActual(e,t,i),this.verticalAlignHeaders(),this.table.rowManager.reinitialize()}moveColumnActual(e,t,i){e.parent.isGroup?this._moveColumnInArray(e.parent.columns,e,t,i):this._moveColumnInArray(this.columns,e,t,i),this._moveColumnInArray(this.columnsByIndex,e,t,i,!0),this.rerenderColumns(!0),this.dispatch("column-moved",e,t,i),this.subscribedExternal("columnMoved")&&this.dispatchExternal("columnMoved",e.getComponent(),this.table.columnManager.getComponents())}_moveColumnInArray(e,t,i,s,o){var n,r=e.indexOf(t);r>-1&&(e.splice(r,1),(n=e.indexOf(i))>-1?s&&(n+=1):n=r,e.splice(n,0,t),o&&(this.chain("column-moving-rows",[t,i,s],null,[])||[]).concat(this.table.rowManager.rows).forEach((function(e){if(e.cells.length){var t=e.cells.splice(r,1)[0];e.cells.splice(n,0,t)}})))}scrollToColumn(e,t,i){var s=0,o=e.getLeftOffset(),n=0,r=e.getElement();return new Promise(((a,l)=>{if(void 0===t&&(t=this.table.options.scrollToColumnPosition),void 0===i&&(i=this.table.options.scrollToColumnIfVisible),e.visible){switch(t){case"middle":case"center":n=-this.element.clientWidth/2;break;case"right":n=r.clientWidth-this.headersElement.clientWidth}if(!i&&o>0&&o+r.offsetWidth{t.push(i.generateCell(e))})),t}getFlexBaseWidth(){var e=this.table.element.clientWidth,t=0;return this.table.rowManager.element.scrollHeight>this.table.rowManager.element.clientHeight&&(e-=this.table.rowManager.element.offsetWidth-this.table.rowManager.element.clientWidth),this.columnsByIndex.forEach((function(i){var s,o,n;i.visible&&(s=i.definition.width||0,o=parseInt(i.minWidth),n="string"==typeof s?s.indexOf("%")>-1?e/100*parseInt(s):parseInt(s):s,t+=n>o?n:o)})),t}addColumn(e,t,i){return new Promise(((s,o)=>{var n=this._addColumn(e,t,i);this._reIndexColumns(),this.dispatch("column-add",e,t,i),"fitColumns"!=this.layoutMode()&&n.reinitializeWidth(),this.redraw(!0),this.table.rowManager.reinitialize(),this.rerenderColumns(),s(n)}))}deregisterColumn(e){var t,i=e.getField();i&&delete 
this.columnsByField[i],(t=this.columnsByIndex.indexOf(e))>-1&&this.columnsByIndex.splice(t,1),(t=this.columns.indexOf(e))>-1&&this.columns.splice(t,1),this.verticalAlignHeaders(),this.redraw()}rerenderColumns(e,t){this.redrawBlock?(!1===e||!0===e&&null===this.redrawBlockUpdate)&&(this.redrawBlockUpdate=e):this.renderer.rerenderColumns(e,t)}blockRedraw(){this.redrawBlock=!0,this.redrawBlockUpdate=null}restoreRedraw(){this.redrawBlock=!1,this.verticalAlignHeaders(),this.renderer.rerenderColumns(this.redrawBlockUpdate)}redraw(e){a.elVisible(this.element)&&this.verticalAlignHeaders(),e&&(this.table.rowManager.resetScroll(),this.table.rowManager.reinitialize()),this.confirm("table-redrawing",e)||this.layoutRefresh(e),this.dispatch("table-redraw",e),this.table.footerManager.redraw()}}class m{constructor(e){return this._row=e,new Proxy(this,{get:function(e,t,i){return void 0!==e[t]?e[t]:e._row.table.componentFunctionBinder.handle("row",e._row,t)}})}getData(e){return this._row.getData(e)}getElement(){return this._row.getElement()}getCells(){var e=[];return this._row.getCells().forEach((function(t){e.push(t.getComponent())})),e}getCell(e){var t=this._row.getCell(e);return!!t&&t.getComponent()}getIndex(){return this._row.getData("data")[this._row.table.options.index]}getPosition(){return this._row.getPosition()}watchPosition(e){return this._row.watchPosition(e)}delete(){return this._row.delete()}scrollTo(e,t){return this._row.table.rowManager.scrollToRow(this._row,e,t)}move(e,t){this._row.moveToRow(e,t)}update(e){return this._row.updateData(e)}normalizeHeight(){this._row.normalizeHeight(!0)}_getSelf(){return this._row}reformat(){return this._row.reinitialize()}getTable(){return this._row.table}getNextRow(){var e=this._row.nextRow();return e?e.getComponent():e}getPrevRow(){var e=this._row.prevRow();return e?e.getComponent():e}}class p extends t{constructor(e,t,i="row"){super(t.table),this.parent=t,this.data={},this.type=i,this.element=!1,this.modules={},this.cells=[],this.height=0,this.heightStyled="",this.manualHeight=!1,this.outerHeight=0,this.initialized=!1,this.heightInitialized=!1,this.position=0,this.positionWatchers=[],this.component=null,this.created=!1,this.setData(e)}create(){this.created||(this.created=!0,this.generateElement())}createElement(){var e=document.createElement("div");e.classList.add("tabulator-row"),e.setAttribute("role","row"),this.element=e}getElement(){return this.create(),this.element}detachElement(){this.element&&this.element.parentNode&&this.element.parentNode.removeChild(this.element)}generateElement(){this.createElement(),this.dispatch("row-init",this)}generateCells(){this.cells=this.table.columnManager.generateCells(this)}initialize(e,t){if(this.create(),!this.initialized||e){for(this.deleteCells();this.element.firstChild;)this.element.removeChild(this.element.firstChild);this.dispatch("row-layout-before",this),this.generateCells(),this.initialized=!0,this.table.columnManager.renderer.renderRowCells(this,t),e&&this.normalizeHeight(),this.dispatch("row-layout",this),this.table.options.rowFormatter&&this.table.options.rowFormatter(this.getComponent()),this.dispatch("row-layout-after",this)}else 
this.table.columnManager.renderer.rerenderRowCells(this,t)}rendered(){this.cells.forEach((e=>{e.cellRendered()}))}reinitializeHeight(){this.heightInitialized=!1,this.element&&null!==this.element.offsetParent&&this.normalizeHeight(!0)}deinitialize(){this.initialized=!1}deinitializeHeight(){this.heightInitialized=!1}reinitialize(e){this.initialized=!1,this.heightInitialized=!1,this.manualHeight||(this.height=0,this.heightStyled=""),this.element&&null!==this.element.offsetParent&&this.initialize(!0),this.dispatch("row-relayout",this)}calcHeight(e){var t=0,i=0;this.table.options.rowHeight?this.height=this.table.options.rowHeight:(i=this.calcMinHeight(),t=this.calcMaxHeight(),this.height=e?Math.max(t,i):this.manualHeight?this.height:Math.max(t,i)),this.heightStyled=this.height?this.height+"px":"",this.outerHeight=this.element.offsetHeight}calcMinHeight(){return this.table.options.resizableRows?this.element.clientHeight:0}calcMaxHeight(){var e=0;return this.cells.forEach((function(t){var i=t.getHeight();i>e&&(e=i)})),e}setCellHeight(){this.cells.forEach((function(e){e.setHeight()})),this.heightInitialized=!0}clearCellHeight(){this.cells.forEach((function(e){e.clearHeight()}))}normalizeHeight(e){e&&!this.table.options.rowHeight&&this.clearCellHeight(),this.calcHeight(e),this.setCellHeight()}setHeight(e,t){(this.height!=e||t)&&(this.manualHeight=!0,this.height=e,this.heightStyled=e?e+"px":"",this.setCellHeight(),this.outerHeight=this.element.offsetHeight,this.subscribedExternal("rowHeight")&&this.dispatchExternal("rowHeight",this.getComponent()))}getHeight(){return this.outerHeight}getWidth(){return this.element.offsetWidth}deleteCell(e){var t=this.cells.indexOf(e);t>-1&&this.cells.splice(t,1)}setData(e){this.data=this.chain("row-data-init-before",[this,e],void 0,e),this.dispatch("row-data-init-after",this)}updateData(e){var t,i=this.element&&a.elVisible(this.element),s={};return new Promise(((o,n)=>{"string"==typeof e&&(e=JSON.parse(e)),this.dispatch("row-data-save-before",this),this.subscribed("row-data-changing")&&(s=Object.assign(s,this.data),s=Object.assign(s,e)),t=this.chain("row-data-changing",[this,s,e],null,e);for(let e in t)this.data[e]=t[e];this.dispatch("row-data-save-after",this);for(let s in e){this.table.columnManager.getColumnsByFieldRoot(s).forEach((e=>{let s=this.getCell(e.getField());if(s){let o=e.getFieldValue(t);s.getValue()!==o&&(s.setValueProcessData(o),i&&s.cellRendered())}}))}i?(this.normalizeHeight(!0),this.table.options.rowFormatter&&this.table.options.rowFormatter(this.getComponent())):(this.initialized=!1,this.height=0,this.heightStyled=""),this.dispatch("row-data-changed",this,i,e),this.dispatchExternal("rowUpdated",this.getComponent()),this.subscribedExternal("dataChanged")&&this.dispatchExternal("dataChanged",this.table.rowManager.getData()),o()}))}getData(e){return e?this.chain("row-data-retrieve",[this,e],null,this.data):this.data}getCell(e){return e=this.table.columnManager.findColumn(e),this.initialized||0!==this.cells.length||this.generateCells(),this.cells.find((function(t){return t.column===e}))}getCellIndex(e){return this.cells.findIndex((function(t){return t===e}))}findCell(e){return this.cells.find((t=>t.element===e))}getCells(){return this.initialized||0!==this.cells.length||this.generateCells(),this.cells}nextRow(){return this.table.rowManager.nextDisplayRow(this,!0)||!1}prevRow(){return this.table.rowManager.prevDisplayRow(this,!0)||!1}moveToRow(e,t){var 
i=this.table.rowManager.findRow(e);i?(this.table.rowManager.moveRowActual(this,i,!t),this.table.rowManager.refreshActiveData("display",!1,!0)):console.warn("Move Error - No matching row found:",e)}delete(){return this.dispatch("row-delete",this),this.deleteActual(),Promise.resolve()}deleteActual(e){this.detachModules(),this.table.rowManager.deleteRow(this,e),this.deleteCells(),this.initialized=!1,this.heightInitialized=!1,this.element=!1,this.dispatch("row-deleted",this)}detachModules(){this.dispatch("row-deleting",this)}deleteCells(){var e=this.cells.length;for(let t=0;t{e(this.position)})))}watchPosition(e){this.positionWatchers.push(e),e(this.position)}getGroup(){return this.modules.group||!1}getComponent(){return this.component||(this.component=new m(this)),this.component}}class g extends h{constructor(e){super(e),this.verticalFillMode="fill",this.scrollTop=0,this.scrollLeft=0,this.scrollTop=0,this.scrollLeft=0}clearRows(){for(var e=this.tableElement;e.firstChild;)e.removeChild(e.firstChild);e.scrollTop=0,e.scrollLeft=0,e.style.minWidth="",e.style.minHeight="",e.style.display="",e.style.visibility=""}renderRows(){var e=this.tableElement,t=!0,i=document.createDocumentFragment(),s=this.rows();s.forEach(((e,s)=>{this.styleRow(e,s),e.initialize(!1,!0),"group"!==e.type&&(t=!1),i.appendChild(e.getElement())})),e.appendChild(i),s.forEach((e=>{e.rendered(),e.heightInitialized||e.calcHeight(!0)})),s.forEach((e=>{e.heightInitialized||e.setCellHeight()})),e.style.minWidth=t?this.table.columnManager.getWidth()+"px":""}rerenderRows(e){this.clearRows(),e&&e(),this.renderRows(),this.rows().length||this.table.rowManager.tableEmpty()}scrollToRowNearestTop(e){var t=a.elOffset(e.getElement()).top;return!(Math.abs(this.elementVertical.scrollTop-t)>Math.abs(this.elementVertical.scrollTop+this.elementVertical.clientHeight-t))}scrollToRow(e){var t=e.getElement();this.elementVertical.scrollTop=a.elOffset(t).top-a.elOffset(this.elementVertical).top+this.elementVertical.scrollTop}visibleRows(e){return this.rows()}}class b extends h{constructor(e){super(e),this.verticalFillMode="fill",this.scrollTop=0,this.scrollLeft=0,this.vDomRowHeight=20,this.vDomTop=0,this.vDomBottom=0,this.vDomScrollPosTop=0,this.vDomScrollPosBottom=0,this.vDomTopPad=0,this.vDomBottomPad=0,this.vDomMaxRenderChain=90,this.vDomWindowBuffer=0,this.vDomWindowMinTotalRows=20,this.vDomWindowMinMarginRows=5,this.vDomTopNewRows=[],this.vDomBottomNewRows=[]}clearRows(){for(var e=this.tableElement;e.firstChild;)e.removeChild(e.firstChild);e.style.paddingTop="",e.style.paddingBottom="",e.style.minHeight="",e.style.display="",e.style.visibility="",this.elementVertical.scrollTop=0,this.elementVertical.scrollLeft=0,this.scrollTop=0,this.scrollLeft=0,this.vDomTop=0,this.vDomBottom=0,this.vDomTopPad=0,this.vDomBottomPad=0,this.vDomScrollPosTop=0,this.vDomScrollPosBottom=0}renderRows(){this._virtualRenderFill()}rerenderRows(e){for(var t=this.elementVertical.scrollTop,i=!1,s=!1,o=this.table.rowManager.scrollLeft,n=this.rows(),r=this.vDomTop;r<=this.vDomBottom;r++)if(n[r]){var a=t-n[r].getElement().offsetTop;if(!(!1===s||Math.abs(a){e.deinitializeHeight()})),e&&e(),this.rows().length?this._virtualRenderFill(!1===i?this.rows.length-1:i,!0,s||0):(this.clear(),this.table.rowManager.tableEmpty()),this.scrollColumns(o)}scrollColumns(e){this.table.rowManager.scrollHorizontal(e)}scrollRows(e,t){var i=e-this.vDomScrollPosTop,s=e-this.vDomScrollPosBottom,o=2*this.vDomWindowBuffer,n=this.rows();if(this.scrollTop=e,-i>o||s>o){var 
r=this.table.rowManager.scrollLeft;this._virtualRenderFill(Math.floor(this.elementVertical.scrollTop/this.elementVertical.scrollHeight*n.length)),this.scrollColumns(r)}else t?(i<0&&this._addTopRow(n,-i),s<0&&(this.vDomScrollHeight-this.scrollTop>this.vDomWindowBuffer?this._removeBottomRow(n,-s):this.vDomScrollPosBottom=this.scrollTop)):(s>=0&&this._addBottomRow(n,s),i>=0&&(this.scrollTop>this.vDomWindowBuffer?this._removeTopRow(n,i):this.vDomScrollPosTop=this.scrollTop))}resize(){this.vDomWindowBuffer=this.table.options.renderVerticalBuffer||this.elementVertical.clientHeight}scrollToRowNearestTop(e){var t=this.rows().indexOf(e);return!(Math.abs(this.vDomTop-t)>Math.abs(this.vDomBottom-t))}scrollToRow(e){var t=this.rows().indexOf(e);t>-1&&this._virtualRenderFill(t,!0)}visibleRows(e){var t=this.elementVertical.scrollTop,i=this.elementVertical.clientHeight+t,s=!1,o=0,n=0,r=this.rows();if(e)o=this.vDomTop,n=this.vDomBottom;else for(var a=this.vDomTop;a<=this.vDomBottom;a++)if(r[a])if(s){if(!(i-r[a].getElement().offsetTop>=0))break;n=a}else if(t-r[a].getElement().offsetTop>=0)o=a;else{if(s=!0,!(i-r[a].getElement().offsetTop>=0))break;n=a}return r.slice(o,n+1)}_virtualRenderFill(e,t,i){var s,o,n=this.tableElement,r=this.elementVertical,l=0,h=0,d=0,c=0,u=0,m=0,p=this.rows(),g=p.length,b=0,f=[],v=0,w=0,C=this.table.rowManager.fixedHeight,E=this.elementVertical.clientHeight,y=this.table.options.rowHeight,R=!0;if(i=i||0,e=e||0){for(;n.firstChild;)n.removeChild(n.firstChild);(c=(g-e+1)*this.vDomRowHeight){e.rendered(),e.heightInitialized||e.calcHeight(!0)})),f.forEach((e=>{e.heightInitialized||e.setCellHeight()})),f.forEach((e=>{d=e.getHeight(),vthis.vDomWindowBuffer&&(this.vDomWindowBuffer=2*d),v++})),R=this.table.rowManager.adjustTableSize(),E=this.elementVertical.clientHeight,R&&(C||this.table.options.maxHeight)&&(y=h/v,w=Math.max(this.vDomWindowMinTotalRows,Math.ceil(E/y+this.vDomWindowBuffer/y)))}e?(this.vDomTopPad=t?this.vDomRowHeight*this.vDomTop+i:this.scrollTop-u,this.vDomBottomPad=this.vDomBottom==g-1?0:Math.max(this.vDomScrollHeight-this.vDomTopPad-h-u,0)):(this.vDomTopPad=0,this.vDomRowHeight=Math.floor((h+u)/v),this.vDomBottomPad=this.vDomRowHeight*(g-this.vDomBottom-1),this.vDomScrollHeight=u+h+this.vDomBottomPad-E),n.style.paddingTop=this.vDomTopPad+"px",n.style.paddingBottom=this.vDomBottomPad+"px",t&&(this.scrollTop=this.vDomTopPad+u+i-(this.elementVertical.scrollWidth>this.elementVertical.clientWidth?this.elementVertical.offsetHeight-E:0)),this.scrollTop=Math.min(this.scrollTop,this.elementVertical.scrollHeight-E),this.elementVertical.scrollWidth>this.elementVertical.clientWidth&&t&&(this.scrollTop+=this.elementVertical.offsetHeight-E),this.vDomScrollPosTop=this.scrollTop,this.vDomScrollPosBottom=this.scrollTop,r.scrollTop=this.scrollTop,this.dispatch("render-virtual-fill")}}_addTopRow(e,t){for(var i=this.tableElement,s=[],o=0,n=this.vDomTop-1,r=0,a=!0;a;)if(this.vDomTop){let l,h,d=e[n];d&&r=l?(this.styleRow(d,n),i.insertBefore(d.getElement(),i.firstChild),d.initialized&&d.heightInitialized||s.push(d),d.initialize(),h||(l=d.getElement().offsetHeight,l>this.vDomWindowBuffer&&(this.vDomWindowBuffer=2*l)),t-=l,o+=l,this.vDomTop--,n--,r++):a=!1):a=!1}else a=!1;for(let e of s)e.clearCellHeight();this._quickNormalizeRowHeight(s),o&&(this.vDomTopPad-=o,this.vDomTopPad<0&&(this.vDomTopPad=n*this.vDomRowHeight),n<1&&(this.vDomTopPad=0),i.style.paddingTop=this.vDomTopPad+"px",this.vDomScrollPosTop-=o)}_removeTopRow(e,t){for(var i=[],s=0,o=0,n=!0;n;){let 
r,a=e[this.vDomTop];a&&o=r?(this.vDomTop++,t-=r,s+=r,i.push(a),o++):n=!1):n=!1}for(let e of i){let t=e.getElement();t.parentNode&&t.parentNode.removeChild(t)}s&&(this.vDomTopPad+=s,this.tableElement.style.paddingTop=this.vDomTopPad+"px",this.vDomScrollPosTop+=this.vDomTop?s:s+this.vDomWindowBuffer)}_addBottomRow(e,t){for(var i=this.tableElement,s=[],o=0,n=this.vDomBottom+1,r=0,a=!0;a;){let l,h,d=e[n];d&&r=l?(this.styleRow(d,n),i.appendChild(d.getElement()),d.initialized&&d.heightInitialized||s.push(d),d.initialize(),h||(l=d.getElement().offsetHeight,l>this.vDomWindowBuffer&&(this.vDomWindowBuffer=2*l)),t-=l,o+=l,this.vDomBottom++,n++,r++):a=!1):a=!1}for(let e of s)e.clearCellHeight();this._quickNormalizeRowHeight(s),o&&(this.vDomBottomPad-=o,(this.vDomBottomPad<0||n==e.length-1)&&(this.vDomBottomPad=0),i.style.paddingBottom=this.vDomBottomPad+"px",this.vDomScrollPosBottom+=o)}_removeBottomRow(e,t){for(var i=[],s=0,o=0,n=!0;n;){let r,a=e[this.vDomBottom];a&&o=r?(this.vDomBottom--,t-=r,s+=r,i.push(a),o++):n=!1):n=!1}for(let e of i){let t=e.getElement();t.parentNode&&t.parentNode.removeChild(t)}s&&(this.vDomBottomPad+=s,this.vDomBottomPad<0&&(this.vDomBottomPad=0),this.tableElement.style.paddingBottom=this.vDomBottomPad+"px",this.vDomScrollPosBottom-=s)}_quickNormalizeRowHeight(e){for(let t of e)t.calcHeight();for(let t of e)t.setCellHeight()}}class f extends t{constructor(e){super(e),this.element=this.createHolderElement(),this.tableElement=this.createTableElement(),this.heightFixer=this.createTableElement(),this.placeholder=null,this.placeholderContents=null,this.firstRender=!1,this.renderMode="virtual",this.fixedHeight=!1,this.rows=[],this.activeRowsPipeline=[],this.activeRows=[],this.activeRowsCount=0,this.displayRows=[],this.displayRowsCount=0,this.scrollTop=0,this.scrollLeft=0,this.redrawBlock=!1,this.redrawBlockRestoreConfig=!1,this.redrawBlockRenderInPosition=!1,this.dataPipeline=[],this.displayPipeline=[],this.scrollbarWidth=0,this.renderer=null}createHolderElement(){var e=document.createElement("div");return e.classList.add("tabulator-tableholder"),e.setAttribute("tabindex",0),e}createTableElement(){var e=document.createElement("div");return e.classList.add("tabulator-table"),e.setAttribute("role","rowgroup"),e}initializePlaceholder(){var e=this.table.options.placeholder;if("function"==typeof e&&(e=e.call(this.table)),e=this.chain("placeholder",[e],e,e)||e){let t=document.createElement("div");if(t.classList.add("tabulator-placeholder"),"string"==typeof e){let i=document.createElement("div");i.classList.add("tabulator-placeholder-contents"),i.innerHTML=e,t.appendChild(i),this.placeholderContents=i}else"undefined"!=typeof HTMLElement&&e instanceof HTMLElement?(t.appendChild(e),this.placeholderContents=e):(console.warn("Invalid placeholder provided, must be string or HTML Element",e),this.el=null);this.placeholder=t}}getElement(){return this.element}getTableElement(){return this.tableElement}initialize(){this.initializePlaceholder(),this.initializeRenderer(),this.element.appendChild(this.tableElement),this.firstRender=!0,this.element.addEventListener("scroll",(()=>{var 
e=this.element.scrollLeft,t=this.scrollLeft>e,i=this.element.scrollTop,s=this.scrollTop>i;this.scrollLeft!=e&&(this.scrollLeft=e,this.dispatch("scroll-horizontal",e,t),this.dispatchExternal("scrollHorizontal",e,t),this._positionPlaceholder()),this.scrollTop!=i&&(this.scrollTop=i,this.renderer.scrollRows(i,s),this.dispatch("scroll-vertical",i,s),this.dispatchExternal("scrollVertical",i,s))}))}findRow(e){if("object"!=typeof e){if(void 0===e)return!1;return this.rows.find((t=>t.data[this.table.options.index]==e))||!1}if(e instanceof p)return e;if(e instanceof m)return e._getSelf()||!1;if("undefined"!=typeof HTMLElement&&e instanceof HTMLElement){return this.rows.find((t=>t.getElement()===e))||!1}return!1}getRowFromDataObject(e){return this.rows.find((t=>t.data===e))||!1}getRowFromPosition(e){return this.getDisplayRows().find((t=>"row"===t.type&&t.getPosition()===e&&t.isDisplayed()))}scrollToRow(e,t,i){return this.renderer.scrollToRowPosition(e,t,i)}setData(e,t,i){return new Promise(((s,o)=>{t&&this.getDisplayRows().length?this.table.options.pagination?this._setDataActual(e,!0):this.reRenderInPosition((()=>{this._setDataActual(e)})):(this.table.options.autoColumns&&i&&this.table.initialized&&this.table.columnManager.generateColumnsFromRowData(e),this.resetScroll(),this._setDataActual(e)),s()}))}_setDataActual(e,t){this.dispatchExternal("dataProcessing",e),this._wipeElements(),Array.isArray(e)?(this.dispatch("data-processing",e),e.forEach(((e,t)=>{if(e&&"object"==typeof e){var i=new p(e,this);this.rows.push(i)}else console.warn("Data Loading Warning - Invalid row data detected and ignored, expecting object but received:",e)})),this.refreshActiveData(!1,!1,t),this.dispatch("data-processed",e),this.dispatchExternal("dataProcessed",e)):console.error("Data Loading Error - Unable to process data due to invalid data type \nExpecting: array \nReceived: ",typeof e,"\nData: ",e)}_wipeElements(){this.dispatch("rows-wipe"),this.destroy(),this.adjustTableSize(),this.dispatch("rows-wiped")}destroy(){this.rows.forEach((e=>{e.wipe()})),this.rows=[],this.activeRows=[],this.activeRowsPipeline=[],this.activeRowsCount=0,this.displayRows=[],this.displayRowsCount=0}deleteRow(e,t){var i=this.rows.indexOf(e),s=this.activeRows.indexOf(e);s>-1&&this.activeRows.splice(s,1),i>-1&&this.rows.splice(i,1),this.setActiveRows(this.activeRows),this.displayRowIterator((t=>{var i=t.indexOf(e);i>-1&&t.splice(i,1)})),t||this.reRenderInPosition(),this.regenerateRowPositions(),this.dispatchExternal("rowDeleted",e.getComponent()),this.displayRowsCount||this.tableEmpty(),this.subscribedExternal("dataChanged")&&this.dispatchExternal("dataChanged",this.getData())}addRow(e,t,i,s){return this.addRowActual(e,t,i,s)}addRows(e,t,i,s){var o=[];return new Promise(((n,r)=>{t=this.findAddRowPos(t),Array.isArray(e)||(e=[e]),(void 0===i&&t||void 0!==i&&!t)&&e.reverse(),e.forEach(((e,s)=>{var n=this.addRow(e,t,i,!0);o.push(n),this.dispatch("row-added",n,e,t,i)})),this.refreshActiveData(!!s&&"displayPipeline",!1,!0),this.regenerateRowPositions(),this.displayRowsCount&&this._clearPlaceholder(),n(o)}))}findAddRowPos(e){return void 0===e&&(e=this.table.options.addRowPos),"pos"===e&&(e=!0),"bottom"===e&&(e=!1),e}addRowActual(e,t,i,s){var o,n,r=e instanceof p?e:new p(e||{},this),a=this.findAddRowPos(t),l=-1;return i||(n=this.chain("row-adding-position",[r,a],null,{index:i,top:a}),i=n.index,a=n.top),void 
0!==i&&(i=this.findRow(i)),(i=this.chain("row-adding-index",[r,i,a],null,i))&&(l=this.rows.indexOf(i)),i&&l>-1?(o=this.activeRows.indexOf(i),this.displayRowIterator((function(e){var t=e.indexOf(i);t>-1&&e.splice(a?t:t+1,0,r)})),o>-1&&this.activeRows.splice(a?o:o+1,0,r),this.rows.splice(a?l:l+1,0,r)):a?(this.displayRowIterator((function(e){e.unshift(r)})),this.activeRows.unshift(r),this.rows.unshift(r)):(this.displayRowIterator((function(e){e.push(r)})),this.activeRows.push(r),this.rows.push(r)),this.setActiveRows(this.activeRows),this.dispatchExternal("rowAdded",r.getComponent()),this.subscribedExternal("dataChanged")&&this.dispatchExternal("dataChanged",this.table.rowManager.getData()),s||this.reRenderInPosition(),r}moveRow(e,t,i){this.dispatch("row-move",e,t,i),this.moveRowActual(e,t,i),this.regenerateRowPositions(),this.dispatch("row-moved",e,t,i),this.dispatchExternal("rowMoved",e.getComponent())}moveRowActual(e,t,i){this.moveRowInArray(this.rows,e,t,i),this.moveRowInArray(this.activeRows,e,t,i),this.displayRowIterator((s=>{this.moveRowInArray(s,e,t,i)})),this.dispatch("row-moving",e,t,i)}moveRowInArray(e,t,i,s){var o,n,r;if(t!==i&&((o=e.indexOf(t))>-1&&(e.splice(o,1),(n=e.indexOf(i))>-1?s?e.splice(n+1,0,t):e.splice(n,0,t):e.splice(o,0,t)),e===this.getDisplayRows())){r=n>o?n:o+1;for(let t=o-1&&t}nextDisplayRow(e,t){var i=this.getDisplayRowIndex(e),s=!1;return!1!==i&&i-1)&&i}getData(e,t){var i=[];return this.getRows(e).forEach((function(e){"row"==e.type&&i.push(e.getData(t||"data"))})),i}getComponents(e){var t=[];return this.getRows(e).forEach((function(e){t.push(e.getComponent())})),t}getDataCount(e){return this.getRows(e).length}scrollHorizontal(e){this.scrollLeft=e,this.element.scrollLeft=e,this.dispatch("scroll-horizontal",e)}registerDataPipelineHandler(e,t){void 0!==t?(this.dataPipeline.push({handler:e,priority:t}),this.dataPipeline.sort(((e,t)=>e.priority-t.priority))):console.error("Data pipeline handlers must have a priority in order to be registered")}registerDisplayPipelineHandler(e,t){void 0!==t?(this.displayPipeline.push({handler:e,priority:t}),this.displayPipeline.sort(((e,t)=>e.priority-t.priority))):console.error("Display pipeline handlers must have a priority in order to be registered")}refreshActiveData(e,t,i){var s=this.table,o="",n=0,r=["all","dataPipeline","display","displayPipeline","end"];if(!this.table.destroyed){if("function"==typeof e)if((n=this.dataPipeline.findIndex((t=>t.handler===e)))>-1)o="dataPipeline",t&&(n==this.dataPipeline.length-1?o="display":n++);else{if(!((n=this.displayPipeline.findIndex((t=>t.handler===e)))>-1))return void console.error("Unable to refresh data, invalid handler provided",e);o="displayPipeline",t&&(n==this.displayPipeline.length-1?o="end":n++)}else o=e||"all",n=0;if(this.redrawBlock)return void((!this.redrawBlockRestoreConfig||this.redrawBlockRestoreConfig&&(this.redrawBlockRestoreConfig.stage===o&&n{"row"===e.type&&(e.setPosition(t),t++)}))}setActiveRows(e){this.activeRows=this.activeRows=Object.assign([],e),this.activeRowsCount=this.activeRows.length}resetDisplayRows(){this.displayRows=[],this.displayRows.push(this.activeRows.slice(0)),this.displayRowsCount=this.displayRows[0].length}setDisplayRows(e,t){this.displayRows[t]=e,t==this.displayRows.length-1&&(this.displayRowsCount=this.displayRows[this.displayRows.length-1].length)}getDisplayRows(e){return void 0===e?this.displayRows.length?this.displayRows[this.displayRows.length-1]:[]:this.displayRows[e]||[]}getVisibleRows(e,t){var 
i=Object.assign([],this.renderer.visibleRows(!t));return e&&(i=this.chain("rows-visible",[t],i,i)),i}displayRowIterator(e){this.activeRowsPipeline.forEach(e),this.displayRows.forEach(e),this.displayRowsCount=this.displayRows[this.displayRows.length-1].length}getRows(e){var t=[];switch(e){case"active":t=this.activeRows;break;case"display":t=this.table.rowManager.getDisplayRows();break;case"visible":t=this.getVisibleRows(!1,!0);break;default:t=this.chain("rows-retrieve",e,null,this.rows)||this.rows}return t}reRenderInPosition(e){this.redrawBlock?e?e():this.redrawBlockRenderInPosition=!0:(this.dispatchExternal("renderStarted"),this.renderer.rerenderRows(e),this.fixedHeight||this.adjustTableSize(),this.scrollBarCheck(),this.dispatchExternal("renderComplete"))}scrollBarCheck(){var e=0;this.element.scrollHeight>this.element.clientHeight&&(e=this.element.offsetWidth-this.element.clientWidth),e!==this.scrollbarWidth&&(this.scrollbarWidth=e,this.dispatch("scrollbar-vertical",e))}initializeRenderer(){var e,t={virtual:b,basic:g};(e="string"==typeof this.table.options.renderVertical?t[this.table.options.renderVertical]:this.table.options.renderVertical)?(this.renderMode=this.table.options.renderVertical,this.renderer=new e(this.table,this.element,this.tableElement),this.renderer.initialize(),!this.table.element.clientHeight&&!this.table.options.height||this.table.options.minHeight&&this.table.options.maxHeight?this.fixedHeight=!1:this.fixedHeight=!0):console.error("Unable to find matching renderer:",this.table.options.renderVertical)}getRenderMode(){return this.renderMode}renderTable(){this.dispatchExternal("renderStarted"),this.element.scrollTop=0,this._clearTable(),this.displayRowsCount?(this.renderer.renderRows(),this.firstRender&&(this.firstRender=!1,this.fixedHeight||this.adjustTableSize(),this.layoutRefresh(!0))):this.renderEmptyScroll(),this.fixedHeight||this.adjustTableSize(),this.dispatch("table-layout"),this.displayRowsCount||this._showPlaceholder(),this.scrollBarCheck(),this.dispatchExternal("renderComplete")}renderEmptyScroll(){this.placeholder?this.tableElement.style.display="none":this.tableElement.style.minWidth=this.table.columnManager.getWidth()+"px"}_clearTable(){this._clearPlaceholder(),this.scrollTop=0,this.scrollLeft=0,this.renderer.clearRows()}tableEmpty(){this.renderEmptyScroll(),this._showPlaceholder()}checkPlaceholder(){this.displayRowsCount?this._clearPlaceholder():this.tableEmpty()}_showPlaceholder(){this.placeholder&&(this.placeholder&&this.placeholder.parentNode&&this.placeholder.parentNode.removeChild(this.placeholder),this.initializePlaceholder(),this.placeholder.setAttribute("tabulator-render-mode",this.renderMode),this.getElement().appendChild(this.placeholder),this._positionPlaceholder(),this.adjustTableSize())}_clearPlaceholder(){this.placeholder&&this.placeholder.parentNode&&this.placeholder.parentNode.removeChild(this.placeholder),this.tableElement.style.minWidth="",this.tableElement.style.display=""}_positionPlaceholder(){this.placeholder&&this.placeholder.parentNode&&(this.placeholder.style.width=this.table.columnManager.getWidth()+"px",this.placeholderContents.style.width=this.table.rowManager.element.clientWidth+"px",this.placeholderContents.style.marginLeft=this.scrollLeft+"px")}styleRow(e,t){var 
i=e.getElement();t%2?(i.classList.add("tabulator-row-even"),i.classList.remove("tabulator-row-odd")):(i.classList.add("tabulator-row-odd"),i.classList.remove("tabulator-row-even"))}normalizeHeight(){this.activeRows.forEach((function(e){e.normalizeHeight()}))}adjustTableSize(){let e,t=this.element.clientHeight,i=!1;if("fill"===this.renderer.verticalFillMode){let s=Math.floor(this.table.columnManager.getElement().getBoundingClientRect().height+(this.table.footerManager&&this.table.footerManager.active&&!this.table.footerManager.external?this.table.footerManager.getElement().getBoundingClientRect().height:0));if(this.fixedHeight){e=isNaN(this.table.options.minHeight)?this.table.options.minHeight:this.table.options.minHeight+"px";const t="calc(100% - "+s+"px)";this.element.style.minHeight=e||"calc(100% - "+s+"px)",this.element.style.height=t,this.element.style.maxHeight=t}else this.element.style.height="",this.element.style.height=this.table.element.clientHeight-s+"px",this.element.scrollTop=this.scrollTop;this.renderer.resize(),this.fixedHeight||t==this.element.clientHeight||(i=!0,this.subscribed("table-resize")?this.dispatch("table-resize"):this.redraw()),this.scrollBarCheck()}return this._positionPlaceholder(),i}reinitialize(){this.rows.forEach((function(e){e.reinitialize(!0)}))}blockRedraw(){this.redrawBlock=!0,this.redrawBlockRestoreConfig=!1}restoreRedraw(){this.redrawBlock=!1,this.redrawBlockRestoreConfig?(this.refreshActiveData(this.redrawBlockRestoreConfig.handler,this.redrawBlockRestoreConfig.skipStage,this.redrawBlockRestoreConfig.renderInPosition),this.redrawBlockRestoreConfig=!1):this.redrawBlockRenderInPosition&&this.reRenderInPosition(),this.redrawBlockRenderInPosition=!1}redraw(e){this.adjustTableSize(),this.table.tableWidth=this.table.element.clientWidth,e?this.renderTable():(this.reRenderInPosition(),this.scrollHorizontal(this.scrollLeft))}resetScroll(){if(this.element.scrollLeft=0,this.element.scrollTop=0,"ie"===this.table.browser){var e=document.createEvent("Event");e.initEvent("scroll",!1,!0),this.element.dispatchEvent(e)}else this.element.dispatchEvent(new Event("scroll"))}}class v extends t{constructor(e){super(e),this.active=!1,this.element=this.createElement(),this.containerElement=this.createContainerElement(),this.external=!1}initialize(){this.initializeElement()}createElement(){var e=document.createElement("div");return e.classList.add("tabulator-footer"),e}createContainerElement(){var e=document.createElement("div");return e.classList.add("tabulator-footer-contents"),this.element.appendChild(e),e}initializeElement(){if(this.table.options.footerElement)if("string"==typeof this.table.options.footerElement)"<"===this.table.options.footerElement[0]?this.containerElement.innerHTML=this.table.options.footerElement:(this.external=!0,this.containerElement=document.querySelector(this.table.options.footerElement));else this.element=this.table.options.footerElement}getElement(){return 
this.element}append(e){this.activate(),this.containerElement.appendChild(e),this.table.rowManager.adjustTableSize()}prepend(e){this.activate(),this.element.insertBefore(e,this.element.firstChild),this.table.rowManager.adjustTableSize()}remove(e){e.parentNode.removeChild(e),this.deactivate()}deactivate(e){this.element.firstChild&&!e||(this.external||this.element.parentNode.removeChild(this.element),this.active=!1)}activate(){this.active||(this.active=!0,this.external||(this.table.element.appendChild(this.getElement()),this.table.element.style.display=""))}redraw(){this.dispatch("footer-redraw")}}class w extends t{constructor(e){super(e),this.el=null,this.abortClasses=["tabulator-headers","tabulator-table"],this.previousTargets={},this.listeners=["click","dblclick","contextmenu","mouseenter","mouseleave","mouseover","mouseout","mousemove","mouseup","mousedown","touchstart","touchend"],this.componentMap={"tabulator-cell":"cell","tabulator-row":"row","tabulator-group":"group","tabulator-col":"column"},this.pseudoTrackers={row:{subscriber:null,target:null},cell:{subscriber:null,target:null},group:{subscriber:null,target:null},column:{subscriber:null,target:null}},this.pseudoTracking=!1}initialize(){this.el=this.table.element,this.buildListenerMap(),this.bindSubscriptionWatchers()}buildListenerMap(){var e={};this.listeners.forEach((t=>{e[t]={handler:null,components:[]}})),this.listeners=e}bindPseudoEvents(){Object.keys(this.pseudoTrackers).forEach((e=>{this.pseudoTrackers[e].subscriber=this.pseudoMouseEnter.bind(this,e),this.subscribe(e+"-mouseover",this.pseudoTrackers[e].subscriber)})),this.pseudoTracking=!0}pseudoMouseEnter(e,t,i){this.pseudoTrackers[e].target!==i&&(this.pseudoTrackers[e].target&&this.dispatch(e+"-mouseleave",t,this.pseudoTrackers[e].target),this.pseudoMouseLeave(e,t),this.pseudoTrackers[e].target=i,this.dispatch(e+"-mouseenter",t,i))}pseudoMouseLeave(e,t){var i=Object.keys(this.pseudoTrackers),s={row:["cell"],cell:["row"]};(i=i.filter((t=>{var i=s[e];return t!==e&&(!i||i&&!i.includes(t))}))).forEach((e=>{var i=this.pseudoTrackers[e].target;this.pseudoTrackers[e].target&&(this.dispatch(e+"-mouseleave",t,i),this.pseudoTrackers[e].target=null)}))}bindSubscriptionWatchers(){var e=Object.keys(this.listeners),t=Object.values(this.componentMap);for(let i of t)for(let t of e){let e=i+"-"+t;this.subscriptionChange(e,this.subscriptionChanged.bind(this,i,t))}this.subscribe("table-destroy",this.clearWatchers.bind(this))}subscriptionChanged(e,t,i){var s=this.listeners[t].components,o=s.indexOf(e),n=!1;i?-1===o&&(s.push(e),n=!0):this.subscribed(e+"-"+t)||o>-1&&(s.splice(o,1),n=!0),"mouseenter"!==t&&"mouseleave"!==t||this.pseudoTracking||this.bindPseudoEvents(),n&&this.updateEventListeners()}updateEventListeners(){for(let e in this.listeners){let t=this.listeners[e];t.components.length?t.handler||(t.handler=this.track.bind(this,e),this.el.addEventListener(e,t.handler)):t.handler&&(this.el.removeEventListener(e,t.handler),t.handler=null)}}track(e,t){var i=t.composedPath&&t.composedPath()||t.path,s=this.findTargets(i);s=this.bindComponents(e,s),this.triggerEvents(e,t,s),!this.pseudoTracking||"mouseover"!=e&&"mouseleave"!=e||Object.keys(s).length||this.pseudoMouseLeave("none",t)}findTargets(e){var t={};let i=Object.keys(this.componentMap);for(let s of e){let e=s.classList?[...s.classList]:[];if(e.filter((e=>this.abortClasses.includes(e))).length)break;let o=e.filter((e=>i.includes(e)));for(let e of o)t[this.componentMap[e]]||(t[this.componentMap[e]]=s)}return t.group&&t.group===t.row&&delete 
t.row,t}bindComponents(e,t){var i=Object.keys(t).reverse(),s=this.listeners[e],o={},n={};for(let e of i){let i,r=t[e],a=this.previousTargets[e];if(a&&a.target===r)i=a.component;else switch(e){case"row":case"group":if(s.components.includes("row")||s.components.includes("cell")||s.components.includes("group")){i=this.table.rowManager.getVisibleRows(!0).find((e=>e.getElement()===r)),t.row&&t.row.parentNode&&t.row.parentNode.closest(".tabulator-row")&&(t[e]=!1)}break;case"column":s.components.includes("column")&&(i=this.table.columnManager.findColumn(r));break;case"cell":s.components.includes("cell")&&(o.row instanceof p?i=o.row.findCell(r):t.row&&console.warn("Event Target Lookup Error - The row this cell is attached to cannot be found, has the table been reinitialized without being destroyed first?"))}i&&(o[e]=i,n[e]={target:r,component:i})}return this.previousTargets=n,o}triggerEvents(e,t,i){var s=this.listeners[e];for(let o in i)i[o]&&s.components.includes(o)&&this.dispatch(o+"-"+e,t,i[o])}clearWatchers(){for(let e in this.listeners){let t=this.listeners[e];t.handler&&(this.el.removeEventListener(e,t.handler),t.handler=null)}}}class C{constructor(e){this.table=e,this.bindings={}}bind(e,t,i){this.bindings[e]||(this.bindings[e]={}),this.bindings[e][t]?console.warn("Unable to bind component handler, a matching function name is already bound",e,t,i):this.bindings[e][t]=i}handle(e,t,i){if(this.bindings[e]&&this.bindings[e][i]&&"function"==typeof this.bindings[e][i].bind)return this.bindings[e][i].bind(null,t);"then"===i||"string"!=typeof i||i.startsWith("_")||this.table.options.debugInvalidComponentFuncs&&console.error("The "+e+" component does not have a "+i+" function, have you checked that you have the correct Tabulator module installed?")}}class E extends t{constructor(e){super(e),this.requestOrder=0,this.loading=!1}initialize(){}load(e,t,i,s,o,n){var r=++this.requestOrder;return this.table.destroyed?Promise.resolve():(this.dispatchExternal("dataLoading",e),!e||0!=e.indexOf("{")&&0!=e.indexOf("[")||(e=JSON.parse(e)),this.confirm("data-loading",[e,t,i,o])?(this.loading=!0,o||this.alertLoader(),t=this.chain("data-params",[e,i,o],t||{},t||{}),t=this.mapParams(t,this.table.options.dataSendParams),this.chain("data-load",[e,t,i,o],!1,Promise.resolve([])).then((e=>{if(this.table.destroyed)console.warn("Data Load Response Blocked - Table has been destroyed");else{Array.isArray(e)||"object"!=typeof e||(e=this.mapParams(e,this.objectInvert(this.table.options.dataReceiveParams)));var t=this.chain("data-loaded",[e],null,e);r==this.requestOrder?(this.clearAlert(),!1!==t&&(this.dispatchExternal("dataLoaded",t),this.table.rowManager.setData(t,s,void 0===n?!s:n))):console.warn("Data Load Response Blocked - An active data load request was blocked by an attempt to change table data while the request was being made")}})).catch((e=>{console.error("Data Load Error: ",e),this.dispatchExternal("dataLoadError",e),o||this.alertError(),setTimeout((()=>{this.clearAlert()}),this.table.options.dataLoaderErrorTimeout)})).finally((()=>{this.loading=!1}))):(this.dispatchExternal("dataLoaded",e),e||(e=[]),this.table.rowManager.setData(e,s,void 0===n?!s:n),Promise.resolve()))}mapParams(e,t){var i={};for(let s in e)i[t.hasOwnProperty(s)?t[s]:s]=e[s];return i}objectInvert(e){var t={};for(let i in e)t[e[i]]=i;return t}blockActiveLoad(){this.requestOrder++}alertLoader(){("function"==typeof 
this.table.options.dataLoader?this.table.options.dataLoader():this.table.options.dataLoader)&&this.table.alertManager.alert(this.table.options.dataLoaderLoading||this.langText("data|loading"))}alertError(){this.table.alertManager.alert(this.table.options.dataLoaderError||this.langText("data|error"),"error")}clearAlert(){this.table.alertManager.clear()}}class y{constructor(e,t,i){this.table=e,this.events={},this.optionsList=t||{},this.subscriptionNotifiers={},this.dispatch=i?this._debugDispatch.bind(this):this._dispatch.bind(this),this.debug=i}subscriptionChange(e,t){this.subscriptionNotifiers[e]||(this.subscriptionNotifiers[e]=[]),this.subscriptionNotifiers[e].push(t),this.subscribed(e)&&this._notifySubscriptionChange(e,!0)}subscribe(e,t){this.events[e]||(this.events[e]=[]),this.events[e].push(t),this._notifySubscriptionChange(e,!0)}unsubscribe(e,t){var i;if(this.events[e]){if(t){if(!((i=this.events[e].findIndex((e=>e===t)))>-1))return void console.warn("Cannot remove event, no matching event found:",e,t);this.events[e].splice(i,1)}else delete this.events[e];this._notifySubscriptionChange(e,!1)}else console.warn("Cannot remove event, no events set on:",e)}subscribed(e){return this.events[e]&&this.events[e].length}_notifySubscriptionChange(e,t){var i=this.subscriptionNotifiers[e];i&&i.forEach((e=>{e(t)}))}_dispatch(){var e,t=Array.from(arguments),i=t.shift();return this.events[i]&&this.events[i].forEach(((i,s)=>{let o=i.apply(this.table,t);s||(e=o)})),e}_debugDispatch(){var e=Array.from(arguments),t=e[0];return e[0]="ExternalEvent:"+e[0],(!0===this.debug||this.debug.includes(t))&&console.log(...e),this._dispatch(...arguments)}}class R{constructor(e){this.events={},this.subscriptionNotifiers={},this.dispatch=e?this._debugDispatch.bind(this):this._dispatch.bind(this),this.chain=e?this._debugChain.bind(this):this._chain.bind(this),this.confirm=e?this._debugConfirm.bind(this):this._confirm.bind(this),this.debug=e}subscriptionChange(e,t){this.subscriptionNotifiers[e]||(this.subscriptionNotifiers[e]=[]),this.subscriptionNotifiers[e].push(t),this.subscribed(e)&&this._notifySubscriptionChange(e,!0)}subscribe(e,t,i=1e4){this.events[e]||(this.events[e]=[]),this.events[e].push({callback:t,priority:i}),this.events[e].sort(((e,t)=>e.priority-t.priority)),this._notifySubscriptionChange(e,!0)}unsubscribe(e,t){var i;if(this.events[e]){if(t){if(!((i=this.events[e].findIndex((e=>e.callback===t)))>-1))return void console.warn("Cannot remove event, no matching event found:",e,t);this.events[e].splice(i,1)}this._notifySubscriptionChange(e,!1)}else console.warn("Cannot remove event, no events set on:",e)}subscribed(e){return this.events[e]&&this.events[e].length}_chain(e,t,i,s){var o=i;return Array.isArray(t)||(t=[t]),this.subscribed(e)?(this.events[e].forEach(((e,i)=>{o=e.callback.apply(this,t.concat([o]))})),o):"function"==typeof s?s():s}_confirm(e,t){var i=!1;return Array.isArray(t)||(t=[t]),this.subscribed(e)&&this.events[e].forEach(((e,s)=>{e.callback.apply(this,t)&&(i=!0)})),i}_notifySubscriptionChange(e,t){var i=this.subscriptionNotifiers[e];i&&i.forEach((e=>{e(t)}))}_dispatch(){var e=Array.from(arguments),t=e.shift();this.events[t]&&this.events[t].forEach((t=>{t.callback.apply(this,e)}))}_debugDispatch(){var e=Array.from(arguments),t=e[0];return e[0]="InternalEvent:"+t,(!0===this.debug||this.debug.includes(t))&&console.log(...e),this._dispatch(...arguments)}_debugChain(){var e=Array.from(arguments),t=e[0];return 
e[0]="InternalEvent:"+t,(!0===this.debug||this.debug.includes(t))&&console.log(...e),this._chain(...arguments)}_debugConfirm(){var e=Array.from(arguments),t=e[0];return e[0]="InternalEvent:"+t,(!0===this.debug||this.debug.includes(t))&&console.log(...e),this._confirm(...arguments)}}class x extends t{constructor(e){super(e)}_warnUser(){this.options("debugDeprecation")&&console.warn(...arguments)}check(e,t,i){var s="";return void 0===this.options(e)||(s="Deprecated Setup Option - Use of the %c"+e+"%c option is now deprecated",t?(s=s+", Please use the %c"+t+"%c option instead",this._warnUser(s,"font-weight: bold;","font-weight: normal;","font-weight: bold;","font-weight: normal;"),i&&(this.table.options[t]=this.table.options[e])):this._warnUser(s,"font-weight: bold;","font-weight: normal;"),!1)}checkMsg(e,t){return void 0===this.options(e)||(this._warnUser("%cDeprecated Setup Option - Use of the %c"+e+" %c option is now deprecated, "+t,"font-weight: normal;","font-weight: bold;","font-weight: normal;"),!1)}msg(e){this._warnUser(e)}}let T=class e extends t{constructor(e,t,i){super(e),this.element=t,this.container=this._lookupContainer(),this.parent=i,this.reversedX=!1,this.childPopup=null,this.blurable=!1,this.blurCallback=null,this.blurEventsBound=!1,this.renderedCallback=null,this.visible=!1,this.hideable=!0,this.element.classList.add("tabulator-popup-container"),this.blurEvent=this.hide.bind(this,!1),this.escEvent=this._escapeCheck.bind(this),this.destroyBinding=this.tableDestroyed.bind(this),this.destroyed=!1}tableDestroyed(){this.destroyed=!0,this.hide(!0)}_lookupContainer(){var e=this.table.options.popupContainer;return"string"==typeof e?(e=document.querySelector(e))||console.warn("Menu Error - no container element found matching selector:",this.table.options.popupContainer,"(defaulting to document body)"):!0===e&&(e=this.table.element),e&&!this._checkContainerIsParent(e)&&(e=!1,console.warn("Menu Error - container element does not contain this table:",this.table.options.popupContainer,"(defaulting to document body)")),e||(e=document.body),e}_checkContainerIsParent(e,t=this.table.element){return e===t||!!t.parentNode&&this._checkContainerIsParent(e,t.parentNode)}renderCallback(e){this.renderedCallback=e}containerEventCoords(e){var t=!(e instanceof MouseEvent),i=t?e.touches[0].pageX:e.pageX,s=t?e.touches[0].pageY:e.pageY;if(this.container!==document.body){let e=a.elOffset(this.container);i-=e.left,s-=e.top}return{x:i,y:s}}elementPositionCoords(e,t="right"){var i,s,o,n=a.elOffset(e);switch(this.container!==document.body&&(i=a.elOffset(this.container),n.left-=i.left,n.top-=i.top),t){case"right":s=n.left+e.offsetWidth,o=n.top-1;break;case"bottom":s=n.left,o=n.top+e.offsetHeight;break;case"left":s=n.left,o=n.top-1;break;case"top":s=n.left,o=n.top;break;case"center":s=n.left+e.offsetWidth/2,o=n.top+e.offsetHeight/2}return{x:s,y:o,offset:n}}show(e,t){var i,s,o,n,r;return this.destroyed||this.table.destroyed||(e instanceof HTMLElement?(o=e,n=(r=this.elementPositionCoords(e,t)).offset,i=r.x,s=r.y):"number"==typeof e?(n={top:0,left:0},i=e,s=t):(i=(r=this.containerEventCoords(e)).x,s=r.y,this.reversedX=!1),this.element.style.top=s+"px",this.element.style.left=i+"px",this.container.appendChild(this.element),"function"==typeof this.renderedCallback&&this.renderedCallback(),this._fitToScreen(i,s,o,n,t),this.visible=!0,this.subscribe("table-destroy",this.destroyBinding),this.element.addEventListener("mousedown",(e=>{e.stopPropagation()}))),this}_fitToScreen(e,t,i,s,o){var 
n=this.container===document.body?document.documentElement.scrollTop:this.container.scrollTop;(e+this.element.offsetWidth>=this.container.offsetWidth||this.reversedX)&&(this.element.style.left="",this.element.style.right=i?this.container.offsetWidth-s.left+"px":this.container.offsetWidth-e+"px",this.reversedX=!0);let r=Math.max(this.container.offsetHeight,n?this.container.scrollHeight:0);if(t+this.element.offsetHeight>r)if(i)if("bottom"===o)this.element.style.top=parseInt(this.element.style.top)-this.element.offsetHeight-i.offsetHeight-1+"px";else this.element.style.top=parseInt(this.element.style.top)-this.element.offsetHeight+i.offsetHeight+1+"px";else this.element.style.height=r+"px"}isVisible(){return this.visible}hideOnBlur(e){return this.blurable=!0,this.visible&&(setTimeout((()=>{this.visible&&(this.table.rowManager.element.addEventListener("scroll",this.blurEvent),this.subscribe("cell-editing",this.blurEvent),document.body.addEventListener("click",this.blurEvent),document.body.addEventListener("contextmenu",this.blurEvent),document.body.addEventListener("mousedown",this.blurEvent),window.addEventListener("resize",this.blurEvent),document.body.addEventListener("keydown",this.escEvent),this.blurEventsBound=!0)}),100),this.blurCallback=e),this}_escapeCheck(e){27==e.keyCode&&this.hide()}blockHide(){this.hideable=!1}restoreHide(){this.hideable=!0}hide(e=!1){return this.visible&&this.hideable&&(this.blurable&&this.blurEventsBound&&(document.body.removeEventListener("keydown",this.escEvent),document.body.removeEventListener("click",this.blurEvent),document.body.removeEventListener("contextmenu",this.blurEvent),document.body.removeEventListener("mousedown",this.blurEvent),window.removeEventListener("resize",this.blurEvent),this.table.rowManager.element.removeEventListener("scroll",this.blurEvent),this.unsubscribe("cell-editing",this.blurEvent),this.blurEventsBound=!1),this.childPopup&&this.childPopup.hide(),this.parent&&(this.parent.childPopup=null),this.element.parentNode&&this.element.parentNode.removeChild(this.element),this.visible=!1,this.blurCallback&&!e&&this.blurCallback(),this.unsubscribe("table-destroy",this.destroyBinding)),this}child(t){return this.childPopup&&this.childPopup.hide(),this.childPopup=new e(this.table,t,this),this.childPopup}};class M extends t{constructor(e,t){super(e),this._handler=null}initialize(){}registerTableOption(e,t){this.table.optionsList.register(e,t)}registerColumnOption(e,t){this.table.columnManager.optionsList.register(e,t)}registerTableFunction(e,t){void 0===this.table[e]?this.table[e]=(...i)=>(this.table.initGuard(e),t(...i)):console.warn("Unable to bind table function, name already in use",e)}registerComponentFunction(e,t,i){return this.table.componentFunctionBinder.bind(e,t,i)}registerDataHandler(e,t){this.table.rowManager.registerDataPipelineHandler(e,t),this._handler=e}registerDisplayHandler(e,t){this.table.rowManager.registerDisplayPipelineHandler(e,t),this._handler=e}displayRows(e){var t,i=this.table.rowManager.displayRows.length-1;if(this._handler&&(t=this.table.rowManager.displayPipeline.findIndex((e=>e.handler===this._handler)))>-1&&(i=t),e&&(i+=e),this._handler)return i>-1?this.table.rowManager.getDisplayRows(i):this.activeRows()}activeRows(){return this.table.rowManager.activeRows}refreshData(e,t){t||(t=this._handler),t&&this.table.rowManager.refreshActiveData(t,!1,e)}footerAppend(e){return this.table.footerManager.append(e)}footerPrepend(e){return this.table.footerManager.prepend(e)}footerRemove(e){return 
this.table.footerManager.remove(e)}popup(e,t){return new T(this.table,e,t)}alert(e,t){return this.table.alertManager.alert(e,t)}clearAlert(){return this.table.alertManager.clear()}}function k(e,t){e.forEach((function(e){e.reinitializeWidth()})),this.table.options.responsiveLayout&&this.table.modExists("responsiveLayout",!0)&&this.table.modules.responsiveLayout.update()}var L={fitData:function(e,t){t&&this.table.columnManager.renderer.reinitializeColumnWidths(e),this.table.options.responsiveLayout&&this.table.modExists("responsiveLayout",!0)&&this.table.modules.responsiveLayout.update()},fitDataFill:k,fitDataTable:k,fitDataStretch:function(e,t){var i=0,s=this.table.rowManager.element.clientWidth,o=0,n=!1;e.forEach(((e,t)=>{e.widthFixed||e.reinitializeWidth(),(this.table.options.responsiveLayout?e.modules.responsive.visible:e.visible)&&(n=e),e.visible&&(i+=e.getWidth())})),n?(o=s-i+n.getWidth(),this.table.options.responsiveLayout&&this.table.modExists("responsiveLayout",!0)&&(n.setWidth(0),this.table.modules.responsiveLayout.update()),o>0?n.setWidth(o):n.reinitializeWidth()):this.table.options.responsiveLayout&&this.table.modExists("responsiveLayout",!0)&&this.table.modules.responsiveLayout.update()},fitColumns:function(e,t){var i,s,o=this.table.rowManager.element.getBoundingClientRect().width,n=0,r=0,a=0,l=[],h=[],d=0,c=0;function u(e){return"string"==typeof e?e.indexOf("%")>-1?o/100*parseInt(e):parseInt(e):e}function m(e,t,i,s){var o=[],n=0,r=0,l=0,h=a,d=0,c=0,p=[];function g(e){return i*(e.column.definition.widthGrow||1)}function b(e){return u(e.width)-i*(e.column.definition.widthShrink||0)}return e.forEach((function(e,n){var r=s?b(e):g(e);e.column.minWidth>=r?o.push(e):e.column.maxWidth&&e.column.maxWidththis.table.rowManager.element.clientHeight&&(o-=this.table.rowManager.element.offsetWidth-this.table.rowManager.element.clientWidth),e.forEach((function(e){var t,i,s;e.visible&&(t=e.definition.width,i=parseInt(e.minWidth),t?(s=u(t),n+=s>i?s:i,e.definition.widthShrink&&(h.push({column:e,width:s>i?s:i}),d+=e.definition.widthShrink)):(l.push({column:e,width:0}),a+=e.definition.widthGrow||1))})),r=o-n,i=Math.floor(r/a),c=m(l,r,i,!1),l.length&&c>0&&(l[l.length-1].width+=c),l.forEach((function(e){r-=e.width})),(s=Math.abs(c)+r)>0&&d&&(c=m(h,s,Math.floor(s/d),!0)),c&&h.length&&(h[h.length-1].width-=c),l.forEach((function(e){e.column.setWidth(e.width)})),h.forEach((function(e){e.column.setWidth(e.width)}))}};class S extends M{static moduleName="layout";static modes=L;constructor(e){super(e,"layout"),this.mode=null,this.registerTableOption("layout","fitData"),this.registerTableOption("layoutColumnsOnNewData",!1),this.registerColumnOption("widthGrow"),this.registerColumnOption("widthShrink")}initialize(){var e=this.table.options.layout;S.modes[e]?this.mode=e:(console.warn("Layout Error - invalid mode set, defaulting to 'fitData' : "+e),this.mode="fitData"),this.table.element.setAttribute("tabulator-layout",this.mode),this.subscribe("column-init",this.initializeColumn.bind(this))}initializeColumn(e){e.definition.widthGrow&&(e.definition.widthGrow=Number(e.definition.widthGrow)),e.definition.widthShrink&&(e.definition.widthShrink=Number(e.definition.widthShrink))}getMode(){return this.mode}layout(e){this.dispatch("layout-refreshing"),S.modes[this.mode].call(this,this.table.columnManager.columnsByIndex,e),this.dispatch("layout-refreshed")}}var D={default:{groups:{item:"item",items:"items"},columns:{},data:{loading:"Loading",error:"Error"},pagination:{page_size:"Page Size",page_title:"Show 
Page",first:"First",first_title:"First Page",last:"Last",last_title:"Last Page",prev:"Prev",prev_title:"Prev Page",next:"Next",next_title:"Next Page",all:"All",counter:{showing:"Showing",of:"of",rows:"rows",pages:"pages"}},headerFilters:{default:"filter column...",columns:{}}}};class z extends M{static moduleName="localize";static langs=D;constructor(e){super(e),this.locale="default",this.lang=!1,this.bindings={},this.langList={},this.registerTableOption("locale",!1),this.registerTableOption("langs",{})}initialize(){this.langList=a.deepClone(z.langs),!1!==this.table.options.columnDefaults.headerFilterPlaceholder&&this.setHeaderFilterPlaceholder(this.table.options.columnDefaults.headerFilterPlaceholder);for(let e in this.table.options.langs)this.installLang(e,this.table.options.langs[e]);this.setLocale(this.table.options.locale),this.registerTableFunction("setLocale",this.setLocale.bind(this)),this.registerTableFunction("getLocale",this.getLocale.bind(this)),this.registerTableFunction("getLang",this.getLang.bind(this))}setHeaderFilterPlaceholder(e){this.langList.default.headerFilters.default=e}installLang(e,t){this.langList[e]?this._setLangProp(this.langList[e],t):this.langList[e]=t}_setLangProp(e,t){for(let i in t)e[i]&&"object"==typeof e[i]?this._setLangProp(e[i],t[i]):e[i]=t[i]}setLocale(e){if(!0===(e=e||"default")&&navigator.language&&(e=navigator.language.toLowerCase()),e&&!this.langList[e]){let t=e.split("-")[0];this.langList[t]?(console.warn("Localization Error - Exact matching locale not found, using closest match: ",e,t),e=t):(console.warn("Localization Error - Matching locale not found, using default: ",e),e="default")}this.locale=e,this.lang=a.deepClone(this.langList.default||{}),"default"!=e&&function e(t,i){for(var s in t)"object"==typeof t[s]?(i[s]||(i[s]={}),e(t[s],i[s])):i[s]=t[s]}(this.langList[e],this.lang),this.dispatchExternal("localized",this.locale,this.lang),this._executeBindings()}getLocale(e){return this.locale}getLang(e){return e?this.langList[e]:this.lang}getText(e,t){var i=(t?e+"|"+t:e).split("|");return this._getLangElement(i,this.locale)||""}_getLangElement(e,t){var i=this.lang;return e.forEach((function(e){var t;i&&(t=i[e],i=void 0!==t&&t)})),i}bind(e,t){this.bindings[e]||(this.bindings[e]=[]),this.bindings[e].push(t),t(this.getText(e),this.lang)}_executeBindings(){for(let e in this.bindings)this.bindings[e].forEach((t=>{t(this.getText(e),this.lang)}))}}var P=Object.freeze({__proto__:null,CommsModule:class extends M{static moduleName="comms";constructor(e){super(e)}initialize(){this.registerTableFunction("tableComms",this.receive.bind(this))}getConnections(e){var t=[];return this.table.constructor.registry.lookupTable(e).forEach((e=>{this.table!==e&&t.push(e)})),t}send(e,t,i,s){var o=this.getConnections(e);o.forEach((e=>{e.tableComms(this.table.element,t,i,s)})),!o.length&&e&&console.warn("Table Connection Error - No tables matching selector found",e)}receive(e,t,i,s){if(this.table.modExists(t))return this.table.modules[t].commsReceived(e,i,s);console.warn("Inter-table Comms Error - no such module:",t)}},LayoutModule:S,LocalizeModule:z});class F{static registry={tables:[],register(e){F.registry.tables.push(e)},deregister(e){var t=F.registry.tables.indexOf(e);t>-1&&F.registry.tables.splice(t,1)},lookupTable(e,t){var i,s,o=[];if("string"==typeof e){if((i=document.querySelectorAll(e)).length)for(var n=0;nF.registry.tables.find((function(t){return e instanceof F?t===e:t.element===e}))};static findTable(e){var 
t=F.registry.lookupTable(e,!0);return!(Array.isArray(t)&&!t.length)&&t}}class H extends F{static moduleBindings={};static moduleExtensions={};static modulesRegistered=!1;static defaultModules=!1;constructor(){super()}static initializeModuleBinder(e){H.modulesRegistered||(H.modulesRegistered=!0,H._registerModules(P,!0),e&&H._registerModules(e))}static _extendModule(e,t,i){if(H.moduleBindings[e]){var s=H.moduleBindings[e][t];if(s)if("object"==typeof i)for(let e in i)s[e]=i[e];else console.warn("Module Error - Invalid value type, it must be an object");else console.warn("Module Error - property does not exist:",t)}else console.warn("Module Error - module does not exist:",e)}static _registerModules(e,t){var i=Object.values(e);t&&i.forEach((e=>{e.prototype.moduleCore=!0})),H._registerModule(i)}static _registerModule(e){Array.isArray(e)||(e=[e]),e.forEach((e=>{H._registerModuleBinding(e),H._registerModuleExtensions(e)}))}static _registerModuleBinding(e){e.moduleName?H.moduleBindings[e.moduleName]=e:console.error("Unable to bind module, no moduleName defined",e.moduleName)}static _registerModuleExtensions(e){var t=e.moduleExtensions;if(e.moduleExtensions)for(let e in t){let i=t[e];if(H.moduleBindings[e])for(let t in i)H._extendModule(e,t,i[t]);else{H.moduleExtensions[e]||(H.moduleExtensions[e]={});for(let t in i)H.moduleExtensions[e][t]||(H.moduleExtensions[e][t]={}),Object.assign(H.moduleExtensions[e][t],i[t])}}H._extendModuleFromQueue(e)}static _extendModuleFromQueue(e){var t=H.moduleExtensions[e.moduleName];if(t)for(let i in t)H._extendModule(e.moduleName,i,t[i])}_bindModules(){var e=[],t=[],i=[];for(var s in this.modules={},H.moduleBindings){let o=H.moduleBindings[s],n=new o(this);this.modules[s]=n,o.prototype.moduleCore?this.modulesCore.push(n):o.moduleInitOrder?o.moduleInitOrder<0?e.push(n):t.push(n):i.push(n)}e.sort(((e,t)=>e.moduleInitOrder>t.moduleInitOrder?1:-1)),t.sort(((e,t)=>e.moduleInitOrder>t.moduleInitOrder?1:-1)),this.modulesRegular=e.concat(i.concat(t))}}class _ extends t{constructor(e){super(e),this.element=this._createAlertElement(),this.msgElement=this._createMsgElement(),this.type=null,this.element.appendChild(this.msgElement)}_createAlertElement(){var e=document.createElement("div");return e.classList.add("tabulator-alert"),e}_createMsgElement(){var e=document.createElement("div");return e.classList.add("tabulator-alert-msg"),e.setAttribute("role","alert"),e}_typeClass(){return"tabulator-alert-state-"+this.type}alert(e,t="msg"){if(e){for(this.clear(),this.dispatch("alert-show",t),this.type=t;this.msgElement.firstChild;)this.msgElement.removeChild(this.msgElement.firstChild);this.msgElement.classList.add(this._typeClass()),"function"==typeof e&&(e=e()),e instanceof HTMLElement?this.msgElement.appendChild(e):this.msgElement.innerHTML=e,this.table.element.appendChild(this.element)}}clear(){this.dispatch("alert-hide",this.type),this.element.parentNode&&this.element.parentNode.removeChild(this.element),this.msgElement.classList.remove(this._typeClass())}}class O extends H{static defaultOptions=e;static extendModule(){O.initializeModuleBinder(),O._extendModule(...arguments)}static 
registerModule(){O.initializeModuleBinder(),O._registerModule(...arguments)}constructor(e,t,i){super(),O.initializeModuleBinder(i),this.options={},this.columnManager=null,this.rowManager=null,this.footerManager=null,this.alertManager=null,this.vdomHoz=null,this.externalEvents=null,this.eventBus=null,this.interactionMonitor=!1,this.browser="",this.browserSlow=!1,this.browserMobile=!1,this.rtl=!1,this.originalElement=null,this.componentFunctionBinder=new C(this),this.dataLoader=!1,this.modules={},this.modulesCore=[],this.modulesRegular=[],this.deprecationAdvisor=new x(this),this.optionsList=new l(this,"table constructor"),this.initialized=!1,this.destroyed=!1,this.initializeElement(e)&&(this.initializeCoreSystems(t),setTimeout((()=>{this._create()}))),this.constructor.registry.register(this)}initializeElement(e){return"undefined"!=typeof HTMLElement&&e instanceof HTMLElement?(this.element=e,!0):"string"==typeof e?(this.element=document.querySelector(e),!!this.element||(console.error("Tabulator Creation Error - no element found matching selector: ",e),!1)):(console.error("Tabulator Creation Error - Invalid element provided:",e),!1)}initializeCoreSystems(e){this.columnManager=new u(this),this.rowManager=new f(this),this.footerManager=new v(this),this.dataLoader=new E(this),this.alertManager=new _(this),this._bindModules(),this.options=this.optionsList.generate(O.defaultOptions,e),this._clearObjectPointers(),this._mapDeprecatedFunctionality(),this.externalEvents=new y(this,this.options,this.options.debugEventsExternal),this.eventBus=new R(this.options.debugEventsInternal),this.interactionMonitor=new w(this),this.dataLoader.initialize(),this.footerManager.initialize()}_mapDeprecatedFunctionality(){}_clearSelection(){this.element.classList.add("tabulator-block-select"),window.getSelection?window.getSelection().empty?window.getSelection().empty():window.getSelection().removeAllRanges&&window.getSelection().removeAllRanges():document.selection&&document.selection.empty(),this.element.classList.remove("tabulator-block-select")}_create(){this.externalEvents.dispatch("tableBuilding"),this.eventBus.dispatch("table-building"),this._rtlCheck(),this._buildElement(),this._initializeTable(),this.initialized=!0,this._loadInitialData().finally((()=>{this.eventBus.dispatch("table-initialized"),this.externalEvents.dispatch("tableBuilt")}))}_rtlCheck(){var e=window.getComputedStyle(this.element);switch(this.options.textDirection){case"auto":if("rtl"!==e.direction)break;case"rtl":this.element.classList.add("tabulator-rtl"),this.rtl=!0;break;case"ltr":this.element.classList.add("tabulator-ltr");default:this.rtl=!1}}_clearObjectPointers(){this.options.columns=this.options.columns.slice(0),Array.isArray(this.options.data)&&!this.options.reactiveData&&(this.options.data=this.options.data.slice(0))}_buildElement(){var e,t=this.element,i=this.options;if("TABLE"===t.tagName){this.originalElement=this.element,e=document.createElement("div");var s=t.attributes;for(var o in s)"object"==typeof s[o]&&e.setAttribute(s[o].name,s[o].value);t.parentNode.replaceChild(e,t),this.element=t=e}for(t.classList.add("tabulator"),t.setAttribute("role","grid");t.firstChild;)t.removeChild(t.firstChild);i.height&&(i.height=isNaN(i.height)?i.height:i.height+"px",t.style.height=i.height),!1!==i.minHeight&&(i.minHeight=isNaN(i.minHeight)?i.minHeight:i.minHeight+"px",t.style.minHeight=i.minHeight),!1!==i.maxHeight&&(i.maxHeight=isNaN(i.maxHeight)?i.maxHeight:i.maxHeight+"px",t.style.maxHeight=i.maxHeight)}_initializeTable(){var 
e=this.element,t=this.options;this.interactionMonitor.initialize(),this.columnManager.initialize(),this.rowManager.initialize(),this._detectBrowser(),this.modulesCore.forEach((e=>{e.initialize()})),e.appendChild(this.columnManager.getElement()),e.appendChild(this.rowManager.getElement()),t.footerElement&&this.footerManager.activate(),t.autoColumns&&t.data&&this.columnManager.generateColumnsFromRowData(this.options.data),this.modulesRegular.forEach((e=>{e.initialize()})),this.columnManager.setColumns(t.columns),this.eventBus.dispatch("table-built")}_loadInitialData(){return this.dataLoader.load(this.options.data).finally((()=>{this.columnManager.verticalAlignHeaders()}))}destroy(){var e=this.element;for(this.destroyed=!0,this.constructor.registry.deregister(this),this.eventBus.dispatch("table-destroy"),this.rowManager.destroy();e.firstChild;)e.removeChild(e.firstChild);e.classList.remove("tabulator"),this.externalEvents.dispatch("tableDestroyed")}_detectBrowser(){var e=navigator.userAgent||navigator.vendor||window.opera;e.indexOf("Trident")>-1?(this.browser="ie",this.browserSlow=!0):e.indexOf("Edge")>-1?(this.browser="edge",this.browserSlow=!0):e.indexOf("Firefox")>-1?(this.browser="firefox",this.browserSlow=!1):e.indexOf("Mac OS")>-1?(this.browser="safari",this.browserSlow=!1):(this.browser="other",this.browserSlow=!1),this.browserMobile=/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino|android|ipad|playbook|silk/i.test(e)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw-(n|u)|c55\/|capi|ccwa|cdm-|cell|chtm|cldc|cmd-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc-s|devi|dica|dmob|do(c|p)o|ds(12|-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(-|_)|g1 u|g560|gene|gf-5|g-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd-(m|p|t)|hei-|hi(pt|ta)|hp( i|ip)|hs-c|ht(c(-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i-(20|go|ma)|i230|iac( |-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|-[a-w])|libw|lynx|m1-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|-([1-8]|c))|phil|pire|pl(ay|uc)|pn-2|po(ck|rt|se)|prox|psio|pt-g|qa-a|qc(07|12|21|32|60|-[2-7]|i-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h-|oo|p-)|sdk\/|se(c(-|0|1)|47|mc|nd|ri)|sgh-|shar|sie(-|m)|sk-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h-|v-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl-|tdg-|tel(i|m)|tim-|t-mo|to(pl|sh)|ts(70|m-|m3|m5)|tx-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas-|your|zeto|zte-/i.test(e.slice(0,4))}initGuard(e,t){var i,s;return this.options.debugInitialization&&!this.initialized&&(e||(e=" "==(s="Error"==(i=(new Error).stack.split("\n"))[0]?i[2]:i[1])[0]?s.trim().split(" ")[1].split(".")[1]:s.trim().split("@")[0]),console.warn("Table Not Initialized - 
Calling the "+e+" function before the table is initialized may result in inconsistent behavior, Please wait for the `tableBuilt` event before calling this function."+(t?" "+t:""))),this.initialized}blockRedraw(){this.initGuard(),this.eventBus.dispatch("redraw-blocking"),this.rowManager.blockRedraw(),this.columnManager.blockRedraw(),this.eventBus.dispatch("redraw-blocked")}restoreRedraw(){this.initGuard(),this.eventBus.dispatch("redraw-restoring"),this.rowManager.restoreRedraw(),this.columnManager.restoreRedraw(),this.eventBus.dispatch("redraw-restored")}setData(e,t,i){return this.initGuard(!1,"To set initial data please use the 'data' property in the table constructor."),this.dataLoader.load(e,t,i,!1)}clearData(){this.initGuard(),this.dataLoader.blockActiveLoad(),this.rowManager.clearData()}getData(e){return this.rowManager.getData(e)}getDataCount(e){return this.rowManager.getDataCount(e)}replaceData(e,t,i){return this.initGuard(),this.dataLoader.load(e,t,i,!0,!0)}updateData(e){var t=0;return this.initGuard(),new Promise(((i,s)=>{this.dataLoader.blockActiveLoad(),"string"==typeof e&&(e=JSON.parse(e)),e&&e.length>0?e.forEach((e=>{var o=this.rowManager.findRow(e[this.options.index]);o?(t++,o.updateData(e).then((()=>{--t||i()})).catch((t=>{s("Update Error - Unable to update row",e,t)}))):s("Update Error - Unable to find row",e)})):(console.warn("Update Error - No data provided"),s("Update Error - No data provided"))}))}addData(e,t,i){return this.initGuard(),new Promise(((s,o)=>{this.dataLoader.blockActiveLoad(),"string"==typeof e&&(e=JSON.parse(e)),e?this.rowManager.addRows(e,t,i).then((e=>{var t=[];e.forEach((function(e){t.push(e.getComponent())})),s(t)})):(console.warn("Update Error - No data provided"),o("Update Error - No data provided"))}))}updateOrAddData(e){var t=[],i=0;return this.initGuard(),new Promise(((s,o)=>{this.dataLoader.blockActiveLoad(),"string"==typeof e&&(e=JSON.parse(e)),e&&e.length>0?e.forEach((e=>{var o=this.rowManager.findRow(e[this.options.index]);i++,o?o.updateData(e).then((()=>{i--,t.push(o.getComponent()),i||s(t)})):this.rowManager.addRows(e).then((e=>{i--,t.push(e[0].getComponent()),i||s(t)}))})):(console.warn("Update Error - No data provided"),o("Update Error - No data provided"))}))}getRow(e){var t=this.rowManager.findRow(e);return t?t.getComponent():(console.warn("Find Error - No matching row found:",e),!1)}getRowFromPosition(e){var t=this.rowManager.getRowFromPosition(e);return t?t.getComponent():(console.warn("Find Error - No matching row found:",e),!1)}deleteRow(e){var t=[];this.initGuard(),Array.isArray(e)||(e=[e]);for(let i of e){let e=this.rowManager.findRow(i,!0);if(!e)return console.error("Delete Error - No matching row found:",i),Promise.reject("Delete Error - No matching row found");t.push(e)}return t.sort(((e,t)=>this.rowManager.rows.indexOf(e)>this.rowManager.rows.indexOf(t)?1:-1)),t.forEach((e=>{e.delete()})),this.rowManager.reRenderInPosition(),Promise.resolve()}addRow(e,t,i){return this.initGuard(),"string"==typeof e&&(e=JSON.parse(e)),this.rowManager.addRows(e,t,i,!0).then((e=>e[0].getComponent()))}updateOrAddRow(e,t){var i=this.rowManager.findRow(e);return this.initGuard(),"string"==typeof t&&(t=JSON.parse(t)),i?i.updateData(t).then((()=>i.getComponent())):this.rowManager.addRows(t).then((e=>e[0].getComponent()))}updateRow(e,t){var i=this.rowManager.findRow(e);return this.initGuard(),"string"==typeof t&&(t=JSON.parse(t)),i?i.updateData(t).then((()=>Promise.resolve(i.getComponent()))):(console.warn("Update Error - No matching row 
found:",e),Promise.reject("Update Error - No matching row found"))}scrollToRow(e,t,i){var s=this.rowManager.findRow(e);return s?this.rowManager.scrollToRow(s,t,i):(console.warn("Scroll Error - No matching row found:",e),Promise.reject("Scroll Error - No matching row found"))}moveRow(e,t,i){var s=this.rowManager.findRow(e);this.initGuard(),s?s.moveToRow(t,i):console.warn("Move Error - No matching row found:",e)}getRows(e){return this.rowManager.getComponents(e)}getRowPosition(e){var t=this.rowManager.findRow(e);return t?t.getPosition():(console.warn("Position Error - No matching row found:",e),!1)}setColumns(e){this.initGuard(!1,"To set initial columns please use the 'columns' property in the table constructor"),this.columnManager.setColumns(e)}getColumns(e){return this.columnManager.getComponents(e)}getColumn(e){var t=this.columnManager.findColumn(e);return t?t.getComponent():(console.warn("Find Error - No matching column found:",e),!1)}getColumnDefinitions(){return this.columnManager.getDefinitionTree()}showColumn(e){var t=this.columnManager.findColumn(e);if(this.initGuard(),!t)return console.warn("Column Show Error - No matching column found:",e),!1;t.show()}hideColumn(e){var t=this.columnManager.findColumn(e);if(this.initGuard(),!t)return console.warn("Column Hide Error - No matching column found:",e),!1;t.hide()}toggleColumn(e){var t=this.columnManager.findColumn(e);if(this.initGuard(),!t)return console.warn("Column Visibility Toggle Error - No matching column found:",e),!1;t.visible?t.hide():t.show()}addColumn(e,t,i){var s=this.columnManager.findColumn(i);return this.initGuard(),this.columnManager.addColumn(e,t,s).then((e=>e.getComponent()))}deleteColumn(e){var t=this.columnManager.findColumn(e);return this.initGuard(),t?t.delete():(console.warn("Column Delete Error - No matching column found:",e),Promise.reject())}updateColumnDefinition(e,t){var i=this.columnManager.findColumn(e);return this.initGuard(),i?i.updateDefinition(t):(console.warn("Column Update Error - No matching column found:",e),Promise.reject())}moveColumn(e,t,i){var s=this.columnManager.findColumn(e),o=this.columnManager.findColumn(t);this.initGuard(),s?o?this.columnManager.moveColumn(s,o,i):console.warn("Move Error - No matching column found:",o):console.warn("Move Error - No matching column found:",e)}scrollToColumn(e,t,i){return new Promise(((s,o)=>{var n=this.columnManager.findColumn(e);return n?this.columnManager.scrollToColumn(n,t,i):(console.warn("Scroll Error - No matching column found:",e),Promise.reject("Scroll Error - No matching column found"))}))}redraw(e){this.initGuard(),this.columnManager.redraw(e),this.rowManager.redraw(e)}setHeight(e){this.options.height=isNaN(e)?e:e+"px",this.element.style.height=this.options.height,this.rowManager.initializeRenderer(),this.rowManager.redraw(!0)}on(e,t){this.externalEvents.subscribe(e,t)}off(e,t){this.externalEvents.unsubscribe(e,t)}dispatchEvent(){Array.from(arguments).shift(),this.externalEvents.dispatch(...arguments)}alert(e,t){this.initGuard(),this.alertManager.alert(e,t)}clearAlert(){this.initGuard(),this.alertManager.clear()}modExists(e,t){return!!this.modules[e]||(t&&console.error("Tabulator Module Not Installed: "+e),!1)}module(e){var t=this.modules[e];return t||console.error("Tabulator module not installed: "+e),t}}var A={rownum:function(e,t,i,s,o,n){return n.getPosition()}};class B extends M{static moduleName="accessor";static 
accessors=A;constructor(e){super(e),this.allowedTypes=["","data","download","clipboard","print","htmlOutput"],this.registerColumnOption("accessor"),this.registerColumnOption("accessorParams"),this.registerColumnOption("accessorData"),this.registerColumnOption("accessorDataParams"),this.registerColumnOption("accessorDownload"),this.registerColumnOption("accessorDownloadParams"),this.registerColumnOption("accessorClipboard"),this.registerColumnOption("accessorClipboardParams"),this.registerColumnOption("accessorPrint"),this.registerColumnOption("accessorPrintParams"),this.registerColumnOption("accessorHtmlOutput"),this.registerColumnOption("accessorHtmlOutputParams")}initialize(){this.subscribe("column-layout",this.initializeColumn.bind(this)),this.subscribe("row-data-retrieve",this.transformRow.bind(this))}initializeColumn(e){var t=!1,i={};this.allowedTypes.forEach((s=>{var o,n="accessor"+(s.charAt(0).toUpperCase()+s.slice(1));e.definition[n]&&(o=this.lookupAccessor(e.definition[n]))&&(t=!0,i[n]={accessor:o,params:e.definition[n+"Params"]||{}})})),t&&(e.modules.accessor=i)}lookupAccessor(e){var t=!1;switch(typeof e){case"string":B.accessors[e]?t=B.accessors[e]:console.warn("Accessor Error - No such accessor found, ignoring: ",e);break;case"function":t=e}return t}transformRow(e,t){var i="accessor"+(t.charAt(0).toUpperCase()+t.slice(1)),s=e.getComponent(),o=a.deepClone(e.data||{});return this.table.columnManager.traverse((function(e){var n,r,a,l;e.modules.accessor&&(r=e.modules.accessor[i]||e.modules.accessor.accessor||!1)&&"undefined"!=(n=e.getFieldValue(o))&&(l=e.getComponent(),a="function"==typeof r.params?r.params(n,o,t,l,s):r.params,e.setFieldValue(o,r.accessor(n,o,t,a,l,s)))})),o}}var V={method:"GET"};function I(e,t){var i=[];if(t=t||"",Array.isArray(e))e.forEach(((e,s)=>{i=i.concat(I(e,t?t+"["+s+"]":s))}));else if("object"==typeof e)for(var s in e)i=i.concat(I(e[s],t?t+"["+s+"]":s));else i.push({key:t,value:e});return i}function N(e){var t=I(e),i=[];return t.forEach((function(e){i.push(encodeURIComponent(e.key)+"="+encodeURIComponent(e.value))})),i.join("&")}function W(e,t,i){return e&&i&&Object.keys(i).length&&(t.method&&"get"!=t.method.toLowerCase()||(t.method="get",e+=(e.includes("?")?"&":"?")+N(i))),e}function j(e,t,i){var s;return new Promise(((o,n)=>{if(e=this.urlGenerator.call(this.table,e,t,i),"GET"!=t.method.toUpperCase())if(s="object"==typeof this.table.options.ajaxContentType?this.table.options.ajaxContentType:this.contentTypeFormatters[this.table.options.ajaxContentType]){for(var r in s.headers)t.headers||(t.headers={}),void 0===t.headers[r]&&(t.headers[r]=s.headers[r]);t.body=s.body.call(this,e,t,i)}else console.warn("Ajax Error - Invalid ajaxContentType value:",this.table.options.ajaxContentType);e?(void 0===t.headers&&(t.headers={}),void 0===t.headers.Accept&&(t.headers.Accept="application/json"),void 0===t.headers["X-Requested-With"]&&(t.headers["X-Requested-With"]="XMLHttpRequest"),void 0===t.mode&&(t.mode="cors"),"cors"==t.mode?(void 0===t.headers.Origin&&(t.headers.Origin=window.location.origin),void 0===t.credentials&&(t.credentials="same-origin")):void 0===t.credentials&&(t.credentials="include"),fetch(e,t).then((e=>{e.ok?e.json().then((e=>{o(e)})).catch((e=>{n(e),console.warn("Ajax Load Error - Invalid JSON returned",e)})):(console.error("Ajax Load Error - Connection Error: "+e.status,e.statusText),n(e))})).catch((e=>{console.error("Ajax Load Error - Connection Error: ",e),n(e)}))):(console.warn("Ajax Load Error - No URL Set"),o([]))}))}function G(e,t){var 
i=[];if(t=t||"",Array.isArray(e))e.forEach(((e,s)=>{i=i.concat(G(e,t?t+"["+s+"]":s))}));else if("object"==typeof e)for(var s in e)i=i.concat(G(e[s],t?t+"["+s+"]":s));else i.push({key:t,value:e});return i}var U={json:{headers:{"Content-Type":"application/json"},body:function(e,t,i){return JSON.stringify(i)}},form:{headers:{},body:function(e,t,i){var s=G(i),o=new FormData;return s.forEach((function(e){o.append(e.key,e.value)})),o}}};class X extends M{static moduleName="ajax";static defaultConfig=V;static defaultURLGenerator=W;static defaultLoaderPromise=j;static contentTypeFormatters=U;constructor(e){super(e),this.config={},this.url="",this.urlGenerator=!1,this.params=!1,this.loaderPromise=!1,this.registerTableOption("ajaxURL",!1),this.registerTableOption("ajaxURLGenerator",!1),this.registerTableOption("ajaxParams",{}),this.registerTableOption("ajaxConfig","get"),this.registerTableOption("ajaxContentType","form"),this.registerTableOption("ajaxRequestFunc",!1),this.registerTableOption("ajaxRequesting",(function(){})),this.registerTableOption("ajaxResponse",!1),this.contentTypeFormatters=X.contentTypeFormatters}initialize(){this.loaderPromise=this.table.options.ajaxRequestFunc||X.defaultLoaderPromise,this.urlGenerator=this.table.options.ajaxURLGenerator||X.defaultURLGenerator,this.table.options.ajaxURL&&this.setUrl(this.table.options.ajaxURL),this.setDefaultConfig(this.table.options.ajaxConfig),this.registerTableFunction("getAjaxUrl",this.getUrl.bind(this)),this.subscribe("data-loading",this.requestDataCheck.bind(this)),this.subscribe("data-params",this.requestParams.bind(this)),this.subscribe("data-load",this.requestData.bind(this))}requestParams(e,t,i,s){var o=this.table.options.ajaxParams;return o&&("function"==typeof o&&(o=o.call(this.table)),s=Object.assign(Object.assign({},o),s)),s}requestDataCheck(e,t,i,s){return!((e||!this.url)&&"string"!=typeof e)}requestData(e,t,i,s,o){var n;return!o&&this.requestDataCheck(e)?(e&&this.setUrl(e),n=this.generateConfig(i),this.sendRequest(this.url,t,n)):o}setDefaultConfig(e={}){this.config=Object.assign({},X.defaultConfig),"string"==typeof e?this.config.method=e:Object.assign(this.config,e)}generateConfig(e={}){var t=Object.assign({},this.config);return"string"==typeof e?t.method=e:Object.assign(t,e),t}setUrl(e){this.url=e}getUrl(){return this.url}sendRequest(e,t,i){return!1!==this.table.options.ajaxRequesting.call(this.table,e,t)?this.loaderPromise(e,i,t).then((i=>(this.table.options.ajaxResponse&&(i=this.table.options.ajaxResponse.call(this.table,e,t,i)),i))):Promise.reject()}}var J={replace:function(e){return this.table.setData(e)},update:function(e){return this.table.updateOrAddData(e)},insert:function(e){return this.table.addData(e)}},q={table:function(e){var t=[],i=!0,s=this.table.columnManager.columns,o=[],n=[];return(e=e.split("\n")).forEach((function(e){t.push(e.split("\t"))})),!(!t.length||1===t.length&&t[0].length<2)&&(t[0].forEach((function(e){var t=s.find((function(t){return e&&t.definition.title&&e.trim()&&t.definition.title.trim()===e.trim()}));t?o.push(t):i=!1})),i||(i=!0,o=[],t[0].forEach((function(e){var t=s.find((function(t){return e&&t.field&&e.trim()&&t.field.trim()===e.trim()}));t?o.push(t):i=!1})),i||(o=this.table.columnManager.columnsByIndex)),i&&t.shift(),t.forEach((function(e){var t={};e.forEach((function(e,i){o[i]&&(t[o[i].field]=e)})),n.push(t)})),n)}},K={keybindings:{bindings:{copyToClipboard:["ctrl + 67","meta + 
67"]},actions:{copyToClipboard:function(e){this.table.modules.edit.currentCell||this.table.modExists("clipboard",!0)&&this.table.modules.clipboard.copy(!1,!0)}}}};class Y extends M{static moduleName="clipboard";static moduleExtensions=K;static pasteActions=J;static pasteParsers=q;constructor(e){super(e),this.mode=!0,this.pasteParser=function(){},this.pasteAction=function(){},this.customSelection=!1,this.rowRange=!1,this.blocked=!0,this.registerTableOption("clipboard",!1),this.registerTableOption("clipboardCopyStyled",!0),this.registerTableOption("clipboardCopyConfig",!1),this.registerTableOption("clipboardCopyFormatter",!1),this.registerTableOption("clipboardCopyRowRange","active"),this.registerTableOption("clipboardPasteParser","table"),this.registerTableOption("clipboardPasteAction","insert"),this.registerColumnOption("clipboard"),this.registerColumnOption("titleClipboard")}initialize(){this.mode=this.table.options.clipboard,this.rowRange=this.table.options.clipboardCopyRowRange,!0!==this.mode&&"copy"!==this.mode||this.table.element.addEventListener("copy",(e=>{var t,i,s;this.blocked||(e.preventDefault(),this.customSelection?(t=this.customSelection,this.table.options.clipboardCopyFormatter&&(t=this.table.options.clipboardCopyFormatter("plain",t))):(s=this.table.modules.export.generateExportList(this.table.options.clipboardCopyConfig,this.table.options.clipboardCopyStyled,this.rowRange,"clipboard"),t=(i=this.table.modules.export.generateHTMLTable(s))?this.generatePlainContent(s):"",this.table.options.clipboardCopyFormatter&&(t=this.table.options.clipboardCopyFormatter("plain",t),i=this.table.options.clipboardCopyFormatter("html",i))),window.clipboardData&&window.clipboardData.setData?window.clipboardData.setData("Text",t):e.clipboardData&&e.clipboardData.setData?(e.clipboardData.setData("text/plain",t),i&&e.clipboardData.setData("text/html",i)):e.originalEvent&&e.originalEvent.clipboardData.setData&&(e.originalEvent.clipboardData.setData("text/plain",t),i&&e.originalEvent.clipboardData.setData("text/html",i)),this.dispatchExternal("clipboardCopied",t,i),this.reset())})),!0!==this.mode&&"paste"!==this.mode||this.table.element.addEventListener("paste",(e=>{this.paste(e)})),this.setPasteParser(this.table.options.clipboardPasteParser),this.setPasteAction(this.table.options.clipboardPasteAction),this.registerTableFunction("copyToClipboard",this.copy.bind(this))}reset(){this.blocked=!0,this.customSelection=!1}generatePlainContent(e){var t=[];return e.forEach((e=>{var i=[];e.columns.forEach((t=>{var s="";if(t)if("group"===e.type&&(t.value=t.component.getKey()),null===t.value)s="";else switch(typeof t.value){case"object":s=JSON.stringify(t.value);break;case"undefined":s="";break;default:s=t.value}i.push(s)})),t.push(i.join("\t"))})),t.join("\n")}copy(e,t){var i,s;this.blocked=!1,this.customSelection=!1,!0!==this.mode&&"copy"!==this.mode||(this.rowRange=e||this.table.options.clipboardCopyRowRange,void 0!==window.getSelection&&void 0!==document.createRange?((e=document.createRange()).selectNodeContents(this.table.element),(i=window.getSelection()).toString()&&t&&(this.customSelection=i.toString()),i.removeAllRanges(),i.addRange(e)):void 0!==document.selection&&void 0!==document.body.createTextRange&&((s=document.body.createTextRange()).moveToElementText(this.table.element),s.select()),document.execCommand("copy"),i&&i.removeAllRanges())}setPasteAction(e){switch(typeof e){case"string":this.pasteAction=Y.pasteActions[e],this.pasteAction||console.warn("Clipboard Error - No such paste action 
found:",e);break;case"function":this.pasteAction=e}}setPasteParser(e){switch(typeof e){case"string":this.pasteParser=Y.pasteParsers[e],this.pasteParser||console.warn("Clipboard Error - No such paste parser found:",e);break;case"function":this.pasteParser=e}}paste(e){var t,i,s;this.checkPasteOrigin(e)&&(t=this.getPasteData(e),(i=this.pasteParser.call(this,t))?(e.preventDefault(),this.table.modExists("mutator")&&(i=this.mutateData(i)),s=this.pasteAction.call(this,i),this.dispatchExternal("clipboardPasted",t,i,s)):this.dispatchExternal("clipboardPasteError",t))}mutateData(e){var t=[];return Array.isArray(e)?e.forEach((e=>{t.push(this.table.modules.mutator.transformRow(e,"clipboard"))})):t=e,t}checkPasteOrigin(e){var t=!0;return!this.confirm("clipboard-paste",[e])&&["DIV","SPAN"].includes(e.target.tagName)||(t=!1),t}getPasteData(e){var t;return window.clipboardData&&window.clipboardData.getData?t=window.clipboardData.getData("Text"):e.clipboardData&&e.clipboardData.getData?t=e.clipboardData.getData("text/plain"):e.originalEvent&&e.originalEvent.clipboardData.getData&&(t=e.originalEvent.clipboardData.getData("text/plain")),t}}class ${constructor(e){return this._row=e,new Proxy(this,{get:function(e,t,i){return void 0!==e[t]?e[t]:e._row.table.componentFunctionBinder.handle("row",e._row,t)}})}getData(e){return this._row.getData(e)}getElement(){return this._row.getElement()}getTable(){return this._row.table}getCells(){var e=[];return this._row.getCells().forEach((function(t){e.push(t.getComponent())})),e}getCell(e){var t=this._row.getCell(e);return!!t&&t.getComponent()}_getSelf(){return this._row}}var Q={avg:function(e,t,i){var s=0,o=void 0!==i.precision?i.precision:2;return e.length&&(s=e.reduce((function(e,t){return Number(e)+Number(t)})),s/=e.length,s=!1!==o?s.toFixed(o):s),parseFloat(s).toString()},max:function(e,t,i){var s=null,o=void 0!==i.precision&&i.precision;return e.forEach((function(e){((e=Number(e))>s||null===s)&&(s=e)})),null!==s?!1!==o?s.toFixed(o):s:""},min:function(e,t,i){var s=null,o=void 0!==i.precision&&i.precision;return e.forEach((function(e){((e=Number(e))(e||0===t)&&e.indexOf(t)===i)).length}};class Z extends M{static moduleName="columnCalcs";static calculations=Q;constructor(e){super(e),this.topCalcs=[],this.botCalcs=[],this.genColumn=!1,this.topElement=this.createElement(),this.botElement=this.createElement(),this.topRow=!1,this.botRow=!1,this.topInitialized=!1,this.botInitialized=!1,this.blocked=!1,this.recalcAfterBlock=!1,this.registerTableOption("columnCalcs",!0),this.registerColumnOption("topCalc"),this.registerColumnOption("topCalcParams"),this.registerColumnOption("topCalcFormatter"),this.registerColumnOption("topCalcFormatterParams"),this.registerColumnOption("bottomCalc"),this.registerColumnOption("bottomCalcParams"),this.registerColumnOption("bottomCalcFormatter"),this.registerColumnOption("bottomCalcFormatterParams")}createElement(){var e=document.createElement("div");return e.classList.add("tabulator-calcs-holder"),e}initialize(){this.genColumn=new 
r({field:"value"},this),this.subscribe("cell-value-changed",this.cellValueChanged.bind(this)),this.subscribe("column-init",this.initializeColumnCheck.bind(this)),this.subscribe("row-deleted",this.rowsUpdated.bind(this)),this.subscribe("scroll-horizontal",this.scrollHorizontal.bind(this)),this.subscribe("row-added",this.rowsUpdated.bind(this)),this.subscribe("column-moved",this.recalcActiveRows.bind(this)),this.subscribe("column-add",this.recalcActiveRows.bind(this)),this.subscribe("data-refreshed",this.recalcActiveRowsRefresh.bind(this)),this.subscribe("table-redraw",this.tableRedraw.bind(this)),this.subscribe("rows-visible",this.visibleRows.bind(this)),this.subscribe("scrollbar-vertical",this.adjustForScrollbar.bind(this)),this.subscribe("redraw-blocked",this.blockRedraw.bind(this)),this.subscribe("redraw-restored",this.restoreRedraw.bind(this)),this.subscribe("table-redrawing",this.resizeHolderWidth.bind(this)),this.subscribe("column-resized",this.resizeHolderWidth.bind(this)),this.subscribe("column-show",this.resizeHolderWidth.bind(this)),this.subscribe("column-hide",this.resizeHolderWidth.bind(this)),this.registerTableFunction("getCalcResults",this.getResults.bind(this)),this.registerTableFunction("recalc",this.userRecalc.bind(this)),this.resizeHolderWidth()}resizeHolderWidth(){this.topElement.style.minWidth=this.table.columnManager.headersElement.offsetWidth+"px"}tableRedraw(e){this.recalc(this.table.rowManager.activeRows),e&&this.redraw()}blockRedraw(){this.blocked=!0,this.recalcAfterBlock=!1}restoreRedraw(){this.blocked=!1,this.recalcAfterBlock&&(this.recalcAfterBlock=!1,this.recalcActiveRowsRefresh())}userRecalc(){this.recalc(this.table.rowManager.activeRows)}blockCheck(){return this.blocked&&(this.recalcAfterBlock=!0),this.blocked}visibleRows(e,t){return this.topRow&&t.unshift(this.topRow),this.botRow&&t.push(this.botRow),t}rowsUpdated(e){this.table.options.groupBy?this.recalcRowGroup(e):this.recalcActiveRows()}recalcActiveRowsRefresh(){this.table.options.groupBy&&this.table.options.dataTreeStartExpanded&&this.table.options.dataTree?this.recalcAll():this.recalcActiveRows()}recalcActiveRows(){this.recalc(this.table.rowManager.activeRows)}cellValueChanged(e){(e.column.definition.topCalc||e.column.definition.bottomCalc)&&(this.table.options.groupBy?("table"!=this.table.options.columnCalcs&&"both"!=this.table.options.columnCalcs||this.recalcActiveRows(),"table"!=this.table.options.columnCalcs&&this.recalcRowGroup(e.row)):this.recalcActiveRows())}initializeColumnCheck(e){(e.definition.topCalc||e.definition.bottomCalc)&&this.initializeColumn(e)}initializeColumn(e){var t=e.definition,i={topCalcParams:t.topCalcParams||{},botCalcParams:t.bottomCalcParams||{}};if(t.topCalc){switch(typeof t.topCalc){case"string":Z.calculations[t.topCalc]?i.topCalc=Z.calculations[t.topCalc]:console.warn("Column Calc Error - No such calculation found, ignoring: ",t.topCalc);break;case"function":i.topCalc=t.topCalc}i.topCalc&&(e.modules.columnCalcs=i,this.topCalcs.push(e),"group"!=this.table.options.columnCalcs&&this.initializeTopRow())}if(t.bottomCalc){switch(typeof t.bottomCalc){case"string":Z.calculations[t.bottomCalc]?i.botCalc=Z.calculations[t.bottomCalc]:console.warn("Column Calc Error - No such calculation found, ignoring: ",t.bottomCalc);break;case"function":i.botCalc=t.bottomCalc}i.botCalc&&(e.modules.columnCalcs=i,this.botCalcs.push(e),"group"!=this.table.options.columnCalcs&&this.initializeBottomRow())}}registerColumnField(){}removeCalcs(){var 
e=!1;this.topInitialized&&(this.topInitialized=!1,this.topElement.parentNode.removeChild(this.topElement),e=!0),this.botInitialized&&(this.botInitialized=!1,this.footerRemove(this.botElement),e=!0),e&&this.table.rowManager.adjustTableSize()}reinitializeCalcs(){this.topCalcs.length&&this.initializeTopRow(),this.botCalcs.length&&this.initializeBottomRow()}initializeTopRow(){var e=document.createDocumentFragment();this.topInitialized||(e.appendChild(document.createElement("br")),e.appendChild(this.topElement),this.table.columnManager.getContentsElement().insertBefore(e,this.table.columnManager.headersElement.nextSibling),this.topInitialized=!0)}initializeBottomRow(){this.botInitialized||(this.footerPrepend(this.botElement),this.botInitialized=!0)}scrollHorizontal(e){this.botInitialized&&this.botRow&&(this.botElement.scrollLeft=e)}recalc(e){var t,i;if(!this.blockCheck()&&(this.topInitialized||this.botInitialized)){if(t=this.rowsToData(e),this.topInitialized){for(this.topRow&&this.topRow.deleteCells(),i=this.generateRow("top",t),this.topRow=i;this.topElement.firstChild;)this.topElement.removeChild(this.topElement.firstChild);this.topElement.appendChild(i.getElement()),i.initialize(!0)}if(this.botInitialized){for(this.botRow&&this.botRow.deleteCells(),i=this.generateRow("bottom",t),this.botRow=i;this.botElement.firstChild;)this.botElement.removeChild(this.botElement.firstChild);this.botElement.appendChild(i.getElement()),i.initialize(!0)}this.table.rowManager.adjustTableSize(),this.table.modExists("frozenColumns")&&this.table.modules.frozenColumns.layout()}}recalcRowGroup(e){this.recalcGroup(this.table.modules.groupRows.getRowGroup(e))}recalcAll(){(this.topCalcs.length||this.botCalcs.length)&&("group"!==this.table.options.columnCalcs&&this.recalcActiveRows(),this.table.options.groupBy&&"table"!==this.table.options.columnCalcs&&this.table.modules.groupRows.getChildGroups().forEach((e=>{this.recalcGroup(e)})))}recalcGroup(e){var t,i;this.blockCheck()||e&&e.calcs&&(e.calcs.bottom&&(t=this.rowsToData(e.rows),i=this.generateRowData("bottom",t),e.calcs.bottom.updateData(i),e.calcs.bottom.reinitialize()),e.calcs.top&&(t=this.rowsToData(e.rows),i=this.generateRowData("top",t),e.calcs.top.updateData(i),e.calcs.top.reinitialize()))}generateTopRow(e){return this.generateRow("top",this.rowsToData(e))}generateBottomRow(e){return this.generateRow("bottom",this.rowsToData(e))}rowsToData(e){var t=[],i=this.table.options.dataTree&&this.table.options.dataTreeChildColumnCalcs,s=this.table.modules.dataTree;return e.forEach((e=>{t.push(e.getData()),i&&e.modules.dataTree?.open&&this.rowsToData(s.getFilteredTreeChildren(e)).forEach((i=>{t.push(e)}))})),t}generateRow(e,t){var i,s=this.generateRowData(e,t);return this.table.modExists("mutator")&&this.table.modules.mutator.disable(),i=new p(s,this,"calc"),this.table.modExists("mutator")&&this.table.modules.mutator.enable(),i.getElement().classList.add("tabulator-calcs","tabulator-calcs-"+e),i.component=!1,i.getComponent=()=>(i.component||(i.component=new $(i)),i.component),i.generateCells=()=>{var 
t=[];this.table.columnManager.columnsByIndex.forEach((s=>{this.genColumn.setField(s.getField()),this.genColumn.hozAlign=s.hozAlign,s.definition[e+"CalcFormatter"]&&this.table.modExists("format")?this.genColumn.modules.format={formatter:this.table.modules.format.getFormatter(s.definition[e+"CalcFormatter"]),params:s.definition[e+"CalcFormatterParams"]||{}}:this.genColumn.modules.format={formatter:this.table.modules.format.getFormatter("plaintext"),params:{}},this.genColumn.definition.cssClass=s.definition.cssClass;var o=new n(this.genColumn,i);o.getElement(),o.column=s,o.setWidth(),s.cells.push(o),t.push(o),s.visible||o.hide()})),i.cells=t},i}generateRowData(e,t){var i,s,o={},n="top"==e?this.topCalcs:this.botCalcs,r="top"==e?"topCalc":"botCalc";return n.forEach((function(e){var n=[];e.modules.columnCalcs&&e.modules.columnCalcs[r]&&(t.forEach((function(t){n.push(e.getFieldValue(t))})),s=r+"Params",i="function"==typeof e.modules.columnCalcs[s]?e.modules.columnCalcs[s](n,t):e.modules.columnCalcs[s],e.setFieldValue(o,e.modules.columnCalcs[r](n,t,i)))})),o}hasTopCalcs(){return!!this.topCalcs.length}hasBottomCalcs(){return!!this.botCalcs.length}redraw(){this.topRow&&this.topRow.normalizeHeight(!0),this.botRow&&this.botRow.normalizeHeight(!0)}getResults(){var e={};return this.table.options.groupBy&&this.table.modExists("groupRows")?this.table.modules.groupRows.getGroups(!0).forEach((t=>{e[t.getKey()]=this.getGroupResults(t)})):e={top:this.topRow?this.topRow.getData():{},bottom:this.botRow?this.botRow.getData():{}},e}getGroupResults(e){var t=e._getSelf(),i=e.getSubGroups(),s={};return i.forEach((e=>{s[e.getKey()]=this.getGroupResults(e)})),{top:t.calcs.top?t.calcs.top.getData():{},bottom:t.calcs.bottom?t.calcs.bottom.getData():{},groups:s}}adjustForScrollbar(e){this.botRow&&(this.table.rtl?this.botElement.style.paddingLeft=e+"px":this.botElement.style.paddingRight=e+"px")}}var ee={csv:function(e,t={},i){var s=t.delimiter?t.delimiter:",",o=[],n=[];e.forEach((e=>{var t=[];switch(e.type){case"group":console.warn("Download Warning - CSV downloader cannot process row groups");break;case"calc":console.warn("Download Warning - CSV downloader cannot process column calculations");break;case"header":e.columns.forEach(((e,t)=>{e&&1===e.depth&&(n[t]=void 0===e.value||null===e.value?"":'"'+String(e.value).split('"').join('""')+'"')}));break;case"row":e.columns.forEach((e=>{if(e){switch(typeof e.value){case"object":e.value=null!==e.value?JSON.stringify(e.value):"";break;case"undefined":e.value=""}t.push('"'+String(e.value).split('"').join('""')+'"')}})),o.push(t.join(s))}})),n.length&&o.unshift(n.join(s)),o=o.join("\n"),t.bom&&(o="\ufeff"+o),i(o,"text/csv")},json:function(e,t,i){var s=[];e.forEach((e=>{var t={};switch(e.type){case"header":break;case"group":console.warn("Download Warning - JSON downloader cannot process row groups");break;case"calc":console.warn("Download Warning - JSON downloader cannot process column calculations");break;case"row":e.columns.forEach((e=>{e&&(t[e.component.getTitleDownload()||e.component.getField()]=e.value)})),s.push(t)}})),i(s=JSON.stringify(s,null,"\t"),"application/json")},jsonLines:function(e,t,i){const s=[];e.forEach((e=>{const t={};switch(e.type){case"header":break;case"group":console.warn("Download Warning - JSON downloader cannot process row groups");break;case"calc":console.warn("Download Warning - JSON downloader cannot process column 
calculations");break;case"row":e.columns.forEach((e=>{e&&(t[e.component.getTitleDownload()||e.component.getField()]=e.value)})),s.push(JSON.stringify(t))}})),i(s.join("\n"),"application/x-ndjson")},pdf:function(e,t={},i){var s=[],o=[],n={},r=t.rowGroupStyles||{fontStyle:"bold",fontSize:12,cellPadding:6,fillColor:220},a=t.rowCalcStyles||{fontStyle:"bold",fontSize:10,cellPadding:4,fillColor:232},l=t.jsPDF||{},h=t.title?t.title:"";function d(e,t){var i=[];return e.columns.forEach((e=>{var s;if(e){switch(typeof e.value){case"object":e.value=null!==e.value?JSON.stringify(e.value):"";break;case"undefined":e.value=""}s={content:e.value,colSpan:e.width,rowSpan:e.height},t&&(s.styles=t),i.push(s)}})),i}l.orientation||(l.orientation=t.orientation||"landscape"),l.unit||(l.unit="pt"),e.forEach((e=>{switch(e.type){case"header":s.push(d(e));break;case"group":o.push(d(e,r));break;case"calc":o.push(d(e,a));break;case"row":o.push(d(e))}}));var c=new jspdf.jsPDF(l);t.autoTable&&(n="function"==typeof t.autoTable?t.autoTable(c)||{}:t.autoTable),h&&(n.didDrawPage=function(e){c.text(h,40,30)}),n.head=s,n.body=o,c.autoTable(n),t.documentProcessing&&t.documentProcessing(c),i(c.output("arraybuffer"),"application/pdf")},xlsx:function(e,i,s){var o=i.sheetName||"Sheet1",n=XLSX.utils.book_new(),r=new t(this),a=!("compress"in i)||i.compress,l=i.writeOptions||{bookType:"xlsx",bookSST:!0,compression:a};function h(){var t=[],i=[],s={},o={s:{c:0,r:0},e:{c:e[0]?e[0].columns.reduce(((e,t)=>e+(t&&t.width?t.width:1)),0):0,r:e.length}};return e.forEach(((e,s)=>{var o=[];e.columns.forEach((function(e,t){e?(o.push(e.value instanceof Date||"object"!=typeof e.value?e.value:JSON.stringify(e.value)),(e.width>1||e.height>-1)&&(e.height>1||e.width>1)&&i.push({s:{r:s,c:t},e:{r:s+e.height-1,c:t+e.width-1}})):o.push("")})),t.push(o)})),XLSX.utils.sheet_add_aoa(s,t),s["!ref"]=XLSX.utils.encode_range(o),i.length&&(s["!merges"]=i),s}if(l.type="binary",n.SheetNames=[],n.Sheets={},i.sheetOnly)s(h());else{if(i.sheets)for(var d in i.sheets)!0===i.sheets[d]?(n.SheetNames.push(d),n.Sheets[d]=h()):(n.SheetNames.push(d),r.commsSend(i.sheets[d],"download","intercept",{type:"xlsx",options:{sheetOnly:!0},active:this.active,intercept:function(e){n.Sheets[d]=e}}));else n.SheetNames.push(o),n.Sheets[o]=h();i.documentProcessing&&(n=i.documentProcessing(n)),s(function(e){for(var t=new ArrayBuffer(e.length),i=new Uint8Array(t),s=0;s!=e.length;++s)i[s]=255&e.charCodeAt(s);return t}(XLSX.write(n,l)),"application/octet-stream")}},html:function(e,t,i){this.modExists("export",!0)&&i(this.modules.export.generateHTMLTable(e),"text/html")}};class te extends M{static moduleName="download";static downloaders=ee;constructor(e){super(e),this.registerTableOption("downloadEncoder",(function(e,t){return new Blob([e],{type:t})})),this.registerTableOption("downloadConfig",{}),this.registerTableOption("downloadRowRange","active"),this.registerColumnOption("download"),this.registerColumnOption("titleDownload")}initialize(){this.deprecatedOptionsCheck(),this.registerTableFunction("download",this.download.bind(this)),this.registerTableFunction("downloadToTab",this.downloadToTab.bind(this))}deprecatedOptionsCheck(){}downloadToTab(e,t,i,s){this.download(e,t,i,s,!0)}download(e,t,i,s,o){var n=!1;if("function"==typeof e?n=e:te.downloaders[e]?n=te.downloaders[e]:console.warn("Download Error - No such download type found: ",e),n){var 
r=this.generateExportList(s);n.call(this.table,r,i||{},function(i,s){o?!0===o?this.triggerDownload(i,s,e,t,!0):o(i):this.triggerDownload(i,s,e,t)}.bind(this))}}generateExportList(e){var t=this.table.modules.export.generateExportList(this.table.options.downloadConfig,!1,e||this.table.options.downloadRowRange,"download"),i=this.table.options.groupHeaderDownload;return i&&!Array.isArray(i)&&(i=[i]),t.forEach((e=>{var t;"group"===e.type&&(t=e.columns[0],i&&i[e.indent]&&(t.value=i[e.indent](t.value,e.component._group.getRowCount(),e.component._group.getData(),e.component)))})),t}triggerDownload(e,t,i,s,o){var n=document.createElement("a"),r=this.table.options.downloadEncoder(e,t);r&&(o?window.open(window.URL.createObjectURL(r)):(s=s||"Tabulator."+("function"==typeof i?"txt":i),navigator.msSaveOrOpenBlob?navigator.msSaveOrOpenBlob(r,s):(n.setAttribute("href",window.URL.createObjectURL(r)),n.setAttribute("download",s),n.style.display="none",document.body.appendChild(n),n.click(),document.body.removeChild(n))),this.dispatchExternal("downloadComplete"))}commsReceived(e,t,i){if("intercept"===t)this.download(i.type,"",i.options,i.active,i.intercept)}}function ie(e,t){var i=t.mask,s=void 0!==t.maskLetterChar?t.maskLetterChar:"A",o=void 0!==t.maskNumberChar?t.maskNumberChar:"9",n=void 0!==t.maskWildcardChar?t.maskWildcardChar:"*";function r(t){var a=i[t];void 0!==a&&a!==n&&a!==s&&a!==o&&(e.value=e.value+""+a,r(t+1))}e.addEventListener("keydown",(t=>{var r=e.value.length,a=t.key;if(t.keyCode>46&&!t.ctrlKey&&!t.metaKey){if(r>=i.length)return t.preventDefault(),t.stopPropagation(),!1;switch(i[r]){case s:if(a.toUpperCase()==a.toLowerCase())return t.preventDefault(),t.stopPropagation(),!1;break;case o:if(isNaN(a))return t.preventDefault(),t.stopPropagation(),!1;break;case n:break;default:if(a!==i[r])return t.preventDefault(),t.stopPropagation(),!1}}})),e.addEventListener("keyup",(i=>{i.keyCode>46&&t.maskAutoFill&&r(e.value.length)})),e.placeholder||(e.placeholder=i),t.maskAutoFill&&r(e.value.length)}let se=class{constructor(e,t,i,s,o,n){this.edit=e,this.table=e.table,this.cell=t,this.params=this._initializeParams(n),this.data=[],this.displayItems=[],this.currentItems=[],this.focusedItem=null,this.input=this._createInputElement(),this.listEl=this._createListElement(),this.initialValues=null,this.isFilter="header"===t.getType(),this.filterTimeout=null,this.filtered=!1,this.typing=!1,this.values=[],this.popup=null,this.listIteration=0,this.lastAction="",this.filterTerm="",this.blurable=!0,this.actions={success:s,cancel:o},this._deprecatedOptionsCheck(),this._initializeValue(),i(this._onRendered.bind(this))}_deprecatedOptionsCheck(){}_initializeValue(){var e=this.cell.getValue();void 0===e&&void 0!==this.params.defaultValue&&(e=this.params.defaultValue),this.initialValues=this.params.multiselect?e:[e],this.isFilter&&(this.input.value=this.initialValues?this.initialValues.join(","):"",this.headerFilterInitialListGen())}_onRendered(){var e=this.cell.getElement();function t(e){e.stopPropagation()}this.isFilter||(this.input.style.height="100%",this.input.focus({preventScroll:!0})),e.addEventListener("click",t),setTimeout((()=>{e.removeEventListener("click",t)}),1e3),this.input.addEventListener("mousedown",this._preventPopupBlur.bind(this))}_createListElement(){var e=document.createElement("div");return e.classList.add("tabulator-edit-list"),e.addEventListener("mousedown",this._preventBlur.bind(this)),e.addEventListener("keydown",this._inputKeyDown.bind(this)),e}_setListWidth(){var 
e=this.isFilter?this.input:this.cell.getElement();this.listEl.style.minWidth=e.offsetWidth+"px",this.params.maxWidth&&(!0===this.params.maxWidth?this.listEl.style.maxWidth=e.offsetWidth+"px":"number"==typeof this.params.maxWidth?this.listEl.style.maxWidth=this.params.maxWidth+"px":this.listEl.style.maxWidth=this.params.maxWidth)}_createInputElement(){var e=this.params.elementAttributes,t=document.createElement("input");if(t.setAttribute("type",this.params.clearable?"search":"text"),t.style.padding="4px",t.style.width="100%",t.style.boxSizing="border-box",this.params.autocomplete||(t.style.cursor="default",t.style.caretColor="transparent"),e&&"object"==typeof e)for(let i in e)"+"==i.charAt(0)?(i=i.slice(1),t.setAttribute(i,t.getAttribute(i)+e["+"+i])):t.setAttribute(i,e[i]);return this.params.mask&&ie(t,this.params),this._bindInputEvents(t),t}_initializeParams(e){var t,i=["values","valuesURL","valuesLookup"];return(e=Object.assign({},e)).verticalNavigation=e.verticalNavigation||"editor",e.placeholderLoading=void 0===e.placeholderLoading?"Searching ...":e.placeholderLoading,e.placeholderEmpty=void 0===e.placeholderEmpty?"No Results Found":e.placeholderEmpty,e.filterDelay=void 0===e.filterDelay?300:e.filterDelay,e.emptyValue=Object.keys(e).includes("emptyValue")?e.emptyValue:"",(t=Object.keys(e).filter((e=>i.includes(e))).length)?t>1&&console.warn("list editor config error - only one of the values, valuesURL, or valuesLookup options can be set on the same editor"):console.warn("list editor config error - either the values, valuesURL, or valuesLookup option must be set"),e.autocomplete?e.multiselect&&(e.multiselect=!1,console.warn("list editor config error - multiselect option is not available when autocomplete is enabled")):(e.freetext&&(e.freetext=!1,console.warn("list editor config error - freetext option is only available when autocomplete is enabled")),e.filterFunc&&(e.filterFunc=!1,console.warn("list editor config error - filterFunc option is only available when autocomplete is enabled")),e.filterRemote&&(e.filterRemote=!1,console.warn("list editor config error - filterRemote option is only available when autocomplete is enabled")),e.mask&&(e.mask=!1,console.warn("list editor config error - mask option is only available when autocomplete is enabled")),e.allowEmpty&&(e.allowEmpty=!1,console.warn("list editor config error - allowEmpty option is only available when autocomplete is enabled")),e.listOnEmpty&&(e.listOnEmpty=!1,console.warn("list editor config error - listOnEmpty option is only available when autocomplete is enabled"))),e.filterRemote&&"function"!=typeof e.valuesLookup&&!e.valuesURL&&(e.filterRemote=!1,console.warn("list editor config error - filterRemote option should only be used when values list is populated from a remote 
source")),e}_bindInputEvents(e){e.addEventListener("focus",this._inputFocus.bind(this)),e.addEventListener("click",this._inputClick.bind(this)),e.addEventListener("blur",this._inputBlur.bind(this)),e.addEventListener("keydown",this._inputKeyDown.bind(this)),e.addEventListener("search",this._inputSearch.bind(this)),this.params.autocomplete&&e.addEventListener("keyup",this._inputKeyUp.bind(this))}_inputFocus(e){this.rebuildOptionsList()}_filter(){this.params.filterRemote?(clearTimeout(this.filterTimeout),this.filterTimeout=setTimeout((()=>{this.rebuildOptionsList()}),this.params.filterDelay)):this._filterList()}_inputClick(e){e.stopPropagation()}_inputBlur(e){this.blurable&&(this.popup?this.popup.hide():this._resolveValue(!0))}_inputSearch(){this._clearChoices()}_inputKeyDown(e){switch(e.keyCode){case 38:this._keyUp(e);break;case 40:this._keyDown(e);break;case 37:case 39:this._keySide(e);break;case 13:this._keyEnter();break;case 27:this._keyEsc();break;case 36:case 35:this._keyHomeEnd(e);break;case 9:this._keyTab(e);break;default:this._keySelectLetter(e)}}_inputKeyUp(e){switch(e.keyCode){case 38:case 37:case 39:case 40:case 13:case 27:break;default:this._keyAutoCompLetter(e)}}_preventPopupBlur(){this.popup&&this.popup.blockHide(),setTimeout((()=>{this.popup&&this.popup.restoreHide()}),10)}_preventBlur(){this.blurable=!1,setTimeout((()=>{this.blurable=!0}),10)}_keyTab(e){this.params.autocomplete&&"typing"===this.lastAction?this._resolveValue(!0):this.focusedItem&&this._chooseItem(this.focusedItem,!0)}_keyUp(e){var t=this.displayItems.indexOf(this.focusedItem);("editor"==this.params.verticalNavigation||"hybrid"==this.params.verticalNavigation&&t)&&(e.stopImmediatePropagation(),e.stopPropagation(),e.preventDefault(),t>0&&this._focusItem(this.displayItems[t-1]))}_keyDown(e){var t=this.displayItems.indexOf(this.focusedItem);("editor"==this.params.verticalNavigation||"hybrid"==this.params.verticalNavigation&&t=38&&e.keyCode<=90&&this._scrollToValue(e.keyCode))}_keyAutoCompLetter(e){this._filter(),this.lastAction="typing",this.typing=!0}_scrollToValue(e){clearTimeout(this.filterTimeout);var t=String.fromCharCode(e).toLowerCase();this.filterTerm+=t.toLowerCase();var i=this.displayItems.find((e=>void 0!==e.label&&e.label.toLowerCase().startsWith(this.filterTerm)));i&&this._focusItem(i),this.filterTimeout=setTimeout((()=>{this.filterTerm=""}),800)}_focusItem(e){this.lastAction="focus",this.focusedItem&&this.focusedItem.element&&this.focusedItem.element.classList.remove("focused"),this.focusedItem=e,e&&e.element&&(e.element.classList.add("focused"),e.element.scrollIntoView({behavior:"smooth",block:"nearest",inline:"start"}))}headerFilterInitialListGen(){this._generateOptions(!0)}rebuildOptionsList(){this._generateOptions().then(this._sortOptions.bind(this)).then(this._buildList.bind(this)).then(this._showList.bind(this)).catch((e=>{Number.isInteger(e)||console.error("List generation error",e)}))}_filterList(){this._buildList(this._filterOptions()),this._showList()}_generateOptions(e){var t=[],i=++this.listIteration;return this.filtered=!1,this.params.values?t=this.params.values:this.params.valuesURL?t=this._ajaxRequest(this.params.valuesURL,this.input.value):"function"==typeof this.params.valuesLookup?t=this.params.valuesLookup(this.cell,this.input.value):this.params.valuesLookup&&(t=this._uniqueColumnValues(this.params.valuesLookupField)),t instanceof 
Promise?(e||this._addPlaceholder(this.params.placeholderLoading),t.then().then((e=>this.listIteration===i?this._parseList(e):Promise.reject(i)))):Promise.resolve(this._parseList(t))}_addPlaceholder(e){var t=document.createElement("div");"function"==typeof e&&(e=e(this.cell.getComponent(),this.listEl)),e&&(this._clearList(),e instanceof HTMLElement?t=e:(t.classList.add("tabulator-edit-list-placeholder"),t.innerHTML=e),this.listEl.appendChild(t),this._showList())}_ajaxRequest(e,t){return e=W(e,{},this.params.filterRemote?{term:t}:{}),fetch(e).then((e=>e.ok?e.json().catch((e=>(console.warn("List Ajax Load Error - Invalid JSON returned",e),Promise.reject(e)))):(console.error("List Ajax Load Error - Connection Error: "+e.status,e.statusText),Promise.reject(e)))).catch((e=>(console.error("List Ajax Load Error - Connection Error: ",e),Promise.reject(e))))}_uniqueColumnValues(e){var t,i={},s=this.table.getData(this.params.valuesLookup);return(t=e?this.table.columnManager.getColumnByField(e):this.cell.getColumn()._getSelf())?s.forEach((e=>{var s=t.getFieldValue(e);null!=s&&""!==s&&(i[s]=!0)})):(console.warn("unable to find matching column to create select lookup list:",e),i=[]),Object.keys(i)}_parseList(e){var t=[];return Array.isArray(e)||(e=Object.entries(e).map((([e,t])=>({label:t,value:e})))),e.forEach((e=>{"object"!=typeof e&&(e={label:e,value:e}),this._parseListItem(e,t,0)})),!this.currentItems.length&&this.params.freetext&&(this.input.value=this.initialValues,this.typing=!0,this.lastAction="typing"),this.data=t,t}_parseListItem(e,t,i){var s={};e.options?s=this._parseListGroup(e,i+1):(s={label:e.label,value:e.value,itemParams:e.itemParams,elementAttributes:e.elementAttributes,element:!1,selected:!1,visible:!0,level:i,original:e},this.initialValues&&this.initialValues.indexOf(e.value)>-1&&this._chooseItem(s,!0)),t.push(s)}_parseListGroup(e,t){var i={label:e.label,group:!0,itemParams:e.itemParams,elementAttributes:e.elementAttributes,element:!1,visible:!0,level:t,options:[],original:e};return e.options.forEach((e=>{this._parseListItem(e,i.options,t)})),i}_sortOptions(e){var t;return this.params.sort&&(t="function"==typeof this.params.sort?this.params.sort:this._defaultSortFunction.bind(this),this._sortGroup(t,e)),e}_sortGroup(e,t){t.sort(((t,i)=>e(t.label,i.label,t.value,i.value,t.original,i.original))),t.forEach((t=>{t.group&&this._sortGroup(e,t.options)}))}_defaultSortFunction(e,t){var i,s,o,n,r,a=0,l=/(\d+)|(\D+)/g,h=/\d/,d=0;if("desc"===this.params.sort&&([e,t]=[t,e]),e||0===e){if(t||0===t){if(isFinite(e)&&isFinite(t))return e-t;if((i=String(e).toLowerCase())===(s=String(t).toLowerCase()))return 0;if(!h.test(i)||!h.test(s))return i>s?1:-1;for(i=i.match(l),s=s.match(l),r=i.length>s.length?s.length:i.length;an?1:-1;return i.length>s.length}d=1}else d=t||0===t?-1:0;return d}_filterOptions(){var e=this.params.filterFunc||this._defaultFilterFunc,t=this.input.value;return t?(this.filtered=!0,this.data.forEach((i=>{this._filterItem(e,t,i)}))):this.filtered=!1,this.data}_filterItem(e,t,i){var s=!1;return i.group?(i.options.forEach((i=>{this._filterItem(e,t,i)&&(s=!0)})),i.visible=s):i.visible=e(t,i.label,i.value,i.original),i.visible}_defaultFilterFunc(e,t,i,s){return 
e=String(e).toLowerCase(),null!=t&&(String(t).toLowerCase().indexOf(e)>-1||String(i).toLowerCase().indexOf(e)>-1)}_clearList(){for(;this.listEl.firstChild;)this.listEl.removeChild(this.listEl.firstChild);this.displayItems=[]}_buildList(e){this._clearList(),e.forEach((e=>{this._buildItem(e)})),this.displayItems.length||this._addPlaceholder(this.params.placeholderEmpty)}_buildItem(e){var t,i=e.element;if(!this.filtered||e.visible){if(!i){if((i=document.createElement("div")).tabIndex=0,(t=this.params.itemFormatter?this.params.itemFormatter(e.label,e.value,e.original,i):e.label)instanceof HTMLElement?i.appendChild(t):i.innerHTML=t,e.group?i.classList.add("tabulator-edit-list-group"):i.classList.add("tabulator-edit-list-item"),i.classList.add("tabulator-edit-list-group-level-"+e.level),e.elementAttributes&&"object"==typeof e.elementAttributes)for(let t in e.elementAttributes)"+"==t.charAt(0)?(t=t.slice(1),i.setAttribute(t,this.input.getAttribute(t)+e.elementAttributes["+"+t])):i.setAttribute(t,e.elementAttributes[t]);e.group?i.addEventListener("click",this._groupClick.bind(this,e)):i.addEventListener("click",this._itemClick.bind(this,e)),i.addEventListener("mousedown",this._preventBlur.bind(this)),e.element=i}this._styleItem(e),this.listEl.appendChild(i),e.group?e.options.forEach((e=>{this._buildItem(e)})):this.displayItems.push(e)}}_showList(){var e=this.popup&&this.popup.isVisible();if(this.input.parentNode){if(this.params.autocomplete&&""===this.input.value&&!this.params.listOnEmpty)return void(this.popup&&this.popup.hide(!0));this._setListWidth(),this.popup||(this.popup=this.edit.popup(this.listEl)),this.popup.show(this.cell.getElement(),"bottom"),e||setTimeout((()=>{this.popup.hideOnBlur(this._resolveValue.bind(this,!0))}),10)}}_styleItem(e){e&&e.element&&(e.selected?e.element.classList.add("active"):e.element.classList.remove("active"))}_itemClick(e,t){t.stopPropagation(),this._chooseItem(e)}_groupClick(e,t){t.stopPropagation()}_cancel(){this.popup.hide(!0),this.actions.cancel()}_clearChoices(){this.typing=!0,this.currentItems.forEach((e=>{e.selected=!1,this._styleItem(e)})),this.currentItems=[],this.focusedItem=null}_chooseItem(e,t){var i;this.typing=!1,this.params.multiselect?((i=this.currentItems.indexOf(e))>-1?(this.currentItems.splice(i,1),e.selected=!1):(this.currentItems.push(e),e.selected=!0),this.input.value=this.currentItems.map((e=>e.label)).join(","),this._styleItem(e)):(this.currentItems=[e],e.selected=!0,this.input.value=e.label,this._styleItem(e),t||this._resolveValue()),this._focusItem(e)}_resolveValue(e){var t,i;if(this.popup&&this.popup.hide(!0),this.params.multiselect)t=this.currentItems.map((e=>e.value));else if(e&&this.params.autocomplete&&this.typing){if(!(this.params.freetext||this.params.allowEmpty&&""===this.input.value))return void this.actions.cancel();t=this.input.value}else t=this.currentItems[0]?this.currentItems[0].value:null==(i=Array.isArray(this.initialValues)?this.initialValues[0]:this.initialValues)||""===i?i:this.params.emptyValue;""===t&&(t=this.params.emptyValue),this.actions.success(t),this.isFilter&&(this.initialValues=t&&!Array.isArray(t)?[t]:t,this.currentItems=[])}};var oe={input:function(e,t,i,s,o){var n=e.getValue(),r=document.createElement("input");if(r.setAttribute("type",o.search?"search":"text"),r.style.padding="4px",r.style.width="100%",r.style.boxSizing="border-box",o.elementAttributes&&"object"==typeof o.elementAttributes)for(let e in 
o.elementAttributes)"+"==e.charAt(0)?(e=e.slice(1),r.setAttribute(e,r.getAttribute(e)+o.elementAttributes["+"+e])):r.setAttribute(e,o.elementAttributes[e]);function a(e){null==n&&""!==r.value||r.value!==n?i(r.value)&&(n=r.value):s()}return r.value=void 0!==n?n:"",t((function(){"cell"===e.getType()&&(r.focus({preventScroll:!0}),r.style.height="100%",o.selectContents&&r.select())})),r.addEventListener("change",a),r.addEventListener("blur",a),r.addEventListener("keydown",(function(e){switch(e.keyCode){case 13:a();break;case 27:s();break;case 35:case 36:e.stopPropagation()}})),o.mask&&ie(r,o),r},textarea:function(e,t,i,s,o){var n=e.getValue(),r=o.verticalNavigation||"hybrid",a=String(null!=n?n:""),l=document.createElement("textarea"),h=0;if(l.style.display="block",l.style.padding="2px",l.style.height="100%",l.style.width="100%",l.style.boxSizing="border-box",l.style.whiteSpace="pre-wrap",l.style.resize="none",o.elementAttributes&&"object"==typeof o.elementAttributes)for(let e in o.elementAttributes)"+"==e.charAt(0)?(e=e.slice(1),l.setAttribute(e,l.getAttribute(e)+o.elementAttributes["+"+e])):l.setAttribute(e,o.elementAttributes[e]);function d(t){null==n&&""!==l.value||l.value!==n?(i(l.value)&&(n=l.value),setTimeout((function(){e.getRow().normalizeHeight()}),300)):s()}return l.value=a,t((function(){"cell"===e.getType()&&(l.focus({preventScroll:!0}),l.style.height="100%",l.scrollHeight,l.style.height=l.scrollHeight+"px",e.getRow().normalizeHeight(),o.selectContents&&l.select())})),l.addEventListener("change",d),l.addEventListener("blur",d),l.addEventListener("keyup",(function(){l.style.height="";var t=l.scrollHeight;l.style.height=t+"px",t!=h&&(h=t,e.getRow().normalizeHeight())})),l.addEventListener("keydown",(function(e){switch(e.keyCode){case 13:e.shiftKey&&o.shiftEnterSubmit&&d();break;case 27:s();break;case 38:("editor"==r||"hybrid"==r&&l.selectionStart)&&(e.stopImmediatePropagation(),e.stopPropagation());break;case 40:("editor"==r||"hybrid"==r&&l.selectionStart!==l.value.length)&&(e.stopImmediatePropagation(),e.stopPropagation());break;case 35:case 36:e.stopPropagation()}})),o.mask&&ie(l,o),l},number:function(e,t,i,s,o){var n=e.getValue(),r=o.verticalNavigation||"editor",a=document.createElement("input");if(a.setAttribute("type","number"),void 0!==o.max&&a.setAttribute("max",o.max),void 0!==o.min&&a.setAttribute("min",o.min),void 0!==o.step&&a.setAttribute("step",o.step),a.style.padding="4px",a.style.width="100%",a.style.boxSizing="border-box",o.elementAttributes&&"object"==typeof o.elementAttributes)for(let e in o.elementAttributes)"+"==e.charAt(0)?(e=e.slice(1),a.setAttribute(e,a.getAttribute(e)+o.elementAttributes["+"+e])):a.setAttribute(e,o.elementAttributes[e]);a.value=n;var l=function(e){h()};function h(){var e=a.value;isNaN(e)||""===e||(e=Number(e)),e!==n?i(e)&&(n=e):s()}return t((function(){"cell"===e.getType()&&(a.removeEventListener("blur",l),a.focus({preventScroll:!0}),a.style.height="100%",a.addEventListener("blur",l),o.selectContents&&a.select())})),a.addEventListener("keydown",(function(e){switch(e.keyCode){case 13:h();break;case 27:s();break;case 38:case 40:"editor"==r&&(e.stopImmediatePropagation(),e.stopPropagation());break;case 35:case 36:e.stopPropagation()}})),o.mask&&ie(a,o),a},range:function(e,t,i,s,o){var n=e.getValue(),r=document.createElement("input");if(r.setAttribute("type","range"),void 0!==o.max&&r.setAttribute("max",o.max),void 0!==o.min&&r.setAttribute("min",o.min),void 
0!==o.step&&r.setAttribute("step",o.step),r.style.padding="4px",r.style.width="100%",r.style.boxSizing="border-box",o.elementAttributes&&"object"==typeof o.elementAttributes)for(let e in o.elementAttributes)"+"==e.charAt(0)?(e=e.slice(1),r.setAttribute(e,r.getAttribute(e)+o.elementAttributes["+"+e])):r.setAttribute(e,o.elementAttributes[e]);function a(){var e=r.value;isNaN(e)||""===e||(e=Number(e)),e!=n?i(e)&&(n=e):s()}return r.value=n,t((function(){"cell"===e.getType()&&(r.focus({preventScroll:!0}),r.style.height="100%")})),r.addEventListener("blur",(function(e){a()})),r.addEventListener("keydown",(function(e){switch(e.keyCode){case 13:a();break;case 27:s()}})),r},date:function(e,t,i,s,o){var n=o.format,r=o.verticalNavigation||"editor",a=n?window.DateTime||luxon.DateTime:null,l=e.getValue(),h=document.createElement("input");function d(e){return(a.isDateTime(e)?e:"iso"===n?a.fromISO(String(e)):a.fromFormat(String(e),n)).toFormat("yyyy-MM-dd")}if(h.type="date",h.style.padding="4px",h.style.width="100%",h.style.boxSizing="border-box",o.max&&h.setAttribute("max",n?d(o.max):o.max),o.min&&h.setAttribute("min",n?d(o.min):o.min),o.elementAttributes&&"object"==typeof o.elementAttributes)for(let e in o.elementAttributes)"+"==e.charAt(0)?(e=e.slice(1),h.setAttribute(e,h.getAttribute(e)+o.elementAttributes["+"+e])):h.setAttribute(e,o.elementAttributes[e]);function c(){var e,t=h.value;if(null==l&&""!==t||t!==l){if(t&&n)switch(e=a.fromFormat(String(t),"yyyy-MM-dd"),n){case!0:t=e;break;case"iso":t=e.toISO();break;default:t=e.toFormat(n)}i(t)&&(l=h.value)}else s()}return l=void 0!==l?l:"",n&&(a?l=d(l):console.error("Editor Error - 'date' editor 'format' param is dependant on luxon.js")),h.value=l,t((function(){"cell"===e.getType()&&(h.focus({preventScroll:!0}),h.style.height="100%",o.selectContents&&h.select())})),h.addEventListener("blur",(function(e){(e.relatedTarget||e.rangeParent||e.explicitOriginalTarget!==h)&&c()})),h.addEventListener("keydown",(function(e){switch(e.keyCode){case 13:c();break;case 27:s();break;case 35:case 36:e.stopPropagation();break;case 38:case 40:"editor"==r&&(e.stopImmediatePropagation(),e.stopPropagation())}})),h},time:function(e,t,i,s,o){var n,r=o.format,a=o.verticalNavigation||"editor",l=r?window.DateTime||luxon.DateTime:null,h=e.getValue(),d=document.createElement("input");if(d.type="time",d.style.padding="4px",d.style.width="100%",d.style.boxSizing="border-box",o.elementAttributes&&"object"==typeof o.elementAttributes)for(let e in o.elementAttributes)"+"==e.charAt(0)?(e=e.slice(1),d.setAttribute(e,d.getAttribute(e)+o.elementAttributes["+"+e])):d.setAttribute(e,o.elementAttributes[e]);function c(){var e,t=d.value;if(null==h&&""!==t||t!==h){if(t&&r)switch(e=l.fromFormat(String(t),"hh:mm"),r){case!0:t=e;break;case"iso":t=e.toISO();break;default:t=e.toFormat(r)}i(t)&&(h=d.value)}else s()}return h=void 0!==h?h:"",r&&(l?(n=l.isDateTime(h)?h:"iso"===r?l.fromISO(String(h)):l.fromFormat(String(h),r),h=n.toFormat("HH:mm")):console.error("Editor Error - 'date' editor 'format' param is dependant on luxon.js")),d.value=h,t((function(){"cell"==e.getType()&&(d.focus({preventScroll:!0}),d.style.height="100%",o.selectContents&&d.select())})),d.addEventListener("blur",(function(e){(e.relatedTarget||e.rangeParent||e.explicitOriginalTarget!==d)&&c()})),d.addEventListener("keydown",(function(e){switch(e.keyCode){case 13:c();break;case 27:s();break;case 35:case 36:e.stopPropagation();break;case 38:case 
40:"editor"==a&&(e.stopImmediatePropagation(),e.stopPropagation())}})),d},datetime:function(e,t,i,s,o){var n,r=o.format,a=o.verticalNavigation||"editor",l=r?window.DateTime||luxon.DateTime:null,h=e.getValue(),d=document.createElement("input");if(d.type="datetime-local",d.style.padding="4px",d.style.width="100%",d.style.boxSizing="border-box",o.elementAttributes&&"object"==typeof o.elementAttributes)for(let e in o.elementAttributes)"+"==e.charAt(0)?(e=e.slice(1),d.setAttribute(e,d.getAttribute(e)+o.elementAttributes["+"+e])):d.setAttribute(e,o.elementAttributes[e]);function c(){var e,t=d.value;if(null==h&&""!==t||t!==h){if(t&&r)switch(e=l.fromISO(String(t)),r){case!0:t=e;break;case"iso":t=e.toISO();break;default:t=e.toFormat(r)}i(t)&&(h=d.value)}else s()}return h=void 0!==h?h:"",r&&(l?(n=l.isDateTime(h)?h:"iso"===r?l.fromISO(String(h)):l.fromFormat(String(h),r),h=n.toFormat("yyyy-MM-dd")+"T"+n.toFormat("HH:mm")):console.error("Editor Error - 'date' editor 'format' param is dependant on luxon.js")),d.value=h,t((function(){"cell"===e.getType()&&(d.focus({preventScroll:!0}),d.style.height="100%",o.selectContents&&d.select())})),d.addEventListener("blur",(function(e){(e.relatedTarget||e.rangeParent||e.explicitOriginalTarget!==d)&&c()})),d.addEventListener("keydown",(function(e){switch(e.keyCode){case 13:c();break;case 27:s();break;case 35:case 36:e.stopPropagation();break;case 38:case 40:"editor"==a&&(e.stopImmediatePropagation(),e.stopPropagation())}})),d},list:function(e,t,i,s,o){return new se(this,e,t,i,s,o).input},star:function(e,t,i,s,o){var n=this,r=e.getElement(),a=e.getValue(),l=r.getElementsByTagName("svg").length||5,h=r.getElementsByTagName("svg")[0]?r.getElementsByTagName("svg")[0].getAttribute("width"):14,d=[],c=document.createElement("div"),u=document.createElementNS("http://www.w3.org/2000/svg","svg");function m(e){d.forEach((function(t,i){i'):("ie"==n.table.browser?t.setAttribute("class","tabulator-star-inactive"):t.classList.replace("tabulator-star-active","tabulator-star-inactive"),t.innerHTML='')}))}function p(e){var t=document.createElement("span"),s=u.cloneNode(!0);d.push(s),t.addEventListener("mouseenter",(function(t){t.stopPropagation(),t.stopImmediatePropagation(),m(e)})),t.addEventListener("mousemove",(function(e){e.stopPropagation(),e.stopImmediatePropagation()})),t.addEventListener("click",(function(t){t.stopPropagation(),t.stopImmediatePropagation(),i(e),r.blur()})),t.appendChild(s),c.appendChild(t)}function g(e){a=e,m(e)}if(r.style.whiteSpace="nowrap",r.style.overflow="hidden",r.style.textOverflow="ellipsis",c.style.verticalAlign="middle",c.style.display="inline-block",c.style.padding="4px",u.setAttribute("width",h),u.setAttribute("height",h),u.setAttribute("viewBox","0 0 512 512"),u.setAttribute("xml:space","preserve"),u.style.padding="0 1px",o.elementAttributes&&"object"==typeof o.elementAttributes)for(let e in o.elementAttributes)"+"==e.charAt(0)?(e=e.slice(1),c.setAttribute(e,c.getAttribute(e)+o.elementAttributes["+"+e])):c.setAttribute(e,o.elementAttributes[e]);for(var b=1;b<=l;b++)p(b);return m(a=Math.min(parseInt(a),l)),c.addEventListener("mousemove",(function(e){m(0)})),c.addEventListener("click",(function(e){i(0)})),r.addEventListener("blur",(function(e){s()})),r.addEventListener("keydown",(function(e){switch(e.keyCode){case 39:g(a+1);break;case 37:g(a-1);break;case 13:i(a);break;case 27:s()}})),c},progress:function(e,t,i,s,o){var n,r,a=e.getElement(),l=void 
0===o.max?a.getElementsByTagName("div")[0]&&a.getElementsByTagName("div")[0].getAttribute("max")||100:o.max,h=void 0===o.min?a.getElementsByTagName("div")[0]&&a.getElementsByTagName("div")[0].getAttribute("min")||0:o.min,d=(l-h)/100,c=e.getValue()||0,u=document.createElement("div"),m=document.createElement("div");function p(){var e=window.getComputedStyle(a,null),t=d*Math.round(m.offsetWidth/((a.clientWidth-parseInt(e.getPropertyValue("padding-left"))-parseInt(e.getPropertyValue("padding-right")))/100))+h;i(t),a.setAttribute("aria-valuenow",t),a.setAttribute("aria-label",c)}if(u.style.position="absolute",u.style.right="0",u.style.top="0",u.style.bottom="0",u.style.width="5px",u.classList.add("tabulator-progress-handle"),m.style.display="inline-block",m.style.position="relative",m.style.height="100%",m.style.backgroundColor="#488CE9",m.style.maxWidth="100%",m.style.minWidth="0%",o.elementAttributes&&"object"==typeof o.elementAttributes)for(let e in o.elementAttributes)"+"==e.charAt(0)?(e=e.slice(1),m.setAttribute(e,m.getAttribute(e)+o.elementAttributes["+"+e])):m.setAttribute(e,o.elementAttributes[e]);return a.style.padding="4px 4px",c=Math.min(parseFloat(c),l),c=Math.max(parseFloat(c),h),c=Math.round((c-h)/d),m.style.width=c+"%",a.setAttribute("aria-valuemin",h),a.setAttribute("aria-valuemax",l),m.appendChild(u),u.addEventListener("mousedown",(function(e){n=e.screenX,r=m.offsetWidth})),u.addEventListener("mouseover",(function(){u.style.cursor="ew-resize"})),a.addEventListener("mousemove",(function(e){n&&(m.style.width=r+e.screenX-n+"px")})),a.addEventListener("mouseup",(function(e){n&&(e.stopPropagation(),e.stopImmediatePropagation(),n=!1,r=!1,p())})),a.addEventListener("keydown",(function(e){switch(e.keyCode){case 39:e.preventDefault(),m.style.width=m.clientWidth+a.clientWidth/100+"px";break;case 37:e.preventDefault(),m.style.width=m.clientWidth-a.clientWidth/100+"px";break;case 9:case 13:p();break;case 27:s()}})),a.addEventListener("blur",(function(){s()})),m},tickCross:function(e,t,i,s,o){var n=e.getValue(),r=document.createElement("input"),a=o.tristate,l=void 0===o.indeterminateValue?null:o.indeterminateValue,h=!1,d=Object.keys(o).includes("trueValue"),c=Object.keys(o).includes("falseValue");if(r.setAttribute("type","checkbox"),r.style.marginTop="5px",r.style.boxSizing="border-box",o.elementAttributes&&"object"==typeof o.elementAttributes)for(let e in o.elementAttributes)"+"==e.charAt(0)?(e=e.slice(1),r.setAttribute(e,r.getAttribute(e)+o.elementAttributes["+"+e])):r.setAttribute(e,o.elementAttributes[e]);function u(e){var t=r.checked;return d&&t?t=o.trueValue:c&&!t&&(t=o.falseValue),a?e?h?l:t:r.checked&&!h?(r.checked=!1,r.indeterminate=!0,h=!0,l):(h=!1,t):t}return r.value=n,!a||void 0!==n&&n!==l&&""!==n||(h=!0,r.indeterminate=!0),"firefox"!=this.table.browser&&"safari"!=this.table.browser&&t((function(){"cell"===e.getType()&&r.focus({preventScroll:!0})})),r.checked=d?n===o.trueValue:!0===n||"true"===n||"True"===n||1===n,r.addEventListener("change",(function(e){i(u())})),r.addEventListener("blur",(function(e){i(u(!0))})),r.addEventListener("keydown",(function(e){13==e.keyCode&&i(u()),27==e.keyCode&&s()})),r}};class ne extends M{static moduleName="edit";static 
editors=oe;constructor(e){super(e),this.currentCell=!1,this.mouseClick=!1,this.recursionBlock=!1,this.invalidEdit=!1,this.editedCells=[],this.convertEmptyValues=!1,this.editors=ne.editors,this.registerTableOption("editTriggerEvent","focus"),this.registerTableOption("editorEmptyValue"),this.registerTableOption("editorEmptyValueFunc",this.emptyValueCheck.bind(this)),this.registerColumnOption("editable"),this.registerColumnOption("editor"),this.registerColumnOption("editorParams"),this.registerColumnOption("editorEmptyValue"),this.registerColumnOption("editorEmptyValueFunc"),this.registerColumnOption("cellEditing"),this.registerColumnOption("cellEdited"),this.registerColumnOption("cellEditCancelled"),this.registerTableFunction("getEditedCells",this.getEditedCells.bind(this)),this.registerTableFunction("clearCellEdited",this.clearCellEdited.bind(this)),this.registerTableFunction("navigatePrev",this.navigatePrev.bind(this)),this.registerTableFunction("navigateNext",this.navigateNext.bind(this)),this.registerTableFunction("navigateLeft",this.navigateLeft.bind(this)),this.registerTableFunction("navigateRight",this.navigateRight.bind(this)),this.registerTableFunction("navigateUp",this.navigateUp.bind(this)),this.registerTableFunction("navigateDown",this.navigateDown.bind(this)),this.registerComponentFunction("cell","isEdited",this.cellIsEdited.bind(this)),this.registerComponentFunction("cell","clearEdited",this.clearEdited.bind(this)),this.registerComponentFunction("cell","edit",this.editCell.bind(this)),this.registerComponentFunction("cell","cancelEdit",this.cellCancelEdit.bind(this)),this.registerComponentFunction("cell","navigatePrev",this.navigatePrev.bind(this)),this.registerComponentFunction("cell","navigateNext",this.navigateNext.bind(this)),this.registerComponentFunction("cell","navigateLeft",this.navigateLeft.bind(this)),this.registerComponentFunction("cell","navigateRight",this.navigateRight.bind(this)),this.registerComponentFunction("cell","navigateUp",this.navigateUp.bind(this)),this.registerComponentFunction("cell","navigateDown",this.navigateDown.bind(this))}initialize(){this.subscribe("cell-init",this.bindEditor.bind(this)),this.subscribe("cell-delete",this.clearEdited.bind(this)),this.subscribe("cell-value-changed",this.updateCellClass.bind(this)),this.subscribe("column-layout",this.initializeColumnCheck.bind(this)),this.subscribe("column-delete",this.columnDeleteCheck.bind(this)),this.subscribe("row-deleting",this.rowDeleteCheck.bind(this)),this.subscribe("row-layout",this.rowEditableCheck.bind(this)),this.subscribe("data-refreshing",this.cancelEdit.bind(this)),this.subscribe("clipboard-paste",this.pasteBlocker.bind(this)),this.subscribe("keybinding-nav-prev",this.navigatePrev.bind(this,void 0)),this.subscribe("keybinding-nav-next",this.keybindingNavigateNext.bind(this)),this.subscribe("keybinding-nav-up",this.navigateUp.bind(this,void 0)),this.subscribe("keybinding-nav-down",this.navigateDown.bind(this,void 0)),Object.keys(this.table.options).includes("editorEmptyValue")&&(this.convertEmptyValues=!0)}pasteBlocker(e){if(this.currentCell)return!0}keybindingNavigateNext(e){var t=this.currentCell,i=this.options("tabEndNewRow");t&&(this.navigateNext(t,e)||i&&(t.getElement().firstChild.blur(),this.invalidEdit||(i=!0===i?this.table.addRow({}):"function"==typeof 
i?this.table.addRow(i(t.row.getComponent())):this.table.addRow(Object.assign({},i))).then((()=>{setTimeout((()=>{t.getComponent().navigateNext()}))}))))}cellIsEdited(e){return!!e.modules.edit&&e.modules.edit.edited}cellCancelEdit(e){e===this.currentCell?this.table.modules.edit.cancelEdit():console.warn("Cancel Editor Error - This cell is not currently being edited ")}updateCellClass(e){this.allowEdit(e)?e.getElement().classList.add("tabulator-editable"):e.getElement().classList.remove("tabulator-editable")}clearCellEdited(e){e||(e=this.table.modules.edit.getEditedCells()),Array.isArray(e)||(e=[e]),e.forEach((e=>{this.table.modules.edit.clearEdited(e._getSelf())}))}navigatePrev(e=this.currentCell,t){var i,s;if(e){if(t&&t.preventDefault(),i=this.navigateLeft())return!0;if((s=this.table.rowManager.prevDisplayRow(e.row,!0))&&(i=this.findPrevEditableCell(s,s.cells.length)))return i.getComponent().edit(),!0}return!1}navigateNext(e=this.currentCell,t){var i,s;if(e){if(t&&t.preventDefault(),i=this.navigateRight())return!0;if((s=this.table.rowManager.nextDisplayRow(e.row,!0))&&(i=this.findNextEditableCell(s,-1)))return i.getComponent().edit(),!0}return!1}navigateLeft(e=this.currentCell,t){var i,s;return!!(e&&(t&&t.preventDefault(),i=e.getIndex(),s=this.findPrevEditableCell(e.row,i)))&&(s.getComponent().edit(),!0)}navigateRight(e=this.currentCell,t){var i,s;return!!(e&&(t&&t.preventDefault(),i=e.getIndex(),s=this.findNextEditableCell(e.row,i)))&&(s.getComponent().edit(),!0)}navigateUp(e=this.currentCell,t){var i,s;return!!(e&&(t&&t.preventDefault(),i=e.getIndex(),s=this.table.rowManager.prevDisplayRow(e.row,!0)))&&(s.cells[i].getComponent().edit(),!0)}navigateDown(e=this.currentCell,t){var i,s;return!!(e&&(t&&t.preventDefault(),i=e.getIndex(),s=this.table.rowManager.nextDisplayRow(e.row,!0)))&&(s.cells[i].getComponent().edit(),!0)}findNextEditableCell(e,t){var i=!1;if(t0)for(var s=t-1;s>=0;s--){let t=e.cells[s];if(t.column.modules.edit&&a.elVisible(t.getElement())){if(this.allowEdit(t)){i=t;break}}}return i}initializeColumnCheck(e){void 0!==e.definition.editor&&this.initializeColumn(e)}columnDeleteCheck(e){this.currentCell&&this.currentCell.column===e&&this.cancelEdit()}rowDeleteCheck(e){this.currentCell&&this.currentCell.row===e&&this.cancelEdit()}rowEditableCheck(e){e.getCells().forEach((e=>{e.column.modules.edit&&"function"==typeof e.column.modules.edit.check&&this.updateCellClass(e)}))}initializeColumn(e){var t=Object.keys(e.definition).includes("editorEmptyValue"),i={editor:!1,blocked:!1,check:e.definition.editable,params:e.definition.editorParams||{},convertEmptyValues:t,editorEmptyValue:e.definition.editorEmptyValue,editorEmptyValueFunc:e.definition.editorEmptyValueFunc};switch(typeof e.definition.editor){case"string":this.editors[e.definition.editor]?i.editor=this.editors[e.definition.editor]:console.warn("Editor Error - No such editor found: ",e.definition.editor);break;case"function":i.editor=e.definition.editor;break;case"boolean":!0===e.definition.editor&&("function"!=typeof e.definition.formatter?this.editors[e.definition.formatter]?i.editor=this.editors[e.definition.formatter]:i.editor=this.editors.input:console.warn("Editor Error - Cannot auto lookup editor for a custom formatter: ",e.definition.formatter))}i.editor&&(e.modules.edit=i)}getCurrentCell(){return!!this.currentCell&&this.currentCell.getComponent()}clearEditor(e){var 
t,i=this.currentCell;if(this.invalidEdit=!1,i){for(this.currentCell=!1,t=i.getElement(),this.dispatch("edit-editor-clear",i,e),t.classList.remove("tabulator-editing");t.firstChild;)t.removeChild(t.firstChild);i.row.getElement().classList.remove("tabulator-editing"),i.table.element.classList.remove("tabulator-editing")}}cancelEdit(){if(this.currentCell){var e=this.currentCell,t=this.currentCell.getComponent();this.clearEditor(!0),e.setValueActual(e.getValue()),e.cellRendered(),("textarea"==e.column.definition.editor||e.column.definition.variableHeight)&&e.row.normalizeHeight(!0),e.column.definition.cellEditCancelled&&e.column.definition.cellEditCancelled.call(this.table,t),this.dispatch("edit-cancelled",e),this.dispatchExternal("cellEditCancelled",t)}}bindEditor(e){if(e.column.modules.edit){var t=this,i=e.getElement(!0);this.updateCellClass(e),i.setAttribute("tabindex",0),i.addEventListener("mousedown",(function(e){2===e.button?e.preventDefault():t.mouseClick=!0})),"dblclick"===this.options("editTriggerEvent")&&i.addEventListener("dblclick",(function(s){i.classList.contains("tabulator-editing")||(i.focus({preventScroll:!0}),t.edit(e,s,!1))})),"focus"!==this.options("editTriggerEvent")&&"click"!==this.options("editTriggerEvent")||i.addEventListener("click",(function(s){i.classList.contains("tabulator-editing")||(i.focus({preventScroll:!0}),t.edit(e,s,!1))})),"focus"===this.options("editTriggerEvent")&&i.addEventListener("focus",(function(i){t.recursionBlock||t.edit(e,i,!1)}))}}focusCellNoEvent(e,t){this.recursionBlock=!0,t&&"ie"===this.table.browser||e.getElement().focus({preventScroll:!0}),this.recursionBlock=!1}editCell(e,t){this.focusCellNoEvent(e),this.edit(e,!1,t)}focusScrollAdjust(e){if("virtual"==this.table.rowManager.getRenderMode()){var t=this.table.rowManager.element.scrollTop,i=this.table.rowManager.element.clientHeight+this.table.rowManager.element.scrollTop,s=e.row.getElement();s.offsetTopi&&(this.table.rowManager.element.scrollTop+=s.offsetTop+s.offsetHeight-i);var o=this.table.rowManager.element.scrollLeft,n=this.table.rowManager.element.clientWidth+this.table.rowManager.element.scrollLeft,r=e.getElement();this.table.modExists("frozenColumns")&&(o+=parseInt(this.table.modules.frozenColumns.leftMargin||0),n-=parseInt(this.table.modules.frozenColumns.rightMargin||0)),"virtual"===this.table.options.renderHorizontal&&(o-=parseInt(this.table.columnManager.renderer.vDomPadLeft),n-=parseInt(this.table.columnManager.renderer.vDomPadLeft)),r.offsetLeftn&&(this.table.rowManager.element.scrollLeft+=r.offsetLeft+r.offsetWidth-n)}}allowEdit(e){var t=!!e.column.modules.edit;if(e.column.modules.edit)switch(typeof e.column.modules.edit.check){case"function":e.row.initialized&&(t=e.column.modules.edit.check(e.getComponent()));break;case"string":t=!!e.row.data[e.column.modules.edit.check];break;case"boolean":t=e.column.modules.edit.check}return t}edit(e,t,i){var s,o,n,r=this,a=function(){},l=e.getElement(),h=!1;if(!this.currentCell){if(e.column.modules.edit.blocked)return this.mouseClick=!1,this.blur(l),!1;if(t&&t.stopPropagation(),this.allowEdit(e)||i){if(r.cancelEdit(),r.currentCell=e,this.focusScrollAdjust(e),o=e.getComponent(),this.mouseClick&&(this.mouseClick=!1,e.column.definition.cellClick&&e.column.definition.cellClick.call(this.table,t,o)),e.column.definition.cellEditing&&e.column.definition.cellEditing.call(this.table,o),this.dispatch("cell-editing",e),this.dispatchExternal("cellEditing",o),n="function"==typeof 
e.column.modules.edit.params?e.column.modules.edit.params(o):e.column.modules.edit.params,s=e.column.modules.edit.editor.call(r,o,(function(e){a=e}),(function(t){if(r.currentCell===e&&!h){var i=r.chain("edit-success",[e,t],!0,!0);return!0===i||"highlight"===r.table.options.validationMode?(h=!0,r.clearEditor(),e.modules.edit||(e.modules.edit={}),e.modules.edit.edited=!0,-1==r.editedCells.indexOf(e)&&r.editedCells.push(e),t=r.transformEmptyValues(t,e),e.setValue(t,!0),!0===i):(h=!0,r.invalidEdit=!0,r.focusCellNoEvent(e,!0),a(),setTimeout((()=>{h=!1}),10),!1)}}),(function(){r.currentCell!==e||h||r.cancelEdit()}),n),!this.currentCell||!1===s)return this.blur(l),!1;if(!(s instanceof Node))return console.warn("Edit Error - Editor should return an instance of Node, the editor returned:",s),this.blur(l),!1;for(l.classList.add("tabulator-editing"),e.row.getElement().classList.add("tabulator-editing"),e.table.element.classList.add("tabulator-editing");l.firstChild;)l.removeChild(l.firstChild);l.appendChild(s),a();for(var d=l.children,c=0;c{e.push(t.getComponent())})),e}clearEdited(e){var t;e.modules.edit&&e.modules.edit.edited&&(e.modules.edit.edited=!1,this.dispatch("edit-edited-clear",e)),(t=this.editedCells.indexOf(e))>-1&&this.editedCells.splice(t,1)}}class re{constructor(e,t,i,s){this.type=e,this.columns=t,this.component=i||!1,this.indent=s||0}}class ae{constructor(e,t,i,s,o){this.value=e,this.component=t||!1,this.width=i,this.height=s,this.depth=o}}var le={},he={visible:function(){return this.rowManager.getVisibleRows(!1,!0)},all:function(){return this.rowManager.rows},selected:function(){return this.modules.selectRow.selectedRows},active:function(){return this.options.pagination?this.rowManager.getDisplayRows(this.rowManager.displayRows.length-2):this.rowManager.getDisplayRows()}};class de extends M{static moduleName="export";static columnLookups=le;static rowLookups=he;constructor(e){super(e),this.config={},this.cloneTableStyle=!0,this.colVisProp="",this.colVisPropAttach="",this.registerTableOption("htmlOutputConfig",!1),this.registerColumnOption("htmlOutput"),this.registerColumnOption("titleHtmlOutput")}initialize(){this.registerTableFunction("getHtml",this.getHtml.bind(this))}generateExportList(e,t,i,s){var o,n,r,a;return this.cloneTableStyle=t,this.config=e||{},this.colVisProp=s,this.colVisPropAttach=this.colVisProp.charAt(0).toUpperCase()+this.colVisProp.slice(1),(a=de.columnLookups[i])&&(r=(r=a.call(this.table)).filter((e=>this.columnVisCheck(e)))),o=!1!==this.config.columnHeaders?this.headersToExportRows(this.generateColumnGroupHeaders(r)):[],r&&(r=r.map((e=>e.getComponent()))),n=this.bodyToExportRows(this.rowLookup(i),r),o.concat(n)}generateTable(e,t,i,s){var o=this.generateExportList(e,t,i,s);return this.generateTableElement(o)}rowLookup(e){var t,i=[];return"function"==typeof e?e.call(this.table).forEach((e=>{(e=this.table.rowManager.findRow(e))&&i.push(e)})):(t=de.rowLookups[e]||de.rowLookups.active,i=t.call(this.table)),Object.assign([],i)}generateColumnGroupHeaders(e){var t=[];return e||(e=!1!==this.config.columnGroups?this.table.columnManager.columns:this.table.columnManager.columnsByIndex),e.forEach((e=>{var i=this.processColumnGroup(e);i&&t.push(i)})),t}processColumnGroup(e){var t=e.columns,i=0,s={title:e.definition["title"+this.colVisPropAttach]||e.definition.title,column:e,depth:1};if(t.length){if(s.subGroups=[],s.width=0,t.forEach((e=>{var 
t=this.processColumnGroup(e);t&&(s.width+=t.width,s.subGroups.push(t),t.depth>i&&(i=t.depth))})),s.depth+=i,!s.width)return!1}else{if(!this.columnVisCheck(e))return!1;s.width=1}return s}columnVisCheck(e){var t=e.definition[this.colVisProp];return(!1!==this.config.rowHeaders||!e.isRowHeader)&&("function"==typeof t&&(t=t.call(this.table,e.getComponent())),!1===t||!0===t?t:e.visible&&e.field)}headersToExportRows(e){var t=[],i=0,s=[];function o(e,s){var n=i-s;if(void 0===t[s]&&(t[s]=[]),e.height=e.subGroups?1:n-e.depth+1,t[s].push(e),e.height>1)for(let i=1;i1)for(let i=1;ii&&(i=e.depth)})),e.forEach((function(e){o(e,0)})),t.forEach((e=>{var t=[];e.forEach((e=>{if(e){let i=void 0===e.title?"":e.title;t.push(new ae(i,e.column.getComponent(),e.width,e.height,e.depth))}else t.push(null)})),s.push(new re("header",t))})),s}bodyToExportRows(e,t=[]){var i=[];return 0===t.length&&this.table.columnManager.columnsByIndex.forEach((e=>{this.columnVisCheck(e)&&t.push(e.getComponent())})),!1!==this.config.columnCalcs&&this.table.modExists("columnCalcs")&&(this.table.modules.columnCalcs.topInitialized&&e.unshift(this.table.modules.columnCalcs.topRow),this.table.modules.columnCalcs.botInitialized&&e.push(this.table.modules.columnCalcs.botRow)),(e=e.filter((e=>{switch(e.type){case"group":return!1!==this.config.rowGroups;case"calc":return!1!==this.config.columnCalcs;case"row":return!(this.table.options.dataTree&&!1===this.config.dataTree&&e.modules.dataTree.parent)}return!0}))).forEach(((e,s)=>{var o=e.getData(this.colVisProp),n=[],r=0;switch(e.type){case"group":r=e.level,n.push(new ae(e.key,e.getComponent(),t.length,1));break;case"calc":case"row":t.forEach((e=>{n.push(new ae(e._column.getFieldValue(o),e,1,1))})),this.table.options.dataTree&&!1!==this.config.dataTree&&(r=e.modules.dataTree.index)}i.push(new re(e.type,n,e.getComponent(),r))})),i}generateTableElement(e){var t=document.createElement("table"),i=document.createElement("thead"),s=document.createElement("tbody"),o=this.lookupTableStyles(),n=this.table.options["rowFormatter"+this.colVisPropAttach],r={};return r.rowFormatter=null!==n?n:this.table.options.rowFormatter,this.table.options.dataTree&&!1!==this.config.dataTree&&this.table.modExists("columnCalcs")&&(r.treeElementField=this.table.modules.dataTree.elementField),r.groupHeader=this.table.options["groupHeader"+this.colVisPropAttach],r.groupHeader&&!Array.isArray(r.groupHeader)&&(r.groupHeader=[r.groupHeader]),t.classList.add("tabulator-print-table"),this.mapElementStyles(this.table.columnManager.getHeadersElement(),i,["border-top","border-left","border-right","border-bottom","background-color","color","font-weight","font-family","font-size"]),e.length>1e3&&console.warn("It may take a long time to render an HTML table with more than 1000 rows"),e.forEach(((e,t)=>{let n;switch(e.type){case"header":i.appendChild(this.generateHeaderElement(e,r,o));break;case"group":s.appendChild(this.generateGroupElement(e,r,o));break;case"calc":s.appendChild(this.generateCalcElement(e,r,o));break;case"row":n=this.generateRowElement(e,r,o),this.mapElementStyles(t%2&&o.evenRow?o.evenRow:o.oddRow,n,["border-top","border-left","border-right","border-bottom","color","font-weight","font-family","font-size","background-color"]),s.appendChild(n)}})),i.innerHTML&&t.appendChild(i),t.appendChild(s),this.mapElementStyles(this.table.element,t,["border-top","border-left","border-right","border-bottom"]),t}lookupTableStyles(){var e={};return 
this.cloneTableStyle&&window.getComputedStyle&&(e.oddRow=this.table.element.querySelector(".tabulator-row-odd:not(.tabulator-group):not(.tabulator-calcs)"),e.evenRow=this.table.element.querySelector(".tabulator-row-even:not(.tabulator-group):not(.tabulator-calcs)"),e.calcRow=this.table.element.querySelector(".tabulator-row.tabulator-calcs"),e.firstRow=this.table.element.querySelector(".tabulator-row:not(.tabulator-group):not(.tabulator-calcs)"),e.firstGroup=this.table.element.getElementsByClassName("tabulator-group")[0],e.firstRow&&(e.styleCells=e.firstRow.getElementsByClassName("tabulator-cell"),e.styleRowHeader=e.firstRow.getElementsByClassName("tabulator-row-header")[0],e.firstCell=e.styleCells[0],e.lastCell=e.styleCells[e.styleCells.length-1])),e}generateHeaderElement(e,t,i){var s=document.createElement("tr");return e.columns.forEach((e=>{if(e){var t=document.createElement("th"),i=e.component._column.definition.cssClass?e.component._column.definition.cssClass.split(" "):[];t.colSpan=e.width,t.rowSpan=e.height,t.innerHTML=e.value,this.cloneTableStyle&&(t.style.boxSizing="border-box"),i.forEach((function(e){t.classList.add(e)})),this.mapElementStyles(e.component.getElement(),t,["text-align","border-left","border-right","background-color","color","font-weight","font-family","font-size"]),this.mapElementStyles(e.component._column.contentElement,t,["padding-top","padding-left","padding-right","padding-bottom"]),e.component._column.visible?this.mapElementStyles(e.component.getElement(),t,["width"]):e.component._column.definition.width&&(t.style.width=e.component._column.definition.width+"px"),e.component._column.parent&&e.component._column.parent.isGroup?this.mapElementStyles(e.component._column.parent.groupElement,t,["border-top"]):this.mapElementStyles(e.component.getElement(),t,["border-top"]),e.component._column.isGroup?this.mapElementStyles(e.component.getElement(),t,["border-bottom"]):this.mapElementStyles(this.table.columnManager.getElement(),t,["border-bottom"]),s.appendChild(t)}})),s}generateGroupElement(e,t,i){var s=document.createElement("tr"),o=document.createElement("td"),n=e.columns[0];return s.classList.add("tabulator-print-table-row"),t.groupHeader&&t.groupHeader[e.indent]?n.value=t.groupHeader[e.indent](n.value,e.component._group.getRowCount(),e.component._group.getData(),e.component):!1!==t.groupHeader&&(n.value=e.component._group.generator(n.value,e.component._group.getRowCount(),e.component._group.getData(),e.component)),o.colSpan=n.width,o.innerHTML=n.value,s.classList.add("tabulator-print-table-group"),s.classList.add("tabulator-group-level-"+e.indent),n.component.isVisible()&&s.classList.add("tabulator-group-visible"),this.mapElementStyles(i.firstGroup,s,["border-top","border-left","border-right","border-bottom","color","font-weight","font-family","font-size","background-color"]),this.mapElementStyles(i.firstGroup,o,["padding-top","padding-left","padding-right","padding-bottom"]),s.appendChild(o),s}generateCalcElement(e,t,i){var s=this.generateRowElement(e,t,i);return s.classList.add("tabulator-print-table-calcs"),this.mapElementStyles(i.calcRow,s,["border-top","border-left","border-right","border-bottom","color","font-weight","font-family","font-size","background-color"]),s}generateRowElement(e,t,i){var s=document.createElement("tr");if(s.classList.add("tabulator-print-table-row"),e.columns.forEach(((o,n)=>{if(o){var 
r,a,l=document.createElement("td"),h=o.component._column,d=this.table,c=d.columnManager.findColumnIndex(h),u=o.value,m={modules:{},getValue:function(){return u},getField:function(){return h.definition.field},getElement:function(){return l},getType:function(){return"cell"},getColumn:function(){return h.getComponent()},getData:function(){return e.component.getData()},getRow:function(){return e.component},getTable:function(){return d},getComponent:function(){return m},column:h};if((h.definition.cssClass?h.definition.cssClass.split(" "):[]).forEach((function(e){l.classList.add(e)})),this.table.modExists("format")&&!1!==this.config.formatCells)u=this.table.modules.format.formatExportValue(m,this.colVisProp);else switch(typeof u){case"object":u=null!==u?JSON.stringify(u):"";break;case"undefined":u=""}u instanceof Node?l.appendChild(u):l.innerHTML=u,a=["padding-top","padding-left","padding-right","padding-bottom","border-top","border-left","border-right","border-bottom","color","font-weight","font-family","font-size","text-align"],h.isRowHeader?(r=i.styleRowHeader,a.push("background-color")):r=i.styleCells&&i.styleCells[c]?i.styleCells[c]:i.firstCell,r&&(this.mapElementStyles(r,l,a),h.definition.align&&(l.style.textAlign=h.definition.align)),this.table.options.dataTree&&!1!==this.config.dataTree&&(t.treeElementField&&t.treeElementField==h.field||!t.treeElementField&&0==n)&&(e.component._row.modules.dataTree.controlEl&&l.insertBefore(e.component._row.modules.dataTree.controlEl.cloneNode(!0),l.firstChild),e.component._row.modules.dataTree.branchEl&&l.insertBefore(e.component._row.modules.dataTree.branchEl.cloneNode(!0),l.firstChild)),s.appendChild(l),m.modules.format&&m.modules.format.renderedCallback&&m.modules.format.renderedCallback()}})),t.rowFormatter&&"row"===e.type&&!1!==this.config.formatCells){Object.assign(e.component).getElement=function(){return s},t.rowFormatter(e.component)}return s}generateHTMLTable(e){var t=document.createElement("div");return t.appendChild(this.generateTableElement(e)),t.innerHTML}getHtml(e,t,i,s){var o=this.generateExportList(i||this.table.options.htmlOutputConfig,t,e,s||"htmlOutput");return this.generateHTMLTable(o)}mapElementStyles(e,t,i){if(this.cloneTableStyle&&e&&t){var s={"background-color":"backgroundColor",color:"fontColor",width:"width","font-weight":"fontWeight","font-family":"fontFamily","font-size":"fontSize","text-align":"textAlign","border-top":"borderTop","border-left":"borderLeft","border-right":"borderRight","border-bottom":"borderBottom","padding-top":"paddingTop","padding-left":"paddingLeft","padding-right":"paddingRight","padding-bottom":"paddingBottom"};if(window.getComputedStyle){var o=window.getComputedStyle(e);i.forEach((function(e){t.style[s[e]]||(t.style[s[e]]=o.getPropertyValue(e))}))}}}}var ce={"=":function(e,t,i,s){return t==e},"<":function(e,t,i,s){return t":function(e,t,i,s){return t>e},">=":function(e,t,i,s){return t>=e},"!=":function(e,t,i,s){return t!=e},regex:function(e,t,i,s){return"string"==typeof e&&(e=new RegExp(e)),e.test(t)},like:function(e,t,i,s){return null==e?t===e:null!=t&&String(t).toLowerCase().indexOf(e.toLowerCase())>-1},keywords:function(e,t,i,s){var o=e.toLowerCase().split(void 0===s.separator?" 
":s.separator),n=String(null==t?"":t).toLowerCase(),r=[];return o.forEach((e=>{n.includes(e)&&r.push(!0)})),s.matchAll?r.length===o.length:!!r.length},starts:function(e,t,i,s){return null==e?t===e:null!=t&&String(t).toLowerCase().startsWith(e.toLowerCase())},ends:function(e,t,i,s){return null==e?t===e:null!=t&&String(t).toLowerCase().endsWith(e.toLowerCase())},in:function(e,t,i,s){return Array.isArray(e)?!e.length||e.indexOf(t)>-1:(console.warn("Filter Error - filter value is not an array:",e),!1)}};class ue extends M{static moduleName="filter";static filters=ce;constructor(e){super(e),this.filterList=[],this.headerFilters={},this.headerFilterColumns=[],this.prevHeaderFilterChangeCheck="",this.prevHeaderFilterChangeCheck="{}",this.changed=!1,this.tableInitialized=!1,this.registerTableOption("filterMode","local"),this.registerTableOption("initialFilter",!1),this.registerTableOption("initialHeaderFilter",!1),this.registerTableOption("headerFilterLiveFilterDelay",300),this.registerTableOption("placeholderHeaderFilter",!1),this.registerColumnOption("headerFilter"),this.registerColumnOption("headerFilterPlaceholder"),this.registerColumnOption("headerFilterParams"),this.registerColumnOption("headerFilterEmptyCheck"),this.registerColumnOption("headerFilterFunc"),this.registerColumnOption("headerFilterFuncParams"),this.registerColumnOption("headerFilterLiveFilter"),this.registerTableFunction("searchRows",this.searchRows.bind(this)),this.registerTableFunction("searchData",this.searchData.bind(this)),this.registerTableFunction("setFilter",this.userSetFilter.bind(this)),this.registerTableFunction("refreshFilter",this.userRefreshFilter.bind(this)),this.registerTableFunction("addFilter",this.userAddFilter.bind(this)),this.registerTableFunction("getFilters",this.getFilters.bind(this)),this.registerTableFunction("setHeaderFilterFocus",this.userSetHeaderFilterFocus.bind(this)),this.registerTableFunction("getHeaderFilterValue",this.userGetHeaderFilterValue.bind(this)),this.registerTableFunction("setHeaderFilterValue",this.userSetHeaderFilterValue.bind(this)),this.registerTableFunction("getHeaderFilters",this.getHeaderFilters.bind(this)),this.registerTableFunction("removeFilter",this.userRemoveFilter.bind(this)),this.registerTableFunction("clearFilter",this.userClearFilter.bind(this)),this.registerTableFunction("clearHeaderFilter",this.userClearHeaderFilter.bind(this)),this.registerComponentFunction("column","headerFilterFocus",this.setHeaderFilterFocus.bind(this)),this.registerComponentFunction("column","reloadHeaderFilter",this.reloadHeaderFilter.bind(this)),this.registerComponentFunction("column","getHeaderFilterValue",this.getHeaderFilterValue.bind(this)),this.registerComponentFunction("column","setHeaderFilterValue",this.setHeaderFilterValue.bind(this))}initialize(){this.subscribe("column-init",this.initializeColumnHeaderFilter.bind(this)),this.subscribe("column-width-fit-before",this.hideHeaderFilterElements.bind(this)),this.subscribe("column-width-fit-after",this.showHeaderFilterElements.bind(this)),this.subscribe("table-built",this.tableBuilt.bind(this)),this.subscribe("placeholder",this.generatePlaceholder.bind(this)),"remote"===this.table.options.filterMode&&this.subscribe("data-params",this.remoteFilterParams.bind(this)),this.registerDataHandler(this.filter.bind(this),10)}tableBuilt(){this.table.options.initialFilter&&this.setFilter(this.table.options.initialFilter),this.table.options.initialHeaderFilter&&this.table.options.initialHeaderFilter.forEach((e=>{var 
t=this.table.columnManager.findColumn(e.field);if(!t)return console.warn("Column Filter Error - No matching column found:",e.field),!1;this.setHeaderFilterValue(t,e.value)})),this.tableInitialized=!0}remoteFilterParams(e,t,i,s){return s.filter=this.getFilters(!0,!0),s}generatePlaceholder(e){if(this.table.options.placeholderHeaderFilter&&Object.keys(this.headerFilters).length)return this.table.options.placeholderHeaderFilter}userSetFilter(e,t,i,s){this.setFilter(e,t,i,s),this.refreshFilter()}userRefreshFilter(){this.refreshFilter()}userAddFilter(e,t,i,s){this.addFilter(e,t,i,s),this.refreshFilter()}userSetHeaderFilterFocus(e){var t=this.table.columnManager.findColumn(e);if(!t)return console.warn("Column Filter Focus Error - No matching column found:",e),!1;this.setHeaderFilterFocus(t)}userGetHeaderFilterValue(e){var t=this.table.columnManager.findColumn(e);if(t)return this.getHeaderFilterValue(t);console.warn("Column Filter Error - No matching column found:",e)}userSetHeaderFilterValue(e,t){var i=this.table.columnManager.findColumn(e);if(!i)return console.warn("Column Filter Error - No matching column found:",e),!1;this.setHeaderFilterValue(i,t)}userRemoveFilter(e,t,i){this.removeFilter(e,t,i),this.refreshFilter()}userClearFilter(e){this.clearFilter(e),this.refreshFilter()}userClearHeaderFilter(){this.clearHeaderFilter(),this.refreshFilter()}searchRows(e,t,i){return this.search("rows",e,t,i)}searchData(e,t,i){return this.search("data",e,t,i)}initializeColumnHeaderFilter(e){e.definition.headerFilter&&this.initializeColumn(e)}initializeColumn(e,t){var i=this,s=e.getField();e.modules.filter={success:function(t){var o,n="input"==e.modules.filter.tagType&&"text"==e.modules.filter.attrType||"textarea"==e.modules.filter.tagType?"partial":"match",r="",a="";if(void 0===e.modules.filter.prevSuccess||e.modules.filter.prevSuccess!==t){if(e.modules.filter.prevSuccess=t,e.modules.filter.emptyFunc(t))delete i.headerFilters[s];else{switch(e.modules.filter.value=t,typeof e.definition.headerFilterFunc){case"string":ue.filters[e.definition.headerFilterFunc]?(r=e.definition.headerFilterFunc,o=function(i){var s=e.definition.headerFilterFuncParams||{},o=e.getFieldValue(i);return s="function"==typeof s?s(t,o,i):s,ue.filters[e.definition.headerFilterFunc](t,o,i,s)}):console.warn("Header Filter Error - Matching filter function not found: ",e.definition.headerFilterFunc);break;case"function":r=o=function(i){var s=e.definition.headerFilterFuncParams||{},o=e.getFieldValue(i);return s="function"==typeof s?s(t,o,i):s,e.definition.headerFilterFunc(t,o,i,s)}}if(!o)if("partial"===n)o=function(i){var s=e.getFieldValue(i);return null!=s&&String(s).toLowerCase().indexOf(String(t).toLowerCase())>-1},r="like";else o=function(i){return e.getFieldValue(i)==t},r="=";i.headerFilters[s]={value:t,func:o,type:r}}e.modules.filter.value=t,a=JSON.stringify(i.headerFilters),i.prevHeaderFilterChangeCheck!==a&&(i.prevHeaderFilterChangeCheck=a,i.trackChanges(),i.refreshFilter())}return!0},attrType:!1,tagType:!1,emptyFunc:!1},this.generateHeaderFilterElement(e)}generateHeaderFilterElement(e,t,i){var s,o,n,r,a,l,h,d,c=this,u=e.modules.filter.success,m=e.getField();if(e.modules.filter.value=t,e.modules.filter.headerElement&&e.modules.filter.headerElement.parentNode&&e.contentElement.removeChild(e.modules.filter.headerElement.parentNode),m){switch(e.modules.filter.emptyFunc=e.definition.headerFilterEmptyCheck||function(e){return!e&&0!==e},(s=document.createElement("div")).classList.add("tabulator-header-filter"),typeof 
e.definition.headerFilter){case"string":c.table.modules.edit.editors[e.definition.headerFilter]?(o=c.table.modules.edit.editors[e.definition.headerFilter],"tick"!==e.definition.headerFilter&&"tickCross"!==e.definition.headerFilter||e.definition.headerFilterEmptyCheck||(e.modules.filter.emptyFunc=function(e){return!0!==e&&!1!==e})):console.warn("Filter Error - Cannot build header filter, No such editor found: ",e.definition.editor);break;case"function":o=e.definition.headerFilter;break;case"boolean":e.modules.edit&&e.modules.edit.editor?o=e.modules.edit.editor:e.definition.formatter&&c.table.modules.edit.editors[e.definition.formatter]?(o=c.table.modules.edit.editors[e.definition.formatter],"tick"!==e.definition.formatter&&"tickCross"!==e.definition.formatter||e.definition.headerFilterEmptyCheck||(e.modules.filter.emptyFunc=function(e){return!0!==e&&!1!==e})):o=c.table.modules.edit.editors.input}if(o){if(r={getValue:function(){return void 0!==t?t:""},getField:function(){return e.definition.field},getElement:function(){return s},getColumn:function(){return e.getComponent()},getTable:()=>this.table,getType:()=>"header",getRow:function(){return{normalizeHeight:function(){}}}},h="function"==typeof(h=e.definition.headerFilterParams||{})?h.call(c.table,r):h,!(n=o.call(this.table.modules.edit,r,(function(e){d=e}),u,(function(){}),h)))return void console.warn("Filter Error - Cannot add filter to "+m+" column, editor returned a value of false");if(!(n instanceof Node))return void console.warn("Filter Error - Cannot add filter to "+m+" column, editor should return an instance of Node, the editor returned:",n);c.langBind("headerFilters|columns|"+e.definition.field,(function(t){n.setAttribute("placeholder",void 0!==t&&t?t:e.definition.headerFilterPlaceholder||c.langText("headerFilters|default"))})),n.addEventListener("click",(function(e){e.stopPropagation(),n.focus()})),n.addEventListener("focus",(e=>{var t=this.table.columnManager.contentsElement.scrollLeft;t!==this.table.rowManager.element.scrollLeft&&(this.table.rowManager.scrollHorizontal(t),this.table.columnManager.scrollHorizontal(t))})),a=!1,l=function(e){a&&clearTimeout(a),a=setTimeout((function(){u(n.value)}),c.table.options.headerFilterLiveFilterDelay)},e.modules.filter.headerElement=n,e.modules.filter.attrType=n.hasAttribute("type")?n.getAttribute("type").toLowerCase():"",e.modules.filter.tagType=n.tagName.toLowerCase(),!1!==e.definition.headerFilterLiveFilter&&("autocomplete"!==e.definition.headerFilter&&"tickCross"!==e.definition.headerFilter&&("autocomplete"!==e.definition.editor&&"tickCross"!==e.definition.editor||!0!==e.definition.headerFilter)&&(n.addEventListener("keyup",l),n.addEventListener("search",l),"number"==e.modules.filter.attrType&&n.addEventListener("change",(function(e){u(n.value)})),"text"==e.modules.filter.attrType&&"ie"!==this.table.browser&&n.setAttribute("type","search")),"input"!=e.modules.filter.tagType&&"select"!=e.modules.filter.tagType&&"textarea"!=e.modules.filter.tagType||n.addEventListener("mousedown",(function(e){e.stopPropagation()}))),s.appendChild(n),e.contentElement.appendChild(s),i||c.headerFilterColumns.push(e),d&&d()}}else console.warn("Filter Error - Cannot add header filter, column has no field 
set:",e.definition.title)}hideHeaderFilterElements(){this.headerFilterColumns.forEach((function(e){e.modules.filter&&e.modules.filter.headerElement&&(e.modules.filter.headerElement.style.display="none")}))}showHeaderFilterElements(){this.headerFilterColumns.forEach((function(e){e.modules.filter&&e.modules.filter.headerElement&&(e.modules.filter.headerElement.style.display="")}))}setHeaderFilterFocus(e){e.modules.filter&&e.modules.filter.headerElement?e.modules.filter.headerElement.focus():console.warn("Column Filter Focus Error - No header filter set on column:",e.getField())}getHeaderFilterValue(e){if(e.modules.filter&&e.modules.filter.headerElement)return e.modules.filter.value;console.warn("Column Filter Error - No header filter set on column:",e.getField())}setHeaderFilterValue(e,t){e&&(e.modules.filter&&e.modules.filter.headerElement?(this.generateHeaderFilterElement(e,t,!0),e.modules.filter.success(t)):console.warn("Column Filter Error - No header filter set on column:",e.getField()))}reloadHeaderFilter(e){e&&(e.modules.filter&&e.modules.filter.headerElement?this.generateHeaderFilterElement(e,e.modules.filter.value,!0):console.warn("Column Filter Error - No header filter set on column:",e.getField()))}refreshFilter(){this.tableInitialized&&("remote"===this.table.options.filterMode?this.reloadData(null,!1,!1):this.refreshData(!0))}trackChanges(){this.changed=!0,this.dispatch("filter-changed")}hasChanged(){var e=this.changed;return this.changed=!1,e}setFilter(e,t,i,s){this.filterList=[],Array.isArray(e)||(e=[{field:e,type:t,value:i,params:s}]),this.addFilter(e)}addFilter(e,t,i,s){var o=!1;Array.isArray(e)||(e=[{field:e,type:t,value:i,params:s}]),e.forEach((e=>{(e=this.findFilter(e))&&(this.filterList.push(e),o=!0)})),o&&this.trackChanges()}findFilter(e){var t;if(Array.isArray(e))return this.findSubFilters(e);var i=!1;return"function"==typeof e.field?i=function(t){return e.field(t,e.type||{})}:ue.filters[e.type]?i=(t=this.table.columnManager.getColumnByField(e.field))?function(i){return ue.filters[e.type](e.value,t.getFieldValue(i),i,e.params||{})}:function(t){return ue.filters[e.type](e.value,t[e.field],t,e.params||{})}:console.warn("Filter Error - No such filter type found, ignoring: ",e.type),e.func=i,!!e.func&&e}findSubFilters(e){var t=[];return e.forEach((e=>{(e=this.findFilter(e))&&t.push(e)})),!!t.length&&t}getFilters(e,t){var i=[];return e&&(i=this.getHeaderFilters()),t&&i.forEach((function(e){"function"==typeof e.type&&(e.type="function")})),i=i.concat(this.filtersToArray(this.filterList,t))}filtersToArray(e,t){var i=[];return e.forEach((e=>{var s;Array.isArray(e)?i.push(this.filtersToArray(e,t)):(s={field:e.field,type:e.type,value:e.value},t&&"function"==typeof s.type&&(s.type="function"),i.push(s))})),i}getHeaderFilters(){var e=[];for(var t in this.headerFilters)e.push({field:t,type:this.headerFilters[t].type,value:this.headerFilters[t].value});return e}removeFilter(e,t,i){Array.isArray(e)||(e=[{field:e,type:t,value:i}]),e.forEach((e=>{var t=-1;(t="object"==typeof e.field?this.filterList.findIndex((t=>e===t)):this.filterList.findIndex((t=>e.field===t.field&&e.type===t.type&&e.value===t.value)))>-1?this.filterList.splice(t,1):console.warn("Filter Error - No matching filter type found, ignoring: ",e.type)})),this.trackChanges()}clearFilter(e){this.filterList=[],e&&this.clearHeaderFilter(),this.trackChanges()}clearHeaderFilter(){this.headerFilters={},this.prevHeaderFilterChangeCheck="{}",this.headerFilterColumns.forEach((e=>{void 0!==e.modules.filter.value&&delete 
e.modules.filter.value,e.modules.filter.prevSuccess=void 0,this.reloadHeaderFilter(e)})),this.trackChanges()}search(e,t,i,s){var o=[],n=[];return Array.isArray(t)||(t=[{field:t,type:i,value:s}]),t.forEach((e=>{(e=this.findFilter(e))&&n.push(e)})),this.table.rowManager.rows.forEach((t=>{var i=!0;n.forEach((e=>{this.filterRecurse(e,t.getData())||(i=!1)})),i&&o.push("data"===e?t.getData("data"):t.getComponent())})),o}filter(e,t){var i=[],s=[];return this.subscribedExternal("dataFiltering")&&this.dispatchExternal("dataFiltering",this.getFilters(!0)),"remote"!==this.table.options.filterMode&&(this.filterList.length||Object.keys(this.headerFilters).length)?e.forEach((e=>{this.filterRow(e)&&i.push(e)})):i=e.slice(0),this.subscribedExternal("dataFiltered")&&(i.forEach((e=>{s.push(e.getComponent())})),this.dispatchExternal("dataFiltered",this.getFilters(!0),s)),i}filterRow(e,t){var i=!0,s=e.getData();for(var o in this.filterList.forEach((e=>{this.filterRecurse(e,s)||(i=!1)})),this.headerFilters)this.headerFilters[o].func(s)||(i=!1);return i}filterRecurse(e,t){var i=!1;return Array.isArray(e)?e.forEach((e=>{this.filterRecurse(e,t)&&(i=!0)})):i=e.func(t),i}}var me={plaintext:function(e,t,i){return this.emptyToSpace(this.sanitizeHTML(e.getValue()))},html:function(e,t,i){return e.getValue()},textarea:function(e,t,i){return e.getElement().style.whiteSpace="pre-wrap",this.emptyToSpace(this.sanitizeHTML(e.getValue()))},money:function(e,t,i){var s,o,n,r,a,l=parseFloat(e.getValue()),h="",d=t.decimal||".",c=t.thousand||",",u=t.negativeSign||"-",m=t.symbol||"",p=!!t.symbolAfter,g=void 0!==t.precision?t.precision:2;if(isNaN(l))return this.emptyToSpace(this.sanitizeHTML(e.getValue()));if(l<0&&(l=Math.abs(l),h=u),s=!1!==g?l.toFixed(g):l,o=(s=String(s).split("."))[0],n=s.length>1?d+s[1]:"",!1!==t.thousand)for(r=/(\d+)(\d{3})/;r.test(o);)o=o.replace(r,"$1"+c+"$2");return a=o+n,!0===h?(a="("+a+")",p?a+m:m+a):p?h+a+m:h+m+a},link:function(e,t,i){var s,o=e.getValue(),n=t.urlPrefix||"",r=t.download,l=o,h=document.createElement("a");if(t.labelField&&(s=e.getData(),l=function e(t,i){var s=i[t.shift()];return t.length&&"object"==typeof s?e(t,s):s}(t.labelField.split(this.table.options.nestedFieldSeparator),s)),t.label)switch(typeof t.label){case"string":l=t.label;break;case"function":l=t.label(e)}if(l){if(t.urlField&&(s=e.getData(),o=a.retrieveNestedData(this.table.options.nestedFieldSeparator,t.urlField,s)),t.url)switch(typeof t.url){case"string":o=t.url;break;case"function":o=t.url(e)}return h.setAttribute("href",n+o),t.target&&h.setAttribute("target",t.target),t.download&&(r="function"==typeof r?r(e):!0===r?"":r,h.setAttribute("download",r)),h.innerHTML=this.emptyToSpace(this.sanitizeHTML(l)),h}return" "},image:function(e,t,i){var s=document.createElement("img"),o=e.getValue();switch(t.urlPrefix&&(o=t.urlPrefix+e.getValue()),t.urlSuffix&&(o+=t.urlSuffix),s.setAttribute("src",o),typeof t.height){case"number":s.style.height=t.height+"px";break;case"string":s.style.height=t.height}switch(typeof t.width){case"number":s.style.width=t.width+"px";break;case"string":s.style.width=t.width}return s.addEventListener("load",(function(){e.getRow().normalizeHeight()})),s},tickCross:function(e,t,i){var s=e.getValue(),o=e.getElement(),n=t.allowEmpty,r=t.allowTruthy,a=Object.keys(t).includes("trueValue"),l=void 0!==t.tickElement?t.tickElement:'',h=void 0!==t.crossElement?t.crossElement:'';return 
a&&s===t.trueValue||!a&&(r&&s||!0===s||"true"===s||"True"===s||1===s||"1"===s)?(o.setAttribute("aria-checked",!0),l||""):!n||"null"!==s&&""!==s&&null!=s?(o.setAttribute("aria-checked",!1),h||""):(o.setAttribute("aria-checked","mixed"),"")},datetime:function(e,t,i){var s,o=window.DateTime||luxon.DateTime,n=t.inputFormat||"yyyy-MM-dd HH:mm:ss",r=t.outputFormat||"dd/MM/yyyy HH:mm:ss",a=void 0!==t.invalidPlaceholder?t.invalidPlaceholder:"",l=e.getValue();if(void 0!==o)return(s=o.isDateTime(l)?l:"iso"===n?o.fromISO(String(l)):o.fromFormat(String(l),n)).isValid?(t.timezone&&(s=s.setZone(t.timezone)),s.toFormat(r)):!0!==a&&l?"function"==typeof a?a(l):a:l;console.error("Format Error - 'datetime' formatter is dependant on luxon.js")},datetimediff:function(e,t,i){var s,o=window.DateTime||luxon.DateTime,n=t.inputFormat||"yyyy-MM-dd HH:mm:ss",r=void 0!==t.invalidPlaceholder?t.invalidPlaceholder:"",a=void 0!==t.suffix&&t.suffix,l=void 0!==t.unit?t.unit:"days",h=void 0!==t.humanize&&t.humanize,d=void 0!==t.date?t.date:o.now(),c=e.getValue();if(void 0!==o)return(s=o.isDateTime(c)?c:"iso"===n?o.fromISO(String(c)):o.fromFormat(String(c),n)).isValid?h?s.diff(d,l).toHuman()+(a?" "+a:""):parseInt(s.diff(d,l)[l])+(a?" "+a:""):!0===r?c:"function"==typeof r?r(c):r;console.error("Format Error - 'datetimediff' formatter is dependant on luxon.js")},lookup:function(e,t,i){var s=e.getValue();return void 0===t[s]?(console.warn("Missing display value for "+s),s):t[s]},star:function(e,t,i){var s=e.getValue(),o=e.getElement(),n=t&&t.stars?t.stars:5,r=document.createElement("span"),a=document.createElementNS("http://www.w3.org/2000/svg","svg");r.style.verticalAlign="middle",a.setAttribute("width","14"),a.setAttribute("height","14"),a.setAttribute("viewBox","0 0 512 512"),a.setAttribute("xml:space","preserve"),a.style.padding="0 1px",s=s&&!isNaN(s)?parseInt(s):0,s=Math.max(0,Math.min(s,n));for(var l=1;l<=n;l++){var h=a.cloneNode(!0);h.innerHTML=l<=s?'':'',r.appendChild(h)}return o.style.whiteSpace="nowrap",o.style.overflow="hidden",o.style.textOverflow="ellipsis",o.setAttribute("aria-label",s),r},traffic:function(e,t,i){var s,o,n=this.sanitizeHTML(e.getValue())||0,r=document.createElement("span"),a=t&&t.max?t.max:100,l=t&&t.min?t.min:0,h=t&&void 0!==t.color?t.color:["red","orange","green"],d="#666666";if(!isNaN(n)&&void 0!==e.getValue()){switch(r.classList.add("tabulator-traffic-light"),o=parseFloat(n)<=a?parseFloat(n):a,o=parseFloat(o)>=l?parseFloat(o):l,s=(a-l)/100,o=Math.round((o-l)/s),typeof h){case"string":d=h;break;case"function":d=h(n);break;case"object":if(Array.isArray(h)){var c=100/h.length,u=Math.floor(o/c);u=Math.min(u,h.length-1),d=h[u=Math.max(u,0)];break}}return r.style.backgroundColor=d,r}},progress:function(e,t={},i){var s,n,r,a,l,h=this.sanitizeHTML(e.getValue())||0,d=e.getElement(),c=t.max?t.max:100,u=t.min?t.min:0,m=t.legendAlign?t.legendAlign:"center";switch(n=parseFloat(h)<=c?parseFloat(h):c,n=parseFloat(n)>=u?parseFloat(n):u,s=(c-u)/100,n=Math.round((n-u)/s),typeof t.color){case"string":r=t.color;break;case"function":r=t.color(h);break;case"object":if(Array.isArray(t.color)){let e=100/t.color.length,i=Math.floor(n/e);i=Math.min(i,t.color.length-1),i=Math.max(i,0),r=t.color[i];break}default:r="#2DC214"}switch(typeof t.legend){case"string":a=t.legend;break;case"function":a=t.legend(h);break;case"boolean":a=h;break;default:a=!1}switch(typeof t.legendColor){case"string":l=t.legendColor;break;case"function":l=t.legendColor(h);break;case"object":if(Array.isArray(t.legendColor)){let 
e=100/t.legendColor.length,i=Math.floor(n/e);i=Math.min(i,t.legendColor.length-1),i=Math.max(i,0),l=t.legendColor[i]}break;default:l="#000"}d.style.minWidth="30px",d.style.position="relative",d.setAttribute("aria-label",n);var p=document.createElement("div");p.style.display="inline-block",p.style.width=n+"%",p.style.backgroundColor=r,p.style.height="100%",p.setAttribute("data-max",c),p.setAttribute("data-min",u);var g=document.createElement("div");if(g.style.position="relative",g.style.width="100%",g.style.height="100%",a){var b=document.createElement("div");b.style.position="absolute",b.style.top=0,b.style.left=0,b.style.textAlign=m,b.style.width="100%",b.style.color=l,b.innerHTML=a}return i((function(){if(!(e instanceof o)){var t=document.createElement("div");t.style.position="absolute",t.style.top="4px",t.style.bottom="4px",t.style.left="4px",t.style.right="4px",d.appendChild(t),d=t}d.appendChild(g),g.appendChild(p),a&&g.appendChild(b)})),""},color:function(e,t,i){return e.getElement().style.backgroundColor=this.sanitizeHTML(e.getValue()),""},buttonTick:function(e,t,i){return''},buttonCross:function(e,t,i){return''},toggle:function(e,t,i){var s,o,n=e.getValue(),r=t.size||15,a=r+"px",l=!t.hasOwnProperty("onValue")||t.onValue,h=!!t.hasOwnProperty("offValue")&&t.offValue,d=t.onTruthy?n:n===l;return(s=document.createElement("div")).classList.add("tabulator-toggle"),d?(s.classList.add("tabulator-toggle-on"),s.style.flexDirection="row-reverse",t.onColor&&(s.style.background=t.onColor)):t.offColor&&(s.style.background=t.offColor),s.style.width=2.5*r+"px",s.style.borderRadius=a,t.clickable&&s.addEventListener("click",(t=>{e.setValue(d?h:l)})),(o=document.createElement("div")).classList.add("tabulator-toggle-switch"),o.style.height=a,o.style.width=a,o.style.borderRadius=a,s.appendChild(o),s},rownum:function(e,t,i){var s=document.createElement("span"),o=e.getRow(),n=e.getTable();return o.watchPosition((e=>{t.relativeToPage&&(e+=n.modules.page.getPageSize()*(n.modules.page.getPage()-1)),s.innerText=e})),s},handle:function(e,t,i){return e.getElement().classList.add("tabulator-row-handle"),"
    "}};class pe extends M{static moduleName="format";static formatters=me;constructor(e){super(e),this.registerColumnOption("formatter"),this.registerColumnOption("formatterParams"),this.registerColumnOption("formatterPrint"),this.registerColumnOption("formatterPrintParams"),this.registerColumnOption("formatterClipboard"),this.registerColumnOption("formatterClipboardParams"),this.registerColumnOption("formatterHtmlOutput"),this.registerColumnOption("formatterHtmlOutputParams"),this.registerColumnOption("titleFormatter"),this.registerColumnOption("titleFormatterParams")}initialize(){this.subscribe("cell-format",this.formatValue.bind(this)),this.subscribe("cell-rendered",this.cellRendered.bind(this)),this.subscribe("column-layout",this.initializeColumn.bind(this)),this.subscribe("column-format",this.formatHeader.bind(this))}initializeColumn(e){e.modules.format=this.lookupFormatter(e,""),void 0!==e.definition.formatterPrint&&(e.modules.format.print=this.lookupFormatter(e,"Print")),void 0!==e.definition.formatterClipboard&&(e.modules.format.clipboard=this.lookupFormatter(e,"Clipboard")),void 0!==e.definition.formatterHtmlOutput&&(e.modules.format.htmlOutput=this.lookupFormatter(e,"HtmlOutput"))}lookupFormatter(e,t){var i={params:e.definition["formatter"+t+"Params"]||{}},s=e.definition["formatter"+t];switch(typeof s){case"string":pe.formatters[s]?i.formatter=pe.formatters[s]:(console.warn("Formatter Error - No such formatter found: ",s),i.formatter=pe.formatters.plaintext);break;case"function":i.formatter=s;break;default:i.formatter=pe.formatters.plaintext}return i}cellRendered(e){e.modules.format&&e.modules.format.renderedCallback&&!e.modules.format.rendered&&(e.modules.format.renderedCallback(),e.modules.format.rendered=!0)}formatHeader(e,t,i){var s,o,n,r;return e.definition.titleFormatter?(s=this.getFormatter(e.definition.titleFormatter),n=t=>{e.titleFormatterRendered=t},r={getValue:function(){return t},getElement:function(){return i},getType:function(){return"header"},getColumn:function(){return e.getComponent()},getTable:()=>this.table},o="function"==typeof(o=e.definition.titleFormatterParams||{})?o():o,s.call(this,r,o,n)):t}formatValue(e){var t=e.getComponent(),i="function"==typeof e.column.modules.format.params?e.column.modules.format.params(t):e.column.modules.format.params;return e.column.modules.format.formatter.call(this,t,i,(function(t){e.modules.format||(e.modules.format={}),e.modules.format.renderedCallback=t,e.modules.format.rendered=!1}))}formatExportValue(e,t){var i,s=e.column.modules.format[t];if(s){function o(t){e.modules.format||(e.modules.format={}),e.modules.format.renderedCallback=t,e.modules.format.rendered=!1}return i="function"==typeof s.params?s.params(e.getComponent()):s.params,s.formatter.call(this,e.getComponent(),i,o)}return this.formatValue(e)}sanitizeHTML(e){if(e){var t={"&":"&","<":"<",">":">",'"':""","'":"'","/":"/","`":"`","=":"="};return String(e).replace(/[&<>"'`=/]/g,(function(e){return t[e]}))}return e}emptyToSpace(e){return null==e||""===e?" 
":e}getFormatter(e){switch(typeof e){case"string":pe.formatters[e]?e=pe.formatters[e]:(console.warn("Formatter Error - No such formatter found: ",e),e=pe.formatters.plaintext);break;case"function":break;default:e=pe.formatters.plaintext}return e}}class ge{constructor(e){return this._group=e,this.type="GroupComponent",new Proxy(this,{get:function(e,t,i){return void 0!==e[t]?e[t]:e._group.groupManager.table.componentFunctionBinder.handle("group",e._group,t)}})}getKey(){return this._group.key}getField(){return this._group.field}getElement(){return this._group.element}getRows(){return this._group.getRows(!0)}getSubGroups(){return this._group.getSubGroups(!0)}getParentGroup(){return!!this._group.parent&&this._group.parent.getComponent()}isVisible(){return this._group.visible}show(){this._group.show()}hide(){this._group.hide()}toggle(){this._group.toggleVisibility()}scrollTo(e,t){return this._group.groupManager.table.rowManager.scrollToRow(this._group,e,t)}_getSelf(){return this._group}getTable(){return this._group.groupManager.table}}class be{constructor(e,t,i,s,o,n,r){this.groupManager=e,this.parent=t,this.key=s,this.level=i,this.field=o,this.hasSubGroups=i{e.modules&&delete e.modules.group}))),this.element=!1,this.arrowElement=!1,this.elementContents=!1}createElements(){var e=document.createElement("div");e.classList.add("tabulator-arrow"),this.element=document.createElement("div"),this.element.classList.add("tabulator-row"),this.element.classList.add("tabulator-group"),this.element.classList.add("tabulator-group-level-"+this.level),this.element.setAttribute("role","rowgroup"),this.arrowElement=document.createElement("div"),this.arrowElement.classList.add("tabulator-group-toggle"),this.arrowElement.appendChild(e),!1!==this.groupManager.table.options.movableRows&&this.groupManager.table.modExists("moveRow")&&this.groupManager.table.modules.moveRow.initializeGroupHeader(this)}createValueGroups(){var e=this.level+1;this.groupManager.allowedValues&&this.groupManager.allowedValues[e]&&this.groupManager.allowedValues[e].forEach((t=>{this._createGroup(t,e)}))}addBindings(){this.groupManager.table.options.groupToggleElement&&("arrow"==this.groupManager.table.options.groupToggleElement?this.arrowElement:this.element).addEventListener("click",(e=>{"arrow"===this.groupManager.table.options.groupToggleElement&&(e.stopPropagation(),e.stopImmediatePropagation()),setTimeout((()=>{this.toggleVisibility()}))}))}_createGroup(e,t){var i=t+"_"+e,s=new be(this.groupManager,this,t,e,this.groupManager.groupIDLookups[t].field,this.groupManager.headerGenerator[t]||this.groupManager.headerGenerator[0],!!this.old&&this.old.groups[i]);this.groups[i]=s,this.groupList.push(s)}_addRowToGroup(e){var t=this.level+1;if(this.hasSubGroups){var i=this.groupManager.groupIDLookups[t].func(e.getData()),s=t+"_"+i;this.groupManager.allowedValues&&this.groupManager.allowedValues[t]?this.groups[s]&&this.groups[s].addRow(e):(this.groups[s]||this._createGroup(i,t),this.groups[s].addRow(e))}}_addRow(e){this.rows.push(e),e.modules.group=this}insertRow(e,t,i){var s=this.conformRowData({});e.updateData(s);var 
o=this.rows.indexOf(t);o>-1?i?this.rows.splice(o+1,0,e):this.rows.splice(o,0,e):i?this.rows.push(e):this.rows.unshift(e),e.modules.group=this,this.groupManager.table.modExists("columnCalcs")&&"table"!=this.groupManager.table.options.columnCalcs&&this.groupManager.table.modules.columnCalcs.recalcGroup(this),this.groupManager.updateGroupRows(!0)}scrollHeader(e){this.arrowElement&&(this.arrowElement.style.marginLeft=e,this.groupList.forEach((function(t){t.scrollHeader(e)})))}getRowIndex(e){}conformRowData(e){return this.field?e[this.field]=this.key:console.warn("Data Conforming Error - Cannot conform row data to match new group as groupBy is a function"),this.parent&&(e=this.parent.conformRowData(e)),e}removeRow(e){var t=this.rows.indexOf(e),i=e.getElement();t>-1&&this.rows.splice(t,1),this.groupManager.table.options.groupValues||this.rows.length?(i.parentNode&&i.parentNode.removeChild(i),this.groupManager.blockRedraw||(this.generateGroupHeaderContents(),this.groupManager.table.modExists("columnCalcs")&&"table"!=this.groupManager.table.options.columnCalcs&&this.groupManager.table.modules.columnCalcs.recalcGroup(this))):(this.parent?this.parent.removeGroup(this):this.groupManager.removeGroup(this),this.groupManager.updateGroupRows(!0))}removeGroup(e){var t,i=e.level+"_"+e.key;this.groups[i]&&(delete this.groups[i],(t=this.groupList.indexOf(e))>-1&&this.groupList.splice(t,1),this.groupList.length||(this.parent?this.parent.removeGroup(this):this.groupManager.removeGroup(this)))}getHeadersAndRows(){var e=[];return e.push(this),this._visSet(),this.calcs.top&&(this.calcs.top.detachElement(),this.calcs.top.deleteCells()),this.calcs.bottom&&(this.calcs.bottom.detachElement(),this.calcs.bottom.deleteCells()),this.visible?this.groupList.length?this.groupList.forEach((function(t){e=e.concat(t.getHeadersAndRows())})):("table"!=this.groupManager.table.options.columnCalcs&&this.groupManager.table.modExists("columnCalcs")&&this.groupManager.table.modules.columnCalcs.hasTopCalcs()&&(this.calcs.top=this.groupManager.table.modules.columnCalcs.generateTopRow(this.rows),e.push(this.calcs.top)),e=e.concat(this.rows),"table"!=this.groupManager.table.options.columnCalcs&&this.groupManager.table.modExists("columnCalcs")&&this.groupManager.table.modules.columnCalcs.hasBottomCalcs()&&(this.calcs.bottom=this.groupManager.table.modules.columnCalcs.generateBottomRow(this.rows),e.push(this.calcs.bottom))):this.groupList.length||"table"==this.groupManager.table.options.columnCalcs||this.groupManager.table.modExists("columnCalcs")&&(this.groupManager.table.modules.columnCalcs.hasTopCalcs()&&this.groupManager.table.options.groupClosedShowCalcs&&(this.calcs.top=this.groupManager.table.modules.columnCalcs.generateTopRow(this.rows),e.push(this.calcs.top)),this.groupManager.table.modules.columnCalcs.hasBottomCalcs()&&this.groupManager.table.options.groupClosedShowCalcs&&(this.calcs.bottom=this.groupManager.table.modules.columnCalcs.generateBottomRow(this.rows),e.push(this.calcs.bottom))),e}getData(e,t){var i=[];return this._visSet(),(!e||e&&this.visible)&&this.rows.forEach((e=>{i.push(e.getData(t||"data"))})),i}getRowCount(){var e=0;return 
this.groupList.length?this.groupList.forEach((t=>{e+=t.getRowCount()})):e=this.rows.length,e}toggleVisibility(){this.visible?this.hide():this.show()}hide(){this.visible=!1,"basic"!=this.groupManager.table.rowManager.getRenderMode()||this.groupManager.table.options.pagination||(this.element.classList.remove("tabulator-group-visible"),this.groupList.length?this.groupList.forEach((e=>{e.getHeadersAndRows().forEach((e=>{e.detachElement()}))})):this.rows.forEach((e=>{var t=e.getElement();t.parentNode.removeChild(t)}))),this.groupManager.updateGroupRows(!0),this.groupManager.table.externalEvents.dispatch("groupVisibilityChanged",this.getComponent(),!1)}show(){if(this.visible=!0,"basic"!=this.groupManager.table.rowManager.getRenderMode()||this.groupManager.table.options.pagination)this.groupManager.updateGroupRows(!0);else{this.element.classList.add("tabulator-group-visible");var e=this.generateElement();this.groupList.length?this.groupList.forEach((t=>{t.getHeadersAndRows().forEach((t=>{var i=t.getElement();e.parentNode.insertBefore(i,e.nextSibling),t.initialize(),e=i}))})):this.rows.forEach((t=>{var i=t.getElement();e.parentNode.insertBefore(i,e.nextSibling),t.initialize(),e=i})),this.groupManager.updateGroupRows(!0)}this.groupManager.table.externalEvents.dispatch("groupVisibilityChanged",this.getComponent(),!0)}_visSet(){var e=[];"function"==typeof this.visible&&(this.rows.forEach((function(t){e.push(t.getData())})),this.visible=this.visible(this.key,this.getRowCount(),e,this.getComponent()))}getRowGroup(e){var t=!1;return this.groupList.length?this.groupList.forEach((function(i){var s=i.getRowGroup(e);s&&(t=s)})):this.rows.find((function(t){return t===e}))&&(t=this),t}getSubGroups(e){var t=[];return this.groupList.forEach((function(i){t.push(e?i.getComponent():i)})),t}getRows(e,t){var i=[];return t&&this.groupList.length?this.groupList.forEach((s=>{i=i.concat(s.getRows(e,t))})):this.rows.forEach((function(t){i.push(e?t.getComponent():t)})),i}generateGroupHeaderContents(){var e=[];for(this.getRows(!1,!0).forEach((function(t){e.push(t.getData())})),this.elementContents=this.generator(this.key,this.getRowCount(),e,this.getComponent());this.element.firstChild;)this.element.removeChild(this.element.firstChild);"string"==typeof this.elementContents?this.element.innerHTML=this.elementContents:this.element.appendChild(this.elementContents),this.element.insertBefore(this.arrowElement,this.element.firstChild)}getPath(e=[]){return e.unshift(this.key),this.parent&&this.parent.getPath(e),e}getElement(){return this.elementContents?this.element:this.generateElement()}generateElement(){this.addBindings=!1,this._visSet(),this.visible?this.element.classList.add("tabulator-group-visible"):this.element.classList.remove("tabulator-group-visible");for(var e=0;e0;this.table.rowManager.moveRowActual(e.component,this.table.rowManager.getRowFromPosition(e.data.posFrom),t),this.table.rowManager.regenerateRowPositions(),this.table.rowManager.reRenderInPosition()}},ve={cellEdit:function(e){e.component.setValueProcessData(e.data.newValue),e.component.cellRendered()},rowAdd:function(e){var 
t=this.table.rowManager.addRowActual(e.data.data,e.data.pos,e.data.index);this.table.options.groupBy&&this.table.modExists("groupRows")&&this.table.modules.groupRows.updateGroupRows(!0),this._rebindRow(e.component,t),this.table.rowManager.checkPlaceholder()},rowDelete:function(e){e.component.deleteActual(),this.table.rowManager.checkPlaceholder()},rowMove:function(e){this.table.rowManager.moveRowActual(e.component,this.table.rowManager.getRowFromPosition(e.data.posTo),e.data.after),this.table.rowManager.regenerateRowPositions(),this.table.rowManager.reRenderInPosition()}},we={keybindings:{bindings:{undo:["ctrl + 90","meta + 90"],redo:["ctrl + 89","meta + 89"]},actions:{undo:function(e){this.table.options.history&&this.table.modExists("history")&&this.table.modExists("edit")&&(this.table.modules.edit.currentCell||(e.preventDefault(),this.table.modules.history.undo()))},redo:function(e){this.table.options.history&&this.table.modExists("history")&&this.table.modExists("edit")&&(this.table.modules.edit.currentCell||(e.preventDefault(),this.table.modules.history.redo()))}}}};class Ce extends M{static moduleName="history";static moduleExtensions=we;static undoers=fe;static redoers=ve;constructor(e){super(e),this.history=[],this.index=-1,this.registerTableOption("history",!1)}initialize(){this.table.options.history&&(this.subscribe("cell-value-updated",this.cellUpdated.bind(this)),this.subscribe("cell-delete",this.clearComponentHistory.bind(this)),this.subscribe("row-delete",this.rowDeleted.bind(this)),this.subscribe("rows-wipe",this.clear.bind(this)),this.subscribe("row-added",this.rowAdded.bind(this)),this.subscribe("row-move",this.rowMoved.bind(this))),this.registerTableFunction("undo",this.undo.bind(this)),this.registerTableFunction("redo",this.redo.bind(this)),this.registerTableFunction("getHistoryUndoSize",this.getHistoryUndoSize.bind(this)),this.registerTableFunction("getHistoryRedoSize",this.getHistoryRedoSize.bind(this)),this.registerTableFunction("clearHistory",this.clear.bind(this))}rowMoved(e,t,i){this.action("rowMove",e,{posFrom:e.getPosition(),posTo:t.getPosition(),to:t,after:i})}rowAdded(e,t,i,s){this.action("rowAdd",e,{data:t,pos:i,index:s})}rowDeleted(e){var t,i;this.table.options.groupBy?(t=(i=e.getComponent().getGroup()._getSelf().rows).indexOf(e))&&(t=i[t-1]):(t=e.table.rowManager.getRowIndex(e))&&(t=e.table.rowManager.rows[t-1]),this.action("rowDelete",e,{data:e.getData(),pos:!t,index:t})}cellUpdated(e){this.action("cellEdit",e,{oldValue:e.oldValue,newValue:e.value})}clear(){this.history=[],this.index=-1}action(e,t,i){this.history=this.history.slice(0,this.index+1),this.history.push({type:e,component:t,data:i}),this.index++}getHistoryUndoSize(){return this.index+1}getHistoryRedoSize(){return this.history.length-(this.index+1)}clearComponentHistory(e){var t=this.history.findIndex((function(t){return t.component===e}));t>-1&&(this.history.splice(t,1),t<=this.index&&this.index--,this.clearComponentHistory(e))}undo(){if(this.index>-1){let e=this.history[this.index];return Ce.undoers[e.type].call(this,e),this.index--,this.dispatchExternal("historyUndo",e.type,e.component.getComponent(),e.data),!0}return console.warn(this.options("history")?"History Undo Error - No more history to undo":"History module not enabled"),!1}redo(){if(this.history.length-1>this.index){this.index++;let e=this.history[this.index];return Ce.redoers[e.type].call(this,e),this.dispatchExternal("historyRedo",e.type,e.component.getComponent(),e.data),!0}return console.warn(this.options("history")?"History Redo 
Error - No more history to redo":"History module not enabled"),!1}_rebindRow(e,t){this.history.forEach((function(i){if(i.component instanceof p)i.component===e&&(i.component=t);else if(i.component instanceof n&&i.component.row===e){var s=i.component.column.getField();s&&(i.component=t.getCell(s))}}))}}var Ee={csv:function(e){var t=[],i=0,s=0,o=!1;for(let n=0;n(console.error("Import Error:",e||"Unable to import data"),Promise.reject(e))))}lookupImporter(e){var t;return e||(e=this.table.options.importFormat),(t="string"==typeof e?ye.importers[e]:e)||console.error("Import Error - Importer not found:",e),t}importFromFile(e,t,i){var s=this.lookupImporter(e);if(s)return this.pickFile(t,i).then(this.importData.bind(this,s)).then(this.structureData.bind(this)).then(this.setData.bind(this)).catch((e=>(this.dispatch("import-error",e),this.dispatchExternal("importError",e),console.error("Import Error:",e||"Unable to import file"),Promise.reject(e))))}pickFile(e,t){return new Promise(((i,s)=>{var o=document.createElement("input");o.type="file",o.accept=e,o.addEventListener("change",(e=>{var n=o.files[0],r=new FileReader;switch(this.dispatch("import-importing",o.files),this.dispatchExternal("importImporting",o.files),t||this.table.options.importReader){case"buffer":r.readAsArrayBuffer(n);break;case"binary":r.readAsBinaryString(n);break;case"url":r.readAsDataURL(n);break;default:r.readAsText(n)}r.onload=e=>{i(r.result)},r.onerror=e=>{console.warn("File Load Error - Unable to read file"),s()}})),this.dispatch("import-choose"),this.dispatchExternal("importChoose"),o.click()}))}importData(e,t){var i=e.call(this.table,t);return i instanceof Promise?i:i?Promise.resolve(i):Promise.reject()}structureData(e){return Array.isArray(e)&&e.length&&Array.isArray(e[0])?this.table.options.autoColumns?this.structureArrayToObject(e):this.structureArrayToColumns(e):e}structureArrayToObject(e){var t=e.shift();return e.map((e=>{var i={};return t.forEach(((t,s)=>{i[t]=e[s]})),i}))}structureArrayToColumns(e){var t=[],i=this.table.getColumns();return i[0]&&e[0][0]&&i[0].getDefinition().title===e[0][0]&&e.shift(),e.forEach((e=>{var s={};e.forEach(((e,t)=>{var o=i[t];o&&(s[o.getField()]=e)})),t.push(s)})),t}setData(e){return this.dispatch("import-imported",e),this.dispatchExternal("importImported",e),this.table.setData(e)}}var Re={navPrev:"shift + 9",navNext:9,navUp:38,navDown:40,navLeft:37,navRight:39,scrollPageUp:33,scrollPageDown:34,scrollToStart:36,scrollToEnd:35},xe={keyBlock:function(e){e.stopPropagation(),e.preventDefault()},scrollPageUp:function(e){var t=this.table.rowManager,i=t.scrollTop-t.element.clientHeight;e.preventDefault(),t.displayRowsCount&&(i>=0?t.element.scrollTop=i:t.scrollToRow(t.getDisplayRows()[0])),this.table.element.focus()},scrollPageDown:function(e){var t=this.table.rowManager,i=t.scrollTop+t.element.clientHeight,s=t.element.scrollHeight;e.preventDefault(),t.displayRowsCount&&(i<=s?t.element.scrollTop=i:t.scrollToRow(t.getDisplayRows()[t.displayRowsCount-1])),this.table.element.focus()},scrollToStart:function(e){var t=this.table.rowManager;e.preventDefault(),t.displayRowsCount&&t.scrollToRow(t.getDisplayRows()[0]),this.table.element.focus()},scrollToEnd:function(e){var 
t=this.table.rowManager;e.preventDefault(),t.displayRowsCount&&t.scrollToRow(t.getDisplayRows()[t.displayRowsCount-1]),this.table.element.focus()},navPrev:function(e){this.dispatch("keybinding-nav-prev",e)},navNext:function(e){this.dispatch("keybinding-nav-next",e)},navLeft:function(e){this.dispatch("keybinding-nav-left",e)},navRight:function(e){this.dispatch("keybinding-nav-right",e)},navUp:function(e){this.dispatch("keybinding-nav-up",e)},navDown:function(e){this.dispatch("keybinding-nav-down",e)}};class Te extends M{static moduleName="keybindings";static bindings=Re;static actions=xe;constructor(e){super(e),this.watchKeys=null,this.pressedKeys=null,this.keyupBinding=!1,this.keydownBinding=!1,this.registerTableOption("keybindings",{}),this.registerTableOption("tabEndNewRow",!1)}initialize(){var e=this.table.options.keybindings,t={};this.watchKeys={},this.pressedKeys=[],!1!==e&&(Object.assign(t,Te.bindings),Object.assign(t,e),this.mapBindings(t),this.bindEvents()),this.subscribe("table-destroy",this.clearBindings.bind(this))}mapBindings(e){for(let t in e)Te.actions[t]?e[t]&&("object"!=typeof e[t]&&(e[t]=[e[t]]),e[t].forEach((e=>{(Array.isArray(e)?e:[e]).forEach((e=>{this.mapBinding(t,e)}))}))):console.warn("Key Binding Error - no such action:",t)}mapBinding(e,t){var i={action:Te.actions[e],keys:[],ctrl:!1,shift:!1,meta:!1};t.toString().toLowerCase().split(" ").join("").split("+").forEach((e=>{switch(e){case"ctrl":i.ctrl=!0;break;case"shift":i.shift=!0;break;case"meta":i.meta=!0;break;default:e=isNaN(e)?e.toUpperCase().charCodeAt(0):parseInt(e),i.keys.push(e),this.watchKeys[e]||(this.watchKeys[e]=[]),this.watchKeys[e].push(i)}}))}bindEvents(){var e=this;this.keyupBinding=function(t){var i=t.keyCode,s=e.watchKeys[i];s&&(e.pressedKeys.push(i),s.forEach((function(i){e.checkBinding(t,i)})))},this.keydownBinding=function(t){var i=t.keyCode;if(e.watchKeys[i]){var s=e.pressedKeys.indexOf(i);s>-1&&e.pressedKeys.splice(s,1)}},this.table.element.addEventListener("keydown",this.keyupBinding),this.table.element.addEventListener("keyup",this.keydownBinding)}clearBindings(){this.keyupBinding&&this.table.element.removeEventListener("keydown",this.keyupBinding),this.keydownBinding&&this.table.element.removeEventListener("keyup",this.keydownBinding)}checkBinding(e,t){var i=!0;return e.ctrlKey==t.ctrl&&e.shiftKey==t.shift&&e.metaKey==t.meta&&(t.keys.forEach((e=>{-1==this.pressedKeys.indexOf(e)&&(i=!1)})),i&&t.action.call(this,e),!0)}}var Me={delete:function(e,t,i){e.delete()}},ke={insert:function(e,t,i){return this.table.addRow(e.getData(),void 0,t),!0},add:function(e,t,i){return this.table.addRow(e.getData()),!0},update:function(e,t,i){return!!t&&(t.update(e.getData()),!0)},replace:function(e,t,i){return!!t&&(this.table.addRow(e.getData(),void 0,t),t.delete(),!0)}};class Le extends M{static moduleName="moveRow";static senders=Me;static 
receivers=ke;constructor(e){super(e),this.placeholderElement=this.createPlaceholderElement(),this.hoverElement=!1,this.checkTimeout=!1,this.checkPeriod=150,this.moving=!1,this.toRow=!1,this.toRowAfter=!1,this.hasHandle=!1,this.startY=0,this.startX=0,this.moveHover=this.moveHover.bind(this),this.endMove=this.endMove.bind(this),this.tableRowDropEvent=!1,this.touchMove=!1,this.connection=!1,this.connectionSelectorsTables=!1,this.connectionSelectorsElements=!1,this.connectionElements=[],this.connections=[],this.connectedTable=!1,this.connectedRow=!1,this.registerTableOption("movableRows",!1),this.registerTableOption("movableRowsConnectedTables",!1),this.registerTableOption("movableRowsConnectedElements",!1),this.registerTableOption("movableRowsSender",!1),this.registerTableOption("movableRowsReceiver","insert"),this.registerColumnOption("rowHandle")}createPlaceholderElement(){var e=document.createElement("div");return e.classList.add("tabulator-row"),e.classList.add("tabulator-row-placeholder"),e}initialize(){this.table.options.movableRows&&(this.connectionSelectorsTables=this.table.options.movableRowsConnectedTables,this.connectionSelectorsElements=this.table.options.movableRowsConnectedElements,this.connection=this.connectionSelectorsTables||this.connectionSelectorsElements,this.subscribe("cell-init",this.initializeCell.bind(this)),this.subscribe("column-init",this.initializeColumn.bind(this)),this.subscribe("row-init",this.initializeRow.bind(this)))}initializeGroupHeader(e){var t=this,i={};i.mouseup=function(i){t.tableRowDrop(i,e)}.bind(t),i.mousemove=function(i){var s;i.pageY-a.elOffset(e.element).top+t.table.rowManager.element.scrollTop>e.getHeight()/2?t.toRow===e&&t.toRowAfter||((s=e.getElement()).parentNode.insertBefore(t.placeholderElement,s.nextSibling),t.moveRow(e,!0)):(t.toRow!==e||t.toRowAfter)&&(s=e.getElement()).previousSibling&&(s.parentNode.insertBefore(t.placeholderElement,s),t.moveRow(e,!1))}.bind(t),e.modules.moveRow=i}initializeRow(e){var t,i=this,s={};s.mouseup=function(t){i.tableRowDrop(t,e)}.bind(i),s.mousemove=function(t){var s=e.getElement();t.pageY-a.elOffset(s).top+i.table.rowManager.element.scrollTop>e.getHeight()/2?i.toRow===e&&i.toRowAfter||(s.parentNode.insertBefore(i.placeholderElement,s.nextSibling),i.moveRow(e,!0)):(i.toRow!==e||i.toRowAfter)&&(s.parentNode.insertBefore(i.placeholderElement,s),i.moveRow(e,!1))}.bind(i),this.hasHandle||((t=e.getElement()).addEventListener("mousedown",(function(t){1===t.which&&(i.checkTimeout=setTimeout((function(){i.startMove(t,e)}),i.checkPeriod))})),t.addEventListener("mouseup",(function(e){1===e.which&&i.checkTimeout&&clearTimeout(i.checkTimeout)})),this.bindTouchEvents(e,e.getElement())),e.modules.moveRow=s}initializeColumn(e){e.definition.rowHandle&&!1!==this.table.options.movableRows&&(this.hasHandle=!0)}initializeCell(e){if(e.column.definition.rowHandle&&!1!==this.table.options.movableRows){var t=this,i=e.getElement(!0);i.addEventListener("mousedown",(function(i){1===i.which&&(t.checkTimeout=setTimeout((function(){t.startMove(i,e.row)}),t.checkPeriod))})),i.addEventListener("mouseup",(function(e){1===e.which&&t.checkTimeout&&clearTimeout(t.checkTimeout)})),this.bindTouchEvents(e.row,i)}}bindTouchEvents(e,t){var 
i,s,o,n,r,a,l=!1;t.addEventListener("touchstart",(t=>{this.checkTimeout=setTimeout((()=>{this.touchMove=!0,i=e.nextRow(),o=i?i.getHeight()/2:0,s=e.prevRow(),n=s?s.getHeight()/2:0,r=0,a=0,l=!1,this.startMove(t,e)}),this.checkPeriod)}),{passive:!0}),this.moving,this.toRow,this.toRowAfter,t.addEventListener("touchmove",(t=>{var h,d;this.moving&&(t.preventDefault(),this.moveHover(t),l||(l=t.touches[0].pageY),(h=t.touches[0].pageY-l)>0?i&&h-r>o&&(d=i)!==e&&(l=t.touches[0].pageY,d.getElement().parentNode.insertBefore(this.placeholderElement,d.getElement().nextSibling),this.moveRow(d,!0)):s&&-h-a>n&&(d=s)!==e&&(l=t.touches[0].pageY,d.getElement().parentNode.insertBefore(this.placeholderElement,d.getElement()),this.moveRow(d,!1)),d&&(i=d.nextRow(),r=o,o=i?i.getHeight()/2:0,s=d.prevRow(),a=n,n=s?s.getHeight()/2:0))})),t.addEventListener("touchend",(e=>{this.checkTimeout&&clearTimeout(this.checkTimeout),this.moving&&(this.endMove(e),this.touchMove=!1)}))}_bindMouseMove(){this.table.rowManager.getDisplayRows().forEach((e=>{("row"===e.type||"group"===e.type)&&e.modules.moveRow&&e.modules.moveRow.mousemove&&e.getElement().addEventListener("mousemove",e.modules.moveRow.mousemove)}))}_unbindMouseMove(){this.table.rowManager.getDisplayRows().forEach((e=>{("row"===e.type||"group"===e.type)&&e.modules.moveRow&&e.modules.moveRow.mousemove&&e.getElement().removeEventListener("mousemove",e.modules.moveRow.mousemove)}))}startMove(e,t){var i=t.getElement();this.setStartPosition(e,t),this.moving=t,this.table.element.classList.add("tabulator-block-select"),this.placeholderElement.style.width=t.getWidth()+"px",this.placeholderElement.style.height=t.getHeight()+"px",this.connection?(this.table.element.classList.add("tabulator-movingrow-sending"),this.connectToTables(t)):(i.parentNode.insertBefore(this.placeholderElement,i),i.parentNode.removeChild(i)),this.hoverElement=i.cloneNode(!0),this.hoverElement.classList.add("tabulator-moving"),this.connection?(document.body.appendChild(this.hoverElement),this.hoverElement.style.left="0",this.hoverElement.style.top="0",this.hoverElement.style.width=this.table.element.clientWidth+"px",this.hoverElement.style.whiteSpace="nowrap",this.hoverElement.style.overflow="hidden",this.hoverElement.style.pointerEvents="none"):(this.table.rowManager.getTableElement().appendChild(this.hoverElement),this.hoverElement.style.left="0",this.hoverElement.style.top="0",this._bindMouseMove()),document.body.addEventListener("mousemove",this.moveHover),document.body.addEventListener("mouseup",this.endMove),this.dispatchExternal("rowMoving",t.getComponent()),this.moveHover(e)}setStartPosition(e,t){var 
i,s,o=this.touchMove?e.touches[0].pageX:e.pageX,n=this.touchMove?e.touches[0].pageY:e.pageY;i=t.getElement(),this.connection?(s=i.getBoundingClientRect(),this.startX=s.left-o+window.pageXOffset,this.startY=s.top-n+window.pageYOffset):this.startY=n-i.getBoundingClientRect().top}endMove(e){e&&1!==e.which&&!this.touchMove||(this._unbindMouseMove(),this.connection||(this.placeholderElement.parentNode.insertBefore(this.moving.getElement(),this.placeholderElement.nextSibling),this.placeholderElement.parentNode.removeChild(this.placeholderElement)),this.hoverElement.parentNode.removeChild(this.hoverElement),this.table.element.classList.remove("tabulator-block-select"),this.toRow?this.table.rowManager.moveRow(this.moving,this.toRow,this.toRowAfter):this.dispatchExternal("rowMoveCancelled",this.moving.getComponent()),this.moving=!1,this.toRow=!1,this.toRowAfter=!1,document.body.removeEventListener("mousemove",this.moveHover),document.body.removeEventListener("mouseup",this.endMove),this.connection&&(this.table.element.classList.remove("tabulator-movingrow-sending"),this.disconnectFromTables()))}moveRow(e,t){this.toRow=e,this.toRowAfter=t}moveHover(e){this.connection?this.moveHoverConnections.call(this,e):this.moveHoverTable.call(this,e)}moveHoverTable(e){var t=this.table.rowManager.getElement(),i=t.scrollTop,s=(this.touchMove?e.touches[0].pageY:e.pageY)-t.getBoundingClientRect().top+i;this.hoverElement.style.top=Math.min(s-this.startY,this.table.rowManager.element.scrollHeight-this.hoverElement.offsetHeight)+"px"}moveHoverConnections(e){this.hoverElement.style.left=this.startX+(this.touchMove?e.touches[0].pageX:e.pageX)+"px",this.hoverElement.style.top=this.startY+(this.touchMove?e.touches[0].pageY:e.pageY)+"px"}elementRowDrop(e,t,i){this.dispatchExternal("movableRowsElementDrop",e,t,!!i&&i.getComponent())}connectToTables(e){var t;this.connectionSelectorsTables&&(t=this.commsConnections(this.connectionSelectorsTables),this.dispatchExternal("movableRowsSendingStart",t),this.commsSend(this.connectionSelectorsTables,"moveRow","connect",{row:e})),this.connectionSelectorsElements&&(this.connectionElements=[],Array.isArray(this.connectionSelectorsElements)||(this.connectionSelectorsElements=[this.connectionSelectorsElements]),this.connectionSelectorsElements.forEach((e=>{"string"==typeof e?this.connectionElements=this.connectionElements.concat(Array.prototype.slice.call(document.querySelectorAll(e))):this.connectionElements.push(e)})),this.connectionElements.forEach((e=>{var t=t=>{this.elementRowDrop(t,e,this.moving)};e.addEventListener("mouseup",t),e.tabulatorElementDropEvent=t,e.classList.add("tabulator-movingrow-receiving")})))}disconnectFromTables(){var e;this.connectionSelectorsTables&&(e=this.commsConnections(this.connectionSelectorsTables),this.dispatchExternal("movableRowsSendingStop",e),this.commsSend(this.connectionSelectorsTables,"moveRow","disconnect")),this.connectionElements.forEach((e=>{e.classList.remove("tabulator-movingrow-receiving"),e.removeEventListener("mouseup",e.tabulatorElementDropEvent),delete e.tabulatorElementDropEvent}))}connect(e,t){return this.connectedTable?(console.warn("Move Row Error - Table cannot accept connection, already connected to 
table:",this.connectedTable),!1):(this.connectedTable=e,this.connectedRow=t,this.table.element.classList.add("tabulator-movingrow-receiving"),this.table.rowManager.getDisplayRows().forEach((e=>{"row"===e.type&&e.modules.moveRow&&e.modules.moveRow.mouseup&&e.getElement().addEventListener("mouseup",e.modules.moveRow.mouseup)})),this.tableRowDropEvent=this.tableRowDrop.bind(this),this.table.element.addEventListener("mouseup",this.tableRowDropEvent),this.dispatchExternal("movableRowsReceivingStart",t,e),!0)}disconnect(e){e===this.connectedTable?(this.connectedTable=!1,this.connectedRow=!1,this.table.element.classList.remove("tabulator-movingrow-receiving"),this.table.rowManager.getDisplayRows().forEach((e=>{"row"===e.type&&e.modules.moveRow&&e.modules.moveRow.mouseup&&e.getElement().removeEventListener("mouseup",e.modules.moveRow.mouseup)})),this.table.element.removeEventListener("mouseup",this.tableRowDropEvent),this.dispatchExternal("movableRowsReceivingStop",e)):console.warn("Move Row Error - trying to disconnect from non connected table")}dropComplete(e,t,i){var s=!1;if(i){switch(typeof this.table.options.movableRowsSender){case"string":s=Le.senders[this.table.options.movableRowsSender];break;case"function":s=this.table.options.movableRowsSender}s?s.call(this,this.moving?this.moving.getComponent():void 0,t?t.getComponent():void 0,e):this.table.options.movableRowsSender&&console.warn("Mover Row Error - no matching sender found:",this.table.options.movableRowsSender),this.dispatchExternal("movableRowsSent",this.moving.getComponent(),t?t.getComponent():void 0,e)}else this.dispatchExternal("movableRowsSentFailed",this.moving.getComponent(),t?t.getComponent():void 0,e);this.endMove()}tableRowDrop(e,t){var i=!1,s=!1;switch(e.stopImmediatePropagation(),typeof this.table.options.movableRowsReceiver){case"string":i=Le.receivers[this.table.options.movableRowsReceiver];break;case"function":i=this.table.options.movableRowsReceiver}i?s=i.call(this,this.connectedRow.getComponent(),t?t.getComponent():void 0,this.connectedTable):console.warn("Mover Row Error - no matching receiver found:",this.table.options.movableRowsReceiver),s?this.dispatchExternal("movableRowsReceived",this.connectedRow.getComponent(),t?t.getComponent():void 0,this.connectedTable):this.dispatchExternal("movableRowsReceivedFailed",this.connectedRow.getComponent(),t?t.getComponent():void 0,this.connectedTable),this.commsSend(this.connectedTable,"moveRow","dropcomplete",{row:t,success:s})}commsReceived(e,t,i){switch(t){case"connect":return this.connect(e,i.row);case"disconnect":return this.disconnect(e);case"dropcomplete":return this.dropComplete(e,i.row,i.success)}}}var Se={};class De extends M{static moduleName="mutator";static 
mutators=Se;constructor(e){super(e),this.allowedTypes=["","data","edit","clipboard"],this.enabled=!0,this.registerColumnOption("mutator"),this.registerColumnOption("mutatorParams"),this.registerColumnOption("mutatorData"),this.registerColumnOption("mutatorDataParams"),this.registerColumnOption("mutatorEdit"),this.registerColumnOption("mutatorEditParams"),this.registerColumnOption("mutatorClipboard"),this.registerColumnOption("mutatorClipboardParams"),this.registerColumnOption("mutateLink")}initialize(){this.subscribe("cell-value-changing",this.transformCell.bind(this)),this.subscribe("cell-value-changed",this.mutateLink.bind(this)),this.subscribe("column-layout",this.initializeColumn.bind(this)),this.subscribe("row-data-init-before",this.rowDataChanged.bind(this)),this.subscribe("row-data-changing",this.rowDataChanged.bind(this))}rowDataChanged(e,t,i){return this.transformRow(t,"data",i)}initializeColumn(e){var t=!1,i={};this.allowedTypes.forEach((s=>{var o,n="mutator"+(s.charAt(0).toUpperCase()+s.slice(1));e.definition[n]&&(o=this.lookupMutator(e.definition[n]))&&(t=!0,i[n]={mutator:o,params:e.definition[n+"Params"]||{}})})),t&&(e.modules.mutate=i)}lookupMutator(e){var t=!1;switch(typeof e){case"string":De.mutators[e]?t=De.mutators[e]:console.warn("Mutator Error - No such mutator found, ignoring: ",e);break;case"function":t=e}return t}transformRow(e,t,i){var s,o="mutator"+(t.charAt(0).toUpperCase()+t.slice(1));return this.enabled&&this.table.columnManager.traverse((n=>{var r,a,l;n.modules.mutate&&(r=n.modules.mutate[o]||n.modules.mutate.mutator||!1)&&(s=n.getFieldValue(void 0!==i?i:e),("data"==t&&!i||void 0!==s)&&(l=n.getComponent(),a="function"==typeof r.params?r.params(s,e,t,l):r.params,n.setFieldValue(e,r.mutator(s,e,t,a,l))))})),e}transformCell(e,t){if(e.column.modules.mutate){var i=e.column.modules.mutate.mutatorEdit||e.column.modules.mutate.mutator||!1,s={};if(i)return s=Object.assign(s,e.row.getData()),e.column.setFieldValue(s,t),i.mutator(t,s,"edit",i.params,e.getComponent())}return t}mutateLink(e){var t=e.column.definition.mutateLink;t&&(Array.isArray(t)||(t=[t]),t.forEach((t=>{var i=e.row.getCell(t);i&&i.setValue(i.getValue(),!0,!0)})))}enable(){this.enabled=!0}disable(){this.enabled=!1}}var ze={rows:function(e,t,i,s,o){var n=document.createElement("span"),r=document.createElement("span"),a=document.createElement("span"),l=document.createElement("span"),h=document.createElement("span"),d=document.createElement("span");return this.table.modules.localize.langBind("pagination|counter|showing",(e=>{r.innerHTML=e})),this.table.modules.localize.langBind("pagination|counter|of",(e=>{l.innerHTML=e})),this.table.modules.localize.langBind("pagination|counter|rows",(e=>{d.innerHTML=e})),s?(a.innerHTML=" "+t+"-"+Math.min(t+e-1,s)+" ",h.innerHTML=" "+s+" ",n.appendChild(r),n.appendChild(a),n.appendChild(l),n.appendChild(h),n.appendChild(d)):(a.innerHTML=" 0 ",n.appendChild(r),n.appendChild(a),n.appendChild(d)),n},pages:function(e,t,i,s,o){var n=document.createElement("span"),r=document.createElement("span"),a=document.createElement("span"),l=document.createElement("span"),h=document.createElement("span"),d=document.createElement("span");return this.table.modules.localize.langBind("pagination|counter|showing",(e=>{r.innerHTML=e})),a.innerHTML=" "+i+" ",this.table.modules.localize.langBind("pagination|counter|of",(e=>{l.innerHTML=e})),h.innerHTML=" "+o+" 
",this.table.modules.localize.langBind("pagination|counter|pages",(e=>{d.innerHTML=e})),n.appendChild(r),n.appendChild(a),n.appendChild(l),n.appendChild(h),n.appendChild(d),n}};class Pe extends M{static moduleName="page";static pageCounters=ze;constructor(e){super(e),this.mode="local",this.progressiveLoad=!1,this.element=null,this.pageCounterElement=null,this.pageCounter=null,this.size=0,this.page=1,this.count=5,this.max=1,this.remoteRowCountEstimate=null,this.initialLoad=!0,this.dataChanging=!1,this.pageSizes=[],this.registerTableOption("pagination",!1),this.registerTableOption("paginationMode","local"),this.registerTableOption("paginationSize",!1),this.registerTableOption("paginationInitialPage",1),this.registerTableOption("paginationCounter",!1),this.registerTableOption("paginationCounterElement",!1),this.registerTableOption("paginationButtonCount",5),this.registerTableOption("paginationSizeSelector",!1),this.registerTableOption("paginationElement",!1),this.registerTableOption("paginationAddRow","page"),this.registerTableOption("progressiveLoad",!1),this.registerTableOption("progressiveLoadDelay",0),this.registerTableOption("progressiveLoadScrollMargin",0),this.registerTableFunction("setMaxPage",this.setMaxPage.bind(this)),this.registerTableFunction("setPage",this.setPage.bind(this)),this.registerTableFunction("setPageToRow",this.userSetPageToRow.bind(this)),this.registerTableFunction("setPageSize",this.userSetPageSize.bind(this)),this.registerTableFunction("getPageSize",this.getPageSize.bind(this)),this.registerTableFunction("previousPage",this.previousPage.bind(this)),this.registerTableFunction("nextPage",this.nextPage.bind(this)),this.registerTableFunction("getPage",this.getPage.bind(this)),this.registerTableFunction("getPageMax",this.getPageMax.bind(this)),this.registerComponentFunction("row","pageTo",this.setPageToRow.bind(this))}initialize(){this.table.options.pagination?(this.subscribe("row-deleted",this.rowsUpdated.bind(this)),this.subscribe("row-added",this.rowsUpdated.bind(this)),this.subscribe("data-processed",this.initialLoadComplete.bind(this)),this.subscribe("table-built",this.calculatePageSizes.bind(this)),this.subscribe("footer-redraw",this.footerRedraw.bind(this)),"page"==this.table.options.paginationAddRow&&this.subscribe("row-adding-position",this.rowAddingPosition.bind(this)),"remote"===this.table.options.paginationMode&&(this.subscribe("data-params",this.remotePageParams.bind(this)),this.subscribe("data-loaded",this._parseRemoteData.bind(this))),this.table.options.progressiveLoad&&console.error("Progressive Load Error - Pagination and progressive load cannot be used at the same time"),this.registerDisplayHandler(this.restOnRenderBefore.bind(this),40),this.registerDisplayHandler(this.getRows.bind(this),50),this.createElements(),this.initializePageCounter(),this.initializePaginator()):this.table.options.progressiveLoad&&(this.subscribe("data-params",this.remotePageParams.bind(this)),this.subscribe("data-loaded",this._parseRemoteData.bind(this)),this.subscribe("table-built",this.calculatePageSizes.bind(this)),this.subscribe("data-processed",this.initialLoadComplete.bind(this)),this.initializeProgressive(this.table.options.progressiveLoad),"scroll"===this.table.options.progressiveLoad&&this.subscribe("scroll-vertical",this.scrollVertical.bind(this)))}rowAddingPosition(e,t){var i,s=this.table.rowManager,o=s.getDisplayRows();return 
t?o.length?i=o[0]:s.activeRows.length&&(i=s.activeRows[s.activeRows.length-1],t=!1):o.length&&(i=o[o.length-1],t=!(o.length{}))}restOnRenderBefore(e,t){return t||"local"===this.mode&&this.reset(),e}rowsUpdated(){this.refreshData(!0,"all")}createElements(){var e;this.element=document.createElement("span"),this.element.classList.add("tabulator-paginator"),this.pagesElement=document.createElement("span"),this.pagesElement.classList.add("tabulator-pages"),(e=document.createElement("button")).classList.add("tabulator-page"),e.setAttribute("type","button"),e.setAttribute("role","button"),e.setAttribute("aria-label",""),e.setAttribute("title",""),this.firstBut=e.cloneNode(!0),this.firstBut.setAttribute("data-page","first"),this.prevBut=e.cloneNode(!0),this.prevBut.setAttribute("data-page","prev"),this.nextBut=e.cloneNode(!0),this.nextBut.setAttribute("data-page","next"),this.lastBut=e.cloneNode(!0),this.lastBut.setAttribute("data-page","last"),this.table.options.paginationSizeSelector&&(this.pageSizeSelect=document.createElement("select"),this.pageSizeSelect.classList.add("tabulator-page-size"))}generatePageSizeSelectList(){var e=[];if(this.pageSizeSelect){if(Array.isArray(this.table.options.paginationSizeSelector))e=this.table.options.paginationSizeSelector,this.pageSizes=e,-1==this.pageSizes.indexOf(this.size)&&e.unshift(this.size);else if(-1==this.pageSizes.indexOf(this.size)){e=[];for(let t=1;t<5;t++)e.push(this.size*t);this.pageSizes=e}else e=this.pageSizes;for(;this.pageSizeSelect.firstChild;)this.pageSizeSelect.removeChild(this.pageSizeSelect.firstChild);e.forEach((e=>{var t=document.createElement("option");t.value=e,!0===e?this.langBind("pagination|all",(function(e){t.innerHTML=e})):t.innerHTML=e,this.pageSizeSelect.appendChild(t)})),this.pageSizeSelect.value=this.size}}initializePageCounter(){var e=this.table.options.paginationCounter,t=null;e&&((t="function"==typeof e?e:Pe.pageCounters[e])?(this.pageCounter=t,this.pageCounterElement=document.createElement("span"),this.pageCounterElement.classList.add("tabulator-page-counter")):console.warn("Pagination Error - No such page counter found: ",e))}initializePaginator(e){var 
t,i;e||(this.langBind("pagination|first",(e=>{this.firstBut.innerHTML=e})),this.langBind("pagination|first_title",(e=>{this.firstBut.setAttribute("aria-label",e),this.firstBut.setAttribute("title",e)})),this.langBind("pagination|prev",(e=>{this.prevBut.innerHTML=e})),this.langBind("pagination|prev_title",(e=>{this.prevBut.setAttribute("aria-label",e),this.prevBut.setAttribute("title",e)})),this.langBind("pagination|next",(e=>{this.nextBut.innerHTML=e})),this.langBind("pagination|next_title",(e=>{this.nextBut.setAttribute("aria-label",e),this.nextBut.setAttribute("title",e)})),this.langBind("pagination|last",(e=>{this.lastBut.innerHTML=e})),this.langBind("pagination|last_title",(e=>{this.lastBut.setAttribute("aria-label",e),this.lastBut.setAttribute("title",e)})),this.firstBut.addEventListener("click",(()=>{this.setPage(1)})),this.prevBut.addEventListener("click",(()=>{this.previousPage()})),this.nextBut.addEventListener("click",(()=>{this.nextPage()})),this.lastBut.addEventListener("click",(()=>{this.setPage(this.max)})),this.table.options.paginationElement&&(this.element=this.table.options.paginationElement),this.pageSizeSelect&&(t=document.createElement("label"),this.langBind("pagination|page_size",(e=>{this.pageSizeSelect.setAttribute("aria-label",e),this.pageSizeSelect.setAttribute("title",e),t.innerHTML=e})),this.element.appendChild(t),this.element.appendChild(this.pageSizeSelect),this.pageSizeSelect.addEventListener("change",(e=>{this.setPageSize("true"==this.pageSizeSelect.value||this.pageSizeSelect.value),this.setPage(1)}))),this.element.appendChild(this.firstBut),this.element.appendChild(this.prevBut),this.element.appendChild(this.pagesElement),this.element.appendChild(this.nextBut),this.element.appendChild(this.lastBut),this.table.options.paginationElement||(this.table.options.paginationCounter&&(this.table.options.paginationCounterElement?this.table.options.paginationCounterElement instanceof HTMLElement?this.table.options.paginationCounterElement.appendChild(this.pageCounterElement):"string"==typeof this.table.options.paginationCounterElement&&((i=document.querySelector(this.table.options.paginationCounterElement))?i.appendChild(this.pageCounterElement):console.warn("Pagination Error - Unable to find element matching paginationCounterElement selector:",this.table.options.paginationCounterElement)):this.footerAppend(this.pageCounterElement)),this.footerAppend(this.element)),this.page=this.table.options.paginationInitialPage,this.count=this.table.options.paginationButtonCount),this.mode=this.table.options.paginationMode}initializeProgressive(e){this.initializePaginator(!0),this.mode="progressive_"+e,this.progressiveLoad=!0}trackChanges(){this.dispatch("page-changed")}setMaxRows(e){this.max=e?!0===this.size?1:Math.ceil(e/this.size):1,this.page>this.max&&(this.page=this.max)}reset(e){this.initialLoad||("local"==this.mode||e)&&(this.page=1,this.trackChanges())}setMaxPage(e){e=parseInt(e),this.max=e||1,this.page>this.max&&(this.page=this.max,this.trigger())}setPage(e){switch(e){case"first":return this.setPage(1);case"prev":return this.previousPage();case"next":return this.nextPage();case"last":return this.setPage(this.max)}return(e=parseInt(e))>0&&e<=this.max||"local"!==this.mode?(this.page=e,this.trackChanges(),this.trigger()):(console.warn("Pagination Error - Requested page is out of range of 1 - "+this.max+":",e),Promise.reject())}setPageToRow(e){var t=this.displayRows(-1).indexOf(e);if(t>-1){var i=!0===this.size?1:Math.ceil((t+1)/this.size);return this.setPage(i)}return 
console.warn("Pagination Error - Requested row is not visible"),Promise.reject()}setPageSize(e){!0!==e&&(e=parseInt(e)),e>0&&(this.size=e,this.dispatchExternal("pageSizeChanged",e)),this.pageSizeSelect&&this.generatePageSizeSelectList(),this.trackChanges()}_setPageCounter(e,t,i){var s;if(this.pageCounter)switch("remote"===this.mode&&(t=this.size,i=(this.page-1)*this.size+1,e=this.remoteRowCountEstimate),typeof(s=this.pageCounter.call(this,t,i,this.page,e,this.max))){case"object":if(s instanceof Node){for(;this.pageCounterElement.firstChild;)this.pageCounterElement.removeChild(this.pageCounterElement.firstChild);this.pageCounterElement.appendChild(s)}else this.pageCounterElement.innerHTML="",null!=s&&console.warn("Page Counter Error - Page Counter has returned a type of object, the only valid page counter object return is an instance of Node, the page counter returned:",s);break;case"undefined":this.pageCounterElement.innerHTML="";break;default:this.pageCounterElement.innerHTML=s}}_setPageButtons(){let e=Math.floor((this.count-1)/2),t=Math.ceil((this.count-1)/2),i=this.max-this.page+e+10&&e<=this.max&&this.pagesElement.appendChild(this._generatePageButton(e));this.footerRedraw()}_generatePageButton(e){var t=document.createElement("button");return t.classList.add("tabulator-page"),e==this.page&&t.classList.add("active"),t.setAttribute("type","button"),t.setAttribute("role","button"),this.langBind("pagination|page_title",(i=>{t.setAttribute("aria-label",i+" "+e),t.setAttribute("title",i+" "+e)})),t.setAttribute("data-page",e),t.textContent=e,t.addEventListener("click",(t=>{this.setPage(e)})),t}previousPage(){return this.page>1?(this.page--,this.trackChanges(),this.trigger()):(console.warn("Pagination Error - Previous page would be less than page 1:",0),Promise.reject())}nextPage(){return this.page"row"===e.type));if("local"==this.mode){t=[],this.setMaxRows(e.length),!0===this.size?(i=0,s=e.length):s=(i=this.size*(this.page-1))+parseInt(this.size),this._setPageButtons();for(let r=i;r{this.dataChanging=!1}));case"progressive_load":case"progressive_scroll":return this.reloadData(null,!0);default:return console.warn("Pagination Error - no such pagination mode:",this.mode),Promise.reject()}}_parseRemoteData(e){var t;if(void 0===e.last_page&&console.warn("Remote Pagination Error - Server response missing '"+(this.options("dataReceiveParams").last_page||"last_page")+"' property"),e.data){if(this.max=parseInt(e.last_page)||1,this.remoteRowCountEstimate=void 0!==e.last_row?e.last_row:e.last_page*this.size-(this.page==e.last_page?this.size-e.data.length:0),this.progressiveLoad){switch(this.mode){case"progressive_load":1==this.page?this.table.rowManager.setData(e.data,!1,1==this.page):this.table.rowManager.addRows(e.data),this.page{this.nextPage()}),this.table.options.progressiveLoadDelay);break;case"progressive_scroll":e=1===this.page?e.data:this.table.rowManager.getData().concat(e.data),this.table.rowManager.setData(e,1!==this.page,1==this.page),t=this.table.options.progressiveLoadScrollMargin||2*this.table.rowManager.element.clientHeight,this.table.rowManager.element.scrollHeight<=this.table.rowManager.element.clientHeight+t&&this.page{this.nextPage()}))}return!1}this.dispatchExternal("pageLoaded",this.getPage())}else console.warn("Remote Pagination Error - Server response missing '"+(this.options("dataReceiveParams").data||"data")+"' property");return e.data}footerRedraw(){var 
e=this.table.footerManager.containerElement;Math.ceil(e.clientWidth)-e.scrollWidth<0?this.pagesElement.style.display="none":(this.pagesElement.style.display="",Math.ceil(e.clientWidth)-e.scrollWidth<0&&(this.pagesElement.style.display="none"))}}var Fe={local:function(e,t){var i=localStorage.getItem(e+"-"+t);return!!i&&JSON.parse(i)},cookie:function(e,t){var i,s,o=document.cookie,n=e+"-"+t,r=o.indexOf(n+"=");return r>-1&&((i=(o=o.slice(r)).indexOf(";"))>-1&&(o=o.slice(0,i)),s=o.replace(n+"=","")),!!s&&JSON.parse(s)}},He={local:function(e,t,i){localStorage.setItem(e+"-"+t,JSON.stringify(i))},cookie:function(e,t,i){var s=new Date;s.setDate(s.getDate()+1e4),document.cookie=e+"-"+t+"="+JSON.stringify(i)+"; expires="+s.toUTCString()}};class _e extends M{static moduleName="persistence";static moduleInitOrder=-10;static readers=Fe;static writers=He;constructor(e){super(e),this.mode="",this.id="",this.defWatcherBlock=!1,this.config={},this.readFunc=!1,this.writeFunc=!1,this.registerTableOption("persistence",!1),this.registerTableOption("persistenceID",""),this.registerTableOption("persistenceMode",!0),this.registerTableOption("persistenceReaderFunc",!1),this.registerTableOption("persistenceWriterFunc",!1)}localStorageTest(){var e="_tabulator_test";try{return window.localStorage.setItem(e,e),window.localStorage.removeItem(e),!0}catch(e){return!1}}initialize(){if(this.table.options.persistence){var e,t=this.table.options.persistenceMode,i=this.table.options.persistenceID;this.mode=!0!==t?t:this.localStorageTest()?"local":"cookie",this.table.options.persistenceReaderFunc?"function"==typeof this.table.options.persistenceReaderFunc?this.readFunc=this.table.options.persistenceReaderFunc:_e.readers[this.table.options.persistenceReaderFunc]?this.readFunc=_e.readers[this.table.options.persistenceReaderFunc]:console.warn("Persistence Read Error - invalid reader set",this.table.options.persistenceReaderFunc):_e.readers[this.mode]?this.readFunc=_e.readers[this.mode]:console.warn("Persistence Read Error - invalid reader set",this.mode),this.table.options.persistenceWriterFunc?"function"==typeof this.table.options.persistenceWriterFunc?this.writeFunc=this.table.options.persistenceWriterFunc:_e.writers[this.table.options.persistenceWriterFunc]?this.writeFunc=_e.writers[this.table.options.persistenceWriterFunc]:console.warn("Persistence Write Error - invalid reader set",this.table.options.persistenceWriterFunc):_e.writers[this.mode]?this.writeFunc=_e.writers[this.mode]:console.warn("Persistence Write Error - invalid writer set",this.mode),this.id="tabulator-"+(i||this.table.element.getAttribute("id")||""),this.config={sort:!0===this.table.options.persistence||this.table.options.persistence.sort,filter:!0===this.table.options.persistence||this.table.options.persistence.filter,headerFilter:!0===this.table.options.persistence||this.table.options.persistence.headerFilter,group:!0===this.table.options.persistence||this.table.options.persistence.group,page:!0===this.table.options.persistence||this.table.options.persistence.page,columns:!0===this.table.options.persistence?["title","width","visible"]:this.table.options.persistence.columns},this.config.page&&(e=this.retrieveData("page"))&&(void 0===e.paginationSize||!0!==this.config.page&&!this.config.page.size||(this.table.options.paginationSize=e.paginationSize),void 0===e.paginationInitialPage||!0!==this.config.page&&!this.config.page.page||(this.table.options.paginationInitialPage=e.paginationInitialPage)),this.config.group&&(e=this.retrieveData("group"))&&(void 
0===e.groupBy||!0!==this.config.group&&!this.config.group.groupBy||(this.table.options.groupBy=e.groupBy),void 0===e.groupStartOpen||!0!==this.config.group&&!this.config.group.groupStartOpen||(this.table.options.groupStartOpen=e.groupStartOpen),void 0===e.groupHeader||!0!==this.config.group&&!this.config.group.groupHeader||(this.table.options.groupHeader=e.groupHeader)),this.config.columns&&(this.table.options.columns=this.load("columns",this.table.options.columns),this.subscribe("column-init",this.initializeColumn.bind(this)),this.subscribe("column-show",this.save.bind(this,"columns")),this.subscribe("column-hide",this.save.bind(this,"columns")),this.subscribe("column-moved",this.save.bind(this,"columns"))),this.subscribe("table-built",this.tableBuilt.bind(this),0),this.subscribe("table-redraw",this.tableRedraw.bind(this)),this.subscribe("filter-changed",this.eventSave.bind(this,"filter")),this.subscribe("filter-changed",this.eventSave.bind(this,"headerFilter")),this.subscribe("sort-changed",this.eventSave.bind(this,"sort")),this.subscribe("group-changed",this.eventSave.bind(this,"group")),this.subscribe("page-changed",this.eventSave.bind(this,"page")),this.subscribe("column-resized",this.eventSave.bind(this,"columns")),this.subscribe("column-width",this.eventSave.bind(this,"columns")),this.subscribe("layout-refreshed",this.eventSave.bind(this,"columns"))}this.registerTableFunction("getColumnLayout",this.getColumnLayout.bind(this)),this.registerTableFunction("setColumnLayout",this.setColumnLayout.bind(this))}eventSave(e){this.config[e]&&this.save(e)}tableBuilt(){var e,t,i;this.config.sort&&!1==!(e=this.load("sort"))&&(this.table.options.initialSort=e),this.config.filter&&!1==!(t=this.load("filter"))&&(this.table.options.initialFilter=t),this.config.headerFilter&&!1==!(i=this.load("headerFilter"))&&(this.table.options.initialHeaderFilter=i)}tableRedraw(e){e&&this.config.columns&&this.save("columns")}getColumnLayout(){return this.parseColumns(this.table.columnManager.getColumns())}setColumnLayout(e){return this.table.columnManager.setColumns(this.mergeDefinition(this.table.options.columns,e,!0)),!0}initializeColumn(e){var t;this.config.columns&&(this.defWatcherBlock=!0,t=e.getDefinition(),(!0===this.config.columns?Object.keys(t):this.config.columns).forEach((e=>{var i=Object.getOwnPropertyDescriptor(t,e),s=t[e];i&&Object.defineProperty(t,e,{set:e=>{s=e,this.defWatcherBlock||this.save("columns"),i.set&&i.set(e)},get:()=>(i.get&&i.get(),s)})})),this.defWatcherBlock=!1)}load(e,t){var i=this.retrieveData(e);return t&&(i=i?this.mergeDefinition(t,i):t),i}retrieveData(e){return!!this.readFunc&&this.readFunc(this.id,e)}mergeDefinition(e,t,i){var s=[];return(t=t||[]).forEach(((t,o)=>{var n,r=this._findColumn(e,t);r&&(i?n=Object.keys(t):!0===this.config.columns||null==this.config.columns?(n=Object.keys(r)).push("width"):n=this.config.columns,n.forEach((e=>{"columns"!==e&&void 0!==t[e]&&(r[e]=t[e])})),r.columns&&(r.columns=this.mergeDefinition(r.columns,t.columns)),s.push(r))})),e.forEach(((e,i)=>{this._findColumn(t,e)||(s.length>i?s.splice(i,0,e):s.push(e))})),s}_findColumn(e,t){var i=t.columns?"group":t.field?"field":"object";return e.find((function(e){switch(i){case"group":return e.title===t.title&&e.columns.length===t.columns.length;case"field":return e.field===t.field;case"object":return e===t}}))}save(e){var 
t={};switch(e){case"columns":t=this.parseColumns(this.table.columnManager.getColumns());break;case"filter":t=this.table.modules.filter.getFilters();break;case"headerFilter":t=this.table.modules.filter.getHeaderFilters();break;case"sort":t=this.validateSorters(this.table.modules.sort.getSort());break;case"group":t=this.getGroupConfig();break;case"page":t=this.getPageConfig()}this.writeFunc&&this.writeFunc(this.id,e,t)}validateSorters(e){return e.forEach((function(e){e.column=e.field,delete e.field})),e}getGroupConfig(){var e={};return this.config.group&&((!0===this.config.group||this.config.group.groupBy)&&(e.groupBy=this.table.options.groupBy),(!0===this.config.group||this.config.group.groupStartOpen)&&(e.groupStartOpen=this.table.options.groupStartOpen),(!0===this.config.group||this.config.group.groupHeader)&&(e.groupHeader=this.table.options.groupHeader)),e}getPageConfig(){var e={};return this.config.page&&((!0===this.config.page||this.config.page.size)&&(e.paginationSize=this.table.modules.page.getPageSize()),(!0===this.config.page||this.config.page.page)&&(e.paginationInitialPage=this.table.modules.page.getPage())),e}parseColumns(e){var t=[],i=["headerContextMenu","headerMenu","contextMenu","clickMenu"];return e.forEach((e=>{var s,o={},n=e.getDefinition();e.isGroup?(o.title=n.title,o.columns=this.parseColumns(e.getColumns())):(o.field=e.getField(),!0===this.config.columns||null==this.config.columns?((s=Object.keys(n)).push("width"),s.push("visible")):s=this.config.columns,s.forEach((t=>{switch(t){case"width":o.width=e.getWidth();break;case"visible":o.visible=e.visible;break;default:"function"!=typeof n[t]&&-1===i.indexOf(t)&&(o[t]=n[t])}}))),t.push(o)})),t}}var Oe={format:{formatters:{responsiveCollapse:function(e,t,i){var s=document.createElement("div"),o=e.getRow()._row.modules.responsiveLayout;function n(e){var t=o.element;o.open=e,t&&(o.open?(s.classList.add("open"),t.style.display=""):(s.classList.remove("open"),t.style.display="none"))}return s.classList.add("tabulator-responsive-collapse-toggle"),s.innerHTML='\n \n \n\n\n\n \n',e.getElement().classList.add("tabulator-row-handle"),s.addEventListener("click",(function(t){t.stopImmediatePropagation(),n(!o.open),e.getTable().rowManager.adjustTableSize()})),n(o.open),s}}}};var Ae={format:{formatters:{rowSelection:function(e,t,i){var s=document.createElement("input"),o=!1;if(s.type="checkbox",s.setAttribute("aria-label","Select Row"),this.table.modExists("selectRow",!0))if(s.addEventListener("click",(e=>{e.stopPropagation()})),"function"==typeof e.getRow){var n=e.getRow();n instanceof m?(s.addEventListener("change",(e=>{"click"===this.table.options.selectableRowsRangeMode&&o?o=!1:n.toggleSelect()})),"click"===this.table.options.selectableRowsRangeMode&&s.addEventListener("click",(e=>{o=!0,this.table.modules.selectRow.handleComplexRowClick(n._row,e)})),s.checked=n.isSelected&&n.isSelected(),this.table.modules.selectRow.registerRowSelectCheckbox(n,s)):s=""}else s.addEventListener("change",(e=>{this.table.modules.selectRow.selectedRows.length?this.table.deselectRow():this.table.selectRow(t.rowRange)})),this.table.modules.selectRow.registerHeaderSelectCheckbox(s);return s}}}};class Be{constructor(e){return this._range=e,new Proxy(this,{get:function(e,t,i){return void 0!==e[t]?e[t]:e._range.table.componentFunctionBinder.handle("range",e._range,t)}})}getElement(){return this._range.element}getData(){return this._range.getData()}getCells(){return this._range.getCells(!0,!0)}getStructuredCells(){return 
this._range.getStructuredCells()}getRows(){return this._range.getRows().map((e=>e.getComponent()))}getColumns(){return this._range.getColumns().map((e=>e.getComponent()))}getBounds(){return this._range.getBounds()}getTopEdge(){return this._range.top}getBottomEdge(){return this._range.bottom}getLeftEdge(){return this._range.left}getRightEdge(){return this._range.right}setBounds(e,t){this._range.destroyedGuard("setBounds")&&this._range.setBounds(e?e._cell:e,t?t._cell:t)}setStartBound(e){this._range.destroyedGuard("setStartBound")&&(this._range.setEndBound(e?e._cell:e),this._range.rangeManager.layoutElement())}setEndBound(e){this._range.destroyedGuard("setEndBound")&&(this._range.setEndBound(e?e._cell:e),this._range.rangeManager.layoutElement())}clearValues(){this._range.destroyedGuard("clearValues")&&this._range.clearValues()}remove(){this._range.destroyedGuard("remove")&&this._range.destroy(!0)}}class Ve extends t{constructor(e,t,i,s){super(e),this.rangeManager=t,this.element=null,this.initialized=!1,this.initializing={start:!1,end:!1},this.destroyed=!1,this.top=0,this.bottom=0,this.left=0,this.right=0,this.table=e,this.start={row:0,col:0},this.end={row:0,col:0},this.rangeManager.rowHeader&&(this.left=1,this.right=1,this.start.col=1,this.end.col=1),this.initElement(),setTimeout((()=>{this.initBounds(i,s)}))}initElement(){this.element=document.createElement("div"),this.element.classList.add("tabulator-range")}initBounds(e,t){this._updateMinMax(),e&&this.setBounds(e,t||e)}setStart(e,t){this.start.row===e&&this.start.col===t||(this.start.row=e,this.start.col=t,this.initializing.start=!0,this._updateMinMax())}setEnd(e,t){this.end.row===e&&this.end.col===t||(this.end.row=e,this.end.col=t,this.initializing.end=!0,this._updateMinMax())}setBounds(e,t,i){e&&this.setStartBound(e),this.setEndBound(t||e),this.rangeManager.layoutElement(i)}setStartBound(e){var t,i;"column"===e.type?this.rangeManager.columnSelection&&this.setStart(0,e.getPosition()-1):(t=e.row.position-1,i=e.column.getPosition()-1,e.column===this.rangeManager.rowHeader?this.setStart(t,1):this.setStart(t,i))}setEndBound(e){var t,i,s,o=this._getTableRows().length;"column"===e.type?this.rangeManager.columnSelection&&("column"===this.rangeManager.selecting?this.setEnd(o-1,e.getPosition()-1):"cell"===this.rangeManager.selecting&&this.setEnd(0,e.getPosition()-1)):(t=e.row.position-1,i=e.column.getPosition()-1,s=e.column===this.rangeManager.rowHeader,"row"===this.rangeManager.selecting?this.setEnd(t,this._getTableColumns().length-1):"row"!==this.rangeManager.selecting&&s?this.setEnd(t,0):"column"===this.rangeManager.selecting?this.setEnd(o-1,i):this.setEnd(t,i))}_updateMinMax(){this.top=Math.min(this.start.row,this.end.row),this.bottom=Math.max(this.start.row,this.end.row),this.left=Math.min(this.start.col,this.end.col),this.right=Math.max(this.start.col,this.end.col),this.initialized?this.dispatchExternal("rangeChanged",this.getComponent()):this.initializing.start&&this.initializing.end&&(this.initialized=!0,this.dispatchExternal("rangeAdded",this.getComponent()))}_getTableColumns(){return this.table.columnManager.getVisibleColumnsByIndex()}_getTableRows(){return this.table.rowManager.getDisplayRows().filter((e=>"row"===e.type))}layout(){var 
e,t,i,s,o,n,r,a,l,h,d=this.table.rowManager.renderer.vDomTop,c=this.table.rowManager.renderer.vDomBottom,u=this.table.columnManager.renderer.leftCol,m=this.table.columnManager.renderer.rightCol;"virtual"===this.table.options.renderHorizontal&&this.rangeManager.rowHeader&&(m+=1),null==d&&(d=0),null==c&&(c=1/0),null==u&&(u=0),null==m&&(m=1/0),this.overlaps(u,d,m,c)&&(e=Math.max(this.top,d),t=Math.min(this.bottom,c),i=Math.max(this.left,u),s=Math.min(this.right,m),o=this.rangeManager.getCell(e,i),n=this.rangeManager.getCell(t,s),r=o.getElement(),a=n.getElement(),l=o.row.getElement(),h=n.row.getElement(),this.element.classList.add("tabulator-range-active"),this.table.rtl?(this.element.style.right=l.offsetWidth-r.offsetLeft-r.offsetWidth+"px",this.element.style.width=r.offsetLeft+r.offsetWidth-a.offsetLeft+"px"):(this.element.style.left=l.offsetLeft+r.offsetLeft+"px",this.element.style.width=a.offsetLeft+a.offsetWidth-r.offsetLeft+"px"),this.element.style.top=l.offsetTop+"px",this.element.style.height=h.offsetTop+h.offsetHeight-l.offsetTop+"px")}atTopLeft(e){return e.row.position-1===this.top&&e.column.getPosition()-1===this.left}atBottomRight(e){return e.row.position-1===this.bottom&&e.column.getPosition()-1===this.right}occupies(e){return this.occupiesRow(e.row)&&this.occupiesColumn(e.column)}occupiesRow(e){return this.top<=e.position-1&&e.position-1<=this.bottom}occupiesColumn(e){return this.left<=e.getPosition()-1&&e.getPosition()-1<=this.right}overlaps(e,t,i,s){return!(this.left>i||e>this.right||this.top>s||t>this.bottom)}getData(){var e=[],t=this.getRows(),i=this.getColumns();return t.forEach((t=>{var s=t.getData(),o={};i.forEach((e=>{o[e.field]=s[e.field]})),e.push(o)})),e}getCells(e,t){var i=[],s=this.getRows(),o=this.getColumns();return e?i=s.map((e=>{var i=[];return e.getCells().forEach((e=>{o.includes(e.column)&&i.push(t?e.getComponent():e)})),i})):s.forEach((e=>{e.getCells().forEach((e=>{o.includes(e.column)&&i.push(t?e.getComponent():e)}))})),i}getStructuredCells(){return this.getCells(!0,!0)}getRows(){return this._getTableRows().slice(this.top,this.bottom+1)}getColumns(){return this._getTableColumns().slice(this.left,this.right+1)}clearValues(){var e=this.getCells(),t=this.table.options.selectableRangeClearCellsValue;this.table.blockRedraw(),e.forEach((e=>{e.setValue(t)})),this.table.restoreRedraw()}getBounds(e){var t=this.getCells(!1,e),i={start:null,end:null};return t.length?(i.start=t[0],i.end=t[t.length-1]):console.warn("No bounds defined on range"),i}getComponent(){return this.component||(this.component=new Be(this)),this.component}destroy(e){this.destroyed=!0,this.element.remove(),e&&this.rangeManager.rangeRemoved(this),this.initialized&&this.dispatchExternal("rangeRemoved",this.getComponent())}destroyedGuard(e){return this.destroyed&&console.warn("You cannot call the "+e+" function on a destroyed range"),!this.destroyed}}var Ie={keybindings:{bindings:{rangeJumpUp:["ctrl + 38","meta + 38"],rangeJumpDown:["ctrl + 40","meta + 40"],rangeJumpLeft:["ctrl + 37","meta + 37"],rangeJumpRight:["ctrl + 39","meta + 39"],rangeExpandUp:"shift + 38",rangeExpandDown:"shift + 40",rangeExpandLeft:"shift + 37",rangeExpandRight:"shift + 39",rangeExpandJumpUp:["ctrl + shift + 38","meta + shift + 38"],rangeExpandJumpDown:["ctrl + shift + 40","meta + shift + 40"],rangeExpandJumpLeft:["ctrl + shift + 37","meta + shift + 37"],rangeExpandJumpRight:["ctrl + shift + 39","meta + shift + 
39"]},actions:{rangeJumpLeft:function(e){this.dispatch("keybinding-nav-range",e,"left",!0,!1)},rangeJumpRight:function(e){this.dispatch("keybinding-nav-range",e,"right",!0,!1)},rangeJumpUp:function(e){this.dispatch("keybinding-nav-range",e,"up",!0,!1)},rangeJumpDown:function(e){this.dispatch("keybinding-nav-range",e,"down",!0,!1)},rangeExpandLeft:function(e){this.dispatch("keybinding-nav-range",e,"left",!1,!0)},rangeExpandRight:function(e){this.dispatch("keybinding-nav-range",e,"right",!1,!0)},rangeExpandUp:function(e){this.dispatch("keybinding-nav-range",e,"up",!1,!0)},rangeExpandDown:function(e){this.dispatch("keybinding-nav-range",e,"down",!1,!0)},rangeExpandJumpLeft:function(e){this.dispatch("keybinding-nav-range",e,"left",!0,!0)},rangeExpandJumpRight:function(e){this.dispatch("keybinding-nav-range",e,"right",!0,!0)},rangeExpandJumpUp:function(e){this.dispatch("keybinding-nav-range",e,"up",!0,!0)},rangeExpandJumpDown:function(e){this.dispatch("keybinding-nav-range",e,"down",!0,!0)}}},clipboard:{pasteActions:{range:function(e){var t,i,s,o,n,r=[],a=this.table.modules.selectRange.activeRange,l=!1;return n=e.length,a&&(i=(t=a.getBounds()).start,t.start===t.end&&(l=!0),i&&(s=(r=this.table.rowManager.activeRows.slice()).indexOf(i.row),o=l?e.length:r.indexOf(t.end.row)-s+1,s>-1&&(this.table.blockRedraw(),(r=r.slice(s,s+o)).forEach(((t,i)=>{t.updateData(e[i%n])})),this.table.restoreRedraw()))),r}},pasteParsers:{range:function(e){var t,i,s,o,n,r=[],a=[],l=this.table.modules.selectRange.activeRange,h=!1;return!!(l&&(i=(t=l.getBounds()).start,t.start===t.end&&(h=!0),i&&((e=e.split("\n")).forEach((function(e){r.push(e.split("\t"))})),r.length&&(n=(o=this.table.columnManager.getVisibleColumnsByIndex()).indexOf(i.column))>-1)))&&(s=h?r[0].length:o.indexOf(t.end.column)-n+1,o=o.slice(n,n+s),r.forEach((e=>{var t={},i=e.length;o.forEach((function(s,o){t[s.field]=e[o%i]})),a.push(t)})),a)}}},export:{columnLookups:{range:function(){var e=this.modules.selectRange.selectedColumns();return this.columnManager.rowHeader&&e.unshift(this.columnManager.rowHeader),e}},rowLookups:{range:function(){return this.modules.selectRange.selectedRows()}}}};function Ne(e,t,i,s,o,n,r){var a=window.DateTime||luxon.DateTime,l=r.format||"dd/MM/yyyy HH:mm:ss",h=r.alignEmptyValues,d=0;if(void 0!==a){if(a.isDateTime(e)||(e="iso"===l?a.fromISO(String(e)):a.fromFormat(String(e),l)),a.isDateTime(t)||(t="iso"===l?a.fromISO(String(t)):a.fromFormat(String(t),l)),e.isValid){if(t.isValid)return e-t;d=1}else d=t.isValid?-1:0;return("top"===h&&"desc"===n||"bottom"===h&&"asc"===n)&&(d*=-1),d}console.error("Sort Error - 'datetime' sorter is dependant on luxon.js")}var We={number:function(e,t,i,s,o,n,r){var a=r.alignEmptyValues,l=r.decimalSeparator,h=r.thousandSeparator,d=0;if(e=String(e),t=String(t),h&&(e=e.split(h).join(""),t=t.split(h).join("")),l&&(e=e.split(l).join("."),t=t.split(l).join(".")),e=parseFloat(e),t=parseFloat(t),isNaN(e))d=isNaN(t)?0:-1;else{if(!isNaN(t))return e-t;d=1}return("top"===a&&"desc"===n||"bottom"===a&&"asc"===n)&&(d*=-1),d},string:function(e,t,i,s,o,n,r){var a,l=r.alignEmptyValues,h=0;if(e){if(t){switch(typeof r.locale){case"boolean":r.locale&&(a=this.langLocale());break;case"string":a=r.locale}return String(e).toLowerCase().localeCompare(String(t).toLowerCase(),a)}h=1}else h=t?-1:0;return("top"===l&&"desc"===n||"bottom"===l&&"asc"===n)&&(h*=-1),h},date:function(e,t,i,s,o,n,r){return r.format||(r.format="dd/MM/yyyy"),Ne.call(this,e,t,i,s,o,n,r)},time:function(e,t,i,s,o,n,r){return 
r.format||(r.format="HH:mm"),Ne.call(this,e,t,i,s,o,n,r)},datetime:Ne,boolean:function(e,t,i,s,o,n,r){return(!0===e||"true"===e||"True"===e||1===e?1:0)-(!0===t||"true"===t||"True"===t||1===t?1:0)},array:function(e,t,i,s,o,n,r){var a=r.type||"length",l=r.alignEmptyValues,h=0;function d(e){var t;switch(a){case"length":t=e.length;break;case"sum":t=e.reduce((function(e,t){return e+t}));break;case"max":t=Math.max.apply(null,e);break;case"min":t=Math.min.apply(null,e);break;case"avg":t=e.reduce((function(e,t){return e+t}))/e.length}return t}if(Array.isArray(e)){if(Array.isArray(t))return d(t)-d(e);h=1}else h=Array.isArray(t)?-1:0;return("top"===l&&"desc"===n||"bottom"===l&&"asc"===n)&&(h*=-1),h},exists:function(e,t,i,s,o,n,r){return(void 0===e?0:1)-(void 0===t?0:1)},alphanum:function(e,t,i,s,o,n,r){var a,l,h,d,c,u=0,m=/(\d+)|(\D+)/g,p=/\d/,g=r.alignEmptyValues,b=0;if(e||0===e){if(t||0===t){if(isFinite(e)&&isFinite(t))return e-t;if((a=String(e).toLowerCase())===(l=String(t).toLowerCase()))return 0;if(!p.test(a)||!p.test(l))return a>l?1:-1;for(a=a.match(m),l=l.match(m),c=a.length>l.length?l.length:a.length;ud?1:-1;return a.length>l.length}b=1}else b=t||0===t?-1:0;return("top"===g&&"desc"===n||"bottom"===g&&"asc"===n)&&(b*=-1),b}};class je extends M{static moduleName="sort";static sorters=We;constructor(e){super(e),this.sortList=[],this.changed=!1,this.registerTableOption("sortMode","local"),this.registerTableOption("initialSort",!1),this.registerTableOption("columnHeaderSortMulti",!0),this.registerTableOption("sortOrderReverse",!1),this.registerTableOption("headerSortElement","
    "),this.registerTableOption("headerSortClickElement","header"),this.registerColumnOption("sorter"),this.registerColumnOption("sorterParams"),this.registerColumnOption("headerSort",!0),this.registerColumnOption("headerSortStartingDir"),this.registerColumnOption("headerSortTristate")}initialize(){this.subscribe("column-layout",this.initializeColumn.bind(this)),this.subscribe("table-built",this.tableBuilt.bind(this)),this.registerDataHandler(this.sort.bind(this),20),this.registerTableFunction("setSort",this.userSetSort.bind(this)),this.registerTableFunction("getSorters",this.getSort.bind(this)),this.registerTableFunction("clearSort",this.clearSort.bind(this)),"remote"===this.table.options.sortMode&&this.subscribe("data-params",this.remoteSortParams.bind(this))}tableBuilt(){this.table.options.initialSort&&this.setSort(this.table.options.initialSort)}remoteSortParams(e,t,i,s){var o=this.getSort();return o.forEach((e=>{delete e.column})),s.sort=o,s}userSetSort(e,t){this.setSort(e,t),this.refreshSort()}clearSort(){this.clear(),this.refreshSort()}initializeColumn(e){var t,i,s=!1;switch(typeof e.definition.sorter){case"string":je.sorters[e.definition.sorter]?s=je.sorters[e.definition.sorter]:console.warn("Sort Error - No such sorter found: ",e.definition.sorter);break;case"function":s=e.definition.sorter}if(e.modules.sort={sorter:s,dir:"none",params:e.definition.sorterParams||{},startingDir:e.definition.headerSortStartingDir||"asc",tristate:e.definition.headerSortTristate},!1!==e.definition.headerSort){if((t=e.getElement()).classList.add("tabulator-sortable"),(i=document.createElement("div")).classList.add("tabulator-col-sorter"),"icon"===this.table.options.headerSortClickElement)i.classList.add("tabulator-col-sorter-element");else t.classList.add("tabulator-col-sorter-element");switch(this.table.options.headerSortElement){case"function":break;case"object":i.appendChild(this.table.options.headerSortElement);break;default:i.innerHTML=this.table.options.headerSortElement}e.titleHolderElement.appendChild(i),e.modules.sort.element=i,this.setColumnHeaderSortIcon(e,"none"),"icon"===this.table.options.headerSortClickElement&&i.addEventListener("mousedown",(e=>{e.stopPropagation()})),("icon"===this.table.options.headerSortClickElement?i:t).addEventListener("click",(t=>{var i="",s=[],o=!1;if(e.modules.sort){if(e.modules.sort.tristate)i="none"==e.modules.sort.dir?e.modules.sort.startingDir:e.modules.sort.dir==e.modules.sort.startingDir?"asc"==e.modules.sort.dir?"desc":"asc":"none";else switch(e.modules.sort.dir){case"asc":i="desc";break;case"desc":i="asc";break;default:i=e.modules.sort.startingDir}this.table.options.columnHeaderSortMulti&&(t.shiftKey||t.ctrlKey)?(o=(s=this.getSort()).findIndex((t=>t.field===e.getField())),o>-1?(s[o].dir=i,o=s.splice(o,1)[0],"none"!=i&&s.push(o)):"none"!=i&&s.push({column:e,dir:i}),this.setSort(s)):"none"==i?this.clear():this.setSort(e,i),this.refreshSort()}}))}}refreshSort(){"remote"===this.table.options.sortMode?this.reloadData(null,!1,!1):this.refreshData(!0)}hasChanged(){var e=this.changed;return this.changed=!1,e}getSort(){var e=[];return this.sortList.forEach((function(t){t.column&&e.push({column:t.column.getComponent(),field:t.column.getField(),dir:t.dir})})),e}setSort(e,t){var i=this,s=[];Array.isArray(e)||(e=[{column:e,dir:t}]),e.forEach((function(e){var t;(t=i.table.columnManager.findColumn(e.column))?(e.column=t,s.push(e),i.changed=!0):console.warn("Sort Warning - Sort field does not exist and is being ignored: 
",e.column)})),i.sortList=s,this.dispatch("sort-changed")}clear(){this.setSort([])}findSorter(e){var t,i=this.table.rowManager.activeRows[0],s="string";if(i&&(i=i.getData(),e.getField()))switch(typeof(t=e.getFieldValue(i))){case"undefined":s="string";break;case"boolean":s="boolean";break;default:isNaN(t)||""===t?t.match(/((^[0-9]+[a-z]+)|(^[a-z]+[0-9]+))+$/i)&&(s="alphanum"):s="number"}return je.sorters[s]}sort(e,t){var i=this,s=this.table.options.sortOrderReverse?i.sortList.slice().reverse():i.sortList,o=[],n=[];return this.subscribedExternal("dataSorting")&&this.dispatchExternal("dataSorting",i.getSort()),t||i.clearColumnHeaders(),"remote"!==this.table.options.sortMode?(s.forEach((function(e,s){var n;e.column&&((n=e.column.modules.sort)&&(n.sorter||(n.sorter=i.findSorter(e.column)),e.params="function"==typeof n.params?n.params(e.column.getComponent(),e.dir):n.params,o.push(e)),t||i.setColumnHeader(e.column,e.dir))})),o.length&&i._sortItems(e,o)):t||s.forEach((function(e,t){i.setColumnHeader(e.column,e.dir)})),this.subscribedExternal("dataSorted")&&(e.forEach((e=>{n.push(e.getComponent())})),this.dispatchExternal("dataSorted",i.getSort(),n)),e}clearColumnHeaders(){this.table.columnManager.getRealColumns().forEach((e=>{e.modules.sort&&(e.modules.sort.dir="none",e.getElement().setAttribute("aria-sort","none"),this.setColumnHeaderSortIcon(e,"none"))}))}setColumnHeader(e,t){e.modules.sort.dir=t,e.getElement().setAttribute("aria-sort","asc"===t?"ascending":"descending"),this.setColumnHeaderSortIcon(e,t)}setColumnHeaderSortIcon(e,t){var i,s=e.modules.sort.element;if(e.definition.headerSort&&"function"==typeof this.table.options.headerSortElement){for(;s.firstChild;)s.removeChild(s.firstChild);"object"==typeof(i=this.table.options.headerSortElement.call(this.table,e.getComponent(),t))?s.appendChild(i):s.innerHTML=i}}_sortItems(e,t){var i=t.length-1;e.sort(((e,s)=>{for(var o,n=i;n>=0;n--){let i=t[n];if(0!==(o=this._sortRow(e,s,i.column,i.dir,i.params)))break}return o}))}_sortRow(e,t,i,s,o){var n,r,a="asc"==s?e:t,l="asc"==s?t:e;return e=void 0!==(e=i.getFieldValue(a.getData()))?e:"",t=void 0!==(t=i.getFieldValue(l.getData()))?t:"",n=a.getComponent(),r=l.getComponent(),i.modules.sort.sorter.call(this,e,t,n,r,i.getComponent(),s,o)}}class Ge{constructor(e,t){this.columnCount=e,this.rowCount=t,this.columnString=[],this.columns=[],this.rows=[]}genColumns(e){var t=Math.max(this.columnCount,Math.max(...e.map((e=>e.length))));this.columnString=[],this.columns=[];for(let e=1;e<=t;e++)this.incrementChar(this.columnString.length-1),this.columns.push(this.columnString.join(""));return this.columns}genRows(e){var t=Math.max(this.rowCount,e.length);this.rows=[];for(let e=1;e<=t;e++)this.rows.push(e);return this.rows}incrementChar(e){let t=this.columnString[e];t?"Z"!==t?this.columnString[e]=String.fromCharCode(this.columnString[e].charCodeAt(0)+1):(this.columnString[e]="A",e?this.incrementChar(e-1):this.columnString.push("A")):this.columnString.push("A")}setRowCount(e){this.rowCount=e}setColumnCount(e){this.columnCount=e}}class Ue{constructor(e){return this._sheet=e,new Proxy(this,{get:function(e,t,i){return void 0!==e[t]?e[t]:e._sheet.table.componentFunctionBinder.handle("sheet",e._sheet,t)}})}getTitle(){return this._sheet.title}getKey(){return this._sheet.key}getDefinition(){return this._sheet.getDefinition()}getData(){return this._sheet.getData()}setData(e){return this._sheet.setData(e)}clear(){return this._sheet.clear()}remove(){return this._sheet.remove()}active(){return 
this._sheet.active()}setTitle(e){return this._sheet.setTitle(e)}setRows(e){return this._sheet.setRows(e)}setColumns(e){return this._sheet.setColumns(e)}}class Xe extends t{constructor(e,t){super(e.table),this.spreadsheetManager=e,this.definition=t,this.title=this.definition.title||"",this.key=this.definition.key||this.definition.title,this.rowCount=this.definition.rows,this.columnCount=this.definition.columns,this.data=this.definition.data||[],this.element=null,this.isActive=!1,this.grid=new Ge(this.columnCount,this.rowCount),this.defaultColumnDefinition={width:100,headerHozAlign:"center",headerSort:!1},this.columnDefinition=Object.assign(this.defaultColumnDefinition,this.options("spreadsheetColumnDefinition")),this.columnDefs=[],this.rowDefs=[],this.columnFields=[],this.columns=[],this.rows=[],this.scrollTop=null,this.scrollLeft=null,this.initialize(),this.dispatchExternal("sheetAdded",this.getComponent())}initialize(){this.initializeElement(),this.initializeColumns(),this.initializeRows()}reinitialize(){this.initializeColumns(),this.initializeRows()}initializeElement(){this.element=document.createElement("div"),this.element.classList.add("tabulator-spreadsheet-tab"),this.element.innerText=this.title,this.element.addEventListener("click",(()=>{this.spreadsheetManager.loadSheet(this)}))}initializeColumns(){this.grid.setColumnCount(this.columnCount),this.columnFields=this.grid.genColumns(this.data),this.columnDefs=[],this.columnFields.forEach((e=>{var t=Object.assign({},this.columnDefinition);t.field=e,t.title=e,this.columnDefs.push(t)}))}initializeRows(){var e;this.grid.setRowCount(this.rowCount),e=this.grid.genRows(this.data),this.rowDefs=[],e.forEach(((e,t)=>{var i={_id:e},s=this.data[t];s&&s.forEach(((e,t)=>{var s=this.columnFields[t];s&&(i[s]=e)})),this.rowDefs.push(i)}))}unload(){this.isActive=!1,this.scrollTop=this.table.rowManager.scrollTop,this.scrollLeft=this.table.rowManager.scrollLeft,this.data=this.getData(!0),this.element.classList.remove("tabulator-spreadsheet-tab-active")}load(){var e=!this.isActive;this.isActive=!0,this.table.blockRedraw(),this.table.setData([]),this.table.setColumns(this.columnDefs),this.table.setData(this.rowDefs),this.table.restoreRedraw(),e&&null!==this.scrollTop&&(this.table.rowManager.element.scrollLeft=this.scrollLeft,this.table.rowManager.element.scrollTop=this.scrollTop),this.element.classList.add("tabulator-spreadsheet-tab-active"),this.dispatchExternal("sheetLoaded",this.getComponent())}getComponent(){return new Ue(this)}getDefinition(){return{title:this.title,key:this.key,rows:this.rowCount,columns:this.columnCount,data:this.getData()}}getData(e){var t,i,s,o=[];return this.rowDefs.forEach((e=>{var t=[];this.columnFields.forEach((i=>{t.push(e[i])})),o.push(t)})),e||this.options("spreadsheetOutputFull")||(t=o.map((e=>e.findLastIndex((e=>void 
0!==e))+1)),i=Math.max(...t),s=t.findLastIndex((e=>e>0))+1,o=(o=o.slice(0,s)).map((e=>e.slice(0,i)))),o}setData(e){this.data=e,this.reinitialize(),this.dispatchExternal("sheetUpdated",this.getComponent()),this.isActive&&this.load()}clear(){this.setData([])}setTitle(e){this.title=e,this.element.innerText=e,this.dispatchExternal("sheetUpdated",this.getComponent())}setRows(e){this.rowCount=e,this.initializeRows(),this.dispatchExternal("sheetUpdated",this.getComponent()),this.isActive&&this.load()}setColumns(e){this.columnCount=e,this.reinitialize(),this.dispatchExternal("sheetUpdated",this.getComponent()),this.isActive&&this.load()}remove(){this.spreadsheetManager.removeSheet(this)}destroy(){this.element.parentNode&&this.element.parentNode.removeChild(this.element),this.dispatchExternal("sheetRemoved",this.getComponent())}active(){this.spreadsheetManager.loadSheet(this)}}var Je={integer:function(e,t,i){return""===t||null==t||(t=Number(t),!isNaN(t)&&isFinite(t)&&Math.floor(t)===t)},float:function(e,t,i){return""===t||null==t||(t=Number(t),!isNaN(t)&&isFinite(t)&&t%1!=0)},numeric:function(e,t,i){return""===t||null==t||!isNaN(t)},string:function(e,t,i){return""===t||null==t||isNaN(t)},alphanumeric:function(e,t,i){return""===t||null==t||new RegExp(/^[a-z0-9]+$/i).test(t)},max:function(e,t,i){return""===t||null==t||parseFloat(t)<=i},min:function(e,t,i){return""===t||null==t||parseFloat(t)>=i},starts:function(e,t,i){return""===t||null==t||String(t).toLowerCase().startsWith(String(i).toLowerCase())},ends:function(e,t,i){return""===t||null==t||String(t).toLowerCase().endsWith(String(i).toLowerCase())},minLength:function(e,t,i){return""===t||null==t||String(t).length>=i},maxLength:function(e,t,i){return""===t||null==t||String(t).length<=i},in:function(e,t,i){return""===t||null==t||("string"==typeof i&&(i=i.split("|")),i.indexOf(t)>-1)},regex:function(e,t,i){return""===t||null==t||new RegExp(i).test(t)},unique:function(e,t,i){if(""===t||null==t)return!0;var s=!0,o=e.getData(),n=e.getColumn()._getSelf();return this.table.rowManager.rows.forEach((function(e){var i=e.getData();i!==o&&t==n.getFieldValue(i)&&(s=!1)})),s},required:function(e,t,i){return""!==t&&null!=t}};class qe extends M{static moduleName="validate";static validators=Je;constructor(e){super(e),this.invalidCells=[],this.registerTableOption("validationMode","blocking"),this.registerColumnOption("validator"),this.registerTableFunction("getInvalidCells",this.getInvalidCells.bind(this)),this.registerTableFunction("clearCellValidation",this.userClearCellValidation.bind(this)),this.registerTableFunction("validate",this.userValidate.bind(this)),this.registerComponentFunction("cell","isValid",this.cellIsValid.bind(this)),this.registerComponentFunction("cell","clearValidation",this.clearValidation.bind(this)),this.registerComponentFunction("cell","validate",this.cellValidate.bind(this)),this.registerComponentFunction("column","validate",this.columnValidate.bind(this)),this.registerComponentFunction("row","validate",this.rowValidate.bind(this))}initialize(){this.subscribe("cell-delete",this.clearValidation.bind(this)),this.subscribe("column-layout",this.initializeColumnCheck.bind(this)),this.subscribe("edit-success",this.editValidate.bind(this)),this.subscribe("edit-editor-clear",this.editorClear.bind(this)),this.subscribe("edit-edited-clear",this.editedClear.bind(this))}editValidate(e,t,i){var 
s="manual"===this.table.options.validationMode||this.validate(e.column.modules.validate,e,t);return!0!==s&&setTimeout((()=>{e.getElement().classList.add("tabulator-validation-fail"),this.dispatchExternal("validationFailed",e.getComponent(),t,s)})),s}editorClear(e,t){t&&e.column.modules.validate&&this.cellValidate(e),e.getElement().classList.remove("tabulator-validation-fail")}editedClear(e){e.modules.validate&&(e.modules.validate.invalid=!1)}cellIsValid(e){return e.modules.validate&&e.modules.validate.invalid||!0}cellValidate(e){return this.validate(e.column.modules.validate,e,e.getValue())}columnValidate(e){var t=[];return e.cells.forEach((e=>{!0!==this.cellValidate(e)&&t.push(e.getComponent())})),!t.length||t}rowValidate(e){var t=[];return e.cells.forEach((e=>{!0!==this.cellValidate(e)&&t.push(e.getComponent())})),!t.length||t}userClearCellValidation(e){e||(e=this.getInvalidCells()),Array.isArray(e)||(e=[e]),e.forEach((e=>{this.clearValidation(e._getSelf())}))}userValidate(e){var t=[];return this.table.rowManager.rows.forEach((e=>{var i=(e=e.getComponent()).validate();!0!==i&&(t=t.concat(i))})),!t.length||t}initializeColumnCheck(e){void 0!==e.definition.validator&&this.initializeColumn(e)}initializeColumn(e){var t,i=this,s=[];e.definition.validator&&(Array.isArray(e.definition.validator)?e.definition.validator.forEach((e=>{(t=i._extractValidator(e))&&s.push(t)})):(t=this._extractValidator(e.definition.validator))&&s.push(t),e.modules.validate=!!s.length&&s)}_extractValidator(e){var t,i,s;switch(typeof e){case"string":return(s=e.indexOf(":"))>-1?(t=e.substring(0,s),i=e.substring(s+1)):t=e,this._buildValidator(t,i);case"function":return this._buildValidator(e);case"object":return this._buildValidator(e.type,e.parameters)}}_buildValidator(e,t){var i="function"==typeof e?e:qe.validators[e];return i?{type:"function"==typeof e?"function":e,func:i,params:t}:(console.warn("Validator Setup Error - No matching validator found:",e),!1)}validate(e,t,i){var s=this,o=[],n=this.invalidCells.indexOf(t);return e&&e.forEach((e=>{e.func.call(s,t.getComponent(),i,e.params)||o.push({type:e.type,parameters:e.params})})),t.modules.validate||(t.modules.validate={}),o.length?(t.modules.validate.invalid=o,"manual"!==this.table.options.validationMode&&t.getElement().classList.add("tabulator-validation-fail"),-1==n&&this.invalidCells.push(t)):(t.modules.validate.invalid=!1,t.getElement().classList.remove("tabulator-validation-fail"),n>-1&&this.invalidCells.splice(n,1)),!o.length||o}getInvalidCells(){var e=[];return this.invalidCells.forEach((t=>{e.push(t.getComponent())})),e}clearValidation(e){var t;e.modules.validate&&e.modules.validate.invalid&&(e.getElement().classList.remove("tabulator-validation-fail"),e.modules.validate.invalid=!1,(t=this.invalidCells.indexOf(e))>-1&&this.invalidCells.splice(t,1))}}var Ke=Object.freeze({__proto__:null,AccessorModule:B,AjaxModule:X,ClipboardModule:Y,ColumnCalcsModule:Z,DataTreeModule:class extends M{static 
moduleName="dataTree";constructor(e){super(e),this.indent=10,this.field="",this.collapseEl=null,this.expandEl=null,this.branchEl=null,this.elementField=!1,this.startOpen=function(){},this.registerTableOption("dataTree",!1),this.registerTableOption("dataTreeFilter",!0),this.registerTableOption("dataTreeSort",!0),this.registerTableOption("dataTreeElementColumn",!1),this.registerTableOption("dataTreeBranchElement",!0),this.registerTableOption("dataTreeChildIndent",9),this.registerTableOption("dataTreeChildField","_children"),this.registerTableOption("dataTreeCollapseElement",!1),this.registerTableOption("dataTreeExpandElement",!1),this.registerTableOption("dataTreeStartExpanded",!1),this.registerTableOption("dataTreeChildColumnCalcs",!1),this.registerTableOption("dataTreeSelectPropagate",!1),this.registerComponentFunction("row","treeCollapse",this.collapseRow.bind(this)),this.registerComponentFunction("row","treeExpand",this.expandRow.bind(this)),this.registerComponentFunction("row","treeToggle",this.toggleRow.bind(this)),this.registerComponentFunction("row","getTreeParent",this.getTreeParent.bind(this)),this.registerComponentFunction("row","getTreeChildren",this.getRowChildren.bind(this)),this.registerComponentFunction("row","addTreeChild",this.addTreeChildRow.bind(this)),this.registerComponentFunction("row","isTreeExpanded",this.isRowExpanded.bind(this))}initialize(){if(this.table.options.dataTree){var e=null,t=this.table.options;switch(this.field=t.dataTreeChildField,this.indent=t.dataTreeChildIndent,this.options("movableRows")&&console.warn("The movableRows option is not available with dataTree enabled, moving of child rows could result in unpredictable behavior"),t.dataTreeBranchElement?!0===t.dataTreeBranchElement?(this.branchEl=document.createElement("div"),this.branchEl.classList.add("tabulator-data-tree-branch")):"string"==typeof t.dataTreeBranchElement?((e=document.createElement("div")).innerHTML=t.dataTreeBranchElement,this.branchEl=e.firstChild):this.branchEl=t.dataTreeBranchElement:(this.branchEl=document.createElement("div"),this.branchEl.classList.add("tabulator-data-tree-branch-empty")),t.dataTreeCollapseElement?"string"==typeof t.dataTreeCollapseElement?((e=document.createElement("div")).innerHTML=t.dataTreeCollapseElement,this.collapseEl=e.firstChild):this.collapseEl=t.dataTreeCollapseElement:(this.collapseEl=document.createElement("div"),this.collapseEl.classList.add("tabulator-data-tree-control"),this.collapseEl.tabIndex=0,this.collapseEl.innerHTML="
    "),t.dataTreeExpandElement?"string"==typeof t.dataTreeExpandElement?((e=document.createElement("div")).innerHTML=t.dataTreeExpandElement,this.expandEl=e.firstChild):this.expandEl=t.dataTreeExpandElement:(this.expandEl=document.createElement("div"),this.expandEl.classList.add("tabulator-data-tree-control"),this.expandEl.tabIndex=0,this.expandEl.innerHTML="
    "),typeof t.dataTreeStartExpanded){case"boolean":this.startOpen=function(e,i){return t.dataTreeStartExpanded};break;case"function":this.startOpen=t.dataTreeStartExpanded;break;default:this.startOpen=function(e,i){return t.dataTreeStartExpanded[i]}}this.subscribe("row-init",this.initializeRow.bind(this)),this.subscribe("row-layout-after",this.layoutRow.bind(this)),this.subscribe("row-deleted",this.rowDelete.bind(this),0),this.subscribe("row-data-changed",this.rowDataChanged.bind(this),10),this.subscribe("cell-value-updated",this.cellValueChanged.bind(this)),this.subscribe("edit-cancelled",this.cellValueChanged.bind(this)),this.subscribe("column-moving-rows",this.columnMoving.bind(this)),this.subscribe("table-built",this.initializeElementField.bind(this)),this.subscribe("table-redrawing",this.tableRedrawing.bind(this)),this.registerDisplayHandler(this.getRows.bind(this),30)}}tableRedrawing(e){e&&this.table.rowManager.getRows().forEach((e=>{this.reinitializeRowChildren(e)}))}initializeElementField(){var e=this.table.columnManager.getFirstVisibleColumn();this.elementField=this.table.options.dataTreeElementColumn||!!e&&e.field}getRowChildren(e){return this.getTreeChildren(e,!0)}columnMoving(){var e=[];return this.table.rowManager.rows.forEach((t=>{e=e.concat(this.getTreeChildren(t,!1,!0))})),e}rowDataChanged(e,t,i){this.redrawNeeded(i)&&(this.initializeRow(e),t&&(this.layoutRow(e),this.refreshData(!0)))}cellValueChanged(e){e.column.getField()===this.elementField&&this.layoutRow(e.row)}initializeRow(e){var t=e.getData()[this.field],i=Array.isArray(t),s=i||!i&&"object"==typeof t&&null!==t;!s&&e.modules.dataTree&&e.modules.dataTree.branchEl&&e.modules.dataTree.branchEl.parentNode.removeChild(e.modules.dataTree.branchEl),!s&&e.modules.dataTree&&e.modules.dataTree.controlEl&&e.modules.dataTree.controlEl.parentNode.removeChild(e.modules.dataTree.controlEl),e.modules.dataTree={index:e.modules.dataTree?e.modules.dataTree.index:0,open:!!s&&(e.modules.dataTree?e.modules.dataTree.open:this.startOpen(e.getComponent(),0)),controlEl:!(!e.modules.dataTree||!s)&&e.modules.dataTree.controlEl,branchEl:!(!e.modules.dataTree||!s)&&e.modules.dataTree.branchEl,parent:!!e.modules.dataTree&&e.modules.dataTree.parent,children:s}}reinitializeRowChildren(e){this.getTreeChildren(e,!1,!0).forEach((function(e){e.reinitialize(!0)}))}layoutRow(e){var t=(this.elementField?e.getCell(this.elementField):e.getCells()[0]).getElement(),i=e.modules.dataTree;i.branchEl&&(i.branchEl.parentNode&&i.branchEl.parentNode.removeChild(i.branchEl),i.branchEl=!1),i.controlEl&&(i.controlEl.parentNode&&i.controlEl.parentNode.removeChild(i.controlEl),i.controlEl=!1),this.generateControlElement(e,t),e.getElement().classList.add("tabulator-tree-level-"+i.index),i.index&&(this.branchEl?(i.branchEl=this.branchEl.cloneNode(!0),t.insertBefore(i.branchEl,t.firstChild),this.table.rtl?i.branchEl.style.marginRight=(i.branchEl.offsetWidth+i.branchEl.style.marginLeft)*(i.index-1)+i.index*this.indent+"px":i.branchEl.style.marginLeft=(i.branchEl.offsetWidth+i.branchEl.style.marginRight)*(i.index-1)+i.index*this.indent+"px"):this.table.rtl?t.style.paddingRight=parseInt(window.getComputedStyle(t,null).getPropertyValue("padding-right"))+i.index*this.indent+"px":t.style.paddingLeft=parseInt(window.getComputedStyle(t,null).getPropertyValue("padding-left"))+i.index*this.indent+"px")}generateControlElement(e,t){var 
i=e.modules.dataTree,s=i.controlEl;t=t||e.getCells()[0].getElement(),!1!==i.children&&(i.open?(i.controlEl=this.collapseEl.cloneNode(!0),i.controlEl.addEventListener("click",(t=>{t.stopPropagation(),this.collapseRow(e)}))):(i.controlEl=this.expandEl.cloneNode(!0),i.controlEl.addEventListener("click",(t=>{t.stopPropagation(),this.expandRow(e)}))),i.controlEl.addEventListener("mousedown",(e=>{e.stopPropagation()})),s&&s.parentNode===t?s.parentNode.replaceChild(i.controlEl,s):t.insertBefore(i.controlEl,t.firstChild))}getRows(e){var t=[];return e.forEach(((e,i)=>{var s;t.push(e),e instanceof p&&(e.create(),(s=e.modules.dataTree).index||!1===s.children||this.getChildren(e,!1,!0).forEach((e=>{e.create(),t.push(e)})))})),t}getChildren(e,t,i){var s=e.modules.dataTree,o=[],n=[];return!1!==s.children&&(s.open||t)&&(Array.isArray(s.children)||(s.children=this.generateChildren(e)),o=this.table.modExists("filter")&&this.table.options.dataTreeFilter?this.table.modules.filter.filter(s.children):s.children,this.table.modExists("sort")&&this.table.options.dataTreeSort&&this.table.modules.sort.sort(o,i),o.forEach((e=>{n.push(e),this.getChildren(e,!1,!0).forEach((e=>{n.push(e)}))}))),n}generateChildren(e){var t=[],i=e.getData()[this.field];return Array.isArray(i)||(i=[i]),i.forEach((i=>{var s=new p(i||{},this.table.rowManager);s.create(),s.modules.dataTree.index=e.modules.dataTree.index+1,s.modules.dataTree.parent=e,s.modules.dataTree.children&&(s.modules.dataTree.open=this.startOpen(s.getComponent(),s.modules.dataTree.index)),t.push(s)})),t}expandRow(e,t){var i=e.modules.dataTree;!1!==i.children&&(i.open=!0,e.reinitialize(),this.refreshData(!0),this.dispatchExternal("dataTreeRowExpanded",e.getComponent(),e.modules.dataTree.index))}collapseRow(e){var t=e.modules.dataTree;!1!==t.children&&(t.open=!1,e.reinitialize(),this.refreshData(!0),this.dispatchExternal("dataTreeRowCollapsed",e.getComponent(),e.modules.dataTree.index))}toggleRow(e){var t=e.modules.dataTree;!1!==t.children&&(t.open?this.collapseRow(e):this.expandRow(e))}isRowExpanded(e){return e.modules.dataTree.open}getTreeParent(e){return!!e.modules.dataTree.parent&&e.modules.dataTree.parent.getComponent()}getTreeParentRoot(e){return e.modules.dataTree&&e.modules.dataTree.parent?this.getTreeParentRoot(e.modules.dataTree.parent):e}getFilteredTreeChildren(e){var t=e.modules.dataTree,i=[];return t.children&&(Array.isArray(t.children)||(t.children=this.generateChildren(e)),(this.table.modExists("filter")&&this.table.options.dataTreeFilter?this.table.modules.filter.filter(t.children):t.children).forEach((e=>{e instanceof p&&i.push(e)}))),i}rowDelete(e){var t,i=e.modules.dataTree.parent;i&&(!1!==(t=this.findChildIndex(e,i))&&i.data[this.field].splice(t,1),i.data[this.field].length||delete i.data[this.field],this.initializeRow(i),this.layoutRow(i)),this.refreshData(!0)}addTreeChildRow(e,t,i,s){var o=!1;"string"==typeof t&&(t=JSON.parse(t)),Array.isArray(e.data[this.field])||(e.data[this.field]=[],e.modules.dataTree.open=this.startOpen(e.getComponent(),e.modules.dataTree.index)),void 0!==s&&!1!==(o=this.findChildIndex(s,e))&&e.data[this.field].splice(i?o:o+1,0,t),!1===o&&(i?e.data[this.field].unshift(t):e.data[this.field].push(t)),this.initializeRow(e),this.layoutRow(e),this.refreshData(!0)}findChildIndex(e,t){var i=!1;return"object"==typeof e?e instanceof p?i=e.data:e instanceof m?i=e._getSelf().data:"undefined"!=typeof HTMLElement&&e instanceof HTMLElement?t.modules.dataTree&&(i=t.modules.dataTree.children.find((t=>t instanceof 
p&&t.element===e)))&&(i=i.data):null===e&&(i=!1):i=void 0!==e&&t.data[this.field].find((t=>t.data[this.table.options.index]==e)),i&&(Array.isArray(t.data[this.field])&&(i=t.data[this.field].indexOf(i)),-1==i&&(i=!1)),i}getTreeChildren(e,t,i){var s=e.modules.dataTree,o=[];return s&&s.children&&(Array.isArray(s.children)||(s.children=this.generateChildren(e)),s.children.forEach((e=>{e instanceof p&&(o.push(t?e.getComponent():e),i&&this.getTreeChildren(e,t,i).forEach((e=>{o.push(e)})))}))),o}getChildField(){return this.field}redrawNeeded(e){return!!this.field&&void 0!==e[this.field]||!!this.elementField&&void 0!==e[this.elementField]}},DownloadModule:te,EditModule:ne,ExportModule:de,FilterModule:ue,FormatModule:pe,FrozenColumnsModule:class extends M{static moduleName="frozenColumns";constructor(e){super(e),this.leftColumns=[],this.rightColumns=[],this.initializationMode="left",this.active=!1,this.blocked=!0,this.registerColumnOption("frozen")}reset(){this.initializationMode="left",this.leftColumns=[],this.rightColumns=[],this.active=!1}initialize(){this.subscribe("cell-layout",this.layoutCell.bind(this)),this.subscribe("column-init",this.initializeColumn.bind(this)),this.subscribe("column-width",this.layout.bind(this)),this.subscribe("row-layout-after",this.layoutRow.bind(this)),this.subscribe("table-layout",this.layout.bind(this)),this.subscribe("columns-loading",this.reset.bind(this)),this.subscribe("column-add",this.reinitializeColumns.bind(this)),this.subscribe("column-deleted",this.reinitializeColumns.bind(this)),this.subscribe("column-hide",this.reinitializeColumns.bind(this)),this.subscribe("column-show",this.reinitializeColumns.bind(this)),this.subscribe("columns-loaded",this.reinitializeColumns.bind(this)),this.subscribe("table-redraw",this.layout.bind(this)),this.subscribe("layout-refreshing",this.blockLayout.bind(this)),this.subscribe("layout-refreshed",this.unblockLayout.bind(this)),this.subscribe("scrollbar-vertical",this.adjustForScrollbar.bind(this))}blockLayout(){this.blocked=!0}unblockLayout(){this.blocked=!1}layoutCell(e){this.layoutElement(e.element,e.column)}reinitializeColumns(){this.reset(),this.table.columnManager.columnsByIndex.forEach((e=>{this.initializeColumn(e)})),this.layout()}initializeColumn(e){var t={margin:0,edge:!1};e.isGroup||(this.frozenCheck(e)?(t.position=this.initializationMode,"left"==this.initializationMode?this.leftColumns.push(e):this.rightColumns.unshift(e),this.active=!0,e.modules.frozen=t):this.initializationMode="right")}frozenCheck(e){return e.parent.isGroup&&e.definition.frozen&&console.warn("Frozen Column Error - Parent column group must be frozen, not individual columns or sub column groups"),e.parent.isGroup?this.frozenCheck(e.parent):e.definition.frozen}layoutCalcRows(){this.table.modExists("columnCalcs")&&(this.table.modules.columnCalcs.topInitialized&&this.table.modules.columnCalcs.topRow&&this.layoutRow(this.table.modules.columnCalcs.topRow),this.table.modules.columnCalcs.botInitialized&&this.table.modules.columnCalcs.botRow&&this.layoutRow(this.table.modules.columnCalcs.botRow),this.table.modExists("groupRows")&&this.layoutGroupCalcs(this.table.modules.groupRows.getGroups()))}layoutGroupCalcs(e){e.forEach((e=>{e.calcs.top&&this.layoutRow(e.calcs.top),e.calcs.bottom&&this.layoutRow(e.calcs.bottom),e.groupList&&e.groupList.length&&this.layoutGroupCalcs(e.groupList)}))}layoutColumnPosition(e){var 
t=[],i=0,s=0;this.leftColumns.forEach(((s,o)=>{if(s.modules.frozen.marginValue=i,s.modules.frozen.margin=s.modules.frozen.marginValue+"px",s.visible&&(i+=s.getWidth()),o==this.leftColumns.length-1?s.modules.frozen.edge=!0:s.modules.frozen.edge=!1,s.parent.isGroup){var n=this.getColGroupParentElement(s);t.includes(n)||(this.layoutElement(n,s),t.push(n)),n.classList.toggle("tabulator-frozen-left",s.modules.frozen.edge&&"left"===s.modules.frozen.position),n.classList.toggle("tabulator-frozen-right",s.modules.frozen.edge&&"right"===s.modules.frozen.position)}else this.layoutElement(s.getElement(),s);e&&s.cells.forEach((e=>{this.layoutElement(e.getElement(!0),s)}))})),this.rightColumns.forEach(((t,i)=>{t.modules.frozen.marginValue=s,t.modules.frozen.margin=t.modules.frozen.marginValue+"px",t.visible&&(s+=t.getWidth()),i==this.rightColumns.length-1?t.modules.frozen.edge=!0:t.modules.frozen.edge=!1,t.parent.isGroup?this.layoutElement(this.getColGroupParentElement(t),t):this.layoutElement(t.getElement(),t),e&&t.cells.forEach((e=>{this.layoutElement(e.getElement(!0),t)}))}))}getColGroupParentElement(e){return e.parent.isGroup?this.getColGroupParentElement(e.parent):e.getElement()}layout(){this.active&&!this.blocked&&(this.layoutColumnPosition(),this.reinitializeRows(),this.layoutCalcRows())}reinitializeRows(){var e=this.table.rowManager.getVisibleRows(!0);this.table.rowManager.getRows().filter((t=>!e.includes(t))).forEach((e=>{e.deinitialize()})),e.forEach((e=>{"row"===e.type&&this.layoutRow(e)}))}layoutRow(e){"fitDataFill"===this.table.options.layout&&this.rightColumns.length&&(this.table.rowManager.getTableElement().style.minWidth="calc(100% - "+this.rightMargin+")"),this.leftColumns.forEach((t=>{var i=e.getCell(t);i&&this.layoutElement(i.getElement(!0),t)})),this.rightColumns.forEach((t=>{var i=e.getCell(t);i&&this.layoutElement(i.getElement(!0),t)}))}layoutElement(e,t){var i;t.modules.frozen&&e&&(e.style.position="sticky",i=this.table.rtl?"left"===t.modules.frozen.position?"right":"left":t.modules.frozen.position,e.style[i]=t.modules.frozen.margin,e.classList.add("tabulator-frozen"),e.classList.toggle("tabulator-frozen-left",t.modules.frozen.edge&&"left"===t.modules.frozen.position),e.classList.toggle("tabulator-frozen-right",t.modules.frozen.edge&&"right"===t.modules.frozen.position))}adjustForScrollbar(e){this.rightColumns.length&&(this.table.columnManager.getContentsElement().style.width="calc(100% - "+e+"px)")}getFrozenColumns(){return this.leftColumns.concat(this.rightColumns)}_calcSpace(e,t){var i=0;for(let s=0;s{this.initializeRow(e)}))}initializeRow(e){var t=this.table.options.frozenRows,i=typeof t;"number"===i?e.getPosition()&&e.getPosition()+this.rows.length<=t&&this.freezeRow(e):"function"===i?t.call(this.table,e.getComponent())&&this.freezeRow(e):Array.isArray(t)&&t.includes(e.data[this.options("frozenRowsField")])&&this.freezeRow(e)}isRowFrozen(e){return this.rows.indexOf(e)>-1}isFrozen(){return!!this.rows.length}visibleRows(e,t){return this.rows.forEach((e=>{t.push(e)})),t}getRows(e){var t=e.slice(0);return this.rows.forEach((function(e){var i=t.indexOf(e);i>-1&&t.splice(i,1)})),t}freezeRow(e){e.modules.frozen?console.warn("Freeze Error - Row is already 
frozen"):(e.modules.frozen=!0,this.topElement.appendChild(e.getElement()),e.initialize(),e.normalizeHeight(),this.rows.push(e),this.refreshData(!1,"display"),this.table.rowManager.adjustTableSize(),this.styleRows())}unfreezeRow(e){e.modules.frozen?(e.modules.frozen=!1,this.detachRow(e),this.table.rowManager.adjustTableSize(),this.refreshData(!1,"display"),this.rows.length&&this.styleRows()):console.warn("Freeze Error - Row is already unfrozen")}detachRow(e){var t=this.rows.indexOf(e);if(t>-1){var i=e.getElement();i.parentNode&&i.parentNode.removeChild(i),this.rows.splice(t,1)}}styleRows(e){this.rows.forEach(((e,t)=>{this.table.rowManager.styleRow(e,t)}))}},GroupRowsModule:class extends M{static moduleName="groupRows";constructor(e){super(e),this.groupIDLookups=!1,this.startOpen=[function(){return!1}],this.headerGenerator=[function(){return""}],this.groupList=[],this.allowedValues=!1,this.groups={},this.displayHandler=this.getRows.bind(this),this.blockRedraw=!1,this.registerTableOption("groupBy",!1),this.registerTableOption("groupStartOpen",!0),this.registerTableOption("groupValues",!1),this.registerTableOption("groupUpdateOnCellEdit",!1),this.registerTableOption("groupHeader",!1),this.registerTableOption("groupHeaderPrint",null),this.registerTableOption("groupHeaderClipboard",null),this.registerTableOption("groupHeaderHtmlOutput",null),this.registerTableOption("groupHeaderDownload",null),this.registerTableOption("groupToggleElement","arrow"),this.registerTableOption("groupClosedShowCalcs",!1),this.registerTableFunction("setGroupBy",this.setGroupBy.bind(this)),this.registerTableFunction("setGroupValues",this.setGroupValues.bind(this)),this.registerTableFunction("setGroupStartOpen",this.setGroupStartOpen.bind(this)),this.registerTableFunction("setGroupHeader",this.setGroupHeader.bind(this)),this.registerTableFunction("getGroups",this.userGetGroups.bind(this)),this.registerTableFunction("getGroupedData",this.userGetGroupedData.bind(this)),this.registerComponentFunction("row","getGroup",this.rowGetGroup.bind(this))}initialize(){this.subscribe("table-destroy",this._blockRedrawing.bind(this)),this.subscribe("rows-wipe",this._blockRedrawing.bind(this)),this.subscribe("rows-wiped",this._restore_redrawing.bind(this)),this.table.options.groupBy&&(this.table.options.groupUpdateOnCellEdit&&(this.subscribe("cell-value-updated",this.cellUpdated.bind(this)),this.subscribe("row-data-changed",this.reassignRowToGroup.bind(this),0)),this.subscribe("table-built",this.configureGroupSetup.bind(this)),this.subscribe("row-deleting",this.rowDeleting.bind(this)),this.subscribe("row-deleted",this.rowsUpdated.bind(this)),this.subscribe("scroll-horizontal",this.scrollHeaders.bind(this)),this.subscribe("rows-wipe",this.wipe.bind(this)),this.subscribe("rows-added",this.rowsUpdated.bind(this)),this.subscribe("row-moving",this.rowMoving.bind(this)),this.subscribe("row-adding-index",this.rowAddingIndex.bind(this)),this.subscribe("rows-sample",this.rowSample.bind(this)),this.subscribe("render-virtual-fill",this.virtualRenderFill.bind(this)),this.registerDisplayHandler(this.displayHandler,20),this.initialized=!0)}_blockRedrawing(){this.blockRedraw=!0}_restore_redrawing(){this.blockRedraw=!1}configureGroupSetup(){if(this.table.options.groupBy){var e=this.table.options.groupBy,t=this.table.options.groupStartOpen,i=this.table.options.groupHeader;if(this.allowedValues=this.table.options.groupValues,Array.isArray(e)&&Array.isArray(i)&&e.length>i.length&&console.warn("Error creating group headers, groupHeader array is shorter than 
groupBy array"),this.headerGenerator=[function(){return""}],this.startOpen=[function(){return!1}],this.langBind("groups|item",((e,t)=>{this.headerGenerator[0]=(i,s,o)=>(void 0===i?"":i)+"("+s+" "+(1===s?e:t.groups.items)+")"})),this.groupIDLookups=[],e)this.table.modExists("columnCalcs")&&"table"!=this.table.options.columnCalcs&&"both"!=this.table.options.columnCalcs&&this.table.modules.columnCalcs.removeCalcs();else if(this.table.modExists("columnCalcs")&&"group"!=this.table.options.columnCalcs)this.table.columnManager.getRealColumns().forEach((e=>{e.definition.topCalc&&this.table.modules.columnCalcs.initializeTopRow(),e.definition.bottomCalc&&this.table.modules.columnCalcs.initializeBottomRow()}));Array.isArray(e)||(e=[e]),e.forEach(((e,t)=>{var i,s;i="function"==typeof e?e:(s=this.table.columnManager.getColumnByField(e))?function(e){return s.getFieldValue(e)}:function(t){return t[e]},this.groupIDLookups.push({field:"function"!=typeof e&&e,func:i,values:!!this.allowedValues&&this.allowedValues[t]})})),t&&(Array.isArray(t)||(t=[t]),t.forEach((e=>{})),this.startOpen=t),i&&(this.headerGenerator=Array.isArray(i)?i:[i])}else this.groupList=[],this.groups={}}rowSample(e,t){if(this.table.options.groupBy){var i=this.getGroups(!1)[0];t.push(i.getRows(!1)[0])}return t}virtualRenderFill(){var e=this.table.rowManager.tableElement,t=this.table.rowManager.getVisibleRows();if(!this.table.options.groupBy)return t;t=t.filter((e=>"group"!==e.type)),e.style.minWidth=t.length?"":this.table.columnManager.getWidth()+"px"}rowAddingIndex(e,t,i){if(this.table.options.groupBy){this.assignRowToGroup(e);var s=e.modules.group.rows;return s.length>1&&(!t||t&&-1==s.indexOf(t)?i?s[0]!==e&&(t=s[0],this.table.rowManager.moveRowInArray(e.modules.group.rows,e,t,!i)):s[s.length-1]!==e&&(t=s[s.length-1],this.table.rowManager.moveRowInArray(e.modules.group.rows,e,t,!i)):this.table.rowManager.moveRowInArray(e.modules.group.rows,e,t,!i)),t}}trackChanges(){this.dispatch("group-changed")}setGroupBy(e){this.table.options.groupBy=e,this.initialized||this.initialize(),this.configureGroupSetup(),!e&&this.table.modExists("columnCalcs")&&!0===this.table.options.columnCalcs&&this.table.modules.columnCalcs.reinitializeCalcs(),this.refreshData(),this.trackChanges()}setGroupValues(e){this.table.options.groupValues=e,this.configureGroupSetup(),this.refreshData(),this.trackChanges()}setGroupStartOpen(e){this.table.options.groupStartOpen=e,this.configureGroupSetup(),this.table.options.groupBy?(this.refreshData(),this.trackChanges()):console.warn("Grouping Update - cant refresh view, no groups have been set")}setGroupHeader(e){this.table.options.groupHeader=e,this.configureGroupSetup(),this.table.options.groupBy?(this.refreshData(),this.trackChanges()):console.warn("Grouping Update - cant refresh view, no groups have been set")}userGetGroups(e){return this.getGroups(!0)}userGetGroupedData(){return this.table.options.groupBy?this.getGroupedData():this.getData()}rowGetGroup(e){return!!e.modules.group&&e.modules.group.getComponent()}rowMoving(e,t,i){if(this.table.options.groupBy){!i&&t instanceof be&&(t=this.table.rowManager.prevDisplayRow(e)||t);var s=t instanceof be?t:t.modules.group,o=e instanceof 
be?e:e.modules.group;s===o?this.table.rowManager.moveRowInArray(s.rows,e,t,i):(o&&o.removeRow(e),s.insertRow(e,t,i))}}rowDeleting(e){this.table.options.groupBy&&e.modules.group&&e.modules.group.removeRow(e)}rowsUpdated(e){this.table.options.groupBy&&this.updateGroupRows(!0)}cellUpdated(e){this.table.options.groupBy&&this.reassignRowToGroup(e.row)}getRows(e){return this.table.options.groupBy&&this.groupIDLookups.length?(this.dispatchExternal("dataGrouping"),this.generateGroups(e),this.subscribedExternal("dataGrouped")&&this.dispatchExternal("dataGrouped",this.getGroups(!0)),this.updateGroupRows()):e.slice(0)}getGroups(e){var t=[];return this.groupList.forEach((function(i){t.push(e?i.getComponent():i)})),t}getChildGroups(e){var t=[];return e||(e=this),e.groupList.forEach((e=>{e.groupList.length?t=t.concat(this.getChildGroups(e)):t.push(e)})),t}wipe(){this.table.options.groupBy&&(this.groupList.forEach((function(e){e.wipe()})),this.groupList=[],this.groups={})}pullGroupListData(e){var t=[];return e.forEach((e=>{var i={level:0,rowCount:0,headerContent:""},s=[];e.hasSubGroups?(s=this.pullGroupListData(e.groupList),i.level=e.level,i.rowCount=s.length-e.groupList.length,i.headerContent=e.generator(e.key,i.rowCount,e.rows,e),t.push(i),t=t.concat(s)):(i.level=e.level,i.headerContent=e.generator(e.key,e.rows.length,e.rows,e),i.rowCount=e.getRows().length,t.push(i),e.getRows().forEach((e=>{t.push(e.getData("data"))})))})),t}getGroupedData(){return this.pullGroupListData(this.groupList)}getRowGroup(e){var t=!1;return this.options("dataTree")&&(e=this.table.modules.dataTree.getTreeParentRoot(e)),this.groupList.forEach((i=>{var s=i.getRowGroup(e);s&&(t=s)})),t}countGroups(){return this.groupList.length}generateGroups(e){var t=this.groups;this.groups={},this.groupList=[],this.allowedValues&&this.allowedValues[0]?(this.allowedValues[0].forEach((e=>{this.createGroup(e,0,t)})),e.forEach((e=>{this.assignRowToExistingGroup(e,t)}))):e.forEach((e=>{this.assignRowToGroup(e,t)})),Object.values(t).forEach((e=>{e.wipe(!0)}))}createGroup(e,t,i){var s,o=t+"_"+e;i=i||[],s=new be(this,!1,t,e,this.groupIDLookups[0].field,this.headerGenerator[0],i[o]),this.groups[o]=s,this.groupList.push(s)}assignRowToExistingGroup(e,t){var i="0_"+this.groupIDLookups[0].func(e.getData());this.groups[i]&&this.groups[i].addRow(e)}assignRowToGroup(e,t){var i=this.groupIDLookups[0].func(e.getData()),s=!this.groups["0_"+i];return s&&this.createGroup(i,0,t),this.groups["0_"+i].addRow(e),!s}reassignRowToGroup(e){if("row"===e.type){var t=e.modules.group,i=t.getPath(),s=this.getExpectedPath(e);i.length==s.length&&i.every(((e,t)=>e===s[t]))||(t.removeRow(e),this.assignRowToGroup(e,this.groups),this.refreshData(!0))}}getExpectedPath(e){var t=[],i=e.getData();return this.groupIDLookups.forEach((e=>{t.push(e.func(i))})),t}updateGroupRows(e){var t=[];return this.blockRedraw||(this.groupList.forEach((e=>{t=t.concat(e.getHeadersAndRows())})),e&&this.refreshData(!0)),t}scrollHeaders(e){this.table.options.groupBy&&("virtual"===this.table.options.renderHorizontal&&(e-=this.table.columnManager.renderer.vDomPadLeft),e+="px",this.groupList.forEach((t=>{t.scrollHeader(e)})))}removeGroup(e){var t,i=e.level+"_"+e.key;this.groups[i]&&(delete this.groups[i],(t=this.groupList.indexOf(e))>-1&&this.groupList.splice(t,1))}checkBasicModeGroupHeaderWidth(){var 
e=this.table.rowManager.tableElement,t=!0;this.table.rowManager.getDisplayRows().forEach(((i,s)=>{this.table.rowManager.styleRow(i,s),e.appendChild(i.getElement()),i.initialize(!0),"group"!==i.type&&(t=!1)})),e.style.minWidth=t?this.table.columnManager.getWidth()+"px":""}},HistoryModule:Ce,HtmlTableImportModule:class extends M{static moduleName="htmlTableImport";constructor(e){super(e),this.fieldIndex=[],this.hasIndex=!1}initialize(){this.tableElementCheck()}tableElementCheck(){this.table.originalElement&&"TABLE"===this.table.originalElement.tagName&&(this.table.originalElement.childNodes.length?this.parseTable():console.warn("Unable to parse data from empty table tag, Tabulator should be initialized on a div tag unless importing data from a table element."))}parseTable(){var e=this.table.originalElement,t=this.table.options,i=e.getElementsByTagName("th"),s=e.getElementsByTagName("tbody")[0],o=[];this.hasIndex=!1,this.dispatchExternal("htmlImporting"),s=s?s.getElementsByTagName("tr"):[],this._extractOptions(e,t),i.length?this._extractHeaders(i,s):this._generateBlankHeaders(i,s);for(var n=0;n{n[e.toLowerCase()]=e})),s){var a,l=s[r];l&&"object"==typeof l&&l.name&&0===l.name.indexOf("tabulator-")&&(a=l.name.replace("tabulator-",""),void 0!==n[a]&&(t[n[a]]=this._attribValue(l.value)))}}_attribValue(e){return"true"===e||"false"!==e&&e}_findCol(e){return this.table.options.columns.find((t=>t.title===e))||!1}_extractHeaders(e,t){for(var i=0;i{for(let t in e)e[t]=null}))}cellContentsSelectionFixer(e,t){var i;if(!this.table.modExists("edit")||this.table.modules.edit.currentCell!==t){e.preventDefault();try{document.selection?((i=document.body.createTextRange()).moveToElementText(t.getElement()),i.select()):window.getSelection&&((i=document.createRange()).selectNode(t.getElement()),window.getSelection().removeAllRanges(),window.getSelection().addRange(i))}catch(e){}}}initializeExternalEvents(){for(let e in this.eventMap)this.subscriptionChangeExternal(e,this.subscriptionChanged.bind(this,e))}subscriptionChanged(e,t){t?this.subscribers[e]||(this.eventMap[e].includes("-")?(this.subscribers[e]=this.handle.bind(this,e),this.subscribe(this.eventMap[e],this.subscribers[e])):this.subscribeTouchEvents(e)):this.eventMap[e].includes("-")?!this.subscribers[e]||this.columnSubscribers[e]||this.subscribedExternal(e)||(this.unsubscribe(this.eventMap[e],this.subscribers[e]),delete this.subscribers[e]):this.unsubscribeTouchEvents(e)}subscribeTouchEvents(e){var t=this.eventMap[e];this.touchSubscribers[t+"-touchstart"]||(this.touchSubscribers[t+"-touchstart"]=this.handleTouch.bind(this,t,"start"),this.touchSubscribers[t+"-touchend"]=this.handleTouch.bind(this,t,"end"),this.subscribe(t+"-touchstart",this.touchSubscribers[t+"-touchstart"]),this.subscribe(t+"-touchend",this.touchSubscribers[t+"-touchend"])),this.subscribers[e]=!0}unsubscribeTouchEvents(e){var t=!0,i=this.eventMap[e];if(this.subscribers[e]&&!this.subscribedExternal(e)){delete this.subscribers[e];for(let e in this.eventMap)this.eventMap[e]===i&&this.subscribers[e]&&(t=!1);t&&(this.unsubscribe(i+"-touchstart",this.touchSubscribers[i+"-touchstart"]),this.unsubscribe(i+"-touchend",this.touchSubscribers[i+"-touchend"]),delete this.touchSubscribers[i+"-touchstart"],delete this.touchSubscribers[i+"-touchend"])}}initializeColumn(e){var t=e.definition;for(let i in 
this.eventMap)t[i]&&(this.subscriptionChanged(i,!0),this.columnSubscribers[i]||(this.columnSubscribers[i]=[]),this.columnSubscribers[i].push(e))}handle(e,t,i){this.dispatchEvent(e,t,i)}handleTouch(e,t,i,s){var o=this.touchWatchers[e];switch("column"===e&&(e="header"),t){case"start":o.tap=!0,clearTimeout(o.tapHold),o.tapHold=setTimeout((()=>{clearTimeout(o.tapHold),o.tapHold=null,o.tap=null,clearTimeout(o.tapDbl),o.tapDbl=null,this.dispatchEvent(e+"TapHold",i,s)}),1e3);break;case"end":o.tap&&(o.tap=null,this.dispatchEvent(e+"Tap",i,s)),o.tapDbl?(clearTimeout(o.tapDbl),o.tapDbl=null,this.dispatchEvent(e+"DblTap",i,s)):o.tapDbl=setTimeout((()=>{clearTimeout(o.tapDbl),o.tapDbl=null}),300),clearTimeout(o.tapHold),o.tapHold=null}}dispatchEvent(e,t,i){var s,o=i.getComponent();this.columnSubscribers[e]&&(i instanceof n?s=i.column.definition[e]:i instanceof r&&(s=i.definition[e]),s&&s(t,o)),this.dispatchExternal(e,t,o)}},KeybindingsModule:Te,MenuModule:class extends M{static moduleName="menu";constructor(e){super(e),this.menuContainer=null,this.nestedMenuBlock=!1,this.currentComponent=null,this.rootPopup=null,this.columnSubscribers={},this.registerTableOption("rowContextMenu",!1),this.registerTableOption("rowClickMenu",!1),this.registerTableOption("rowDblClickMenu",!1),this.registerTableOption("groupContextMenu",!1),this.registerTableOption("groupClickMenu",!1),this.registerTableOption("groupDblClickMenu",!1),this.registerColumnOption("headerContextMenu"),this.registerColumnOption("headerClickMenu"),this.registerColumnOption("headerDblClickMenu"),this.registerColumnOption("headerMenu"),this.registerColumnOption("headerMenuIcon"),this.registerColumnOption("contextMenu"),this.registerColumnOption("clickMenu"),this.registerColumnOption("dblClickMenu")}initialize(){this.deprecatedOptionsCheck(),this.initializeRowWatchers(),this.initializeGroupWatchers(),this.subscribe("column-init",this.initializeColumn.bind(this))}deprecatedOptionsCheck(){}initializeRowWatchers(){this.table.options.rowContextMenu&&(this.subscribe("row-contextmenu",this.loadMenuEvent.bind(this,this.table.options.rowContextMenu)),this.table.on("rowTapHold",this.loadMenuEvent.bind(this,this.table.options.rowContextMenu))),this.table.options.rowClickMenu&&this.subscribe("row-click",this.loadMenuEvent.bind(this,this.table.options.rowClickMenu)),this.table.options.rowDblClickMenu&&this.subscribe("row-dblclick",this.loadMenuEvent.bind(this,this.table.options.rowDblClickMenu))}initializeGroupWatchers(){this.table.options.groupContextMenu&&(this.subscribe("group-contextmenu",this.loadMenuEvent.bind(this,this.table.options.groupContextMenu)),this.table.on("groupTapHold",this.loadMenuEvent.bind(this,this.table.options.groupContextMenu))),this.table.options.groupClickMenu&&this.subscribe("group-click",this.loadMenuEvent.bind(this,this.table.options.groupClickMenu)),this.table.options.groupDblClickMenu&&this.subscribe("group-dblclick",this.loadMenuEvent.bind(this,this.table.options.groupDblClickMenu))}initializeColumn(e){var 
t=e.definition;t.headerContextMenu&&!this.columnSubscribers.headerContextMenu&&(this.columnSubscribers.headerContextMenu=this.loadMenuTableColumnEvent.bind(this,"headerContextMenu"),this.subscribe("column-contextmenu",this.columnSubscribers.headerContextMenu),this.table.on("headerTapHold",this.loadMenuTableColumnEvent.bind(this,"headerContextMenu"))),t.headerClickMenu&&!this.columnSubscribers.headerClickMenu&&(this.columnSubscribers.headerClickMenu=this.loadMenuTableColumnEvent.bind(this,"headerClickMenu"),this.subscribe("column-click",this.columnSubscribers.headerClickMenu)),t.headerDblClickMenu&&!this.columnSubscribers.headerDblClickMenu&&(this.columnSubscribers.headerDblClickMenu=this.loadMenuTableColumnEvent.bind(this,"headerDblClickMenu"),this.subscribe("column-dblclick",this.columnSubscribers.headerDblClickMenu)),t.headerMenu&&this.initializeColumnHeaderMenu(e),t.contextMenu&&!this.columnSubscribers.contextMenu&&(this.columnSubscribers.contextMenu=this.loadMenuTableCellEvent.bind(this,"contextMenu"),this.subscribe("cell-contextmenu",this.columnSubscribers.contextMenu),this.table.on("cellTapHold",this.loadMenuTableCellEvent.bind(this,"contextMenu"))),t.clickMenu&&!this.columnSubscribers.clickMenu&&(this.columnSubscribers.clickMenu=this.loadMenuTableCellEvent.bind(this,"clickMenu"),this.subscribe("cell-click",this.columnSubscribers.clickMenu)),t.dblClickMenu&&!this.columnSubscribers.dblClickMenu&&(this.columnSubscribers.dblClickMenu=this.loadMenuTableCellEvent.bind(this,"dblClickMenu"),this.subscribe("cell-dblclick",this.columnSubscribers.dblClickMenu))}initializeColumnHeaderMenu(e){var t,i=e.definition.headerMenuIcon;(t=document.createElement("span")).classList.add("tabulator-header-popup-button"),i?("function"==typeof i&&(i=i(e.getComponent())),i instanceof HTMLElement?t.appendChild(i):t.innerHTML=i):t.innerHTML="⋮",t.addEventListener("click",(t=>{t.stopPropagation(),t.preventDefault(),this.loadMenuEvent(e.definition.headerMenu,t,e)})),e.titleElement.insertBefore(t,e.titleElement.firstChild)}loadMenuTableCellEvent(e,t,i){i._cell&&(i=i._cell),i.column.definition[e]&&this.loadMenuEvent(i.column.definition[e],t,i)}loadMenuTableColumnEvent(e,t,i){i._column&&(i=i._column),i.definition[e]&&this.loadMenuEvent(i.definition[e],t,i)}loadMenuEvent(e,t,i){i._group?i=i._group:i._row&&(i=i._row),e="function"==typeof e?e.call(this.table,t,i.getComponent()):e,this.loadMenu(t,i,e)}loadMenu(e,t,i,s,o){var n,r=!(e instanceof MouseEvent),a=document.createElement("div");if(a.classList.add("tabulator-menu"),r||e.preventDefault(),i&&i.length){if(s)n=o.child(a);else{if(this.nestedMenuBlock){if(this.rootPopup)return}else this.nestedMenuBlock=setTimeout((()=>{this.nestedMenuBlock=!1}),100);this.rootPopup&&this.rootPopup.hide(),this.rootPopup=n=this.popup(a)}i.forEach((e=>{var i=document.createElement("div"),s=e.label,o=e.disabled;e.separator?i.classList.add("tabulator-menu-separator"):(i.classList.add("tabulator-menu-item"),"function"==typeof s&&(s=s.call(this.table,t.getComponent())),s instanceof Node?i.appendChild(s):i.innerHTML=s,"function"==typeof 
o&&(o=o.call(this.table,t.getComponent())),o?(i.classList.add("tabulator-menu-item-disabled"),i.addEventListener("click",(e=>{e.stopPropagation()}))):e.menu&&e.menu.length?i.addEventListener("click",(s=>{s.stopPropagation(),this.loadMenu(s,t,e.menu,i,n)})):e.action&&i.addEventListener("click",(i=>{e.action(i,t.getComponent())})),e.menu&&e.menu.length&&i.classList.add("tabulator-menu-item-submenu")),a.appendChild(i)})),a.addEventListener("click",(e=>{this.rootPopup&&this.rootPopup.hide()})),n.show(s||e),n===this.rootPopup&&(this.rootPopup.hideOnBlur((()=>{this.rootPopup=null,this.currentComponent&&(this.dispatch("menu-closed",i,n),this.dispatchExternal("menuClosed",this.currentComponent.getComponent()),this.currentComponent=null)})),this.currentComponent=t,this.dispatch("menu-opened",i,n),this.dispatchExternal("menuOpened",t.getComponent()))}}},MoveColumnsModule:class extends M{static moduleName="moveColumn";constructor(e){super(e),this.placeholderElement=this.createPlaceholderElement(),this.hoverElement=!1,this.checkTimeout=!1,this.checkPeriod=250,this.moving=!1,this.toCol=!1,this.toColAfter=!1,this.startX=0,this.autoScrollMargin=40,this.autoScrollStep=5,this.autoScrollTimeout=!1,this.touchMove=!1,this.moveHover=this.moveHover.bind(this),this.endMove=this.endMove.bind(this),this.registerTableOption("movableColumns",!1)}createPlaceholderElement(){var e=document.createElement("div");return e.classList.add("tabulator-col"),e.classList.add("tabulator-col-placeholder"),e}initialize(){this.table.options.movableColumns&&(this.subscribe("column-init",this.initializeColumn.bind(this)),this.subscribe("alert-show",this.abortMove.bind(this)))}abortMove(){clearTimeout(this.checkTimeout)}initializeColumn(e){var t,i=this,s={};e.modules.frozen||e.isGroup||e.isRowHeader||(t=e.getElement(),s.mousemove=function(s){e.parent===i.moving.parent&&((i.touchMove?s.touches[0].pageX:s.pageX)-a.elOffset(t).left+i.table.columnManager.contentsElement.scrollLeft>e.getWidth()/2?i.toCol===e&&i.toColAfter||(t.parentNode.insertBefore(i.placeholderElement,t.nextSibling),i.moveColumn(e,!0)):(i.toCol!==e||i.toColAfter)&&(t.parentNode.insertBefore(i.placeholderElement,t),i.moveColumn(e,!1)))}.bind(i),t.addEventListener("mousedown",(function(t){i.touchMove=!1,1===t.which&&(i.checkTimeout=setTimeout((function(){i.startMove(t,e)}),i.checkPeriod))})),t.addEventListener("mouseup",(function(e){1===e.which&&i.checkTimeout&&clearTimeout(i.checkTimeout)})),i.bindTouchEvents(e)),e.modules.moveColumn=s}bindTouchEvents(e){var t,i,s,o,n,r,a=e.getElement(),l=!1;a.addEventListener("touchstart",(a=>{this.checkTimeout=setTimeout((()=>{this.touchMove=!0,t=e.nextColumn(),s=t?t.getWidth()/2:0,i=e.prevColumn(),o=i?i.getWidth()/2:0,n=0,r=0,l=!1,this.startMove(a,e)}),this.checkPeriod)}),{passive:!0}),a.addEventListener("touchmove",(a=>{var h,d;this.moving&&(this.moveHover(a),l||(l=a.touches[0].pageX),(h=a.touches[0].pageX-l)>0?t&&h-n>s&&(d=t)!==e&&(l=a.touches[0].pageX,d.getElement().parentNode.insertBefore(this.placeholderElement,d.getElement().nextSibling),this.moveColumn(d,!0)):i&&-h-r>o&&(d=i)!==e&&(l=a.touches[0].pageX,d.getElement().parentNode.insertBefore(this.placeholderElement,d.getElement()),this.moveColumn(d,!1)),d&&(t=d.nextColumn(),n=s,s=t?t.getWidth()/2:0,i=d.prevColumn(),r=o,o=i?i.getWidth()/2:0))}),{passive:!0}),a.addEventListener("touchend",(e=>{this.checkTimeout&&clearTimeout(this.checkTimeout),this.moving&&this.endMove(e)}))}startMove(e,t){var 
i=t.getElement(),s=this.table.columnManager.getContentsElement(),o=this.table.columnManager.getHeadersElement();this.table.modules.selectRange&&this.table.modules.selectRange.columnSelection&&this.table.modules.selectRange.mousedown&&"column"===this.table.modules.selectRange.selecting||(this.moving=t,this.startX=(this.touchMove?e.touches[0].pageX:e.pageX)-a.elOffset(i).left,this.table.element.classList.add("tabulator-block-select"),this.placeholderElement.style.width=t.getWidth()+"px",this.placeholderElement.style.height=t.getHeight()+"px",i.parentNode.insertBefore(this.placeholderElement,i),i.parentNode.removeChild(i),this.hoverElement=i.cloneNode(!0),this.hoverElement.classList.add("tabulator-moving"),s.appendChild(this.hoverElement),this.hoverElement.style.left="0",this.hoverElement.style.bottom=s.clientHeight-o.offsetHeight+"px",this.touchMove||(this._bindMouseMove(),document.body.addEventListener("mousemove",this.moveHover),document.body.addEventListener("mouseup",this.endMove)),this.moveHover(e),this.dispatch("column-moving",e,this.moving))}_bindMouseMove(){this.table.columnManager.columnsByIndex.forEach((function(e){e.modules.moveColumn.mousemove&&e.getElement().addEventListener("mousemove",e.modules.moveColumn.mousemove)}))}_unbindMouseMove(){this.table.columnManager.columnsByIndex.forEach((function(e){e.modules.moveColumn.mousemove&&e.getElement().removeEventListener("mousemove",e.modules.moveColumn.mousemove)}))}moveColumn(e,t){var i=this.moving.getCells();this.toCol=e,this.toColAfter=t,t?e.getCells().forEach((function(e,t){var s=e.getElement(!0);s.parentNode&&i[t]&&s.parentNode.insertBefore(i[t].getElement(),s.nextSibling)})):e.getCells().forEach((function(e,t){var s=e.getElement(!0);s.parentNode&&i[t]&&s.parentNode.insertBefore(i[t].getElement(),s)}))}endMove(e){(1===e.which||this.touchMove)&&(this._unbindMouseMove(),this.placeholderElement.parentNode.insertBefore(this.moving.getElement(),this.placeholderElement.nextSibling),this.placeholderElement.parentNode.removeChild(this.placeholderElement),this.hoverElement.parentNode.removeChild(this.hoverElement),this.table.element.classList.remove("tabulator-block-select"),this.toCol&&this.table.columnManager.moveColumnActual(this.moving,this.toCol,this.toColAfter),this.moving=!1,this.toCol=!1,this.toColAfter=!1,this.touchMove||(document.body.removeEventListener("mousemove",this.moveHover),document.body.removeEventListener("mouseup",this.endMove)))}moveHover(e){var t,i=this.table.columnManager.getContentsElement(),s=i.scrollLeft,o=(this.touchMove?e.touches[0].pageX:e.pageX)-a.elOffset(i).left+s;this.hoverElement.style.left=o-this.startX+"px",o-s<this.autoScrollMargin&&(this.autoScrollTimeout||(this.autoScrollTimeout=setTimeout((()=>{t=Math.max(0,s-5),this.table.rowManager.getElement().scrollLeft=t,this.autoScrollTimeout=!1}),1))),s+i.clientWidth-o<this.autoScrollMargin&&(this.autoScrollTimeout||(this.autoScrollTimeout=setTimeout((()=>{t=Math.min(i.clientWidth,s+5),this.table.rowManager.getElement().scrollLeft=t,this.autoScrollTimeout=!1}),1)))}},MoveRowsModule:Le,MutatorModule:De,PageModule:Pe,PersistenceModule:_e,PopupModule:class extends M{static 
moduleName="popup";constructor(e){super(e),this.columnSubscribers={},this.registerTableOption("rowContextPopup",!1),this.registerTableOption("rowClickPopup",!1),this.registerTableOption("rowDblClickPopup",!1),this.registerTableOption("groupContextPopup",!1),this.registerTableOption("groupClickPopup",!1),this.registerTableOption("groupDblClickPopup",!1),this.registerColumnOption("headerContextPopup"),this.registerColumnOption("headerClickPopup"),this.registerColumnOption("headerDblClickPopup"),this.registerColumnOption("headerPopup"),this.registerColumnOption("headerPopupIcon"),this.registerColumnOption("contextPopup"),this.registerColumnOption("clickPopup"),this.registerColumnOption("dblClickPopup"),this.registerComponentFunction("cell","popup",this._componentPopupCall.bind(this)),this.registerComponentFunction("column","popup",this._componentPopupCall.bind(this)),this.registerComponentFunction("row","popup",this._componentPopupCall.bind(this)),this.registerComponentFunction("group","popup",this._componentPopupCall.bind(this))}initialize(){this.initializeRowWatchers(),this.initializeGroupWatchers(),this.subscribe("column-init",this.initializeColumn.bind(this))}_componentPopupCall(e,t,i){this.loadPopupEvent(t,null,e,i)}initializeRowWatchers(){this.table.options.rowContextPopup&&(this.subscribe("row-contextmenu",this.loadPopupEvent.bind(this,this.table.options.rowContextPopup)),this.table.on("rowTapHold",this.loadPopupEvent.bind(this,this.table.options.rowContextPopup))),this.table.options.rowClickPopup&&this.subscribe("row-click",this.loadPopupEvent.bind(this,this.table.options.rowClickPopup)),this.table.options.rowDblClickPopup&&this.subscribe("row-dblclick",this.loadPopupEvent.bind(this,this.table.options.rowDblClickPopup))}initializeGroupWatchers(){this.table.options.groupContextPopup&&(this.subscribe("group-contextmenu",this.loadPopupEvent.bind(this,this.table.options.groupContextPopup)),this.table.on("groupTapHold",this.loadPopupEvent.bind(this,this.table.options.groupContextPopup))),this.table.options.groupClickPopup&&this.subscribe("group-click",this.loadPopupEvent.bind(this,this.table.options.groupClickPopup)),this.table.options.groupDblClickPopup&&this.subscribe("group-dblclick",this.loadPopupEvent.bind(this,this.table.options.groupDblClickPopup))}initializeColumn(e){var 
t=e.definition;t.headerContextPopup&&!this.columnSubscribers.headerContextPopup&&(this.columnSubscribers.headerContextPopup=this.loadPopupTableColumnEvent.bind(this,"headerContextPopup"),this.subscribe("column-contextmenu",this.columnSubscribers.headerContextPopup),this.table.on("headerTapHold",this.loadPopupTableColumnEvent.bind(this,"headerContextPopup"))),t.headerClickPopup&&!this.columnSubscribers.headerClickPopup&&(this.columnSubscribers.headerClickPopup=this.loadPopupTableColumnEvent.bind(this,"headerClickPopup"),this.subscribe("column-click",this.columnSubscribers.headerClickPopup)),t.headerDblClickPopup&&!this.columnSubscribers.headerDblClickPopup&&(this.columnSubscribers.headerDblClickPopup=this.loadPopupTableColumnEvent.bind(this,"headerDblClickPopup"),this.subscribe("column-dblclick",this.columnSubscribers.headerDblClickPopup)),t.headerPopup&&this.initializeColumnHeaderPopup(e),t.contextPopup&&!this.columnSubscribers.contextPopup&&(this.columnSubscribers.contextPopup=this.loadPopupTableCellEvent.bind(this,"contextPopup"),this.subscribe("cell-contextmenu",this.columnSubscribers.contextPopup),this.table.on("cellTapHold",this.loadPopupTableCellEvent.bind(this,"contextPopup"))),t.clickPopup&&!this.columnSubscribers.clickPopup&&(this.columnSubscribers.clickPopup=this.loadPopupTableCellEvent.bind(this,"clickPopup"),this.subscribe("cell-click",this.columnSubscribers.clickPopup)),t.dblClickPopup&&!this.columnSubscribers.dblClickPopup&&(this.columnSubscribers.dblClickPopup=this.loadPopupTableCellEvent.bind(this,"dblClickPopup"),this.subscribe("cell-click",this.columnSubscribers.dblClickPopup))}initializeColumnHeaderPopup(e){var t,i=e.definition.headerPopupIcon;(t=document.createElement("span")).classList.add("tabulator-header-popup-button"),i?("function"==typeof i&&(i=i(e.getComponent())),i instanceof HTMLElement?t.appendChild(i):t.innerHTML=i):t.innerHTML="⋮",t.addEventListener("click",(t=>{t.stopPropagation(),t.preventDefault(),this.loadPopupEvent(e.definition.headerPopup,t,e)})),e.titleElement.insertBefore(t,e.titleElement.firstChild)}loadPopupTableCellEvent(e,t,i){i._cell&&(i=i._cell),i.column.definition[e]&&this.loadPopupEvent(i.column.definition[e],t,i)}loadPopupTableColumnEvent(e,t,i){i._column&&(i=i._column),i.definition[e]&&this.loadPopupEvent(i.definition[e],t,i)}loadPopupEvent(e,t,i,s){var o;i._group?i=i._group:i._row&&(i=i._row),e="function"==typeof e?e.call(this.table,t,i.getComponent(),(function(e){o=e})):e,this.loadPopup(t,i,e,o,s)}loadPopup(e,t,i,s,o){var n,r,a=!(e instanceof MouseEvent);i instanceof HTMLElement?n=i:(n=document.createElement("div")).innerHTML=i,n.classList.add("tabulator-popup"),n.addEventListener("click",(e=>{e.stopPropagation()})),a||e.preventDefault(),r=this.popup(n),"function"==typeof s&&r.renderCallback(s),e?r.show(e):r.show(t.getElement(),o||"center"),r.hideOnBlur((()=>{this.dispatchExternal("popupClosed",t.getComponent())})),this.dispatchExternal("popupOpened",t.getComponent())}},PrintModule:class extends M{static 
moduleName="print";constructor(e){super(e),this.element=!1,this.manualBlock=!1,this.beforeprintEventHandler=null,this.afterprintEventHandler=null,this.registerTableOption("printAsHtml",!1),this.registerTableOption("printFormatter",!1),this.registerTableOption("printHeader",!1),this.registerTableOption("printFooter",!1),this.registerTableOption("printStyled",!0),this.registerTableOption("printRowRange","visible"),this.registerTableOption("printConfig",{}),this.registerColumnOption("print"),this.registerColumnOption("titlePrint")}initialize(){this.table.options.printAsHtml&&(this.beforeprintEventHandler=this.replaceTable.bind(this),this.afterprintEventHandler=this.cleanup.bind(this),window.addEventListener("beforeprint",this.beforeprintEventHandler),window.addEventListener("afterprint",this.afterprintEventHandler),this.subscribe("table-destroy",this.destroy.bind(this))),this.registerTableFunction("print",this.printFullscreen.bind(this))}destroy(){this.table.options.printAsHtml&&(window.removeEventListener("beforeprint",this.beforeprintEventHandler),window.removeEventListener("afterprint",this.afterprintEventHandler))}replaceTable(){this.manualBlock||(this.element=document.createElement("div"),this.element.classList.add("tabulator-print-table"),this.element.appendChild(this.table.modules.export.generateTable(this.table.options.printConfig,this.table.options.printStyled,this.table.options.printRowRange,"print")),this.table.element.style.display="none",this.table.element.parentNode.insertBefore(this.element,this.table.element))}cleanup(){document.body.classList.remove("tabulator-print-fullscreen-hide"),this.element&&this.element.parentNode&&(this.element.parentNode.removeChild(this.element),this.table.element.style.display="")}printFullscreen(e,t,i){var s,o,n=window.scrollX,r=window.scrollY,a=document.createElement("div"),l=document.createElement("div"),h=this.table.modules.export.generateTable(void 0!==i?i:this.table.options.printConfig,void 0!==t?t:this.table.options.printStyled,e||this.table.options.printRowRange,"print");this.manualBlock=!0,this.element=document.createElement("div"),this.element.classList.add("tabulator-print-fullscreen"),this.table.options.printHeader&&(a.classList.add("tabulator-print-header"),"string"==typeof(s="function"==typeof this.table.options.printHeader?this.table.options.printHeader.call(this.table):this.table.options.printHeader)?a.innerHTML=s:a.appendChild(s),this.element.appendChild(a)),this.element.appendChild(h),this.table.options.printFooter&&(l.classList.add("tabulator-print-footer"),"string"==typeof(o="function"==typeof this.table.options.printFooter?this.table.options.printFooter.call(this.table):this.table.options.printFooter)?l.innerHTML=o:l.appendChild(o),this.element.appendChild(l)),document.body.classList.add("tabulator-print-fullscreen-hide"),document.body.appendChild(this.element),this.table.options.printFormatter&&this.table.options.printFormatter(this.element,h),window.print(),this.cleanup(),window.scrollTo(n,r),this.manualBlock=!1}},ReactiveDataModule:class extends M{static 
moduleName="reactiveData";constructor(e){super(e),this.data=!1,this.blocked=!1,this.origFuncs={},this.currentVersion=0,this.registerTableOption("reactiveData",!1)}initialize(){this.table.options.reactiveData&&(this.subscribe("cell-value-save-before",this.block.bind(this,"cellsave")),this.subscribe("cell-value-save-after",this.unblock.bind(this,"cellsave")),this.subscribe("row-data-save-before",this.block.bind(this,"rowsave")),this.subscribe("row-data-save-after",this.unblock.bind(this,"rowsave")),this.subscribe("row-data-init-after",this.watchRow.bind(this)),this.subscribe("data-processing",this.watchData.bind(this)),this.subscribe("table-destroy",this.unwatchData.bind(this)))}watchData(e){var t,i=this;this.currentVersion++,t=this.currentVersion,this.unwatchData(),this.data=e,this.origFuncs.push=e.push,Object.defineProperty(this.data,"push",{enumerable:!1,configurable:!0,value:function(){var s,o=Array.from(arguments);return i.blocked||t!==i.currentVersion||(i.block("data-push"),o.forEach((e=>{i.table.rowManager.addRowActual(e,!1)})),s=i.origFuncs.push.apply(e,arguments),i.unblock("data-push")),s}}),this.origFuncs.unshift=e.unshift,Object.defineProperty(this.data,"unshift",{enumerable:!1,configurable:!0,value:function(){var s,o=Array.from(arguments);return i.blocked||t!==i.currentVersion||(i.block("data-unshift"),o.forEach((e=>{i.table.rowManager.addRowActual(e,!0)})),s=i.origFuncs.unshift.apply(e,arguments),i.unblock("data-unshift")),s}}),this.origFuncs.shift=e.shift,Object.defineProperty(this.data,"shift",{enumerable:!1,configurable:!0,value:function(){var s,o;return i.blocked||t!==i.currentVersion||(i.block("data-shift"),i.data.length&&(s=i.table.rowManager.getRowFromDataObject(i.data[0]))&&s.deleteActual(),o=i.origFuncs.shift.call(e),i.unblock("data-shift")),o}}),this.origFuncs.pop=e.pop,Object.defineProperty(this.data,"pop",{enumerable:!1,configurable:!0,value:function(){var s,o;return i.blocked||t!==i.currentVersion||(i.block("data-pop"),i.data.length&&(s=i.table.rowManager.getRowFromDataObject(i.data[i.data.length-1]))&&s.deleteActual(),o=i.origFuncs.pop.call(e),i.unblock("data-pop")),o}}),this.origFuncs.splice=e.splice,Object.defineProperty(this.data,"splice",{enumerable:!1,configurable:!0,value:function(){var s,o,n=Array.from(arguments),r=n[0]<0?e.length+n[0]:n[0],a=n[1],l=!!n[2]&&n.slice(2);if(!i.blocked&&t===i.currentVersion){if(i.block("data-splice"),l&&((s=!!e[r]&&i.table.rowManager.getRowFromDataObject(e[r]))?l.forEach((e=>{i.table.rowManager.addRowActual(e,!0,s,!0)})):(l=l.slice().reverse()).forEach((e=>{i.table.rowManager.addRowActual(e,!0,!1,!0)}))),0!==a){var h=e.slice(r,void 0===n[1]?n[1]:r+a);h.forEach(((e,t)=>{var s=i.table.rowManager.getRowFromDataObject(e);s&&s.deleteActual(t!==h.length-1)}))}(l||0!==a)&&i.table.rowManager.reRenderInPosition(),o=i.origFuncs.splice.apply(e,arguments),i.unblock("data-splice")}return o}})}unwatchData(){if(!1!==this.data)for(var e in this.origFuncs)Object.defineProperty(this.data,e,{enumerable:!0,configurable:!0,writable:!0,value:this.origFuncs.key})}watchRow(e){var t=e.getData();for(var i in t)this.watchKey(e,t,i);this.table.options.dataTree&&this.watchTreeChildren(e)}watchTreeChildren(e){var t=this,i=e.getData()[this.table.options.dataTreeChildField],s={};i&&(s.push=i.push,Object.defineProperty(i,"push",{enumerable:!1,configurable:!0,value:()=>{if(!t.blocked){t.block("tree-push");var o=s.push.apply(i,arguments);this.rebuildTree(e),t.unblock("tree-push")}return 
o}}),s.unshift=i.unshift,Object.defineProperty(i,"unshift",{enumerable:!1,configurable:!0,value:()=>{if(!t.blocked){t.block("tree-unshift");var o=s.unshift.apply(i,arguments);this.rebuildTree(e),t.unblock("tree-unshift")}return o}}),s.shift=i.shift,Object.defineProperty(i,"shift",{enumerable:!1,configurable:!0,value:()=>{if(!t.blocked){t.block("tree-shift");var o=s.shift.call(i);this.rebuildTree(e),t.unblock("tree-shift")}return o}}),s.pop=i.pop,Object.defineProperty(i,"pop",{enumerable:!1,configurable:!0,value:()=>{if(!t.blocked){t.block("tree-pop");var o=s.pop.call(i);this.rebuildTree(e),t.unblock("tree-pop")}return o}}),s.splice=i.splice,Object.defineProperty(i,"splice",{enumerable:!1,configurable:!0,value:()=>{if(!t.blocked){t.block("tree-splice");var o=s.splice.apply(i,arguments);this.rebuildTree(e),t.unblock("tree-splice")}return o}}))}rebuildTree(e){this.table.modules.dataTree.initializeRow(e),this.table.modules.dataTree.layoutRow(e),this.table.rowManager.refreshActiveData("tree",!1,!0)}watchKey(e,t,i){var s=this,o=Object.getOwnPropertyDescriptor(t,i),n=t[i],r=this.currentVersion;Object.defineProperty(t,i,{set:t=>{if(n=t,!s.blocked&&r===s.currentVersion){s.block("key");var a={};a[i]=t,e.updateData(a),s.unblock("key")}o.set&&o.set(t)},get:()=>(o.get&&o.get(),n)})}unwatchRow(e){var t=e.getData();for(var i in t)Object.defineProperty(t,i,{value:t[i]})}block(e){this.blocked||(this.blocked=e)}unblock(e){this.blocked===e&&(this.blocked=!1)}},ResizeColumnsModule:class extends M{static moduleName="resizeColumns";constructor(e){super(e),this.startColumn=!1,this.startX=!1,this.startWidth=!1,this.latestX=!1,this.handle=null,this.initialNextColumn=null,this.nextColumn=null,this.initialized=!1,this.registerColumnOption("resizable",!0),this.registerTableOption("resizableColumnFit",!1),this.registerTableOption("resizableColumnGuide",!1)}initialize(){this.subscribe("column-rendered",this.layoutColumnHeader.bind(this))}initializeEventWatchers(){this.initialized||(this.subscribe("cell-rendered",this.layoutCellHandles.bind(this)),this.subscribe("cell-delete",this.deInitializeComponent.bind(this)),this.subscribe("cell-height",this.resizeHandle.bind(this)),this.subscribe("column-moved",this.columnLayoutUpdated.bind(this)),this.subscribe("column-hide",this.deInitializeColumn.bind(this)),this.subscribe("column-show",this.columnLayoutUpdated.bind(this)),this.subscribe("column-width",this.columnWidthUpdated.bind(this)),this.subscribe("column-delete",this.deInitializeComponent.bind(this)),this.subscribe("column-height",this.resizeHandle.bind(this)),this.initialized=!0)}layoutCellHandles(e){"row"===e.row.type&&(this.deInitializeComponent(e),this.initializeColumn("cell",e,e.column,e.element))}layoutColumnHeader(e){e.definition.resizable&&(this.initializeEventWatchers(),this.deInitializeComponent(e),this.initializeColumn("header",e,e,e.element))}columnLayoutUpdated(e){var t=e.prevColumn();this.reinitializeColumn(e),t&&this.reinitializeColumn(t)}columnWidthUpdated(e){e.modules.frozen&&(this.table.modules.frozenColumns.leftColumns.includes(e)?this.table.modules.frozenColumns.leftColumns.forEach((e=>{this.reinitializeColumn(e)})):this.table.modules.frozenColumns.rightColumns.includes(e)&&this.table.modules.frozenColumns.rightColumns.forEach((e=>{this.reinitializeColumn(e)})))}frozenColumnOffset(e){var t=!1;return e.modules.frozen&&(t=e.modules.frozen.marginValue,"left"===e.modules.frozen.position?t+=e.getWidth()-3:t&&(t-=3)),!1!==t&&t+"px"}reinitializeColumn(e){var 
t=this.frozenColumnOffset(e);e.cells.forEach((i=>{i.modules.resize&&i.modules.resize.handleEl&&(t&&(i.modules.resize.handleEl.style[e.modules.frozen.position]=t,i.modules.resize.handleEl.style["z-index"]=11),i.element.after(i.modules.resize.handleEl))})),e.modules.resize&&e.modules.resize.handleEl&&(t&&(e.modules.resize.handleEl.style[e.modules.frozen.position]=t),e.element.after(e.modules.resize.handleEl))}initializeColumn(e,t,i,s){var o=this,n=i.definition.resizable,r={},a=i.getLastColumn();if("header"===e&&(r={variableHeight:"textarea"==i.definition.formatter||i.definition.variableHeight}),(!0===n||n==e)&&this._checkResizability(a)){var l=document.createElement("span");l.className="tabulator-col-resize-handle",l.addEventListener("click",(function(e){e.stopPropagation()}));var h=function(e){o.startColumn=i,o.initialNextColumn=o.nextColumn=a.nextColumn(),o._mouseDown(e,a,l)};l.addEventListener("mousedown",h),l.addEventListener("touchstart",h,{passive:!0}),l.addEventListener("dblclick",(e=>{var t=a.getWidth();e.stopPropagation(),a.reinitializeWidth(!0),t!==a.getWidth()&&(o.dispatch("column-resized",a),o.dispatchExternal("columnResized",a.getComponent()))})),i.modules.frozen&&(l.style.position="sticky",l.style[i.modules.frozen.position]=this.frozenColumnOffset(i)),r.handleEl=l,s.parentNode&&i.visible&&s.after(l)}t.modules.resize=r}deInitializeColumn(e){this.deInitializeComponent(e),e.cells.forEach((e=>{this.deInitializeComponent(e)}))}deInitializeComponent(e){var t;e.modules.resize&&(t=e.modules.resize.handleEl)&&t.parentElement&&t.parentElement.removeChild(t)}resizeHandle(e,t){e.modules.resize&&e.modules.resize.handleEl&&(e.modules.resize.handleEl.style.height=t)}resize(e,t){var i,s,o=void 0===e.clientX?e.touches[0].clientX:e.clientX,n=o-this.startX,r=o-this.latestX;if(this.latestX=o,this.table.rtl&&(n=-n,r=-r),i=t.width==t.minWidth||t.width==t.maxWidth,t.setWidth(this.startWidth+n),s=t.width==t.minWidth||t.width==t.maxWidth,r<0&&(this.nextColumn=this.initialNextColumn),this.table.options.resizableColumnFit&&this.nextColumn&&(!i||!s)){let e=this.nextColumn.getWidth();r>0&&e<=this.nextColumn.minWidth&&(this.nextColumn=this.nextColumn.nextColumn()),this.nextColumn&&this.nextColumn.setWidth(this.nextColumn.getWidth()-r)}this.table.columnManager.rerenderColumns(!0),!this.table.browserSlow&&t.modules.resize&&t.modules.resize.variableHeight&&t.checkCellHeights()}calcGuidePosition(e,t,i){var s=void 0===e.clientX?e.touches[0].clientX:e.clientX,o=i.getBoundingClientRect().x-this.table.element.getBoundingClientRect().x,n=this.table.element.getBoundingClientRect().x,r=t.element.getBoundingClientRect().left-n,a=s-this.startX,l=Math.max(o+a,r+t.minWidth);return t.maxWidth&&(l=Math.min(l,r+t.maxWidth)),l}_checkResizability(e){return e.definition.resizable}_mouseDown(e,t,i){var s,o=this;function n(e){o.table.options.resizableColumnGuide?s.style.left=o.calcGuidePosition(e,t,i)+"px":o.resize(e,t)}function 
r(e){o.table.options.resizableColumnGuide&&(o.resize(e,t),s.remove()),o.startColumn.modules.edit&&(o.startColumn.modules.edit.blocked=!1),o.table.browserSlow&&t.modules.resize&&t.modules.resize.variableHeight&&t.checkCellHeights(),document.body.removeEventListener("mouseup",r),document.body.removeEventListener("mousemove",n),i.removeEventListener("touchmove",n),i.removeEventListener("touchend",r),o.table.element.classList.remove("tabulator-block-select"),o.startWidth!==t.getWidth()&&(o.table.columnManager.verticalAlignHeaders(),o.dispatch("column-resized",t),o.dispatchExternal("columnResized",t.getComponent()))}this.dispatchExternal("columnResizing",t.getComponent()),o.table.options.resizableColumnGuide&&((s=document.createElement("span")).classList.add("tabulator-col-resize-guide"),o.table.element.appendChild(s),setTimeout((()=>{s.style.left=o.calcGuidePosition(e,t,i)+"px"}))),o.table.element.classList.add("tabulator-block-select"),e.stopPropagation(),o.startColumn.modules.edit&&(o.startColumn.modules.edit.blocked=!0),o.startX=void 0===e.clientX?e.touches[0].clientX:e.clientX,o.latestX=o.startX,o.startWidth=t.getWidth(),document.body.addEventListener("mousemove",n),document.body.addEventListener("mouseup",r),i.addEventListener("touchmove",n,{passive:!0}),i.addEventListener("touchend",r)}},ResizeRowsModule:class extends M{static moduleName="resizeRows";constructor(e){super(e),this.startColumn=!1,this.startY=!1,this.startHeight=!1,this.handle=null,this.prevHandle=null,this.registerTableOption("resizableRows",!1),this.registerTableOption("resizableRowGuide",!1)}initialize(){this.table.options.resizableRows&&this.subscribe("row-layout-after",this.initializeRow.bind(this))}initializeRow(e){var t=this,i=e.getElement(),s=document.createElement("div");s.className="tabulator-row-resize-handle";var o=document.createElement("div");o.className="tabulator-row-resize-handle prev",s.addEventListener("click",(function(e){e.stopPropagation()}));var n=function(i){t.startRow=e,t._mouseDown(i,e,s)};s.addEventListener("mousedown",n),s.addEventListener("touchstart",n,{passive:!0}),o.addEventListener("click",(function(e){e.stopPropagation()}));var r=function(i){var s=t.table.rowManager.prevDisplayRow(e);s&&(t.startRow=s,t._mouseDown(i,s,o))};o.addEventListener("mousedown",r),o.addEventListener("touchstart",r,{passive:!0}),i.appendChild(s),i.appendChild(o)}resize(e,t){t.setHeight(this.startHeight+((void 0===e.screenY?e.touches[0].screenY:e.screenY)-this.startY))}calcGuidePosition(e,t,i){var s=void 0===e.screenY?e.touches[0].screenY:e.screenY,o=i.getBoundingClientRect().y-this.table.element.getBoundingClientRect().y,n=this.table.element.getBoundingClientRect().y,r=t.element.getBoundingClientRect().top-n,a=s-this.startY;return Math.max(o+a,r)}_mouseDown(e,t,i){var s,o=this;function n(e){o.table.options.resizableRowGuide?s.style.top=o.calcGuidePosition(e,t,i)+"px":o.resize(e,t)}function 
r(e){o.table.options.resizableRowGuide&&(o.resize(e,t),s.remove()),document.body.removeEventListener("mouseup",n),document.body.removeEventListener("mousemove",n),i.removeEventListener("touchmove",n),i.removeEventListener("touchend",r),o.table.element.classList.remove("tabulator-block-select"),o.dispatchExternal("rowResized",t.getComponent())}o.dispatchExternal("rowResizing",t.getComponent()),o.table.options.resizableRowGuide&&((s=document.createElement("span")).classList.add("tabulator-row-resize-guide"),o.table.element.appendChild(s),setTimeout((()=>{s.style.top=o.calcGuidePosition(e,t,i)+"px"}))),o.table.element.classList.add("tabulator-block-select"),e.stopPropagation(),o.startY=void 0===e.screenY?e.touches[0].screenY:e.screenY,o.startHeight=t.getHeight(),document.body.addEventListener("mousemove",n),document.body.addEventListener("mouseup",r),i.addEventListener("touchmove",n,{passive:!0}),i.addEventListener("touchend",r)}},ResizeTableModule:class extends M{static moduleName="resizeTable";constructor(e){super(e),this.binding=!1,this.visibilityObserver=!1,this.resizeObserver=!1,this.containerObserver=!1,this.tableHeight=0,this.tableWidth=0,this.containerHeight=0,this.containerWidth=0,this.autoResize=!1,this.visible=!1,this.initialized=!1,this.initialRedraw=!1,this.registerTableOption("autoResize",!0)}initialize(){if(this.table.options.autoResize){var e,t=this.table;this.tableHeight=t.element.clientHeight,this.tableWidth=t.element.clientWidth,t.element.parentNode&&(this.containerHeight=t.element.parentNode.clientHeight,this.containerWidth=t.element.parentNode.clientWidth),"undefined"!=typeof IntersectionObserver&&"undefined"!=typeof ResizeObserver&&"virtual"===t.rowManager.getRenderMode()?(this.initializeVisibilityObserver(),this.autoResize=!0,this.resizeObserver=new ResizeObserver((e=>{if(!t.browserMobile||t.browserMobile&&(!t.modules.edit||t.modules.edit&&!t.modules.edit.currentCell)){var i=Math.floor(e[0].contentRect.height),s=Math.floor(e[0].contentRect.width);this.tableHeight==i&&this.tableWidth==s||(this.tableHeight=i,this.tableWidth=s,t.element.parentNode&&(this.containerHeight=t.element.parentNode.clientHeight,this.containerWidth=t.element.parentNode.clientWidth),this.redrawTable())}})),this.resizeObserver.observe(t.element),e=window.getComputedStyle(t.element),this.table.element.parentNode&&!this.table.rowManager.fixedHeight&&(e.getPropertyValue("max-height")||e.getPropertyValue("min-height"))&&(this.containerObserver=new ResizeObserver((e=>{if(!t.browserMobile||t.browserMobile&&(!t.modules.edit||t.modules.edit&&!t.modules.edit.currentCell)){var i=Math.floor(e[0].contentRect.height),s=Math.floor(e[0].contentRect.width);this.containerHeight==i&&this.containerWidth==s||(this.containerHeight=i,this.containerWidth=s,this.tableHeight=t.element.clientHeight,this.tableWidth=t.element.clientWidth),this.redrawTable()}})),this.containerObserver.observe(this.table.element.parentNode)),this.subscribe("table-resize",this.tableResized.bind(this))):(this.binding=function(){(!t.browserMobile||t.browserMobile&&(!t.modules.edit||t.modules.edit&&!t.modules.edit.currentCell))&&(t.columnManager.rerenderColumns(!0),t.redraw())},window.addEventListener("resize",this.binding)),this.subscribe("table-destroy",this.clearBindings.bind(this))}}initializeVisibilityObserver(){this.visibilityObserver=new 
IntersectionObserver((e=>{this.visible=e[0].isIntersecting,this.initialized?this.visible&&(this.redrawTable(this.initialRedraw),this.initialRedraw=!1):(this.initialized=!0,this.initialRedraw=!this.visible)})),this.visibilityObserver.observe(this.table.element)}redrawTable(e){this.initialized&&this.visible&&(this.table.columnManager.rerenderColumns(!0),this.table.redraw(e))}tableResized(){this.table.rowManager.redraw()}clearBindings(){this.binding&&window.removeEventListener("resize",this.binding),this.resizeObserver&&this.resizeObserver.unobserve(this.table.element),this.visibilityObserver&&this.visibilityObserver.unobserve(this.table.element),this.containerObserver&&this.containerObserver.unobserve(this.table.element.parentNode)}},ResponsiveLayoutModule:class extends M{static moduleName="responsiveLayout";static moduleExtensions=Oe;constructor(e){super(e),this.columns=[],this.hiddenColumns=[],this.mode="",this.index=0,this.collapseFormatter=[],this.collapseStartOpen=!0,this.collapseHandleColumn=!1,this.registerTableOption("responsiveLayout",!1),this.registerTableOption("responsiveLayoutCollapseStartOpen",!0),this.registerTableOption("responsiveLayoutCollapseUseFormatters",!0),this.registerTableOption("responsiveLayoutCollapseFormatter",!1),this.registerColumnOption("responsive")}initialize(){this.table.options.responsiveLayout&&(this.subscribe("column-layout",this.initializeColumn.bind(this)),this.subscribe("column-show",this.updateColumnVisibility.bind(this)),this.subscribe("column-hide",this.updateColumnVisibility.bind(this)),this.subscribe("columns-loaded",this.initializeResponsivity.bind(this)),this.subscribe("column-moved",this.initializeResponsivity.bind(this)),this.subscribe("column-add",this.initializeResponsivity.bind(this)),this.subscribe("column-delete",this.initializeResponsivity.bind(this)),this.subscribe("table-redrawing",this.tableRedraw.bind(this)),"collapse"===this.table.options.responsiveLayout&&(this.subscribe("row-data-changed",this.generateCollapsedRowContent.bind(this)),this.subscribe("row-init",this.initializeRow.bind(this)),this.subscribe("row-layout",this.layoutRow.bind(this))))}tableRedraw(e){-1===["fitColumns","fitDataStretch"].indexOf(this.layoutMode())&&(e||this.update())}initializeResponsivity(){var e=[];this.mode=this.table.options.responsiveLayout,this.collapseFormatter=this.table.options.responsiveLayoutCollapseFormatter||this.formatCollapsedData,this.collapseStartOpen=this.table.options.responsiveLayoutCollapseStartOpen,this.hiddenColumns=[],this.collapseFormatter&&(this.collapseFormatter=this.collapseFormatter.bind(this.table)),this.table.columnManager.columnsByIndex.forEach(((t,i)=>{t.modules.responsive&&t.modules.responsive.order&&t.modules.responsive.visible&&(t.modules.responsive.index=i,e.push(t),t.visible||"collapse"!==this.mode||this.hiddenColumns.push(t))})),e=(e=e.reverse()).sort(((e,t)=>t.modules.responsive.order-e.modules.responsive.order||t.modules.responsive.index-e.modules.responsive.index)),this.columns=e,"collapse"===this.mode&&this.generateCollapsedContent();for(let e of this.table.columnManager.columnsByIndex)if("responsiveCollapse"==e.definition.formatter){this.collapseHandleColumn=e;break}this.collapseHandleColumn&&(this.hiddenColumns.length?this.collapseHandleColumn.show():this.collapseHandleColumn.hide())}initializeColumn(e){var t=e.getDefinition();e.modules.responsive={order:void 0===t.responsive?1:t.responsive,visible:!1!==t.visible}}initializeRow(e){var 
t;"calc"!==e.type&&((t=document.createElement("div")).classList.add("tabulator-responsive-collapse"),e.modules.responsiveLayout={element:t,open:this.collapseStartOpen},this.collapseStartOpen||(t.style.display="none"))}layoutRow(e){var t=e.getElement();e.modules.responsiveLayout&&(t.appendChild(e.modules.responsiveLayout.element),this.generateCollapsedRowContent(e))}updateColumnVisibility(e,t){!t&&e.modules.responsive&&(e.modules.responsive.visible=e.visible,this.initializeResponsivity())}hideColumn(e){var t=this.hiddenColumns.length;e.hide(!1,!0),"collapse"===this.mode&&(this.hiddenColumns.unshift(e),this.generateCollapsedContent(),this.collapseHandleColumn&&!t&&this.collapseHandleColumn.show())}showColumn(e){var t;e.show(!1,!0),e.setWidth(e.getWidth()),"collapse"===this.mode&&((t=this.hiddenColumns.indexOf(e))>-1&&this.hiddenColumns.splice(t,1),this.generateCollapsedContent(),this.collapseHandleColumn&&!this.hiddenColumns.length&&this.collapseHandleColumn.hide())}update(){for(var e=!0;e;){let t="fitColumns"==this.table.modules.layout.getMode()?this.table.columnManager.getFlexBaseWidth():this.table.columnManager.getWidth(),i=(this.table.options.headerVisible?this.table.columnManager.element.clientWidth:this.table.element.clientWidth)-t;if(i<0){let t=this.columns[this.index];t?(this.hideColumn(t),this.index++):e=!1}else{let t=this.columns[this.index-1];t&&i>0&&i>=t.getWidth()?(this.showColumn(t),this.index--):e=!1}this.table.rowManager.activeRowsCount||this.table.rowManager.renderEmptyScroll()}}generateCollapsedContent(){this.table.rowManager.getDisplayRows().forEach((e=>{this.generateCollapsedRowContent(e)}))}generateCollapsedRowContent(e){var t,i;if(e.modules.responsiveLayout){for(t=e.modules.responsiveLayout.element;t.firstChild;)t.removeChild(t.firstChild);(i=this.collapseFormatter(this.generateCollapsedRowData(e)))&&t.appendChild(i),e.calcHeight(!0)}}generateCollapsedRowData(e){var t,i=e.getData(),s=[];return this.hiddenColumns.forEach((o=>{var n=o.getFieldValue(i);if(o.definition.title&&o.field)if(o.modules.format&&this.table.options.responsiveLayoutCollapseUseFormatters){function r(e){e()}t={value:!1,data:{},getValue:function(){return n},getData:function(){return i},getType:function(){return"cell"},getElement:function(){return document.createElement("div")},getRow:function(){return e.getComponent()},getColumn:function(){return o.getComponent()},getTable:()=>this.table},s.push({field:o.field,title:o.definition.title,value:o.modules.format.formatter.call(this.table.modules.format,t,o.modules.format.params,r)})}else s.push({field:o.field,title:o.definition.title,value:n})})),s}formatCollapsedData(e){var t=document.createElement("table");return e.forEach((e=>{var i,s=document.createElement("tr"),o=document.createElement("td"),n=document.createElement("td"),r=document.createElement("strong");o.appendChild(r),this.modules.localize.bind("columns|"+e.field,(function(t){r.innerHTML=t||e.title})),e.value instanceof Node?((i=document.createElement("div")).appendChild(e.value),n.appendChild(i)):n.innerHTML=e.value,s.appendChild(o),s.appendChild(n),t.appendChild(s)})),Object.keys(e).length?t:""}},SelectRangeModule:class extends M{static moduleName="selectRange";static moduleInitOrder=1;static 
moduleExtensions=Ie;constructor(e){super(e),this.selecting="cell",this.mousedown=!1,this.ranges=[],this.overlay=null,this.rowHeader=null,this.layoutChangeTimeout=null,this.columnSelection=!1,this.rowSelection=!1,this.maxRanges=0,this.activeRange=!1,this.blockKeydown=!1,this.keyDownEvent=this._handleKeyDown.bind(this),this.mouseUpEvent=this._handleMouseUp.bind(this),this.registerTableOption("selectableRange",!1),this.registerTableOption("selectableRangeColumns",!1),this.registerTableOption("selectableRangeRows",!1),this.registerTableOption("selectableRangeClearCells",!1),this.registerTableOption("selectableRangeClearCellsValue",void 0),this.registerTableFunction("getRangesData",this.getRangesData.bind(this)),this.registerTableFunction("getRanges",this.getRanges.bind(this)),this.registerTableFunction("addRange",this.addRangeFromComponent.bind(this)),this.registerComponentFunction("cell","getRanges",this.cellGetRanges.bind(this)),this.registerComponentFunction("row","getRanges",this.rowGetRanges.bind(this)),this.registerComponentFunction("column","getRanges",this.colGetRanges.bind(this))}initialize(){this.options("selectableRange")&&(this.options("selectableRows")?console.warn("SelectRange functionality cannot be used in conjunction with row selection"):(this.maxRanges=this.options("selectableRange"),this.initializeTable(),this.initializeWatchers()),this.options("columns").findIndex((e=>e.frozen))>0&&console.warn("Having frozen column in arbitrary position with selectRange option may result in unpredictable behavior."),this.options("columns").filter((e=>e.frozen))>1&&console.warn("Having multiple frozen columns with selectRange option may result in unpredictable behavior."))}initializeTable(){this.overlay=document.createElement("div"),this.overlay.classList.add("tabulator-range-overlay"),this.rangeContainer=document.createElement("div"),this.rangeContainer.classList.add("tabulator-range-container"),this.activeRangeCellElement=document.createElement("div"),this.activeRangeCellElement.classList.add("tabulator-range-cell-active"),this.overlay.appendChild(this.rangeContainer),this.overlay.appendChild(this.activeRangeCellElement),this.table.rowManager.element.addEventListener("keydown",this.keyDownEvent),this.resetRanges(),this.table.rowManager.element.appendChild(this.overlay),this.table.columnManager.element.setAttribute("tabindex",0),this.table.element.classList.add("tabulator-ranges")}initializeWatchers(){this.columnSelection=this.options("selectableRangeColumns"),this.rowSelection=this.options("selectableRangeRows"),this.subscribe("column-init",this.initializeColumn.bind(this)),this.subscribe("column-mousedown",this.handleColumnMouseDown.bind(this)),this.subscribe("column-mousemove",this.handleColumnMouseMove.bind(this)),this.subscribe("column-resized",this.handleColumnResized.bind(this)),this.subscribe("column-moving",this.handleColumnMoving.bind(this)),this.subscribe("column-moved",this.handleColumnMoved.bind(this)),this.subscribe("column-width",this.layoutChange.bind(this)),this.subscribe("column-height",this.layoutChange.bind(this)),this.subscribe("column-resized",this.layoutChange.bind(this)),this.subscribe("columns-loaded",this.updateHeaderColumn.bind(this)),this.subscribe("cell-height",this.layoutChange.bind(this)),this.subscribe("cell-rendered",this.renderCell.bind(this)),this.subscribe("cell-mousedown",this.handleCellMouseDown.bind(this)),this.subscribe("cell-mousemove",this.handleCellMouseMove.bind(this)),this.subscribe("cell-click",this.handleCellClick.bind(this)),this.subscribe("ce
ll-editing",this.handleEditingCell.bind(this)),this.subscribe("page-changed",this.redraw.bind(this)),this.subscribe("scroll-vertical",this.layoutChange.bind(this)),this.subscribe("scroll-horizontal",this.layoutChange.bind(this)),this.subscribe("data-destroy",this.tableDestroyed.bind(this)),this.subscribe("data-processed",this.resetRanges.bind(this)),this.subscribe("table-layout",this.layoutElement.bind(this)),this.subscribe("table-redraw",this.redraw.bind(this)),this.subscribe("table-destroy",this.tableDestroyed.bind(this)),this.subscribe("edit-editor-clear",this.finishEditingCell.bind(this)),this.subscribe("edit-blur",this.restoreFocus.bind(this)),this.subscribe("keybinding-nav-prev",this.keyNavigate.bind(this,"left")),this.subscribe("keybinding-nav-next",this.keyNavigate.bind(this,"right")),this.subscribe("keybinding-nav-left",this.keyNavigate.bind(this,"left")),this.subscribe("keybinding-nav-right",this.keyNavigate.bind(this,"right")),this.subscribe("keybinding-nav-up",this.keyNavigate.bind(this,"up")),this.subscribe("keybinding-nav-down",this.keyNavigate.bind(this,"down")),this.subscribe("keybinding-nav-range",this.keyNavigateRange.bind(this))}initializeColumn(e){this.columnSelection&&e.definition.headerSort&&"icon"!==this.options("headerSortClickElement")&&console.warn("Using column headerSort with selectableRangeColumns option may result in unpredictable behavior. Consider using headerSortClickElement: 'icon'."),e.modules.edit}updateHeaderColumn(){var e;this.rowSelection&&(this.rowHeader=this.table.columnManager.getVisibleColumnsByIndex()[0],this.rowHeader&&(this.rowHeader.definition.cssClass=this.rowHeader.definition.cssClass+" tabulator-range-row-header",this.rowHeader.definition.headerSort&&console.warn("Using column headerSort with selectableRangeRows option may result in unpredictable behavior"),this.rowHeader.definition.editor&&console.warn("Using column editor with selectableRangeRows option may result in unpredictable behavior"))),this.table.modules.frozenColumns&&this.table.modules.frozenColumns.active&&((e=this.table.modules.frozenColumns.getFrozenColumns()).length>1||1===e.length&&e[0]!==this.rowHeader)&&console.warn("Using frozen columns that are not the range header in combination with the selectRange option may result in unpredictable behavior")}getRanges(){return this.ranges.map((e=>e.getComponent()))}getRangesData(){return this.ranges.map((e=>e.getData()))}addRangeFromComponent(e,t){return e=e?e._cell:null,t=t?t._cell:null,this.addRange(e,t)}cellGetRanges(e){var t=[];return t=e.column===this.rowHeader?this.ranges.filter((t=>t.occupiesRow(e.row))):this.ranges.filter((t=>t.occupies(e))),t.map((e=>e.getComponent()))}rowGetRanges(e){var t=this.ranges.filter((t=>t.occupiesRow(e)));return t.map((e=>e.getComponent()))}colGetRanges(e){var t=this.ranges.filter((t=>t.occupiesColumn(e)));return t.map((e=>e.getComponent()))}_handleMouseUp(e){this.mousedown=!1,document.removeEventListener("mouseup",this.mouseUpEvent)}_handleKeyDown(e){if(!this.blockKeydown&&(!this.table.modules.edit||this.table.modules.edit&&!this.table.modules.edit.currentCell)){if("Enter"===e.key){if(this.table.modules.edit&&this.table.modules.edit.currentCell)return;this.table.modules.edit.editCell(this.getActiveCell()),e.preventDefault()}"Backspace"!==e.key&&"Delete"!==e.key||!this.options("selectableRangeClearCells")||this.activeRange&&this.activeRange.clearValues()}}initializeFocus(e){var 
t;this.restoreFocus();try{document.selection?((t=document.body.createTextRange()).moveToElementText(e.getElement()),t.select()):window.getSelection&&((t=document.createRange()).selectNode(e.getElement()),window.getSelection().removeAllRanges(),window.getSelection().addRange(t))}catch(e){}}restoreFocus(e){return this.table.rowManager.element.focus(),!0}handleColumnResized(e){var t;"column"!==this.selecting&&"all"!==this.selecting||(t=this.ranges.some((t=>t.occupiesColumn(e))),t&&this.ranges.forEach((t=>{t.getColumns(!0).forEach((t=>{t!==e&&t.setWidth(e.width)}))})))}handleColumnMoving(e,t){this.resetRanges().setBounds(t),this.overlay.style.visibility="hidden"}handleColumnMoved(e,t,i){this.activeRange.setBounds(e),this.layoutElement()}handleColumnMouseDown(e,t){(2!==e.button||"column"!==this.selecting&&"all"!==this.selecting||!this.activeRange.occupiesColumn(t))&&(this.table.options.movableColumns&&"column"===this.selecting&&this.activeRange.occupiesColumn(t)||(this.mousedown=!0,document.addEventListener("mouseup",this.mouseUpEvent),this.newSelection(e,t)))}handleColumnMouseMove(e,t){t!==this.rowHeader&&this.mousedown&&"all"!==this.selecting&&this.activeRange.setBounds(!1,t,!0)}renderCell(e){var t=e.getElement(),i=this.ranges.findIndex((t=>t.occupies(e)));t.classList.toggle("tabulator-range-selected",-1!==i),t.classList.toggle("tabulator-range-only-cell-selected",1===this.ranges.length&&this.ranges[0].atTopLeft(e)&&this.ranges[0].atBottomRight(e)),t.dataset.range=i}handleCellMouseDown(e,t){2===e.button&&(this.activeRange.occupies(t)||("row"===this.selecting||"all"===this.selecting)&&this.activeRange.occupiesRow(t.row))||(this.mousedown=!0,document.addEventListener("mouseup",this.mouseUpEvent),this.newSelection(e,t))}handleCellMouseMove(e,t){this.mousedown&&"all"!==this.selecting&&this.activeRange.setBounds(!1,t,!0)}handleCellClick(e,t){this.initializeFocus(t)}handleEditingCell(e){this.activeRange&&this.activeRange.setBounds(e)}finishEditingCell(){this.blockKeydown=!0,this.table.rowManager.element.focus(),setTimeout((()=>{this.blockKeydown=!1}),10)}keyNavigate(e,t){this.navigate(!1,!1,e),t.preventDefault()}keyNavigateRange(e,t,i,s){this.navigate(i,s,t),e.preventDefault()}navigate(e,t,i){var s,o,n,r,a,l,h;if(this.table.modules.edit&&this.table.modules.edit.currentCell)return!1;if(this.ranges.length>1&&(this.ranges=this.ranges.filter((e=>e===this.activeRange?(e.setEnd(e.start.row,e.start.col),!0):(e.destroy(),!1)))),o=this.activeRange,r=(n=t?o.end:o.start).row,a=n.col,e)switch(i){case"left":a=this.findJumpCellLeft(o.start.row,n.col);break;case"right":a=this.findJumpCellRight(o.start.row,n.col);break;case"up":r=this.findJumpCellUp(n.row,o.start.col);break;case"down":r=this.findJumpCellDown(n.row,o.start.col)}else{if(t&&("row"===this.selecting&&("left"===i||"right"===i)||"column"===this.selecting&&("up"===i||"down"===i)))return;switch(i){case"left":a=Math.max(a-1,0);break;case"right":a=Math.min(a+1,this.getTableColumns().length-1);break;case"up":r=Math.max(r-1,0);break;case"down":r=Math.min(r+1,this.getTableRows().length-1)}}return this.rowHeader&&0===a&&(a=1),s=a!==n.col||r!==n.row,t||o.setStart(r,a),o.setEnd(r,a),t||(this.selecting="cell"),s?(l=this.getRowByRangePos(o.end.row),h=this.getColumnByRangePos(o.end.col),"left"!==i&&"right"!==i||null!==h.getElement().parentNode?"up"!==i&&"down"!==i||null!==l.getElement().parentNode?this.autoScroll(o,l.getElement(),h.getElement()):l.getComponent().scrollTo(void 0,!1):h.getComponent().scrollTo(void 0,!1),this.layoutElement(),!0):void 
0}rangeRemoved(e){this.ranges=this.ranges.filter((t=>t!==e)),this.activeRange===e&&(this.ranges.length?this.activeRange=this.ranges[this.ranges.length-1]:this.addRange()),this.layoutElement()}findJumpRow(e,t,i,s,o){return i&&(t=t.reverse()),this.findJumpItem(s,o,t,(function(t){return t.getData()[e.getField()]}))}findJumpCol(e,t,i,s,o){return i&&(t=t.reverse()),this.findJumpItem(s,o,t,(function(t){return e.getData()[t.getField()]}))}findJumpItem(e,t,i,s){var o;for(let n of i){let i=s(n);if(e){if(o=n,i)break}else if(t){if(o=n,i)break}else{if(!i)break;o=n}}return o}findJumpCellLeft(e,t){var i=this.getRowByRangePos(e),s=this.getTableColumns(),o=this.isEmpty(i.getData()[s[t].getField()]),n=!!s[t-1]&&this.isEmpty(i.getData()[s[t-1].getField()]),r=this.rowHeader?s.slice(1,t):s.slice(0,t),a=this.findJumpCol(i,r,!0,o,n);return a?a.getPosition()-1:t}findJumpCellRight(e,t){var i=this.getRowByRangePos(e),s=this.getTableColumns(),o=this.isEmpty(i.getData()[s[t].getField()]),n=!!s[t+1]&&this.isEmpty(i.getData()[s[t+1].getField()]),r=this.findJumpCol(i,s.slice(t+1,s.length),!1,o,n);return r?r.getPosition()-1:t}findJumpCellUp(e,t){var i=this.getColumnByRangePos(t),s=this.getTableRows(),o=this.isEmpty(s[e].getData()[i.getField()]),n=!!s[e-1]&&this.isEmpty(s[e-1].getData()[i.getField()]),r=this.findJumpRow(i,s.slice(0,e),!0,o,n);return r?r.position-1:e}findJumpCellDown(e,t){var i=this.getColumnByRangePos(t),s=this.getTableRows(),o=this.isEmpty(s[e].getData()[i.getField()]),n=!!s[e+1]&&this.isEmpty(s[e+1].getData()[i.getField()]),r=this.findJumpRow(i,s.slice(e+1,s.length),!1,o,n);return r?r.position-1:e}newSelection(e,t){var i;if("column"===t.type){if(!this.columnSelection)return;if(t===this.rowHeader){i=this.resetRanges(),this.selecting="all";var s,o=this.getCell(-1,-1);return s=this.rowHeader?this.getCell(0,1):this.getCell(0,0),void i.setBounds(s,o)}this.selecting="column"}else t.column===this.rowHeader?this.selecting="row":this.selecting="cell";e.shiftKey?this.activeRange.setBounds(!1,t):e.ctrlKey?this.addRange().setBounds(t):this.resetRanges().setBounds(t)}autoScroll(e,t,i){var s,o,n,r,a,l=this.table.rowManager.element;void 0===t&&(t=this.getRowByRangePos(e.end.row).getElement()),void 0===i&&(i=this.getColumnByRangePos(e.end.col).getElement()),this.rowHeader&&(s=this.rowHeader.getElement()),o={left:i.offsetLeft,right:i.offsetLeft+i.offsetWidth,top:t.offsetTop,bottom:t.offsetTop+t.offsetHeight},n={left:l.scrollLeft,right:Math.ceil(l.scrollLeft+l.clientWidth),top:l.scrollTop,bottom:l.scrollTop+l.offsetHeight-this.table.rowManager.scrollbarWidth},s&&(n.left+=s.offsetWidth),r=n.left<o.left&&o.right<n.right,a=n.top<o.top&&o.bottom<n.bottom,r||(o.left<n.left?l.scrollLeft=o.left-(s?s.offsetWidth:0):o.right>n.right&&(l.scrollLeft=o.right-l.clientWidth)),a||(o.top<n.top?l.scrollTop=o.top:o.bottom>n.bottom&&(l.scrollTop=o.bottom-l.clientHeight))}layoutChange(){this.overlay.style.visibility="hidden",clearTimeout(this.layoutChangeTimeout),this.layoutChangeTimeout=setTimeout(this.layoutRanges.bind(this),200)}redraw(e){e&&(this.selecting="cell",this.resetRanges(),this.layoutElement())}layoutElement(e){(e?this.table.rowManager.getVisibleRows(!0):this.table.rowManager.getRows()).forEach((e=>{"row"===e.type&&(this.layoutRow(e),e.cells.forEach((e=>this.renderCell(e))))})),this.getTableColumns().forEach((e=>{this.layoutColumn(e)})),this.layoutRanges()}layoutRow(e){var t=e.getElement(),i=!1,s=this.ranges.some((t=>t.occupiesRow(e)));"row"===this.selecting?i=s:"all"===this.selecting&&(i=!0),t.classList.toggle("tabulator-range-selected",i),t.classList.toggle("tabulator-range-highlight",s)}layoutColumn(e){var 
t=e.getElement(),i=!1,s=this.ranges.some((t=>t.occupiesColumn(e)));"column"===this.selecting?i=s:"all"===this.selecting&&(i=!0),t.classList.toggle("tabulator-range-selected",i),t.classList.toggle("tabulator-range-highlight",s)}layoutRanges(){var e,t,i;this.table.initialized&&(e=this.getActiveCell())&&(t=e.getElement(),i=e.row.getElement(),this.table.rtl?this.activeRangeCellElement.style.right=i.offsetWidth-t.offsetLeft-t.offsetWidth+"px":this.activeRangeCellElement.style.left=i.offsetLeft+t.offsetLeft+"px",this.activeRangeCellElement.style.top=i.offsetTop+"px",this.activeRangeCellElement.style.width=t.offsetWidth+"px",this.activeRangeCellElement.style.height=i.offsetHeight+"px",this.ranges.forEach((e=>e.layout())),this.overlay.style.visibility="visible")}getCell(e,t){var i;return t<0&&(t=this.getTableColumns().length+t)<0?null:(e<0&&(e=this.getTableRows().length+e),(i=this.table.rowManager.getRowFromPosition(e+1))?i.getCells(!1,!0).filter((e=>e.column.visible))[t]:null)}getActiveCell(){return this.getCell(this.activeRange.start.row,this.activeRange.start.col)}getRowByRangePos(e){return this.getTableRows()[e]}getColumnByRangePos(e){return this.getTableColumns()[e]}getTableRows(){return this.table.rowManager.getDisplayRows().filter((e=>"row"===e.type))}getTableColumns(){return this.table.columnManager.getVisibleColumnsByIndex()}addRange(e,t){var i;return!0!==this.maxRanges&&this.ranges.length>=this.maxRanges&&this.ranges.shift().destroy(),i=new Ve(this.table,this,e,t),this.activeRange=i,this.ranges.push(i),this.rangeContainer.appendChild(i.element),i}resetRanges(){var e,t,i;return this.ranges.forEach((e=>e.destroy())),this.ranges=[],e=this.addRange(),this.table.rowManager.activeRows.length&&(i=this.table.rowManager.activeRows[0].cells.filter((e=>e.column.visible)),(t=i[this.rowHeader?1:0])&&(e.setBounds(t),this.initializeFocus(t))),e}tableDestroyed(){document.removeEventListener("mouseup",this.mouseUpEvent),this.table.rowManager.element.removeEventListener("keydown",this.keyDownEvent)}selectedRows(e){return e?this.activeRange.getRows().map((e=>e.getComponent())):this.activeRange.getRows()}selectedColumns(e){return e?this.activeRange.getColumns().map((e=>e.getComponent())):this.activeRange.getColumns()}isEmpty(e){return null==e||""===e}},SelectRowModule:class extends M{static moduleName="selectRow";static 
moduleExtensions=Ae;constructor(e){super(e),this.selecting=!1,this.lastClickedRow=!1,this.selectPrev=[],this.selectedRows=[],this.headerCheckboxElement=null,this.registerTableOption("selectableRows","highlight"),this.registerTableOption("selectableRowsRangeMode","drag"),this.registerTableOption("selectableRowsRollingSelection",!0),this.registerTableOption("selectableRowsPersistence",!0),this.registerTableOption("selectableRowsCheck",(function(e,t){return!0})),this.registerTableFunction("selectRow",this.selectRows.bind(this)),this.registerTableFunction("deselectRow",this.deselectRows.bind(this)),this.registerTableFunction("toggleSelectRow",this.toggleRow.bind(this)),this.registerTableFunction("getSelectedRows",this.getSelectedRows.bind(this)),this.registerTableFunction("getSelectedData",this.getSelectedData.bind(this)),this.registerComponentFunction("row","select",this.selectRows.bind(this)),this.registerComponentFunction("row","deselect",this.deselectRows.bind(this)),this.registerComponentFunction("row","toggleSelect",this.toggleRow.bind(this)),this.registerComponentFunction("row","isSelected",this.isRowSelected.bind(this))}initialize(){this.deprecatedOptionsCheck(),"highlight"===this.table.options.selectableRows&&this.table.options.selectableRange&&(this.table.options.selectableRows=!1),!1!==this.table.options.selectableRows&&(this.subscribe("row-init",this.initializeRow.bind(this)),this.subscribe("row-deleting",this.rowDeleted.bind(this)),this.subscribe("rows-wipe",this.clearSelectionData.bind(this)),this.subscribe("rows-retrieve",this.rowRetrieve.bind(this)),this.table.options.selectableRows&&!this.table.options.selectableRowsPersistence&&this.subscribe("data-refreshing",this.deselectRows.bind(this)))}deprecatedOptionsCheck(){}rowRetrieve(e,t){return"selected"===e?this.selectedRows:t}rowDeleted(e){this._deselectRow(e,!0)}clearSelectionData(e){var t=this.selectedRows.length;this.selecting=!1,this.lastClickedRow=!1,this.selectPrev=[],this.selectedRows=[],t&&!0!==e&&this._rowSelectionChanged()}initializeRow(e){var t=this,i=t.checkRowSelectability(e),s=e.getElement(),o=function(){setTimeout((function(){t.selecting=!1}),50),document.body.removeEventListener("mouseup",o)};e.modules.select={selected:!1},s.classList.toggle("tabulator-selectable",i),s.classList.toggle("tabulator-unselectable",!i),t.checkRowSelectability(e)&&t.table.options.selectableRows&&"highlight"!=t.table.options.selectableRows&&("click"===t.table.options.selectableRowsRangeMode?s.addEventListener("click",this.handleComplexRowClick.bind(this,e)):(s.addEventListener("click",(function(i){t.table.modExists("edit")&&t.table.modules.edit.getCurrentCell()||t.table._clearSelection(),t.selecting||t.toggleRow(e)})),s.addEventListener("mousedown",(function(i){if(i.shiftKey)return t.table._clearSelection(),t.selecting=!0,t.selectPrev=[],document.body.addEventListener("mouseup",o),document.body.addEventListener("keyup",o),t.toggleRow(e),!1})),s.addEventListener("mouseenter",(function(i){t.selecting&&(t.table._clearSelection(),t.toggleRow(e),t.selectPrev[1]==e&&t.toggleRow(t.selectPrev[0]))})),s.addEventListener("mouseout",(function(i){t.selecting&&(t.table._clearSelection(),t.selectPrev.unshift(e))}))))}handleComplexRowClick(e,t){if(t.shiftKey){this.table._clearSelection(),this.lastClickedRow=this.lastClickedRow||e;var 
i=this.table.rowManager.getDisplayRowIndex(this.lastClickedRow),s=this.table.rowManager.getDisplayRowIndex(e),o=i<=s?i:s,n=i>=s?i:s,r=this.table.rowManager.getDisplayRows().slice(0).splice(o,n-o+1);t.ctrlKey||t.metaKey?(r.forEach((t=>{t!==this.lastClickedRow&&(!0===this.table.options.selectableRows||this.isRowSelected(e)||this.selectedRows.length<this.table.options.selectableRows)&&this.toggleRow(t)})),this.lastClickedRow=e):(this.deselectRows(void 0,!0),!0!==this.table.options.selectableRows&&r.length>this.table.options.selectableRows&&(r=r.slice(0,this.table.options.selectableRows)),this.selectRows(r)),this.table._clearSelection()}else t.ctrlKey||t.metaKey?(this.toggleRow(e),this.lastClickedRow=e):(this.deselectRows(void 0,!0),this.selectRows(e),this.lastClickedRow=e)}checkRowSelectability(e){return!(!e||"row"!==e.type)&&this.table.options.selectableRowsCheck.call(this.table,e.getComponent())}toggleRow(e){this.checkRowSelectability(e)&&(e.modules.select&&e.modules.select.selected?this._deselectRow(e):this._selectRow(e))}selectRows(e){var t,i,s=[];switch(typeof e){case"undefined":t=this.table.rowManager.rows;break;case"number":t=this.table.rowManager.findRow(e);break;case"string":(t=this.table.rowManager.findRow(e))||(t=this.table.rowManager.getRows(e));break;default:t=e}Array.isArray(t)?t.length&&(t.forEach((e=>{(i=this._selectRow(e,!0,!0))&&s.push(i)})),this._rowSelectionChanged(!1,s)):t&&this._selectRow(t,!1,!0)}_selectRow(e,t,i){if(!isNaN(this.table.options.selectableRows)&&!0!==this.table.options.selectableRows&&!i&&this.selectedRows.length>=this.table.options.selectableRows){if(!this.table.options.selectableRowsRollingSelection)return!1;this._deselectRow(this.selectedRows[0])}var s=this.table.rowManager.findRow(e);if(s){if(-1==this.selectedRows.indexOf(s))return s.getElement().classList.add("tabulator-selected"),s.modules.select||(s.modules.select={}),s.modules.select.selected=!0,s.modules.select.checkboxEl&&(s.modules.select.checkboxEl.checked=!0),this.selectedRows.push(s),this.table.options.dataTreeSelectPropagate&&this.childRowSelection(s,!0),this.dispatchExternal("rowSelected",s.getComponent()),this._rowSelectionChanged(t,s),s}else t||console.warn("Selection Error - No such row found, ignoring selection:"+e)}isRowSelected(e){return-1!==this.selectedRows.indexOf(e)}deselectRows(e,t){var i,s,o=[];switch(typeof e){case"undefined":i=Object.assign([],this.selectedRows);break;case"number":i=this.table.rowManager.findRow(e);break;case"string":(i=this.table.rowManager.findRow(e))||(i=this.table.rowManager.getRows(e));break;default:i=e}Array.isArray(i)?i.length&&(i.forEach((e=>{(s=this._deselectRow(e,!0,!0))&&o.push(s)})),this._rowSelectionChanged(t,[],o)):i&&this._deselectRow(i,t,!0)}_deselectRow(e,t){var i,s,o=this,n=o.table.rowManager.findRow(e);if(n){if((i=o.selectedRows.findIndex((function(e){return e==n})))>-1)return(s=n.getElement())&&s.classList.remove("tabulator-selected"),n.modules.select||(n.modules.select={}),n.modules.select.selected=!1,n.modules.select.checkboxEl&&(n.modules.select.checkboxEl.checked=!1),o.selectedRows.splice(i,1),this.table.options.dataTreeSelectPropagate&&this.childRowSelection(n,!1),this.dispatchExternal("rowDeselected",n.getComponent()),o._rowSelectionChanged(t,void 0,n),n}else t||console.warn("Deselection Error - No such row found, ignoring selection:"+e)}getSelectedData(){var e=[];return this.selectedRows.forEach((function(t){e.push(t.getData())})),e}getSelectedRows(){var e=[];return 
this.selectedRows.forEach((function(t){e.push(t.getComponent())})),e}_rowSelectionChanged(e,t=[],i=[]){this.headerCheckboxElement&&(0===this.selectedRows.length?(this.headerCheckboxElement.checked=!1,this.headerCheckboxElement.indeterminate=!1):this.table.rowManager.rows.length===this.selectedRows.length?(this.headerCheckboxElement.checked=!0,this.headerCheckboxElement.indeterminate=!1):(this.headerCheckboxElement.indeterminate=!0,this.headerCheckboxElement.checked=!1)),e||(Array.isArray(t)||(t=[t]),t=t.map((e=>e.getComponent())),Array.isArray(i)||(i=[i]),i=i.map((e=>e.getComponent())),this.dispatchExternal("rowSelectionChanged",this.getSelectedData(),this.getSelectedRows(),t,i))}registerRowSelectCheckbox(e,t){e._row.modules.select||(e._row.modules.select={}),e._row.modules.select.checkboxEl=t}registerHeaderSelectCheckbox(e){this.headerCheckboxElement=e}childRowSelection(e,t){var i=this.table.modules.dataTree.getChildren(e,!0,!0);if(t)for(let e of i)this._selectRow(e,!0);else for(let e of i)this._deselectRow(e,!0)}},SortModule:je,SpreadsheetModule:class extends M{static moduleName="spreadsheet";constructor(e){super(e),this.sheets=[],this.element=null,this.registerTableOption("spreadsheet",!1),this.registerTableOption("spreadsheetRows",50),this.registerTableOption("spreadsheetColumns",50),this.registerTableOption("spreadsheetColumnDefinition",{}),this.registerTableOption("spreadsheetOutputFull",!1),this.registerTableOption("spreadsheetData",!1),this.registerTableOption("spreadsheetSheets",!1),this.registerTableOption("spreadsheetSheetTabs",!1),this.registerTableOption("spreadsheetSheetTabsElement",!1),this.registerTableFunction("setSheets",this.setSheets.bind(this)),this.registerTableFunction("addSheet",this.addSheet.bind(this)),this.registerTableFunction("getSheets",this.getSheets.bind(this)),this.registerTableFunction("getSheetDefinitions",this.getSheetDefinitions.bind(this)),this.registerTableFunction("setSheetData",this.setSheetData.bind(this)),this.registerTableFunction("getSheet",this.getSheet.bind(this)),this.registerTableFunction("getSheetData",this.getSheetData.bind(this)),this.registerTableFunction("clearSheet",this.clearSheet.bind(this)),this.registerTableFunction("removeSheet",this.removeSheetFunc.bind(this)),this.registerTableFunction("activeSheet",this.activeSheetFunc.bind(this))}initialize(){this.options("spreadsheet")&&(this.subscribe("table-initialized",this.tableInitialized.bind(this)),this.subscribe("data-loaded",this.loadRemoteData.bind(this)),this.table.options.index="_id",this.options("spreadsheetData")&&this.options("spreadsheetSheets")&&(console.warn("You cannot use spreadsheetData and spreadsheetSheets at the same time, ignoring spreadsheetData"),this.table.options.spreadsheetData=!1),this.compatibilityCheck(),this.options("spreadsheetSheetTabs")&&this.initializeTabset())}compatibilityCheck(){this.options("data")&&console.warn("Do not use the data option when working with spreadsheets, use either spreadsheetData or spreadsheetSheets to pass data into the table"),this.options("pagination")&&console.warn("The spreadsheet module is not compatible with the pagination module"),this.options("groupBy")&&console.warn("The spreadsheet module is not compatible with the row grouping module"),this.options("responsiveCollapse")&&console.warn("The spreadsheet module is not compatible with the responsive collapse module")}initializeTabset(){this.element=document.createElement("div"),this.element.classList.add("tabulator-spreadsheet-tabs");var 
e=this.options("spreadsheetSheetTabsElement");!e||e instanceof HTMLElement||(e=document.querySelector(e))||console.warn("Unable to find element matching spreadsheetSheetTabsElement selector:",this.options("spreadsheetSheetTabsElement")),e?e.appendChild(this.element):this.footerAppend(this.element)}tableInitialized(){this.sheets.length?this.loadSheet(this.sheets[0]):this.options("spreadsheetSheets")?this.loadSheets(this.options("spreadsheetSheets")):this.options("spreadsheetData")&&this.loadData(this.options("spreadsheetData"))}loadRemoteData(e,t,i){return console.log("data",e,t,i),Array.isArray(e)?(this.table.dataLoader.clearAlert(),this.dispatchExternal("dataLoaded",e),!e.length||Array.isArray(e[0])?this.loadData(e):this.loadSheets(e)):console.error("Spreadsheet Loading Error - Unable to process remote data due to invalid data type \nExpecting: array \nReceived: ",typeof e,"\nData: ",e),!1}loadData(e){var t={data:e};this.loadSheet(this.newSheet(t))}destroySheets(){this.sheets.forEach((e=>{e.destroy()})),this.sheets=[],this.activeSheet=null}loadSheets(e){Array.isArray(e)||(e=[]),this.destroySheets(),e.forEach((e=>{this.newSheet(e)})),this.loadSheet(this.sheets[0])}loadSheet(e){this.activeSheet!==e&&(this.activeSheet&&this.activeSheet.unload(),this.activeSheet=e,e.load())}newSheet(e={}){var t;return e.rows||(e.rows=this.options("spreadsheetRows")),e.columns||(e.columns=this.options("spreadsheetColumns")),t=new Xe(this,e),this.sheets.push(t),this.element&&this.element.appendChild(t.element),t}removeSheet(e){var t,i=this.sheets.indexOf(e);this.sheets.length>1?i>-1&&(this.sheets.splice(i,1),e.destroy(),this.activeSheet===e&&((t=this.sheets[i-1]||this.sheets[0])?this.loadSheet(t):this.activeSheet=null)):console.warn("Unable to remove sheet, at least one sheet must be active")}lookupSheet(e){return e?e instanceof Xe?e:e instanceof Ue?e._sheet:this.sheets.find((t=>t.key===e))||!1:this.activeSheet}setSheets(e){return this.loadSheets(e),this.getSheets()}addSheet(e){return this.newSheet(e).getComponent()}getSheetDefinitions(){return this.sheets.map((e=>e.getDefinition()))}getSheets(){return this.sheets.map((e=>e.getComponent()))}getSheet(e){var t=this.lookupSheet(e);return!!t&&t.getComponent()}setSheetData(e,t){e&&!t&&(t=e,e=!1);var i=this.lookupSheet(e);return!!i&&i.setData(t)}getSheetData(e){var t=this.lookupSheet(e);return!!t&&t.getData()}clearSheet(e){var t=this.lookupSheet(e);return!!t&&t.clear()}removeSheetFunc(e){var t=this.lookupSheet(e);t&&this.removeSheet(t)}activeSheetFunc(e){var t=this.lookupSheet(e);return!!t&&this.loadSheet(t)}},TooltipModule:class extends M{static moduleName="tooltip";constructor(e){super(e),this.tooltipSubscriber=null,this.headerSubscriber=null,this.timeout=null,this.popupInstance=null,this.registerTableOption("tooltipDelay",300),this.registerColumnOption("tooltip"),this.registerColumnOption("headerTooltip")}initialize(){this.deprecatedOptionsCheck(),this.subscribe("column-init",this.initializeColumn.bind(this))}deprecatedOptionsCheck(){}initializeColumn(e){e.definition.headerTooltip&&!this.headerSubscriber&&(this.headerSubscriber=!0,this.subscribe("column-mousemove",this.mousemoveCheck.bind(this,"headerTooltip")),this.subscribe("column-mouseout",this.mouseoutCheck.bind(this,"headerTooltip"))),e.definition.tooltip&&!this.tooltipSubscriber&&(this.tooltipSubscriber=!0,this.subscribe("cell-mousemove",this.mousemoveCheck.bind(this,"tooltip")),this.subscribe("cell-mouseout",this.mouseoutCheck.bind(this,"tooltip")))}mousemoveCheck(e,t,i){var 
s="tooltip"===e?i.column.definition.tooltip:i.definition.headerTooltip;s&&(this.clearPopup(),this.timeout=setTimeout(this.loadTooltip.bind(this,t,i,s),this.table.options.tooltipDelay))}mouseoutCheck(e,t,i){this.popupInstance||this.clearPopup()}clearPopup(e,t,i){clearTimeout(this.timeout),this.timeout=null,this.popupInstance&&this.popupInstance.hide()}loadTooltip(e,t,i){var s,o,r;"function"==typeof i&&(i=i(e,t.getComponent(),(function(e){o=e}))),i instanceof HTMLElement?s=i:(s=document.createElement("div"),!0===i&&(t instanceof n?i=t.value:t.definition.field?this.langBind("columns|"+t.definition.field,(e=>{s.innerHTML=i=e||t.definition.title})):i=t.definition.title),s.innerHTML=i),(i||0===i||!1===i)&&(s.classList.add("tabulator-tooltip"),s.addEventListener("mousemove",(e=>e.preventDefault())),this.popupInstance=this.popup(s),"function"==typeof o&&this.popupInstance.renderCallback(o),r=this.popupInstance.containerEventCoords(e),this.popupInstance.show(r.x+15,r.y+15).hideOnBlur((()=>{this.dispatchExternal("TooltipClosed",t.getComponent()),this.popupInstance=null})),this.dispatchExternal("TooltipOpened",t.getComponent()))}},ValidateModule:qe});var Ye=class extends O{static extendModule(){O.initializeModuleBinder(Ke),O._extendModule(...arguments)}static registerModule(){O.initializeModuleBinder(Ke),O._registerModule(...arguments)}constructor(e,t,i){super(e,t,Ke)}};return Ye})); +//# sourceMappingURL=tabulator.min.js.map \ No newline at end of file diff --git a/packages/syft/src/syft/assets/svg/arrow.svg b/packages/syft/src/syft/assets/svg/arrow.svg new file mode 100644 index 00000000000..23d370f7731 --- /dev/null +++ b/packages/syft/src/syft/assets/svg/arrow.svg @@ -0,0 +1,7 @@ + + + + + \ No newline at end of file diff --git a/packages/syft/src/syft/assets/svg/clipboard.svg b/packages/syft/src/syft/assets/svg/clipboard.svg new file mode 100644 index 00000000000..519935e3604 --- /dev/null +++ b/packages/syft/src/syft/assets/svg/clipboard.svg @@ -0,0 +1,5 @@ + + + \ No newline at end of file diff --git a/packages/syft/src/syft/assets/svg/copy.svg b/packages/syft/src/syft/assets/svg/copy.svg new file mode 100644 index 00000000000..9e43a5b27f2 --- /dev/null +++ b/packages/syft/src/syft/assets/svg/copy.svg @@ -0,0 +1,3 @@ + + + diff --git a/packages/syft/src/syft/assets/svg/folder.svg b/packages/syft/src/syft/assets/svg/folder.svg new file mode 100644 index 00000000000..94d1ea2ccfa --- /dev/null +++ b/packages/syft/src/syft/assets/svg/folder.svg @@ -0,0 +1,5 @@ + + + \ No newline at end of file diff --git a/packages/syft/src/syft/assets/svg/info.svg b/packages/syft/src/syft/assets/svg/info.svg new file mode 100644 index 00000000000..2d773b07a06 --- /dev/null +++ b/packages/syft/src/syft/assets/svg/info.svg @@ -0,0 +1,5 @@ + + + \ No newline at end of file diff --git a/packages/syft/src/syft/assets/svg/request.svg b/packages/syft/src/syft/assets/svg/request.svg new file mode 100644 index 00000000000..a2e0a87130e --- /dev/null +++ b/packages/syft/src/syft/assets/svg/request.svg @@ -0,0 +1,5 @@ + + + \ No newline at end of file diff --git a/packages/syft/src/syft/assets/svg/search.svg b/packages/syft/src/syft/assets/svg/search.svg new file mode 100644 index 00000000000..99ce6ab0b52 --- /dev/null +++ b/packages/syft/src/syft/assets/svg/search.svg @@ -0,0 +1,5 @@ + + + \ No newline at end of file diff --git a/packages/syft/src/syft/assets/svg/table.svg b/packages/syft/src/syft/assets/svg/table.svg new file mode 100644 index 00000000000..e19b6a4556d --- /dev/null +++ 
b/packages/syft/src/syft/assets/svg/table.svg @@ -0,0 +1,5 @@ + + + \ No newline at end of file diff --git a/packages/syft/src/syft/capnp/recursive_serde.capnp b/packages/syft/src/syft/capnp/recursive_serde.capnp index 8f4b1b17953..5b6fadb5c65 100644 --- a/packages/syft/src/syft/capnp/recursive_serde.capnp +++ b/packages/syft/src/syft/capnp/recursive_serde.capnp @@ -3,6 +3,7 @@ struct RecursiveSerde { fieldsName @0 :List(Text); fieldsData @1 :List(List(Data)); - fullyQualifiedName @2 :Text; - nonrecursiveBlob @3 :List(Data); + nonrecursiveBlob @2 :List(Data); + canonicalName @3 :Text; + version @4 :Int32; } diff --git a/packages/syft/src/syft/client/__init__.py b/packages/syft/src/syft/client/__init__.py index e69de29bb2d..0f427902877 100644 --- a/packages/syft/src/syft/client/__init__.py +++ b/packages/syft/src/syft/client/__init__.py @@ -0,0 +1,2 @@ +# relative +from .enclave_client import EnclaveMetadata diff --git a/packages/syft/src/syft/client/api.py b/packages/syft/src/syft/client/api.py index ff36317238e..85fe33545fa 100644 --- a/packages/syft/src/syft/client/api.py +++ b/packages/syft/src/syft/client/api.py @@ -10,94 +10,159 @@ import types from typing import Any from typing import TYPE_CHECKING -from typing import _GenericAlias from typing import cast from typing import get_args from typing import get_origin # third party from nacl.exceptions import BadSignatureError -from pydantic import EmailStr -from result import OkErr -from result import Result -from typeguard import check_type +from pydantic import BaseModel +from pydantic import ConfigDict +from pydantic import TypeAdapter # relative -from ..abstract_node import AbstractNode -from ..node.credentials import SyftSigningKey -from ..node.credentials import SyftVerifyKey +from ..abstract_server import AbstractServer from ..protocol.data_protocol import PROTOCOL_TYPE from ..protocol.data_protocol import get_data_protocol from ..protocol.data_protocol import migrate_args_and_kwargs from ..serde.deserialize import _deserialize -from ..serde.recursive import index_syft_by_module_name from ..serde.serializable import serializable from ..serde.serialize import _serialize from ..serde.signature import Signature -from ..serde.signature import signature_remove_context -from ..serde.signature import signature_remove_self +from ..serde.signature import signature_remove +from ..server.credentials import SyftSigningKey +from ..server.credentials import SyftVerifyKey from ..service.context import AuthedServiceContext from ..service.context import ChangeContext -from ..service.response import SyftAttributeError +from ..service.metadata.server_metadata import ServerMetadataJSON from ..service.response import SyftError +from ..service.response import SyftResponseMessage from ..service.response import SyftSuccess from ..service.service import UserLibConfigRegistry from ..service.service import UserServiceConfigRegistry +from ..service.service import _format_signature +from ..service.service import _signature_error_message from ..service.user.user_roles import ServiceRole from ..service.warnings import APIEndpointWarning from ..service.warnings import WarningContext -from ..types.cache_object import CachedSyftObject +from ..types.errors import SyftException +from ..types.errors import exclude_from_traceback from ..types.identity import Identity -from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..types.result import as_result +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from ..types.syft_object import SyftBaseObject from 
..types.syft_object import SyftMigrationRegistry from ..types.syft_object import SyftObject from ..types.uid import LineageID from ..types.uid import UID from ..util.autoreload import autoreload_enabled -from ..util.telemetry import instrument +from ..util.markdown import as_markdown_python_code +from ..util.notebook_ui.components.tabulator_template import build_tabulator_table +from ..util.util import index_syft_by_module_name from ..util.util import prompt_warning_message -from .connection import NodeConnection +from .connection import ServerConnection if TYPE_CHECKING: # relative - from ..node import Node + from ..server import Server from ..service.job.job_stash import Job +IPYNB_BACKGROUND_METHODS = { + "getdoc", + "_partialmethod", + "__name__", + "__code__", + "__wrapped__", + "__custom_documentations__", + "__signature__", + "__defaults__", + "__kwdefaults__", +} + +IPYNB_BACKGROUND_PREFIXES = ["_ipy", "_repr", "__ipython", "__pydantic"] + + +@exclude_from_traceback +def post_process_result( + result: SyftError | SyftSuccess, unwrap_on_success: bool = False +) -> Any: + if isinstance(result, SyftError): + raise SyftException(public_message=result.message, server_trace=result.tb) + + if unwrap_on_success and isinstance(result, SyftSuccess): + result = result.unwrap_value() + + return result + + +def _has_config_dict(t: Any) -> bool: + return ( + # Use this instead of `issubclass`` to be compatible with python 3.10 + # `inspect.isclass(t) and issubclass(t, BaseModel)`` wouldn't work with + # generics, e.g. `set[sy.UID]`, in python 3.10 + (hasattr(t, "__mro__") and BaseModel in t.__mro__) + or hasattr(t, "__pydantic_config__") + ) + + +_config_dict = ConfigDict(arbitrary_types_allowed=True) + + +def _check_type(v: object, t: Any) -> Any: + # TypeAdapter only accepts `config` arg if `t` does not + # already contain a ConfigDict + # i.e model_config in BaseModel and __pydantic_config__ in + # other types. + type_adapter = ( + TypeAdapter(t, config=_config_dict) + if not _has_config_dict(t) + else TypeAdapter(t) + ) + + return type_adapter.validate_python(v) + + class APIRegistry: __api_registry__: dict[tuple, SyftAPI] = OrderedDict() @classmethod def set_api_for( cls, - node_uid: UID | str, + server_uid: UID | str, user_verify_key: SyftVerifyKey | str, api: SyftAPI, ) -> None: - if isinstance(node_uid, str): - node_uid = UID.from_string(node_uid) + if isinstance(server_uid, str): + server_uid = UID.from_string(server_uid) if isinstance(user_verify_key, str): user_verify_key = SyftVerifyKey.from_string(user_verify_key) - key = (node_uid, user_verify_key) + key = (server_uid, user_verify_key) cls.__api_registry__[key] = api @classmethod - def api_for(cls, node_uid: UID, user_verify_key: SyftVerifyKey) -> SyftAPI | None: - key = (node_uid, user_verify_key) - return cls.__api_registry__.get(key, None) + @as_result(SyftException) + def api_for(cls, server_uid: UID, user_verify_key: SyftVerifyKey) -> SyftAPI: + key = (server_uid, user_verify_key) + api_instance = cls.__api_registry__.get(key, None) + + if api_instance is None: + msg = f"Unable to get the API. 
Please login to datasite {server_uid}" + raise SyftException(public_message=msg) + + return api_instance @classmethod def get_all_api(cls) -> list[SyftAPI]: return list(cls.__api_registry__.values()) @classmethod - def get_by_recent_node_uid(cls, node_uid: UID) -> SyftAPI | None: + def get_by_recent_server_uid(cls, server_uid: UID) -> SyftAPI | None: for key, api in reversed(cls.__api_registry__.items()): - if key[0] == node_uid: + if key[0] == server_uid: return api return None @@ -105,7 +170,7 @@ def get_by_recent_node_uid(cls, node_uid: UID) -> SyftAPI | None: @serializable() class APIEndpoint(SyftObject): __canonical_name__ = "APIEndpoint" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID service_path: str @@ -117,12 +182,13 @@ class APIEndpoint(SyftObject): has_self: bool = False pre_kwargs: dict[str, Any] | None = None warning: APIEndpointWarning | None = None + unwrap_on_success: bool = True @serializable() class LibEndpoint(SyftBaseObject): __canonical_name__ = "LibEndpoint" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 # TODO: bad name, change service_path: str @@ -138,7 +204,7 @@ class LibEndpoint(SyftBaseObject): @serializable(attrs=["signature", "credentials", "serialized_message"]) class SignedSyftAPICall(SyftObject): __canonical_name__ = "SignedSyftAPICall" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 credentials: SyftVerifyKey signature: bytes @@ -159,26 +225,24 @@ def message(self) -> SyftAPICall: return self.cached_deseralized_message @property - def is_valid(self) -> Result[SyftSuccess, SyftError]: + def is_valid(self) -> bool: try: _ = self.credentials.verify_key.verify( self.serialized_message, self.signature ) except BadSignatureError: - return SyftError(message="BadSignatureError") - - return SyftSuccess(message="Credentials are valid") + return False + return True -@instrument @serializable() class SyftAPICall(SyftObject): # version __canonical_name__ = "SyftAPICall" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 # fields - node_uid: UID + server_uid: UID path: str args: list kwargs: dict[str, Any] @@ -193,13 +257,15 @@ def sign(self, credentials: SyftSigningKey) -> SignedSyftAPICall: signature=signed_message.signature, ) + def __repr__(self) -> str: + return f"SyftAPICall(path={self.path}, args={self.args}, kwargs={self.kwargs}, blocking={self.blocking})" + -@instrument @serializable() class SyftAPIData(SyftBaseObject): # version __canonical_name__ = "SyftAPIData" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 # fields data: Any = None @@ -216,21 +282,24 @@ def sign(self, credentials: SyftSigningKey) -> SignedSyftAPICall: class RemoteFunction(SyftObject): __canonical_name__ = "RemoteFunction" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __repr_attrs__ = [ "id", - "node_uid", + "server_uid", "signature", "path", ] - node_uid: UID + server_uid: UID signature: Signature + refresh_api_callback: Callable | None = None path: str make_call: Callable pre_kwargs: dict[str, Any] | None = None communication_protocol: PROTOCOL_TYPE warning: APIEndpointWarning | None = None + custom_function: bool = False + unwrap_on_success: bool = True @property def __ipython_inspector_signature_override__(self) -> Signature | None: @@ -238,20 +307,20 @@ def __ipython_inspector_signature_override__(self) -> Signature | None: def prepare_args_and_kwargs( self, args: list | tuple, kwargs: dict[str, Any] - ) -> 
SyftError | tuple[tuple, dict[str, Any]]: + ) -> tuple[tuple, dict[str, Any]]: # Validate and migrate args and kwargs - res = validate_callable_args_and_kwargs(args, kwargs, self.signature) - if isinstance(res, SyftError): - return res + res = validate_callable_args_and_kwargs(args, kwargs, self.signature).unwrap() args, kwargs = res args, kwargs = migrate_args_and_kwargs( to_protocol=self.communication_protocol, args=args, kwargs=kwargs ) - return args, kwargs + return tuple(args), kwargs - def __call__(self, *args: Any, **kwargs: Any) -> Any: + def function_call( + self, path: str, *args: Any, cache_result: bool = True, **kwargs: Any + ) -> Any: if "blocking" in self.signature.parameters: raise Exception( f"Signature {self.signature} can't have 'blocking' kwarg because it's reserved" @@ -259,22 +328,23 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any: blocking = True if "blocking" in kwargs: + if path == "api.call_public_in_jobs": + raise SyftException( + public_message="The 'blocking' parameter is not allowed for this function" + ) + blocking = bool(kwargs["blocking"]) del kwargs["blocking"] - res = self.prepare_args_and_kwargs(args, kwargs) - if isinstance(res, SyftError): - return res - - _valid_args, _valid_kwargs = res + _valid_args, _valid_kwargs = self.prepare_args_and_kwargs(args, kwargs) if self.pre_kwargs: _valid_kwargs.update(self.pre_kwargs) _valid_kwargs["communication_protocol"] = self.communication_protocol api_call = SyftAPICall( - node_uid=self.node_uid, - path=self.path, + server_uid=self.server_uid, + path=path, args=list(_valid_args), kwargs=_valid_kwargs, blocking=blocking, @@ -283,37 +353,154 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any: allowed = self.warning.show() if self.warning else True if not allowed: return - result = self.make_call(api_call=api_call) + result = self.make_call(api_call=api_call, cache_result=cache_result) + + # TODO: annotate this on the service method decorator + API_CALLS_THAT_REQUIRE_REFRESH = ["settings.enable_eager_execution"] + + if path in API_CALLS_THAT_REQUIRE_REFRESH: + if self.refresh_api_callback is not None: + self.refresh_api_callback() result, _ = migrate_args_and_kwargs( [result], kwargs={}, to_latest_protocol=True ) result = result[0] - return result + + return post_process_result(result, self.unwrap_on_success) + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + return self.function_call(self.path, *args, **kwargs) + + @property + def mock(self) -> Any: + if self.custom_function: + remote_func = self + + class PrivateCustomAPIReference: + def __call__(self, *args: Any, **kwargs: Any) -> Any: + return remote_func.function_call( + "api.call_public_in_jobs", *args, **kwargs + ) + + @property + def context(self) -> Any: + return remote_func.function_call("api.get_public_context") + + return PrivateCustomAPIReference() + raise SyftException( + public_message="This function doesn't support mock/private calls as it's not custom." + ) + + @property + def private(self) -> Any: + if self.custom_function: + remote_func = self + + class PrivateCustomAPIReference: + def __call__(self, *args: Any, **kwargs: Any) -> Any: + return remote_func.function_call( + "api.call_private_in_jobs", *args, **kwargs + ) + + @property + def context(self) -> Any: + return remote_func.function_call("api.get_private_context") + + return PrivateCustomAPIReference() + raise SyftException( + public_message="This function doesn't support mock/private calls as it's not custom." 
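# --- Editor's note (illustrative sketch, not part of the patch) ---------------
# The api.py hunks above replace typeguard.check_type with a pydantic
# TypeAdapter based check (_check_type / _has_config_dict), passing
# ConfigDict(arbitrary_types_allowed=True) only when the target type does not
# already carry its own config. The snippet below is a minimal, self-contained
# sketch of that pattern; it assumes pydantic v2, and the helper names
# (check_type_like_syft, _carries_config) are invented for illustration and are
# not the actual syft helpers.
from typing import Any

from pydantic import BaseModel, ConfigDict, TypeAdapter, ValidationError

_cfg = ConfigDict(arbitrary_types_allowed=True)


def _carries_config(t: Any) -> bool:
    # BaseModel subclasses and types exposing __pydantic_config__ already define
    # a config; TypeAdapter rejects an extra `config` argument for those.
    return (hasattr(t, "__mro__") and BaseModel in t.__mro__) or hasattr(
        t, "__pydantic_config__"
    )


def check_type_like_syft(value: object, expected: Any) -> Any:
    adapter = (
        TypeAdapter(expected)
        if _carries_config(expected)
        else TypeAdapter(expected, config=_cfg)
    )
    try:
        return adapter.validate_python(value)
    except ValidationError as exc:
        raise TypeError(f"value does not match {expected!r}") from exc


# Example: generic aliases such as set[int] validate the same way plain classes do.
assert check_type_like_syft({1, 2}, set[int]) == {1, 2}
# -------------------------------------------------------------------------------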
+ ) + + @as_result(SyftException) + def custom_function_actionobject_id(self) -> UID: + if self.custom_function and self.pre_kwargs is not None: + custom_path = self.pre_kwargs.get("path", "") + api_call = SyftAPICall( + server_uid=self.server_uid, + path="api.view", + args=[custom_path], + kwargs={}, + ) + endpoint = self.make_call(api_call=api_call) + if isinstance(endpoint, SyftSuccess): + endpoint = endpoint.value + return endpoint.action_object_id + raise SyftException(public_message="This function is not a custom function") + + def _repr_markdown_(self, wrap_as_python: bool = False, indent: int = 0) -> str: + if self.custom_function and self.pre_kwargs is not None: + custom_path = self.pre_kwargs.get("path", "") + api_call = SyftAPICall( + server_uid=self.server_uid, + path="api.view", + args=[custom_path], + kwargs={}, + ) + + endpoint = self.make_call(api_call=api_call) + if isinstance(endpoint, SyftSuccess): + endpoint = endpoint.value + + str_repr = "## API: " + custom_path + "\n" + if endpoint.description is not None: + text = endpoint.description.text + else: + text = "" + str_repr += ( + "### Description: " + + f'{text}
    ' + + "\n" + ) + str_repr += "#### Private Code:\n" + not_accessible_code = "N / A" + private_code_repr = endpoint.private_function or not_accessible_code + public_code_repr = endpoint.mock_function or not_accessible_code + str_repr += as_markdown_python_code(private_code_repr) + "\n" + if endpoint.private_helper_functions: + str_repr += "##### Helper Functions:\n" + for helper_function in endpoint.private_helper_functions: + str_repr += as_markdown_python_code(helper_function) + "\n" + str_repr += "#### Public Code:\n" + str_repr += as_markdown_python_code(public_code_repr) + "\n" + if endpoint.mock_helper_functions: + str_repr += "##### Helper Functions:\n" + for helper_function in endpoint.mock_helper_functions: + str_repr += as_markdown_python_code(helper_function) + "\n" + return str_repr + return super()._repr_markdown_() class RemoteUserCodeFunction(RemoteFunction): __canonical_name__ = "RemoteUserFunction" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __repr_attrs__ = RemoteFunction.__repr_attrs__ + ["user_code_id"] api: SyftAPI def prepare_args_and_kwargs( self, args: list | tuple, kwargs: dict[str, Any] - ) -> SyftError | tuple[tuple, dict[str, Any]]: + ) -> tuple[tuple, dict[str, Any]]: # relative from ..service.action.action_object import convert_to_pointers # Validate and migrate args and kwargs - res = validate_callable_args_and_kwargs(args, kwargs, self.signature) - if isinstance(res, SyftError): - return res + res = validate_callable_args_and_kwargs(args, kwargs, self.signature).unwrap() args, kwargs = res + # Check remote function type to avoid function/method serialization + # We can recover the function/method pointer by its UID in server side. + for i in range(len(args)): + if isinstance(args[i], RemoteFunction) and args[i].custom_function: + args[i] = args[i].custom_function_id() # type: ignore + + for k, v in kwargs.items(): + if isinstance(v, RemoteFunction) and v.custom_function: + kwargs[k] = v.custom_function_actionobject_id().unwrap() + args, kwargs = convert_to_pointers( api=self.api, - node_uid=self.node_uid, + server_uid=self.server_uid, args=args, kwargs=kwargs, ) @@ -322,7 +509,7 @@ def prepare_args_and_kwargs( to_protocol=self.communication_protocol, args=args, kwargs=kwargs ) - return args, kwargs + return tuple(args), kwargs @property def user_code_id(self) -> UID | None: @@ -332,28 +519,30 @@ def user_code_id(self) -> UID | None: return None @property - def jobs(self) -> list[Job] | SyftError: + def jobs(self) -> list[Job]: if self.user_code_id is None: - return SyftError(message="Could not find user_code_id") + raise SyftException(public_message="Could not find user_code_id") api_call = SyftAPICall( - node_uid=self.node_uid, + server_uid=self.server_uid, path="job.get_by_user_code_id", args=[self.user_code_id], kwargs={}, blocking=True, ) - return self.make_call(api_call=api_call) + result = self.make_call(api_call=api_call) + return post_process_result(result, self.unwrap_on_success) def generate_remote_function( api: SyftAPI, - node_uid: UID, + server_uid: UID, signature: Signature, path: str, make_call: Callable, pre_kwargs: dict[str, Any] | None, communication_protocol: PROTOCOL_TYPE, warning: APIEndpointWarning | None, + unwrap_on_success: bool = True, ) -> RemoteFunction: if "blocking" in signature.parameters: raise Exception( @@ -364,7 +553,7 @@ def generate_remote_function( if path == "code.call" and pre_kwargs is not None and "uid" in pre_kwargs: remote_function = RemoteUserCodeFunction( api=api, - 
node_uid=node_uid, + server_uid=server_uid, signature=signature, path=path, make_call=make_call, @@ -372,16 +561,21 @@ def generate_remote_function( communication_protocol=communication_protocol, warning=warning, user_code_id=pre_kwargs["uid"], + unwrap_on_success=unwrap_on_success, ) else: + custom_function = bool(path == "api.call_in_jobs") remote_function = RemoteFunction( - node_uid=node_uid, + server_uid=server_uid, + refresh_api_callback=api.refresh_api_callback, signature=signature, path=path, make_call=make_call, pre_kwargs=pre_kwargs, communication_protocol=communication_protocol, warning=warning, + custom_function=custom_function, + unwrap_on_success=unwrap_on_success, ) return remote_function @@ -389,7 +583,7 @@ def generate_remote_function( def generate_remote_lib_function( api: SyftAPI, - node_uid: UID, + server_uid: UID, signature: Signature, path: str, module_path: str, @@ -402,7 +596,7 @@ def generate_remote_lib_function( f"Signature {signature} can't have 'blocking' kwarg because its reserved" ) - def wrapper(*args: Any, **kwargs: Any) -> SyftError | Any: + def wrapper(*args: Any, **kwargs: Any) -> Any: # relative from ..service.action.action_object import TraceResultRegistry @@ -410,20 +604,18 @@ def wrapper(*args: Any, **kwargs: Any) -> SyftError | Any: if trace_result is not None: wrapper_make_call = trace_result.client.api.make_call # type: ignore - wrapper_node_uid = trace_result.client.api.node_uid # type: ignore + wrapper_server_uid = trace_result.client.api.server_uid # type: ignore else: # somehow this is necessary to prevent shadowing problems wrapper_make_call = make_call - wrapper_node_uid = node_uid + wrapper_server_uid = server_uid blocking = True if "blocking" in kwargs: blocking = bool(kwargs["blocking"]) del kwargs["blocking"] - res = validate_callable_args_and_kwargs(args, kwargs, signature) + res = validate_callable_args_and_kwargs(args, kwargs, signature).unwrap() - if isinstance(res, SyftError): - return res _valid_args, _valid_kwargs = res if pre_kwargs: @@ -435,7 +627,7 @@ def wrapper(*args: Any, **kwargs: Any) -> SyftError | Any: from ..service.action.action_object import convert_to_pointers action_args, action_kwargs = convert_to_pointers( - api, wrapper_node_uid, _valid_args, _valid_kwargs + api, wrapper_server_uid, _valid_args, _valid_kwargs ) # e.g. 
numpy.array -> numpy, array @@ -456,7 +648,7 @@ def wrapper(*args: Any, **kwargs: Any) -> SyftError | Any: trace_result.result += [action] api_call = SyftAPICall( - node_uid=wrapper_node_uid, + server_uid=wrapper_server_uid, path=path, args=service_args, kwargs={}, @@ -464,20 +656,48 @@ def wrapper(*args: Any, **kwargs: Any) -> SyftError | Any: ) result = wrapper_make_call(api_call=api_call) + result = post_process_result(result, unwrap_on_success=True) + return result wrapper.__ipython_inspector_signature_override__ = signature return wrapper -@serializable() +class APISubModulesView(SyftObject): + __canonical_name__ = "APISubModulesView" + __version__ = SYFT_OBJECT_VERSION_1 + + submodule: str = "" + endpoints: list[str] = [] + + __syft_include_id_coll_repr__ = False + + def _coll_repr_(self) -> dict[str, Any]: + return {"submodule": self.submodule, "endpoints": "\n".join(self.endpoints)} + + +@serializable(canonical_name="APIModule", version=1) class APIModule: _modules: list[str] path: str + refresh_callback: Callable | None - def __init__(self, path: str) -> None: + def __init__(self, path: str, refresh_callback: Callable | None) -> None: self._modules = [] self.path = path + self.refresh_callback = refresh_callback + + def __dir__(self) -> list[str]: + return self._modules + ["path"] + + def has_submodule(self, name: str) -> bool: + """We use this as hasattr() triggers __getattribute__ which triggers recursion""" + try: + _ = object.__getattribute__(self, name) + return True + except AttributeError: + return False def _add_submodule( self, attr_name: str, module_or_func: Callable | APIModule @@ -485,23 +705,79 @@ def _add_submodule( setattr(self, attr_name, module_or_func) self._modules.append(attr_name) - def __getattribute__(self, name: str) -> Any: + def __getattr__(self, name: str) -> Any: try: return object.__getattribute__(self, name) except AttributeError: - raise SyftAttributeError( + # if we fail, we refresh the api and try again + # however, we dont want this to happen all the time because of ipy magic happening + # in the background + if ( + self.refresh_callback is not None + and name not in IPYNB_BACKGROUND_METHODS + and not any( + name.startswith(prefix) for prefix in IPYNB_BACKGROUND_PREFIXES + ) + ): + api = self.refresh_callback() + try: + # get current path in the module tree + new_current_module = api.services + for submodule in self.path.split("."): + if submodule != "": + new_current_module = getattr(new_current_module, submodule) + # retry getting the attribute, if this fails, we throw an error + return object.__getattribute__(new_current_module, name) + except AttributeError: + pass + raise AttributeError( f"'APIModule' api{self.path} object has no submodule or method '{name}', " - "you may not have permission to access the module you are trying to access" + "you may not have permission to access the module you are trying to access." + "If you think this is an error, try calling `client.refresh()` to update the API." 
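# --- Editor's note (illustrative sketch, not part of the patch) ---------------
# The APIModule change above swaps __getattribute__ for __getattr__ and, when an
# attribute is missing, invokes refresh_callback() to rebuild the API and retries
# the lookup before raising. Below is a minimal, hypothetical model of that
# refresh-and-retry behaviour; the class and callback names are invented for the
# sketch and do not ship with syft.
from collections.abc import Callable
from typing import Any


class LazyModule:
    def __init__(self, refresh_callback: Callable[[], dict[str, Any]] | None = None):
        self._members: dict[str, Any] = {}
        self._refresh_callback = refresh_callback

    def __getattr__(self, name: str) -> Any:
        # __getattr__ only fires after normal lookup has failed, so there is no
        # recursion hazard the way there was with __getattribute__.
        if name.startswith("_"):
            raise AttributeError(name)
        if self._refresh_callback is not None:
            # Refresh once, then retry the lookup against the new members.
            self._members.update(self._refresh_callback())
            if name in self._members:
                return self._members[name]
        raise AttributeError(
            f"{name!r} is not available; try refreshing the client API."
        )


# Example: the first access triggers a refresh that brings in a new endpoint.
mod = LazyModule(refresh_callback=lambda: {"ping": lambda: "pong"})
print(mod.ping())  # -> "pong"
# -------------------------------------------------------------------------------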
) def __getitem__(self, key: str | int) -> Any: + if hasattr(self, "get_index"): + return self.get_index(key) if hasattr(self, "get_all"): return self.get_all()[key] raise NotImplementedError + def __iter__(self) -> Any: + if hasattr(self, "get_all"): + return iter(self.get_all()) + raise NotImplementedError + def _repr_html_(self) -> Any: + if self.path == "settings": + return self.get()._repr_html_() + if not hasattr(self, "get_all"): - return NotImplementedError + + def recursively_get_submodules( + module: APIModule | Callable, + ) -> list[APIModule | Callable]: + children = [module] + if isinstance(module, APIModule): + for submodule_name in module._modules: + submodule = getattr(module, submodule_name) + children += recursively_get_submodules(submodule) + return children + + views = [] + for submodule_name in self._modules: + submodule = getattr(self, submodule_name) + children = recursively_get_submodules(submodule) + child_paths = [ + x.path for x in children if isinstance(x, RemoteFunction) + ] + views.append( + APISubModulesView(submodule=submodule_name, endpoints=child_paths) + ) + + return build_tabulator_table(views) + + # should never happen? results = self.get_all() return results._repr_html_() @@ -509,20 +785,23 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any: return NotImplementedError +# TODO ERROR: what is this function return type??? +@as_result(SyftException) def debox_signed_syftapicall_response( signed_result: SignedSyftAPICall | Any, -) -> Any | SyftError: +) -> Any: if not isinstance(signed_result, SignedSyftAPICall): - return SyftError(message="The result is not signed") + raise SyftException(public_message="The result is not signed") if not signed_result.is_valid: - return SyftError(message="The result signature is invalid") + raise SyftException(public_message="The result signature is invalid") + return signed_result.message.data def downgrade_signature(signature: Signature, object_versions: dict) -> Signature: migrated_parameters = [] - for _, parameter in signature.parameters.items(): + for parameter in signature.parameters.values(): annotation = unwrap_and_migrate_annotation( parameter.annotation, object_versions ) @@ -598,12 +877,11 @@ def result_needs_api_update(api_call_result: Any) -> bool: return False -@instrument @serializable( attrs=[ "endpoints", - "node_uid", - "node_name", + "server_uid", + "server_name", "lib_endpoints", "communication_protocol", ] @@ -611,12 +889,12 @@ def result_needs_api_update(api_call_result: Any) -> bool: class SyftAPI(SyftObject): # version __canonical_name__ = "SyftAPI" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 # fields - connection: NodeConnection | None = None - node_uid: UID | None = None - node_name: str | None = None + connection: ServerConnection | None = None + server_uid: UID | None = None + server_name: str | None = None endpoints: dict[str, APIEndpoint] lib_endpoints: dict[str, LibEndpoint] | None = None api_module: APIModule | None = None @@ -626,39 +904,60 @@ class SyftAPI(SyftObject): refresh_api_callback: Callable | None = None __user_role: ServiceRole = ServiceRole.NONE communication_protocol: PROTOCOL_TYPE + metadata: ServerMetadataJSON | None = None + + # informs getattr does not have nasty side effects + __syft_allow_autocomplete__ = ["services"] - # def __post_init__(self) -> None: - # pass + def __dir__(self) -> list[str]: + modules = getattr(self.api_module, "_modules", []) + return ["services"] + modules + + def __syft_dir__(self) -> list[str]: + modules = 
getattr(self.api_module, "_modules", []) + return ["services"] + modules + + def __getattr__(self, name: str) -> Any: + try: + return getattr(self.api_module, name) + except Exception: + raise AttributeError( + f"'SyftAPI' object has no submodule or method '{name}', " + "you may not have permission to access the module you are trying to access." + "If you think this is an error, try calling `client.refresh()` to update the API." + ) @staticmethod def for_user( - node: AbstractNode, + server: AbstractServer, communication_protocol: PROTOCOL_TYPE, user_verify_key: SyftVerifyKey | None = None, ) -> SyftAPI: # relative + from ..service.api.api_service import APIService + # TODO: Maybe there is a possibility of merging ServiceConfig and APIEndpoint from ..service.code.user_code_service import UserCodeService # find user role by verify_key # TODO: we should probably not allow empty verify keys but instead make user always register - role = node.get_role_for_credentials(user_verify_key) + role = server.get_role_for_credentials(user_verify_key) _user_service_config_registry = UserServiceConfigRegistry.from_role(role) _user_lib_config_registry = UserLibConfigRegistry.from_user(user_verify_key) endpoints: dict[str, APIEndpoint] = {} lib_endpoints: dict[str, LibEndpoint] = {} warning_context = WarningContext( - node=node, role=role, credentials=user_verify_key + server=server, role=role, credentials=user_verify_key ) # If server uses a higher protocol version than client, then # signatures needs to be downgraded. - if node.current_protocol == "dev" and communication_protocol != "dev": + if server.current_protocol == "dev" and communication_protocol != "dev": # We assume dev is the highest staged protocol signature_needs_downgrade = True else: - signature_needs_downgrade = node.current_protocol != "dev" and int( - node.current_protocol + signature_needs_downgrade = server.current_protocol != "dev" and int( + server.current_protocol ) > int(communication_protocol) data_protocol = get_data_protocol() @@ -675,7 +974,7 @@ def for_user( service_warning = service_config.warning if service_warning: service_warning = service_warning.message_from(warning_context) - service_warning.enabled = node.enable_warnings + service_warning.enabled = server.enable_warnings signature = ( downgrade_signature( @@ -695,6 +994,7 @@ def for_user( signature=signature, # TODO: Migrate signature based on communication protocol has_self=False, warning=service_warning, + unwrap_on_success=service_config.unwrap_on_success, ) endpoints[path] = endpoint @@ -714,8 +1014,10 @@ def for_user( lib_endpoints[path] = endpoint # 🟡 TODO 35: fix root context - context = AuthedServiceContext(node=node, credentials=user_verify_key) - method = node.get_method_with_context(UserCodeService.get_all_for_user, context) + context = AuthedServiceContext(server=server, credentials=user_verify_key) + method = server.get_method_with_context( + UserCodeService.get_all_for_user, context + ) code_items = method() for code_item in code_items: @@ -733,9 +1035,29 @@ def for_user( ) endpoints[unique_path] = endpoint + # get admin defined custom api endpoints + method = server.get_method_with_context(APIService.get_endpoints, context) + custom_endpoints = method().unwrap() + for custom_endpoint in custom_endpoints: + pre_kwargs = {"path": custom_endpoint.path} + service_path = "api.call_in_jobs" + path = custom_endpoint.path + api_end = custom_endpoint.path.split(".")[-1] + endpoint = APIEndpoint( + service_path=service_path, + module_path=path, + name=api_end, + 
description="", + doc_string="", + signature=custom_endpoint.signature, + has_self=False, + pre_kwargs=pre_kwargs, + ) + endpoints[path] = endpoint + return SyftAPI( - node_name=node.name, - node_uid=node.id, + server_name=server.name, + server_uid=server.id, endpoints=endpoints, lib_endpoints=lib_endpoints, __user_role=role, @@ -746,27 +1068,19 @@ def for_user( def user_role(self) -> ServiceRole: return self.__user_role - def make_call(self, api_call: SyftAPICall) -> Result: + def make_call(self, api_call: SyftAPICall, cache_result: bool = True) -> Any: signed_call = api_call.sign(credentials=self.signing_key) if self.connection is not None: signed_result = self.connection.make_call(signed_call) else: - return SyftError(message="API connection is None") + raise SyftException(public_message="API connection is None") - result = debox_signed_syftapicall_response(signed_result=signed_result) - - if isinstance(result, CachedSyftObject): - if result.error_msg is not None: + result = debox_signed_syftapicall_response(signed_result=signed_result).unwrap() + if isinstance(result, SyftResponseMessage): + for warning in result.client_warnings: prompt_warning_message( - message=f"{result.error_msg}. Loading results from cache." + message=warning, ) - result = result.result - - if isinstance(result, OkErr): - if result.is_ok(): - result = result.ok() - else: - result = result.err() # we update the api when we create objects that change it self.update_api(result) return result @@ -777,9 +1091,8 @@ def update_api(self, api_call_result: Any) -> None: if self.refresh_api_callback is not None: self.refresh_api_callback() - @staticmethod def _add_route( - api_module: APIModule, endpoint: APIEndpoint, endpoint_method: Callable + self, api_module: APIModule, endpoint: APIEndpoint, endpoint_method: Callable ) -> None: """Recursively create a module path to the route endpoint.""" @@ -789,37 +1102,47 @@ def _add_route( _last_module = _modules.pop() while _modules: module = _modules.pop(0) - if not hasattr(_self, module): - submodule_path = f"{_self.path}.{module}" - _self._add_submodule(module, APIModule(path=submodule_path)) + if not _self.has_submodule(module): + submodule_path = ( + f"{_self.path}.{module}" if _self.path != "" else module + ) + _self._add_submodule( + module, + APIModule( + path=submodule_path, refresh_callback=self.refresh_api_callback + ), + ) _self = getattr(_self, module) _self._add_submodule(_last_module, endpoint_method) def generate_endpoints(self) -> None: def build_endpoint_tree( - endpoints: dict[str, LibEndpoint], communication_protocol: PROTOCOL_TYPE + endpoints: dict[str, LibEndpoint | APIEndpoint], + communication_protocol: PROTOCOL_TYPE, ) -> APIModule: - api_module = APIModule(path="") - for _, v in endpoints.items(): + api_module = APIModule(path="", refresh_callback=self.refresh_api_callback) + for v in endpoints.values(): signature = v.signature + args_to_remove = ["context"] if not v.has_self: - signature = signature_remove_self(signature) - signature = signature_remove_context(signature) + args_to_remove.append("self") + signature = signature_remove(signature, args_to_remove) if isinstance(v, APIEndpoint): endpoint_function = generate_remote_function( self, - self.node_uid, + self.server_uid, signature, v.service_path, self.make_call, pre_kwargs=v.pre_kwargs, warning=v.warning, communication_protocol=communication_protocol, + unwrap_on_success=v.unwrap_on_success, ) elif isinstance(v, LibEndpoint): endpoint_function = generate_remote_lib_function( self, - self.node_uid, + 
self.server_uid, signature, v.service_path, v.module_path, @@ -869,7 +1192,9 @@ def __repr__(self) -> str: if hasattr(module_or_func, "_modules"): for func_name in module_or_func._modules: func = getattr(module_or_func, func_name) - sig = func.__ipython_inspector_signature_override__ + sig = getattr( + func, "__ipython_inspector_signature_override__", "" + ) _repr_str += f"{module_path_str}.{func_name}{sig}\n\n" return _repr_str @@ -947,71 +1272,84 @@ def monkey_patch_getdef(self: Any, obj: Any, oname: str = "") -> str | None: Inspector._getdef_bak = Inspector._getdef Inspector._getdef = types.MethodType(monkey_patch_getdef, Inspector) except Exception: - # print("Failed to monkeypatch IPython Signature Override") pass # nosec -@serializable() -class NodeIdentity(Identity): - node_name: str +@serializable(canonical_name="ServerIdentity", version=1) +class ServerIdentity(Identity): + server_name: str @staticmethod - def from_api(api: SyftAPI) -> NodeIdentity: - # stores the name root verify key of the domain node + def from_api(api: SyftAPI) -> ServerIdentity: + # stores the name root verify key of the datasite server if api.connection is None: - raise ValueError("{api}'s connection is None. Can't get the node identity") - node_metadata = api.connection.get_node_metadata(api.signing_key) - return NodeIdentity( - node_name=node_metadata.name, - node_id=api.node_uid, - verify_key=SyftVerifyKey.from_string(node_metadata.verify_key), + raise ValueError( + "{api}'s connection is None. Can't get the server identity" + ) + server_metadata = api.connection.get_server_metadata(api.signing_key) + return ServerIdentity( + server_name=server_metadata.name, + server_id=api.server_uid, + verify_key=SyftVerifyKey.from_string(server_metadata.verify_key), ) @classmethod - def from_change_context(cls, context: ChangeContext) -> NodeIdentity: - if context.node is None: - raise ValueError(f"{context}'s node is None") + def from_change_context(cls, context: ChangeContext) -> ServerIdentity: + if context.server is None: + raise ValueError(f"{context}'s server is None") return cls( - node_name=context.node.name, - node_id=context.node.id, - verify_key=context.node.signing_key.verify_key, + server_name=context.server.name, + server_id=context.server.id, + verify_key=context.server.signing_key.verify_key, ) @classmethod - def from_node(cls, node: Node) -> NodeIdentity: + def from_server(cls, server: Server) -> ServerIdentity: return cls( - node_name=node.name, - node_id=node.id, - verify_key=node.signing_key.verify_key, + server_name=server.name, + server_id=server.id, + verify_key=server.signing_key.verify_key, ) def __eq__(self, other: Any) -> bool: - if not isinstance(other, NodeIdentity): + if not isinstance(other, ServerIdentity): return False return ( - self.node_name == other.node_name + self.server_name == other.server_name and self.verify_key == other.verify_key - and self.node_id == other.node_id + and self.server_id == other.server_id ) def __hash__(self) -> int: - return hash((self.node_name, self.verify_key)) + return hash((self.server_name, self.verify_key)) def __repr__(self) -> str: - return f"NodeIdentity " + return f"ServerIdentity " +@as_result(SyftException) def validate_callable_args_and_kwargs( args: list, kwargs: dict, signature: Signature -) -> tuple[list, dict] | SyftError: +) -> tuple[list, dict]: _valid_kwargs = {} if "kwargs" in signature.parameters: _valid_kwargs = kwargs else: for key, value in kwargs.items(): if key not in signature.parameters: - return SyftError( - 
message=f"""Invalid parameter: `{key}`. Valid Parameters: {list(signature.parameters)}""" + valid_parameters = list(signature.parameters) + valid_parameters_msg = ( + f"Valid parameter: {valid_parameters}" + if len(valid_parameters) == 1 + else f"Valid parameters: {valid_parameters}" + ) + + raise SyftException( + public_message=( + f"Invalid parameter: `{key}`\n" + f"{valid_parameters_msg}\n" + f"{_signature_error_message(_format_signature(signature))}" + ) ) param = signature.parameters[key] if isinstance(param.annotation, str): @@ -1020,30 +1358,18 @@ def validate_callable_args_and_kwargs( t = index_syft_by_module_name(param.annotation) else: t = param.annotation - msg = None - try: - if t is not inspect.Parameter.empty: - if isinstance(t, _GenericAlias) and type(None) in t.__args__: - success = False - for v in t.__args__: - if issubclass(v, EmailStr): - v = str - try: - check_type(value, v) # raises Exception - success = True - break # only need one to match - except Exception: # nosec - pass - if not success: - raise TypeError() - else: - check_type(value, t) # raises Exception - except TypeError: - _type_str = getattr(t, "__name__", str(t)) - msg = f"`{key}` must be of type `{_type_str}` not `{type(value).__name__}`" - if msg: - return SyftError(message=msg) + if t is not inspect.Parameter.empty: + try: + _check_type(value, t) + except ValueError: + # TODO: fix this properly + if not (t == type(Any)): + _type_str = getattr(t, "__name__", str(t)) + raise SyftException( + public_message=f"`{key}` must be of type `{_type_str}` not `{type(value).__name__}`" + f"{_signature_error_message(_format_signature(signature))}" + ) _valid_kwargs[key] = value @@ -1061,15 +1387,8 @@ def validate_callable_args_and_kwargs( msg = None try: if t is not inspect.Parameter.empty: - if isinstance(t, _GenericAlias) and type(None) in t.__args__: - for v in t.__args__: - if issubclass(v, EmailStr): - v = str - check_type(arg, v) # raises Exception - break # only need one to match - else: - check_type(arg, t) # raises Exception - except TypeError: + _check_type(arg, t) + except ValueError: t_arg = type(arg) if ( autoreload_enabled() @@ -1080,10 +1399,14 @@ def validate_callable_args_and_kwargs( pass else: _type_str = getattr(t, "__name__", str(t)) - msg = f"Arg: {arg} must be {_type_str} not {type(arg).__name__}" + + msg = ( + f"Arg is `{arg}`. \nIt must be of type `{_type_str}`, not `{type(arg).__name__}`\n" + f"{_signature_error_message(_format_signature(signature))}" + ) if msg: - return SyftError(message=msg) + raise SyftException(public_message=msg) _valid_args.append(arg) diff --git a/packages/syft/src/syft/client/client.py b/packages/syft/src/syft/client/client.py index d408dab3ee9..d6fd164f44f 100644 --- a/packages/syft/src/syft/client/client.py +++ b/packages/syft/src/syft/client/client.py @@ -4,18 +4,21 @@ # stdlib import base64 from collections.abc import Callable -from copy import deepcopy +from collections.abc import Generator +from collections.abc import Iterable from enum import Enum from getpass import getpass import json -import os +import logging +import traceback from typing import Any from typing import TYPE_CHECKING from typing import cast # third party from argon2 import PasswordHasher -from pydantic import Field +from cachetools import TTLCache +from cachetools import cached from pydantic import field_validator import requests from requests import Response @@ -26,37 +29,32 @@ # relative from .. 
import __version__ -from ..abstract_node import AbstractNode -from ..abstract_node import NodeSideType -from ..abstract_node import NodeType -from ..node.credentials import SyftSigningKey -from ..node.credentials import SyftVerifyKey -from ..node.credentials import UserLoginCredentials +from ..abstract_server import AbstractServer +from ..abstract_server import ServerSideType +from ..abstract_server import ServerType from ..protocol.data_protocol import DataProtocol from ..protocol.data_protocol import PROTOCOL_TYPE from ..protocol.data_protocol import get_data_protocol from ..serde.deserialize import _deserialize from ..serde.serializable import serializable from ..serde.serialize import _serialize -from ..service.context import NodeServiceContext -from ..service.metadata.node_metadata import NodeMetadataJSON -from ..service.metadata.node_metadata import NodeMetadataV3 -from ..service.response import SyftError +from ..server.credentials import SyftSigningKey +from ..server.credentials import SyftVerifyKey +from ..server.credentials import UserLoginCredentials +from ..service.context import ServerServiceContext +from ..service.metadata.server_metadata import ServerMetadata +from ..service.metadata.server_metadata import ServerMetadataJSON from ..service.response import SyftSuccess from ..service.user.user import UserCreate from ..service.user.user import UserPrivateKey from ..service.user.user import UserView from ..service.user.user_roles import ServiceRole from ..service.user.user_service import UserService -from ..service.veilid.veilid_endpoints import VEILID_PROXY_PATH -from ..service.veilid.veilid_endpoints import VEILID_SERVICE_URL -from ..service.veilid.veilid_endpoints import VEILID_SYFT_PROXY_URL -from ..types.grid_url import GridURL +from ..types.errors import SyftException +from ..types.result import as_result +from ..types.server_url import ServerURL from ..types.syft_object import SYFT_OBJECT_VERSION_1 -from ..types.syft_object import SYFT_OBJECT_VERSION_2 from ..types.uid import UID -from ..util.logger import debug -from ..util.telemetry import instrument from ..util.util import prompt_warning_message from ..util.util import thread_ident from ..util.util import verify_tls @@ -66,24 +64,23 @@ from .api import SyftAPI from .api import SyftAPICall from .api import debox_signed_syftapicall_response -from .connection import NodeConnection +from .api import post_process_result +from .connection import ServerConnection from .protocol import SyftProtocol +logger = logging.getLogger(__name__) + if TYPE_CHECKING: # relative - from ..service.network.node_peer import NodePeer - -# use to enable mitm proxy -# from syft.grid.connections.http_connection import HTTPConnection -# HTTPConnection.proxies = {"http": "http://127.0.0.1:8080"} + from ..service.network.server_peer import ServerPeer -def upgrade_tls(url: GridURL, response: Response) -> GridURL: +def upgrade_tls(url: ServerURL, response: Response) -> ServerURL: try: if response.url.startswith("https://") and url.protocol == "http": # we got redirected to https - https_url = GridURL.from_url(response.url).with_path("") - debug(f"GridURL Upgraded to HTTPS. {https_url}") + https_url = ServerURL.from_url(response.url).with_path("") + logger.debug(f"ServerURL Upgraded to HTTPS. {https_url}") return https_url except Exception as e: print(f"Failed to upgrade to HTTPS. 
{e}") @@ -97,11 +94,11 @@ def forward_message_to_proxy( credentials: SyftSigningKey | None = None, args: list | None = None, kwargs: dict | None = None, -) -> Any | SyftError: +) -> Any: kwargs = {} if kwargs is None else kwargs args = [] if args is None else args call = SyftAPICall( - node_uid=proxy_target_uid, + server_uid=proxy_target_uid, path=path, args=args, kwargs=kwargs, @@ -112,15 +109,18 @@ def forward_message_to_proxy( # generate a random signing key credentials = SyftSigningKey.generate() - signed_message = call.sign(credentials=credentials) + signed_message: SignedSyftAPICall = call.sign(credentials=credentials) signed_result = make_call(signed_message) - response = debox_signed_syftapicall_response(signed_result) - return response + response = debox_signed_syftapicall_response(signed_result).unwrap() + result = post_process_result(response, unwrap_on_success=True) + + return result API_PATH = "/api/v2" -DEFAULT_PYGRID_PORT = 80 -DEFAULT_PYGRID_ADDRESS = f"http://localhost:{DEFAULT_PYGRID_PORT}" +DEFAULT_SYFT_UI_PORT = 80 +DEFAULT_SYFT_UI_ADDRESS = f"http://localhost:{DEFAULT_SYFT_UI_PORT}" +INTERNAL_PROXY_TO_RATHOLE = "http://proxy:80/rtunnel/" class Routes(Enum): @@ -130,38 +130,59 @@ class Routes(Enum): ROUTE_REGISTER = f"{API_PATH}/register" ROUTE_API_CALL = f"{API_PATH}/api_call" ROUTE_BLOB_STORE = "/blob" + ROUTE_FORGOT_PASSWORD = f"{API_PATH}/forgot_password" + ROUTE_RESET_PASSWORD = f"{API_PATH}/reset_password" + STREAM = f"{API_PATH}/stream" -@serializable(attrs=["proxy_target_uid", "url"]) -class HTTPConnection(NodeConnection): +@serializable(attrs=["proxy_target_uid", "url", "rtunnel_token"]) +class HTTPConnection(ServerConnection): __canonical_name__ = "HTTPConnection" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 - url: GridURL + url: ServerURL proxy_target_uid: UID | None = None routes: type[Routes] = Routes session_cache: Session | None = None + headers: dict[str, str] | None = None + rtunnel_token: str | None = None @field_validator("url", mode="before") @classmethod def make_url(cls, v: Any) -> Any: return ( - GridURL.from_url(v).as_container_host() - if isinstance(v, str | GridURL) + ServerURL.from_url(v).as_container_host() + if isinstance(v, str | ServerURL) else v ) + def set_headers(self, headers: dict[str, str]) -> None: + self.headers = headers + def with_proxy(self, proxy_target_uid: UID) -> Self: - return HTTPConnection(url=self.url, proxy_target_uid=proxy_target_uid) + return HTTPConnection( + url=self.url, + proxy_target_uid=proxy_target_uid, + rtunnel_token=self.rtunnel_token, + ) + + def stream_via(self, proxy_uid: UID, url_path: str) -> ServerURL: + # Update the presigned url path to + # // + # url_path_bytes = _serialize(url_path, to_bytes=True) + + url_path_str = base64.urlsafe_b64encode(url_path.encode()).decode() + stream_url_path = f"{self.routes.STREAM.value}/{proxy_uid}/{url_path_str}/" + return self.url.with_path(stream_url_path) def get_cache_key(self) -> str: return str(self.url) @property - def api_url(self) -> GridURL: + def api_url(self) -> ServerURL: return self.url.with_path(self.routes.ROUTE_API_CALL.value) - def to_blob_route(self, path: str, **kwargs: Any) -> GridURL: + def to_blob_route(self, path: str, **kwargs: Any) -> ServerURL: _path = self.routes.ROUTE_BLOB_STORE.value + path return self.url.with_path(_path) @@ -176,10 +197,28 @@ def session(self) -> Session: self.session_cache = session return self.session_cache - def _make_get(self, path: str, params: dict | None = None) -> bytes: - url = 
self.url.with_path(path) + def _make_get( + self, path: str, params: dict | None = None, stream: bool = False + ) -> bytes | Iterable: + if params is None: + return self._make_get_no_params(path, stream=stream) + + url = self.url + + if self.rtunnel_token: + self.headers = {} if self.headers is None else self.headers + url = ServerURL.from_url(INTERNAL_PROXY_TO_RATHOLE) + self.headers["Host"] = self.url.host_or_ip + + url = url.with_path(path) + response = self.session.get( - str(url), verify=verify_tls(), proxies={}, params=params + str(url), + headers=self.headers, + verify=verify_tls(), + proxies={}, + params=params, + stream=stream, ) if response.status_code != 200: raise requests.ConnectionError( @@ -191,15 +230,87 @@ def _make_get(self, path: str, params: dict | None = None) -> bytes: return response.content + @cached(cache=TTLCache(maxsize=128, ttl=300)) + def _make_get_no_params(self, path: str, stream: bool = False) -> bytes | Iterable: + url = self.url + + if self.rtunnel_token: + self.headers = {} if self.headers is None else self.headers + url = ServerURL.from_url(INTERNAL_PROXY_TO_RATHOLE) + self.headers["Host"] = self.url.host_or_ip + + url = url.with_path(path) + + response = self.session.get( + str(url), + headers=self.headers, + verify=verify_tls(), + proxies={}, + stream=stream, + ) + if response.status_code != 200: + raise requests.ConnectionError( + f"Failed to fetch {url}. Response returned with code {response.status_code}" + ) + + # upgrade to tls if available + self.url = upgrade_tls(self.url, response) + + if stream: + return response.iter_content(chunk_size=None) + + return response.content + + def _make_put( + self, path: str, data: bytes | Generator, stream: bool = False + ) -> Response: + url = self.url + + if self.rtunnel_token: + url = ServerURL.from_url(INTERNAL_PROXY_TO_RATHOLE) + self.headers = {} if self.headers is None else self.headers + self.headers["Host"] = self.url.host_or_ip + + url = url.with_path(path) + response = self.session.put( + str(url), + verify=verify_tls(), + proxies={}, + data=data, + headers=self.headers, + stream=stream, + ) + if response.status_code != 200: + raise requests.ConnectionError( + f"Failed to fetch {url}. 
Response returned with code {response.status_code}" + ) + + # upgrade to tls if available + self.url = upgrade_tls(self.url, response) + + return response + def _make_post( self, path: str, json: dict[str, Any] | None = None, data: bytes | None = None, ) -> bytes: - url = self.url.with_path(path) + url = self.url + + if self.rtunnel_token: + url = ServerURL.from_url(INTERNAL_PROXY_TO_RATHOLE) + self.headers = {} if self.headers is None else self.headers + self.headers["Host"] = self.url.host_or_ip + + url = url.with_path(path) response = self.session.post( - str(url), verify=verify_tls(), json=json, proxies={}, data=data + str(url), + headers=self.headers, + verify=verify_tls(), + json=json, + proxies={}, + data=data, ) if response.status_code != 200: raise requests.ConnectionError( @@ -211,7 +322,14 @@ def _make_post( return response.content - def get_node_metadata(self, credentials: SyftSigningKey) -> NodeMetadataJSON: + def stream_data(self, credentials: SyftSigningKey) -> Response: + url = self.url.with_path(self.routes.STREAM.value) + response = self.session.get( + str(url), verify=verify_tls(), proxies={}, stream=True, headers=self.headers + ) + return response + + def get_server_metadata(self, credentials: SyftSigningKey) -> ServerMetadataJSON: if self.proxy_target_uid: response = forward_message_to_proxy( make_call=self.make_call, @@ -223,10 +341,13 @@ def get_node_metadata(self, credentials: SyftSigningKey) -> NodeMetadataJSON: else: response = self._make_get(self.routes.ROUTE_METADATA.value) metadata_json = json.loads(response) - return NodeMetadataJSON(**metadata_json) + return ServerMetadataJSON(**metadata_json) - def get_api( - self, credentials: SyftSigningKey, communication_protocol: int + def get_api( # type: ignore [override] + self, + credentials: SyftSigningKey, + communication_protocol: int, + metadata: ServerMetadataJSON | None = None, ) -> SyftAPI: params = { "verify_key": str(credentials.verify_key), @@ -249,8 +370,9 @@ def get_api( obj.connection = self obj.signing_key = credentials obj.communication_protocol = communication_protocol + obj.metadata = metadata if self.proxy_target_uid: - obj.node_uid = self.proxy_target_uid + obj.server_uid = self.proxy_target_uid return cast(SyftAPI, obj) def login( @@ -260,7 +382,7 @@ def login( ) -> SyftSigningKey | None: credentials = {"email": email, "password": password} if self.proxy_target_uid: - obj = forward_message_to_proxy( + response = forward_message_to_proxy( self.make_call, proxy_target_uid=self.proxy_target_uid, path="login", @@ -268,6 +390,46 @@ def login( ) else: response = self._make_post(self.routes.ROUTE_LOGIN.value, credentials) + response = _deserialize(response, from_bytes=True) + response = post_process_result(response, unwrap_on_success=True) + + return response + + def forgot_password( + self, + email: str, + ) -> SyftSigningKey | None: + credentials = {"email": email} + if self.proxy_target_uid: + obj = forward_message_to_proxy( + self.make_call, + proxy_target_uid=self.proxy_target_uid, + path="forgot_password", + kwargs=credentials, + ) + else: + response = self._make_post( + self.routes.ROUTE_FORGOT_PASSWORD.value, credentials + ) + obj = _deserialize(response, from_bytes=True) + + return obj + + def reset_password( + self, + token: str, + new_password: str, + ) -> SyftSigningKey | None: + payload = {"token": token, "new_password": new_password} + if self.proxy_target_uid: + obj = forward_message_to_proxy( + self.make_call, + proxy_target_uid=self.proxy_target_uid, + path="reset_password", + 
kwargs=payload, + ) + else: + response = self._make_post(self.routes.ROUTE_RESET_PASSWORD.value, payload) obj = _deserialize(response, from_bytes=True) return obj @@ -284,13 +446,24 @@ def register(self, new_user: UserCreate) -> SyftSigningKey: else: response = self._make_post(self.routes.ROUTE_REGISTER.value, data=data) response = _deserialize(response, from_bytes=True) + response = post_process_result(response, unwrap_on_success=False) return response - def make_call(self, signed_call: SignedSyftAPICall) -> Any | SyftError: + def make_call(self, signed_call: SignedSyftAPICall) -> Any: msg_bytes: bytes = _serialize(obj=signed_call, to_bytes=True) + + if self.rtunnel_token: + api_url = ServerURL.from_url(INTERNAL_PROXY_TO_RATHOLE) + api_url = api_url.with_path(self.routes.ROUTE_API_CALL.value) + self.headers = {} if self.headers is None else self.headers + self.headers["Host"] = self.url.host_or_ip + else: + api_url = self.api_url + response = requests.post( # nosec - url=str(self.api_url), + url=api_url, data=msg_bytes, + headers=self.headers, ) if response.status_code != 200: @@ -310,250 +483,41 @@ def __str__(self) -> str: def __hash__(self) -> int: return hash(self.proxy_target_uid) + hash(self.url) + @as_result(SyftException) def get_client_type(self) -> type[SyftClient]: # TODO: Rasswanth, should remove passing in credentials - # when metadata are proxy forwarded in the grid routes + # when metadata are proxy forwarded in the server routes # in the gateway fixes PR # relative - from .domain_client import DomainClient + from .datasite_client import DatasiteClient from .enclave_client import EnclaveClient from .gateway_client import GatewayClient - metadata = self.get_node_metadata(credentials=SyftSigningKey.generate()) - if metadata.node_type == NodeType.DOMAIN.value: - return DomainClient - elif metadata.node_type == NodeType.GATEWAY.value: + metadata = self.get_server_metadata(credentials=SyftSigningKey.generate()) + if metadata.server_type == ServerType.DATASITE.value: + return DatasiteClient + elif metadata.server_type == ServerType.GATEWAY.value: return GatewayClient - elif metadata.node_type == NodeType.ENCLAVE.value: + elif metadata.server_type == ServerType.ENCLAVE.value: return EnclaveClient else: - return SyftError(message=f"Unknown node type {metadata.node_type}") - - -@serializable( - attrs=["proxy_target_uid", "vld_key", "vld_forward_proxy", "vld_reverse_proxy"] -) -class VeilidConnection(NodeConnection): - __canonical_name__ = "VeilidConnection" - __version__ = SYFT_OBJECT_VERSION_1 - - vld_forward_proxy: GridURL = Field(default=GridURL.from_url(VEILID_SERVICE_URL)) - vld_reverse_proxy: GridURL = Field(default=GridURL.from_url(VEILID_SYFT_PROXY_URL)) - vld_key: str - proxy_target_uid: UID | None = None - routes: type[Routes] = Field(default=Routes) - session_cache: Session | None = None - - @field_validator("vld_forward_proxy", mode="before") - def make_forward_proxy_url(cls, v: GridURL | str) -> GridURL: - if isinstance(v, str): - return GridURL.from_url(v) - else: - return v - - # TODO: Remove this once when we remove reverse proxy in Veilid Connection - @field_validator("vld_reverse_proxy", mode="before") - def make_reverse_proxy_url(cls, v: GridURL | str) -> GridURL: - if isinstance(v, str): - return GridURL.from_url(v) - else: - return v - - def with_proxy(self, proxy_target_uid: UID) -> Self: - raise NotImplementedError("VeilidConnection does not support with_proxy") - - def get_cache_key(self) -> str: - return str(self.vld_key) - - # def to_blob_route(self, path: 
str, **kwargs) -> GridURL: - # _path = self.routes.ROUTE_BLOB_STORE.value + path - # return self.url.with_path(_path) - - @property - def session(self) -> Session: - if self.session_cache is None: - session = requests.Session() - retry = Retry(total=3, backoff_factor=0.5) - adapter = HTTPAdapter(max_retries=retry) - session.mount("http://", adapter) - session.mount("https://", adapter) - self.session_cache = session - return self.session_cache - - def _make_get(self, path: str, params: dict | None = None) -> bytes: - rev_proxy_url = self.vld_reverse_proxy.with_path(path) - forward_proxy_url = self.vld_forward_proxy.with_path(VEILID_PROXY_PATH) - - json_data = { - "url": str(rev_proxy_url), - "method": "GET", - "vld_key": self.vld_key, - "params": params, - } - response = self.session.get(str(forward_proxy_url), json=json_data) - if response.status_code != 200: - raise requests.ConnectionError( - f"Failed to fetch {forward_proxy_url}. Response returned with code {response.status_code}" + raise SyftException( + public_message=f"Unknown server type {metadata.server_type}" ) - return response.content - - def _make_post( - self, - path: str, - json: dict[str, Any] | None = None, - data: bytes | None = None, - ) -> bytes: - rev_proxy_url = self.vld_reverse_proxy.with_path(path) - forward_proxy_url = self.vld_forward_proxy.with_path(VEILID_PROXY_PATH) - - # Since JSON expects strings, we need to encode the bytes to base64 - # as some bytes may not be valid utf-8 - # TODO: Can we optimize this? - data_base64 = base64.b64encode(data).decode() if data else None - - json_data = { - "url": str(rev_proxy_url), - "method": "POST", - "vld_key": self.vld_key, - "json": json, - "data": data_base64, - } - - response = self.session.post(str(forward_proxy_url), json=json_data) - if response.status_code != 200: - raise requests.ConnectionError( - f"Failed to fetch {forward_proxy_url}. 
Response returned with code {response.status_code}" - ) - - return response.content - - def get_node_metadata(self, credentials: SyftSigningKey) -> NodeMetadataJSON: - # TODO: Implement message proxy forwarding for gateway - - response = self._make_get(self.routes.ROUTE_METADATA.value) - metadata_json = json.loads(response) - return NodeMetadataJSON(**metadata_json) - - def get_api( - self, credentials: SyftSigningKey, communication_protocol: int - ) -> SyftAPI: - # TODO: Implement message proxy forwarding for gateway - - params = { - "verify_key": str(credentials.verify_key), - "communication_protocol": communication_protocol, - } - content = self._make_get(self.routes.ROUTE_API.value, params=params) - obj = _deserialize(content, from_bytes=True) - obj.connection = self - obj.signing_key = credentials - obj.communication_protocol = communication_protocol - if self.proxy_target_uid: - obj.node_uid = self.proxy_target_uid - return cast(SyftAPI, obj) - - def login( - self, - email: str, - password: str, - ) -> SyftSigningKey | None: - # TODO: Implement message proxy forwarding for gateway - - credentials = {"email": email, "password": password} - response = self._make_post(self.routes.ROUTE_LOGIN.value, credentials) - obj = _deserialize(response, from_bytes=True) - - return obj - - def register(self, new_user: UserCreate) -> Any: - # TODO: Implement message proxy forwarding for gateway - - data = _serialize(new_user, to_bytes=True) - response = self._make_post(self.routes.ROUTE_REGISTER.value, data=data) - response = _deserialize(response, from_bytes=True) - return response - - def make_call(self, signed_call: SignedSyftAPICall) -> Any: - msg_bytes: bytes = _serialize(obj=signed_call, to_bytes=True) - # Since JSON expects strings, we need to encode the bytes to base64 - # as some bytes may not be valid utf-8 - # TODO: Can we optimize this? - msg_base64 = base64.b64encode(msg_bytes).decode() - - rev_proxy_url = self.vld_reverse_proxy.with_path( - self.routes.ROUTE_API_CALL.value - ) - forward_proxy_url = self.vld_forward_proxy.with_path(VEILID_PROXY_PATH) - json_data = { - "url": str(rev_proxy_url), - "method": "POST", - "vld_key": self.vld_key, - "data": msg_base64, - } - response = requests.post( # nosec - url=str(forward_proxy_url), - json=json_data, - ) - - if response.status_code != 200: - raise requests.ConnectionError( - f"Failed to fetch metadata. 
Response returned with code {response.status_code}" - ) - - result = _deserialize(response.content, from_bytes=True) - return result - - def __repr__(self) -> str: - return self.__str__() - - def __str__(self) -> str: - res = f"{type(self).__name__}:" - res += f"\n DHT Key: {self.vld_key}" - res += f"\n Forward Proxy: {self.vld_forward_proxy}" - res += f"\n Reverse Proxy: {self.vld_reverse_proxy}" - return res - - def __hash__(self) -> int: - return ( - hash(self.proxy_target_uid) - + hash(self.vld_key) - + hash(self.vld_forward_proxy) - + hash(self.vld_reverse_proxy) - ) - - def get_client_type(self) -> type[SyftClient]: - # TODO: Rasswanth, should remove passing in credentials - # when metadata are proxy forwarded in the grid routes - # in the gateway fixes PR - # relative - from .domain_client import DomainClient - from .enclave_client import EnclaveClient - from .gateway_client import GatewayClient - - metadata = self.get_node_metadata(credentials=SyftSigningKey.generate()) - if metadata.node_type == NodeType.DOMAIN.value: - return DomainClient - elif metadata.node_type == NodeType.GATEWAY.value: - return GatewayClient - elif metadata.node_type == NodeType.ENCLAVE.value: - return EnclaveClient - else: - return SyftError(message=f"Unknown node type {metadata.node_type}") - @serializable() -class PythonConnection(NodeConnection): +class PythonConnection(ServerConnection): __canonical_name__ = "PythonConnection" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 - node: AbstractNode + server: AbstractServer proxy_target_uid: UID | None = None def with_proxy(self, proxy_target_uid: UID) -> Self: - return PythonConnection(node=self.node, proxy_target_uid=proxy_target_uid) + return PythonConnection(server=self.server, proxy_target_uid=proxy_target_uid) - def get_node_metadata(self, credentials: SyftSigningKey) -> NodeMetadataJSON: + def get_server_metadata(self, credentials: SyftSigningKey) -> ServerMetadataJSON: if self.proxy_target_uid: response = forward_message_to_proxy( make_call=self.make_call, @@ -563,17 +527,20 @@ def get_node_metadata(self, credentials: SyftSigningKey) -> NodeMetadataJSON: ) return response else: - return self.node.metadata.to(NodeMetadataJSON) + return self.server.metadata.to(ServerMetadataJSON) - def to_blob_route(self, path: str, host: str | None = None) -> GridURL: + def to_blob_route(self, path: str, host: str | None = None) -> ServerURL: # TODO: FIX! if host is not None: - return GridURL(host_or_ip=host, port=8333).with_path(path) + return ServerURL(host_or_ip=host, port=8333).with_path(path) else: - return GridURL(port=8333).with_path(path) + return ServerURL(port=8333).with_path(path) - def get_api( - self, credentials: SyftSigningKey, communication_protocol: int + def get_api( # type: ignore [override] + self, + credentials: SyftSigningKey, + communication_protocol: int, + metadata: ServerMetadataJSON | None = None, ) -> SyftAPI: # todo: its a bit odd to identify a user by its verify key maybe? 
if self.proxy_target_uid: @@ -588,28 +555,36 @@ def get_api( credentials=credentials, ) else: - obj = self.node.get_api( + obj = self.server.get_api( for_user=credentials.verify_key, communication_protocol=communication_protocol, ) obj.connection = self obj.signing_key = credentials obj.communication_protocol = communication_protocol + obj.metadata = metadata if self.proxy_target_uid: - obj.node_uid = self.proxy_target_uid + obj.server_uid = self.proxy_target_uid return obj def get_cache_key(self) -> str: - return str(self.node.id) + return str(self.server.id) - def exchange_credentials(self, email: str, password: str) -> UserPrivateKey | None: - context = self.node.get_unauthed_context( + def exchange_credentials(self, email: str, password: str) -> SyftSuccess | None: + context = self.server.get_unauthed_context( login_credentials=UserLoginCredentials(email=email, password=password) ) - method = self.node.get_method_with_context( + method = self.server.get_method_with_context( UserService.exchange_credentials, context ) - result = method() + try: + result = method() + except SyftException: + raise + except Exception: + raise SyftException( + public_message=f"Exception calling exchange credentials. {traceback.format_exc()}" + ) return result def login( @@ -618,7 +593,7 @@ def login( password: str, ) -> SyftSigningKey | None: if self.proxy_target_uid: - obj = forward_message_to_proxy( + result = forward_message_to_proxy( self.make_call, proxy_target_uid=self.proxy_target_uid, path="login", @@ -626,7 +601,51 @@ def login( ) else: - obj = self.exchange_credentials(email=email, password=password) + result = self.exchange_credentials(email=email, password=password) + result = post_process_result(result, unwrap_on_success=True) + return result + + def forgot_password( + self, + email: str, + ) -> SyftSigningKey | None: + credentials = {"email": email} + if self.proxy_target_uid: + obj = forward_message_to_proxy( + self.make_call, + proxy_target_uid=self.proxy_target_uid, + path="forgot_password", + kwargs=credentials, + ) + else: + response = self.server.services.user.forgot_password( + context=ServerServiceContext(server=self.server), email=email + ) + obj = post_process_result(response, unwrap_on_success=True) + + return obj + + def reset_password( + self, + token: str, + new_password: str, + ) -> SyftSigningKey | None: + payload = {"token": token, "new_password": new_password} + if self.proxy_target_uid: + obj = forward_message_to_proxy( + self.make_call, + proxy_target_uid=self.proxy_target_uid, + path="reset_password", + kwargs=payload, + ) + else: + response = self.server.services.user.reset_password( + context=ServerServiceContext(server=self.server), + token=token, + new_password=new_password, + ) + obj = post_process_result(response, unwrap_on_success=True) + return obj def register(self, new_user: UserCreate) -> SyftSigningKey | None: @@ -638,13 +657,15 @@ def register(self, new_user: UserCreate) -> SyftSigningKey | None: kwargs={"new_user": new_user}, ) else: - service_context = NodeServiceContext(node=self.node) - method = self.node.get_service_method(UserService.register) - response = method(context=service_context, new_user=new_user) + service_context = ServerServiceContext(server=self.server) + response = self.server.services.user.register( + context=service_context, new_user=new_user + ) + response = post_process_result(response, unwrap_on_success=False) return response - def make_call(self, signed_call: SignedSyftAPICall) -> Any | SyftError: - return 
self.node.handle_api_call(signed_call) + def make_call(self, signed_call: SignedSyftAPICall) -> Any: + return self.server.handle_api_call(signed_call) def __repr__(self) -> str: return f"{type(self).__name__}" @@ -652,37 +673,48 @@ def __repr__(self) -> str: def __str__(self) -> str: return f"{type(self).__name__}" + @as_result(SyftException) def get_client_type(self) -> type[SyftClient]: # relative - from .domain_client import DomainClient + from .datasite_client import DatasiteClient from .enclave_client import EnclaveClient from .gateway_client import GatewayClient - metadata = self.get_node_metadata(credentials=SyftSigningKey.generate()) - if metadata.node_type == NodeType.DOMAIN.value: - return DomainClient - elif metadata.node_type == NodeType.GATEWAY.value: + metadata = self.get_server_metadata(credentials=SyftSigningKey.generate()) + if metadata.server_type == ServerType.DATASITE.value: + return DatasiteClient + elif metadata.server_type == ServerType.GATEWAY.value: return GatewayClient - elif metadata.node_type == NodeType.ENCLAVE.value: + elif metadata.server_type == ServerType.ENCLAVE.value: return EnclaveClient else: - return SyftError(message=f"Unknown node type {metadata.node_type}") + raise SyftException(message=f"Unknown server type {metadata.server_type}") -@instrument -@serializable() +@serializable(canonical_name="SyftClient", version=1) class SyftClient: - connection: NodeConnection - metadata: NodeMetadataJSON | None + connection: ServerConnection + metadata: ServerMetadataJSON | None credentials: SyftSigningKey | None __logged_in_user: str = "" __logged_in_username: str = "" __user_role: ServiceRole = ServiceRole.NONE + # informs getattr does not have nasty side effects + __syft_allow_autocomplete__ = [ + "api", + "code", + "jobs", + "users", + "settings", + "notifications", + "custom_api", + ] + def __init__( self, - connection: NodeConnection, - metadata: NodeMetadataJSON | None = None, + connection: ServerConnection, + metadata: ServerMetadataJSON | None = None, credentials: SyftSigningKey | None = None, api: SyftAPI | None = None, ) -> None: @@ -690,6 +722,7 @@ def __init__( self.metadata = metadata self.credentials: SyftSigningKey | None = credentials self._api = api + self.services: APIModule | None = None self.communication_protocol: int | str | None = None self.current_protocol: int | str | None = None @@ -700,12 +733,21 @@ def get_env(self) -> str: def post_init(self) -> None: if self.metadata is None: - self._fetch_node_metadata(self.credentials) - self.metadata = cast(NodeMetadataJSON, self.metadata) + self._fetch_server_metadata(self.credentials) + self.metadata = cast(ServerMetadataJSON, self.metadata) self.communication_protocol = self._get_communication_protocol( self.metadata.supported_protocols ) + def set_headers(self, headers: dict[str, str]) -> None: + if isinstance(self.connection, HTTPConnection): + self.connection.set_headers(headers) + return None + raise SyftException( # type: ignore + public_message="Incompatible connection type." 
+ + f"Expected HTTPConnection, got {type(self.connection)}" + ) + def _get_communication_protocol( self, protocols_supported_by_server: list ) -> int | str: @@ -741,63 +783,9 @@ def create_project( user_email_address=user_email_address, members=[self], ) - project = project_create.start() + project = project_create.send() return project - # TODO: type of request should be REQUEST, but it will give circular import error - def sync_code_from_request(self, request: Any) -> SyftSuccess | SyftError: - # relative - from ..service.code.user_code import UserCode - from ..service.code.user_code import UserCodeStatusCollection - from ..store.linked_obj import LinkedObject - - code: UserCode | SyftError = request.code - if isinstance(code, SyftError): - return code - - code = deepcopy(code) - code.node_uid = self.id - code.user_verify_key = self.verify_key - - def get_nested_codes(code: UserCode) -> list[UserCode]: - result: list[UserCode] = [] - if code.nested_codes is None: - return result - - for _, (linked_code_obj, _) in code.nested_codes.items(): - nested_code = linked_code_obj.resolve - nested_code = deepcopy(nested_code) - nested_code.node_uid = code.node_uid - nested_code.user_verify_key = code.user_verify_key - result.append(nested_code) - result += get_nested_codes(nested_code) - - return result - - def get_code_statusses(codes: list[UserCode]) -> list[UserCodeStatusCollection]: - statusses = [] - for code in codes: - status = deepcopy(code.status) - statusses.append(status) - code.status_link = LinkedObject.from_obj(status, node_uid=code.node_uid) - return statusses - - nested_codes = get_nested_codes(code) - statusses = get_code_statusses(nested_codes + [code]) - - for c in nested_codes + [code]: - res = self.code.submit(c) - if isinstance(res, SyftError): - return res - - for status in statusses: - res = self.api.services.code_status.create(status) - if isinstance(res, SyftError): - return res - - self._fetch_api(self.credentials) - return SyftSuccess(message="User Code Submitted") - @property def authed(self) -> bool: return bool(self.credentials) @@ -821,12 +809,12 @@ def verify_key(self) -> SyftVerifyKey: return self.credentials.verify_key @classmethod - def from_url(cls, url: str | GridURL) -> Self: - return cls(connection=HTTPConnection(url=GridURL.from_url(url))) + def from_url(cls, url: str | ServerURL) -> Self: + return cls(connection=HTTPConnection(url=ServerURL.from_url(url))) @classmethod - def from_node(cls, node: AbstractNode) -> Self: - return cls(connection=PythonConnection(node=node)) + def from_server(cls, server: AbstractServer) -> Self: + return cls(connection=PythonConnection(server=server)) @property def name(self) -> str | None: @@ -843,9 +831,9 @@ def icon(self) -> str: @property def peer(self) -> Any: # relative - from ..service.network.network_service import NodePeer + from ..service.network.network_service import ServerPeer - return NodePeer.from_client(self) + return ServerPeer.from_client(self) @property def route(self) -> Any: @@ -866,36 +854,33 @@ def guest(self) -> Self: ) def exchange_route( - self, client: Self, protocol: SyftProtocol = SyftProtocol.HTTP - ) -> SyftSuccess | SyftError: + self, + client: Self, + protocol: SyftProtocol = SyftProtocol.HTTP, + reverse_tunnel: bool = False, + ) -> SyftSuccess: # relative from ..service.network.routes import connection_to_route if protocol == SyftProtocol.HTTP: - self_node_route = connection_to_route(self.connection) - remote_node_route = connection_to_route(client.connection) + self_server_route = 
connection_to_route(self.connection) + remote_server_route = connection_to_route(client.connection) if client.metadata is None: - return SyftError(f"client {client}'s metadata is None!") - - result = self.api.services.network.exchange_credentials_with( - self_node_route=self_node_route, - remote_node_route=remote_node_route, - remote_node_verify_key=client.metadata.to(NodeMetadataV3).verify_key, - ) - - elif protocol == SyftProtocol.VEILID: - remote_node_route = connection_to_route(client.connection) + raise SyftException( + public_message=f"client {client}'s metadata is None!" + ) - result = self.api.services.network.exchange_veilid_route( - remote_node_route=remote_node_route, + return self.api.services.network.exchange_credentials_with( + self_server_route=self_server_route, + remote_server_route=remote_server_route, + remote_server_verify_key=client.metadata.to(ServerMetadata).verify_key, + reverse_tunnel=reverse_tunnel, ) else: raise ValueError( f"Invalid Route Exchange SyftProtocol: {protocol}.Supported protocols are {SyftProtocol.all()}" ) - return result - @property def jobs(self) -> APIModule | None: if self.api.has_service("job"): @@ -908,6 +893,12 @@ def users(self) -> APIModule | None: return self.api.services.user return None + @property + def custom_api(self) -> APIModule | None: + if self.api.has_service("api"): + return self.api.services.api + return None + @property def numpy(self) -> APIModule | None: if self.api.has_lib("numpy"): @@ -916,7 +907,7 @@ def numpy(self) -> APIModule | None: @property def settings(self) -> APIModule | None: - if self.api.has_service("user"): + if self.api.has_service("settings"): return self.api.services.settings return None @@ -933,13 +924,13 @@ def notifier(self) -> APIModule | None: return None @property - def peers(self) -> list[NodePeer] | SyftError | None: + def peers(self) -> list[ServerPeer] | None: if self.api.has_service("network"): return self.api.services.network.get_all_peers() return None @property - def me(self) -> UserView | SyftError | None: + def account(self) -> UserView | None: if self.api.has_service("user"): return self.api.services.user.get_current_user() return None @@ -949,12 +940,29 @@ def login_as_guest(self) -> Self: if self.metadata is not None: print( - f"Logged into <{self.name}: {self.metadata.node_side_type.capitalize()}-side " - f"{self.metadata.node_type.capitalize()}> as GUEST" + f"Logged into <{self.name}: {self.metadata.server_side_type.capitalize()}-side " + f"{self.metadata.server_type.capitalize()}> as GUEST" ) return _guest_client + # is this used?? + def login_as(self, email: str) -> Self: + user_private_key = self.api.services.user.key_for_email(email=email) + if not isinstance(user_private_key, UserPrivateKey): + return user_private_key + if self.metadata is not None: + print( + f"Logged into <{self.name}: {self.metadata.server_side_type.capitalize()}-side " + f"{self.metadata.server_type.capitalize()}> as {email}" + ) + + return self.__class__( + connection=self.connection, + credentials=user_private_key.signing_key, + metadata=self.metadata, + ) + def login( self, email: str | None = None, @@ -963,15 +971,6 @@ def login( register: bool = False, **kwargs: Any, ) -> Self: - # TODO: Remove this Hack (Note to Rasswanth) - # If SYFT_LOGIN_{NODE_NAME}_PASSWORD is set, use that as the password - # for the login. 
This is useful for CI/CD environments to test password - # randomization that is implemented by helm charts - if self.name is not None and email == "info@openmined.org": - pass_env_var = f"SYFT_LOGIN_{self.name}_PASSWORD" - if pass_env_var in os.environ: - password = os.environ[pass_env_var] - if email is None: email = input("Email: ") if password is None: @@ -982,9 +981,10 @@ def login( email=email, password=password, password_verify=password, **kwargs ) - user_private_key = self.connection.login(email=email, password=password) - if isinstance(user_private_key, SyftError): - return user_private_key + try: + user_private_key = self.connection.login(email=email, password=password) + except Exception as e: + raise SyftException(public_message=e.public_message) signing_key = None if user_private_key is None else user_private_key.signing_key @@ -1002,16 +1002,16 @@ def login( if signing_key is not None and client.metadata is not None: print( - f"Logged into <{client.name}: {client.metadata.node_side_type.capitalize()} side " - f"{client.metadata.node_type.capitalize()}> as <{email}>" + f"Logged into <{client.name}: {client.metadata.server_side_type.capitalize()} side " + f"{client.metadata.server_type.capitalize()}> as <{email}>" ) # relative - from ..node.node import get_default_root_password + from ..server.server import get_default_root_password if password == get_default_root_password(): message = ( "You are using a default password. Please change the password " - "using `[your_client].me.set_password([new_password])`." + "using `[your_client].account.set_password([new_password])`." ) prompt_warning_message(message) @@ -1024,19 +1024,19 @@ def login( ) # Adding another cache storage # as this would be useful in retrieving unique clients - # node uid and verify key are not individually unique - # both the combination of node uid and verify key are unique - # which could be used to identity a client uniquely of any given node + # server uid and verify key are not individually unique + # both the combination of server uid and verify key are unique + # which could be used to identity a client uniquely of any given server # TODO: It would be better to have a single cache storage # combining both email, password and verify key and uid SyftClientSessionCache.add_client_by_uid_and_verify_key( verify_key=signing_key.verify_key, - node_uid=client.id, + server_uid=client.id, syft_client=client, ) # relative - from ..node.node import CODE_RELOADER + from ..server.server import CODE_RELOADER thread_id = thread_ident() if thread_id is not None: @@ -1059,7 +1059,7 @@ def register( password_verify: str | None = None, institution: str | None = None, website: str | None = None, - ) -> SyftError | SyftSigningKey | None: + ) -> SyftSigningKey | None: if not email: email = input("Email: ") if not password: @@ -1067,7 +1067,7 @@ def register( if not password_verify: password_verify = getpass("Confirm Password: ") if password != password_verify: - return SyftError(message="Passwords do not match") + raise SyftException(public_message="Passwords do not match") try: new_user = UserCreate( @@ -1082,15 +1082,15 @@ def register( ), ) except Exception as e: - return SyftError(message=str(e)) + raise SyftException(public_message=str(e)) if ( self.metadata - and self.metadata.node_side_type == NodeSideType.HIGH_SIDE.value + and self.metadata.server_side_type == ServerSideType.HIGH_SIDE.value ): message = ( "You're registering a user to a high side " - f"{self.metadata.node_type}, which could " + 
f"{self.metadata.server_type}, which could " "host datasets with private information." ) if self.metadata.show_warnings and not prompt_warning_message( @@ -1098,10 +1098,7 @@ def register( ): return None - response = self.connection.register(new_user=new_user) - if isinstance(response, tuple): - response = response[0] - return response + return self.connection.register(new_user=new_user) def __hash__(self) -> int: return hash(self.id) + hash(self.connection) @@ -1127,19 +1124,21 @@ def __repr__(self) -> str: return f"<{client_type} - <{uid}>: via {self.connection}>" return f"<{client_type} - {self.name} <{uid}>: {self.connection}>" - def _fetch_node_metadata(self, credentials: SyftSigningKey) -> None: - metadata = self.connection.get_node_metadata(credentials=credentials) - if isinstance(metadata, NodeMetadataJSON): + def _fetch_server_metadata(self, credentials: SyftSigningKey) -> None: + metadata = self.connection.get_server_metadata(credentials=credentials) + if isinstance(metadata, ServerMetadataJSON): metadata.check_version(__version__) self.metadata = metadata - def _fetch_api(self, credentials: SyftSigningKey) -> None: - _api: SyftAPI = self.connection.get_api( + def _fetch_api(self, credentials: SyftSigningKey) -> SyftAPI: + _api: SyftAPI = self.connection.get_api( # type: ignore [call-arg] credentials=credentials, communication_protocol=self.communication_protocol, + metadata=self.metadata, ) + self._fetch_server_metadata(self.credentials) - def refresh_callback() -> None: + def refresh_callback() -> SyftAPI: return self._fetch_api(self.credentials) _api.refresh_api_callback = refresh_callback @@ -1148,54 +1147,44 @@ def refresh_callback() -> None: raise ValueError(f"{self}'s credentials (signing key) is None!") APIRegistry.set_api_for( - node_uid=self.id, + server_uid=self.id, user_verify_key=self.credentials.verify_key, api=_api, ) + self._api = _api + self._api.metadata = self.metadata + self.services = _api.services + + return _api -@instrument def connect( - url: str | GridURL = DEFAULT_PYGRID_ADDRESS, - node: AbstractNode | None = None, + url: str | ServerURL = DEFAULT_SYFT_UI_ADDRESS, + server: AbstractServer | None = None, port: int | None = None, - vld_forward_proxy: str | GridURL | None = None, - vld_reverse_proxy: str | GridURL | None = None, - vld_key: str | None = None, ) -> SyftClient: - if node: - connection = PythonConnection(node=node) - elif vld_key and vld_forward_proxy and vld_reverse_proxy: - connection = VeilidConnection( - vld_forward_proxy=vld_forward_proxy, - vld_reverse_proxy=vld_reverse_proxy, - vld_key=vld_key, - ) + if server: + connection = PythonConnection(server=server) else: - url = GridURL.from_url(url) + url = ServerURL.from_url(url) if isinstance(port, int | str): url.set_port(int(port)) connection = HTTPConnection(url=url) - client_type = connection.get_client_type() - - if isinstance(client_type, SyftError): - return client_type - + client_type = connection.get_client_type().unwrap() return client_type(connection=connection) -@instrument def register( - url: str | GridURL, + url: str | ServerURL, port: int, name: str, email: str, password: str, institution: str | None = None, website: str | None = None, -) -> SyftError | SyftSigningKey | None: +) -> SyftSigningKey | None: guest_client = connect(url=url, port=port) return guest_client.register( name=name, @@ -1206,67 +1195,45 @@ def register( ) -@instrument def login_as_guest( # HTTPConnection - url: str | GridURL = DEFAULT_PYGRID_ADDRESS, + url: str | ServerURL = DEFAULT_SYFT_UI_ADDRESS, port: 
int | None = None, # PythonConnection - node: AbstractNode | None = None, - # Veilid Connection - vld_forward_proxy: str | GridURL | None = None, - vld_reverse_proxy: str | GridURL | None = None, - vld_key: str | None = None, + server: AbstractServer | None = None, verbose: bool = True, ) -> SyftClient: _client = connect( url=url, - node=node, + server=server, port=port, - vld_forward_proxy=vld_forward_proxy, - vld_reverse_proxy=vld_reverse_proxy, - vld_key=vld_key, ) - if isinstance(_client, SyftError): - return _client - if verbose and _client.metadata is not None: print( - f"Logged into <{_client.name}: {_client.metadata.node_side_type.capitalize()}-" - f"side {_client.metadata.node_type.capitalize()}> as GUEST" + f"Logged into <{_client.name}: {_client.metadata.server_side_type.capitalize()}-" + f"side {_client.metadata.server_type.capitalize()}> as GUEST" ) return _client.guest() -@instrument def login( email: str, # HTTPConnection - url: str | GridURL = DEFAULT_PYGRID_ADDRESS, + url: str | ServerURL = DEFAULT_SYFT_UI_ADDRESS, port: int | None = None, # PythonConnection - node: AbstractNode | None = None, - # Veilid Connection - vld_forward_proxy: str | GridURL | None = None, - vld_reverse_proxy: str | GridURL | None = None, - vld_key: str | None = None, + server: AbstractServer | None = None, password: str | None = None, cache: bool = True, ) -> SyftClient: _client = connect( url=url, - node=node, + server=server, port=port, - vld_forward_proxy=vld_forward_proxy, - vld_reverse_proxy=vld_reverse_proxy, - vld_key=vld_key, ) - if isinstance(_client, SyftError): - return _client - connection = _client.connection login_credentials = None @@ -1315,7 +1282,7 @@ def add_client( cls, email: str, password: str, - connection: NodeConnection, + connection: ServerConnection, syft_client: SyftClient, ) -> None: hash_key = cls._get_key(email, password, connection.get_cache_key()) @@ -1326,22 +1293,22 @@ def add_client( def add_client_by_uid_and_verify_key( cls, verify_key: SyftVerifyKey, - node_uid: UID, + server_uid: UID, syft_client: SyftClient, ) -> None: - hash_key = str(node_uid) + str(verify_key) + hash_key = str(server_uid) + str(verify_key) cls.__client_cache__[hash_key] = syft_client @classmethod def get_client_by_uid_and_verify_key( - cls, verify_key: SyftVerifyKey, node_uid: UID + cls, verify_key: SyftVerifyKey, server_uid: UID ) -> SyftClient | None: - hash_key = str(node_uid) + str(verify_key) + hash_key = str(server_uid) + str(verify_key) return cls.__client_cache__.get(hash_key, None) @classmethod def get_client( - cls, email: str, password: str, connection: NodeConnection + cls, email: str, password: str, connection: ServerConnection ) -> SyftClient | None: # we have some bugs here so lets disable until they are fixed. 
return None @@ -1349,5 +1316,5 @@ def get_client( # return cls.__credentials_store__.get(hash_key, None) @classmethod - def get_client_for_node_uid(cls, node_uid: UID) -> SyftClient | None: - return cls.__client_cache__.get(node_uid, None) + def get_client_for_server_uid(cls, server_uid: UID) -> SyftClient | None: + return cls.__client_cache__.get(server_uid, None) diff --git a/packages/syft/src/syft/client/connection.py b/packages/syft/src/syft/client/connection.py index e82db863e8a..1bb0ea6ceb8 100644 --- a/packages/syft/src/syft/client/connection.py +++ b/packages/syft/src/syft/client/connection.py @@ -2,13 +2,16 @@ from typing import Any # relative -from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from ..types.syft_object import SyftObject +from ..types.uid import UID -class NodeConnection(SyftObject): - __canonical_name__ = "NodeConnection" - __version__ = SYFT_OBJECT_VERSION_2 +class ServerConnection(SyftObject): + __canonical_name__ = "ServerConnection" + __version__ = SYFT_OBJECT_VERSION_1 + + id: UID | None = None # type: ignore def get_cache_key(self) -> str: raise NotImplementedError diff --git a/packages/syft/src/syft/client/datasite_client.py b/packages/syft/src/syft/client/datasite_client.py new file mode 100644 index 00000000000..b88a7081299 --- /dev/null +++ b/packages/syft/src/syft/client/datasite_client.py @@ -0,0 +1,503 @@ +# future +from __future__ import annotations + +# stdlib +import json +import logging +from pathlib import Path +import re +from string import Template +import traceback +from typing import TYPE_CHECKING +from typing import cast + +# third party +import markdown +from tqdm import tqdm + +# relative +from ..abstract_server import ServerSideType +from ..serde.serializable import serializable +from ..service.action.action_object import ActionObject +from ..service.code_history.code_history import CodeHistoriesDict +from ..service.code_history.code_history import UsersCodeHistoriesDict +from ..service.dataset.dataset import Contributor +from ..service.dataset.dataset import CreateAsset +from ..service.dataset.dataset import CreateDataset +from ..service.dataset.dataset import _check_asset_must_contain_mock +from ..service.migration.object_migration_state import MigrationData +from ..service.request.request import Request +from ..service.response import SyftError +from ..service.response import SyftSuccess +from ..service.sync.diff_state import ResolvedSyncState +from ..service.sync.sync_state import SyncState +from ..service.user.roles import Roles +from ..service.user.user import ServiceRole +from ..service.user.user import UserView +from ..types.blob_storage import BlobFile +from ..types.errors import SyftException +from ..types.uid import UID +from ..util.misc_objs import HTMLObject +from ..util.util import get_mb_size +from ..util.util import prompt_warning_message +from .api import APIModule +from .client import SyftClient +from .client import login +from .client import login_as_guest +from .connection import ServerConnection +from .protocol import SyftProtocol + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + # relative + from ..orchestra import ServerHandle + from ..service.project.project import Project + + +def _get_files_from_glob(glob_path: str) -> list[Path]: + files = Path().glob(glob_path) + return [f for f in files if f.is_file() and not f.name.startswith(".")] + + +def _get_files_from_dir(dir: Path, recursive: bool) -> list: + files = dir.rglob("*") if recursive else 
dir.iterdir() + return [f for f in files if not f.name.startswith(".") and f.is_file()] + + +def _contains_subdir(dir: Path) -> bool: + for item in dir.iterdir(): + if item.is_dir(): + return True + return False + + +def add_default_uploader( + user: UserView, obj: CreateDataset | CreateAsset +) -> CreateDataset | CreateAsset: + uploader = None + for contributor in obj.contributors: + if contributor.role == str(Roles.UPLOADER): + uploader = contributor + break + + if uploader is None: + uploader = Contributor( + role=str(Roles.UPLOADER), + name=user.name, + email=user.email, + ) + obj.contributors.add(uploader) + + obj.uploader = uploader + return obj + + +@serializable(canonical_name="DatasiteClient", version=1) +class DatasiteClient(SyftClient): + def __repr__(self) -> str: + return f"" + + def upload_dataset(self, dataset: CreateDataset) -> SyftSuccess: + # relative + from ..types.twin_object import TwinObject + + if self.users is None: + raise SyftException(public_message=f"can't get user service for {self}") + + user = self.users.get_current_user() + if user.role not in [ServiceRole.DATA_OWNER, ServiceRole.ADMIN]: + return SyftError(message="You don't have permission to upload datasets.") + dataset = add_default_uploader(user, dataset) + + for i in range(len(dataset.asset_list)): + asset = dataset.asset_list[i] + dataset.asset_list[i] = add_default_uploader(user, asset) + + # dataset._check_asset_must_contain_mock() + dataset_size: float = 0.0 + + # TODO: Refactor so that object can also be passed to generate warnings + + self.api.connection = cast(ServerConnection, self.api.connection) + + metadata = self.api.connection.get_server_metadata(self.api.signing_key) + + if ( + metadata.show_warnings + and metadata.server_side_type == ServerSideType.HIGH_SIDE.value + ): + message = ( + "You're approving a request on " + f"{metadata.server_side_type} side {metadata.server_type} " + "which may host datasets with private information." + ) + prompt_warning_message(message=message, confirm=True) + + with tqdm( + total=len(dataset.asset_list), colour="green", desc="Uploading" + ) as pbar: + for asset in dataset.asset_list: + try: + contains_empty: bool = asset.contains_empty() + twin = TwinObject( + private_obj=ActionObject.from_obj(asset.data), + mock_obj=ActionObject.from_obj(asset.mock), + syft_server_location=self.id, + syft_client_verify_key=self.verify_key, + ) + twin._save_to_blob_storage(allow_empty=contains_empty).unwrap() + except Exception as e: + tqdm.write(f"Failed to create twin for {asset.name}. {e}") + raise SyftException( + public_message=f"Failed to create twin. 
{e}" + ) from e + + try: + self.api.services.action.set( + twin, ignore_detached_objs=contains_empty + ) + except SyftException: + tqdm.write(f"Failed to upload asset: {asset.name}") + raise + + asset.action_id = twin.id + asset.server_uid = self.id + dataset_size += get_mb_size(asset.data) + + # Update the progress bar and set the dynamic description + pbar.set_description(f"Uploading: {asset.name}") + pbar.update(1) + + dataset.mb_size = dataset_size + _check_asset_must_contain_mock(dataset.asset_list) + dataset.check() + return self.api.services.dataset.add(dataset=dataset) + + def forgot_password(self, email: str) -> SyftSuccess | SyftError: + return self.connection.forgot_password(email=email) + + def reset_password(self, token: str, new_password: str) -> SyftSuccess | SyftError: + return self.connection.reset_password(token=token, new_password=new_password) + + def refresh(self) -> None: + if self.credentials: + self._fetch_server_metadata(self.credentials) + + if self._api and self._api.refresh_api_callback: + self._api.refresh_api_callback() + + def get_sync_state(self) -> SyncState: + state: SyncState = self.api.services.sync._get_state() + for uid, obj in state.objects.items(): + if isinstance(obj, ActionObject): + obj = obj.refresh_object(resolve_nested=False) + state.objects[uid] = obj + return state + + def apply_state(self, resolved_state: ResolvedSyncState) -> SyftSuccess: + if len(resolved_state.delete_objs): + prompt_warning_message( + message=( + "Attempted to delete objects by syncing. " + "This is not currently supported, objects must be deleted manually." + ) + ) + items = resolved_state.create_objs + resolved_state.update_objs + + action_objects = [x for x in items if isinstance(x, ActionObject)] + for action_object in action_objects: + action_object.reload_cache() + # NOTE permissions are added separately server side + action_object._send(self.id, self.verify_key, add_storage_permission=False) + action_object._clear_cache() + + ignored_batches = resolved_state.ignored_batches + + res = self.api.services.sync.sync_items( + items, + resolved_state.new_permissions, + resolved_state.new_storage_permissions, + ignored_batches, + unignored_batches=resolved_state.unignored_batches, + ) + self._fetch_api(self.credentials) + return res + + def upload_files( + self, + file_list: BlobFile | list[BlobFile] | str | list[str] | Path | list[Path], + allow_recursive: bool = False, + show_files: bool = False, + ) -> SyftSuccess: + if not file_list: + raise SyftException(public_message="No files to upload") + + if not isinstance(file_list, list): + file_list = [file_list] # type: ignore[assignment] + file_list = cast(list, file_list) + + expanded_file_list: list[BlobFile | Path] = [] + + for file in file_list: + if isinstance(file, BlobFile): + expanded_file_list.append(file) + continue + + path = Path(file) + + if re.search(r"[\*\?\[]", str(path)): + expanded_file_list.extend(_get_files_from_glob(str(path))) + elif path.is_dir(): + if not allow_recursive and _contains_subdir(path): + res = input( + f"Do you want to include all files recursively in {path.absolute()}? 
[y/n]: " + ).lower() + print( + f'{"Recursively uploading all files" if res == "y" else "Uploading files"} in {path.absolute()}' + ) + allow_recursive = res == "y" + expanded_file_list.extend(_get_files_from_dir(path, allow_recursive)) + elif path.exists(): + expanded_file_list.append(path) + + if not expanded_file_list: + raise SyftException(public_message="No files to upload were found") + + print( + f"Uploading {len(expanded_file_list)} {'file' if len(expanded_file_list) == 1 else 'files'}:" + ) + + if show_files: + for file in expanded_file_list: + if isinstance(file, BlobFile): + print(file.path or file.file_name) + else: + print(file.absolute()) + + try: + result = [] + for file in expanded_file_list: + if not isinstance(file, BlobFile): + file = BlobFile(path=file, file_name=file.name) + print("Uploading", file.file_name) + if not file.uploaded: + file.upload_to_blobstorage(self) + result.append(file) + + return ActionObject.from_obj(result).send(self) + except Exception as err: + raise SyftException( + public_message=f"Failed to upload files: {err}.\n{traceback.format_exc()}" + ) + + def connect_to_gateway( + self, + via_client: SyftClient | None = None, + url: str | None = None, + port: int | None = None, + handle: ServerHandle | None = None, # noqa: F821 + email: str | None = None, + password: str | None = None, + protocol: str | SyftProtocol = SyftProtocol.HTTP, + reverse_tunnel: bool = False, + ) -> SyftSuccess | None: + if isinstance(protocol, str): + protocol = SyftProtocol(protocol) + + if via_client is not None: + client = via_client + elif handle is not None: + client = handle.client + else: + client = ( + login_as_guest(url=url, port=port) + if email is None + else login(url=url, port=port, email=email, password=password) + ) + + res = self.exchange_route( + client, + protocol=protocol, + reverse_tunnel=reverse_tunnel, + ) + if isinstance(res.value, Request): + return res.value + else: + if self.metadata: + return SyftSuccess( + message=( + f"Connected {self.metadata.server_type} " + f"'{self.metadata.name}' to gateway '{client.name}'. 
" + f"{res.message}" + ) + ) + else: + return SyftSuccess(message=f"Connected to '{client.name}' gateway") + + def _get_service_by_name_if_exists(self, name: str) -> APIModule | None: + if self.api.has_service(name): + return getattr(self.api.services, name) + return None + + def set_server_side_type_dangerous(self, server_side_type: str) -> SyftSuccess: + return self.api.services.settings.set_server_side_type_dangerous( + server_side_type + ) + + @property + def data_subject_registry(self) -> APIModule | None: + return self._get_service_by_name_if_exists("data_subject") + + @property + def code(self) -> APIModule | None: + return self._get_service_by_name_if_exists("code") + + @property + def worker(self) -> APIModule | None: + return self._get_service_by_name_if_exists("worker") + + @property + def requests(self) -> APIModule | None: + return self._get_service_by_name_if_exists("request") + + @property + def datasets(self) -> APIModule | None: + return self._get_service_by_name_if_exists("dataset") + + @property + def projects(self) -> APIModule | None: + return self._get_service_by_name_if_exists("project") + + @property + def code_history_service(self) -> APIModule | None: + return self._get_service_by_name_if_exists("code_history") + + @property + def code_history(self) -> CodeHistoriesDict: + return self.api.services.code_history.get_history() + + @property + def code_histories(self) -> UsersCodeHistoriesDict: + return self.api.services.code_history.get_histories() + + @property + def images(self) -> APIModule | None: + return self._get_service_by_name_if_exists("worker_image") + + @property + def worker_pools(self) -> APIModule | None: + return self._get_service_by_name_if_exists("worker_pool") + + @property + def worker_images(self) -> APIModule | None: + return self._get_service_by_name_if_exists("worker_images") + + @property + def sync(self) -> APIModule | None: + return self._get_service_by_name_if_exists("sync") + + @property + def code_status(self) -> APIModule | None: + return self._get_service_by_name_if_exists("code_status") + + @property + def output(self) -> APIModule | None: + return self._get_service_by_name_if_exists("output") + + @property + def migration(self) -> APIModule | None: + return self._get_service_by_name_if_exists("migration") + + def get_migration_data(self, include_blobs: bool = True) -> MigrationData: + res = self.api.services.migration.get_migration_data() + if include_blobs: + res.download_blobs() + + return res + + def load_migration_data( + self, + path_or_data: str | Path | MigrationData, + include_worker_pools: bool = False, + with_reset_db: bool = False, + ) -> SyftSuccess: + if isinstance(path_or_data, MigrationData): + migration_data = path_or_data + else: + migration_data = MigrationData.from_file(path_or_data) + + migration_data._set_obj_location_(self.id, self.verify_key) + + if self.id != migration_data.server_uid: + raise SyftException( + public_message=f"This Migration data is not for this server. Expected server id {self.id}, " + f"got {migration_data.server_uid}" + ) + + if migration_data.signing_key.verify_key != self.verify_key: + raise SyftException( + public_message="Root verify key in migration data does not match this client's verify key" + ) + + if migration_data.includes_custom_workerpools and not include_worker_pools: + prompt_warning_message( + "This migration data includes custom workers, " + "which need to be migrated separately with `sy.upgrade_custom_workerpools` " + "after finishing the migration." 
+ ) + + migration_data.migrate_and_upload_blobs() + migration_data = migration_data.copy_without_blobs() + + if not include_worker_pools: + migration_data = migration_data.copy_without_workerpools() + + if with_reset_db: + return self.api.services.migration.reset_and_restore(migration_data) + else: + return self.api.services.migration.apply_migration_data(migration_data) + + def dump_state(self, path: str | Path) -> None: + if isinstance(path, str): + path = Path(path) + path.mkdir(exist_ok=True) + blob_path = path / "migration.blob" + yaml_path = path / "migration.yaml" + config_path = path / "config.json" + + migration_data = self.get_migration_data(include_blobs=True) + migration_data.save(blob_path, yaml_path=yaml_path) + server_config = self.api.services.settings.get_server_config() + with open(config_path, "w") as fp: + json.dump(server_config, fp) + + def get_project( + self, + name: str | None = None, + uid: UID | None = None, + ) -> Project | None: + """Get project by name or UID""" + + if not self.api.has_service("project"): + return None + + if name: + return self.api.services.project.get_by_name(name) + + elif uid: + return self.api.services.project.get_by_uid(uid) + + return self.api.services.project.get_all() + + def _repr_html_(self) -> str: + obj = self.api.services.settings.welcome_show() + updated_template_str = Template(obj.text).safe_substitute( + server_url=getattr(self.connection, "url", None) + ) + # If it's a markdown structured file + if not isinstance(obj, HTMLObject): + return markdown.markdown(updated_template_str) + + # if it's a html string + return updated_template_str diff --git a/packages/syft/src/syft/client/deploy.py b/packages/syft/src/syft/client/deploy.py deleted file mode 100644 index bd19895ced5..00000000000 --- a/packages/syft/src/syft/client/deploy.py +++ /dev/null @@ -1,33 +0,0 @@ -# stdlib -from typing import Any - -# relative -from ..service.response import SyftError - - -class InstallOrchestra: - def launch(self, *args: Any, **kwargs: Any) -> None: - return self.error() - - def error(self) -> Any: - message = "Please install hagrid with `pip install -U hagrid`" - return SyftError(message=message) - - def _repr_html_(self) -> str: - return self.error()._repr_html_() - - -def import_orchestra() -> Any: - try: - # third party - from hagrid import Orchestra - - return Orchestra - - except Exception as e: # nosec - print(e) - pass - return InstallOrchestra() - - -Orchestra = import_orchestra() diff --git a/packages/syft/src/syft/client/domain_client.py b/packages/syft/src/syft/client/domain_client.py deleted file mode 100644 index cd25acf7150..00000000000 --- a/packages/syft/src/syft/client/domain_client.py +++ /dev/null @@ -1,531 +0,0 @@ -# future -from __future__ import annotations - -# stdlib -from pathlib import Path -import re -from typing import TYPE_CHECKING -from typing import cast - -# third party -from hagrid.orchestra import NodeHandle -from loguru import logger -from tqdm import tqdm - -# relative -from ..abstract_node import NodeSideType -from ..img.base64 import base64read -from ..serde.serializable import serializable -from ..service.action.action_object import ActionObject -from ..service.code_history.code_history import CodeHistoriesDict -from ..service.code_history.code_history import UsersCodeHistoriesDict -from ..service.dataset.dataset import Contributor -from ..service.dataset.dataset import CreateAsset -from ..service.dataset.dataset import CreateDataset -from ..service.response import SyftError -from ..service.response import 
SyftSuccess -from ..service.sync.diff_state import ResolvedSyncState -from ..service.sync.sync_state import SyncState -from ..service.user.roles import Roles -from ..service.user.user import UserView -from ..service.user.user_roles import ServiceRole -from ..types.blob_storage import BlobFile -from ..types.uid import UID -from ..util.fonts import fonts_css -from ..util.util import get_mb_size -from ..util.util import prompt_warning_message -from .api import APIModule -from .client import SyftClient -from .client import login -from .client import login_as_guest -from .connection import NodeConnection -from .protocol import SyftProtocol - -if TYPE_CHECKING: - # relative - from ..service.project.project import Project - - -def _get_files_from_glob(glob_path: str) -> list[Path]: - files = Path().glob(glob_path) - return [f for f in files if f.is_file() and not f.name.startswith(".")] - - -def _get_files_from_dir(dir: Path, recursive: bool) -> list: - files = dir.rglob("*") if recursive else dir.iterdir() - return [f for f in files if not f.name.startswith(".") and f.is_file()] - - -def _contains_subdir(dir: Path) -> bool: - for item in dir.iterdir(): - if item.is_dir(): - return True - return False - - -def add_default_uploader( - user: UserView, obj: CreateDataset | CreateAsset -) -> CreateDataset | CreateAsset: - uploader = None - for contributor in obj.contributors: - if contributor.role == str(Roles.UPLOADER): - uploader = contributor - break - - if uploader is None: - uploader = Contributor( - role=str(Roles.UPLOADER), - name=user.name, - email=user.email, - ) - obj.contributors.add(uploader) - - obj.uploader = uploader - return obj - - -@serializable() -class DomainClient(SyftClient): - def __repr__(self) -> str: - return f"" - - def upload_dataset(self, dataset: CreateDataset) -> SyftSuccess | SyftError: - # relative - from ..types.twin_object import TwinObject - - if self.users is None: - return SyftError(f"can't get user service for {self}") - - user = self.users.get_current_user() - dataset = add_default_uploader(user, dataset) - for i in range(len(dataset.asset_list)): - asset = dataset.asset_list[i] - dataset.asset_list[i] = add_default_uploader(user, asset) - - dataset._check_asset_must_contain_mock() - dataset_size: float = 0.0 - - # TODO: Refactor so that object can also be passed to generate warnings - - self.api.connection = cast(NodeConnection, self.api.connection) - - metadata = self.api.connection.get_node_metadata(self.api.signing_key) - - if ( - metadata.show_warnings - and metadata.node_side_type == NodeSideType.HIGH_SIDE.value - ): - message = ( - "You're approving a request on " - f"{metadata.node_side_type} side {metadata.node_type} " - "which may host datasets with private information." - ) - prompt_warning_message(message=message, confirm=True) - - for asset in tqdm(dataset.asset_list): - print(f"Uploading: {asset.name}") - try: - twin = TwinObject( - private_obj=asset.data, - mock_obj=asset.mock, - syft_node_location=self.id, - syft_client_verify_key=self.verify_key, - ) - twin._save_to_blob_storage() - except Exception as e: - return SyftError(message=f"Failed to create twin. 
{e}") - response = self.api.services.action.set(twin) - if isinstance(response, SyftError): - print(f"Failed to upload asset\n: {asset}") - return response - asset.action_id = twin.id - asset.node_uid = self.id - dataset_size += get_mb_size(asset.data) - dataset.mb_size = dataset_size - valid = dataset.check() - if isinstance(valid, SyftError): - return valid - return self.api.services.dataset.add(dataset=dataset) - - # def get_permissions_for_other_node( - # self, - # items: list[Union[ActionObject, SyftObject]], - # ) -> dict: - # if len(items) > 0: - # if not len({i.syft_node_location for i in items}) == 1 or ( - # not len({i.syft_client_verify_key for i in items}) == 1 - # ): - # raise ValueError("permissions from different nodes") - # item = items[0] - # api = APIRegistry.api_for( - # item.syft_node_location, item.syft_client_verify_key - # ) - # if api is None: - # raise ValueError( - # f"Can't access the api. Please log in to {item.syft_node_location}" - # ) - # return api.services.sync.get_permissions(items) - # else: - # return {} - - def get_sync_state(self) -> SyncState | SyftError: - state: SyncState = self.api.services.sync._get_state() - for uid, obj in state.objects.items(): - if isinstance(obj, ActionObject): - state.objects[uid] = obj.refresh_object() - return state - - def apply_state(self, resolved_state: ResolvedSyncState) -> SyftSuccess | SyftError: - if len(resolved_state.delete_objs): - raise NotImplementedError("TODO implement delete") - items = resolved_state.create_objs + resolved_state.update_objs - - action_objects = [x for x in items if isinstance(x, ActionObject)] - # permissions = self.get_permissions_for_other_node(items) - - permissions: dict[UID, set[str]] = {} - for p in resolved_state.new_permissions: - if p.uid in permissions: - permissions[p.uid].add(p.permission_string) - else: - permissions[p.uid] = {p.permission_string} - - storage_permissions: dict[UID, set[UID]] = {} - for sp in resolved_state.new_storage_permissions: - if sp.uid in storage_permissions: - storage_permissions[sp.uid].add(sp.node_uid) - else: - storage_permissions[sp.uid] = {sp.node_uid} - - for action_object in action_objects: - # NOTE permissions are added separately server side - action_object._send(self, add_storage_permission=False) - - res = self.api.services.sync.sync_items( - items, - permissions, - storage_permissions, - ) - if isinstance(res, SyftError): - return res - - # Add updated node state to store to have a previous_state for next sync - new_state = self.api.services.sync._get_state(add_to_store=True) - if isinstance(new_state, SyftError): - return new_state - - self._fetch_api(self.credentials) - return res - - def upload_files( - self, - file_list: BlobFile | list[BlobFile] | str | list[str] | Path | list[Path], - allow_recursive: bool = False, - show_files: bool = False, - ) -> SyftSuccess | SyftError: - if not file_list: - return SyftError(message="No files to upload") - - if not isinstance(file_list, list): - file_list = [file_list] # type: ignore[assignment] - file_list = cast(list, file_list) - - expanded_file_list: list[BlobFile | Path] = [] - - for file in file_list: - if isinstance(file, BlobFile): - expanded_file_list.append(file) - continue - - path = Path(file) - - if re.search(r"[\*\?\[]", str(path)): - expanded_file_list.extend(_get_files_from_glob(str(path))) - elif path.is_dir(): - if not allow_recursive and _contains_subdir(path): - res = input( - f"Do you want to include all files recursively in {path.absolute()}? 
[y/n]: " - ).lower() - print( - f'{"Recursively uploading all files" if res == "y" else "Uploading files"} in {path.absolute()}' - ) - allow_recursive = res == "y" - expanded_file_list.extend(_get_files_from_dir(path, allow_recursive)) - elif path.exists(): - expanded_file_list.append(path) - - if not expanded_file_list: - return SyftError(message="No files to upload were found") - - print( - f"Uploading {len(expanded_file_list)} {'file' if len(expanded_file_list) == 1 else 'files'}:" - ) - - if show_files: - for file in expanded_file_list: - if isinstance(file, BlobFile): - print(file.path or file.file_name) - else: - print(file.absolute()) - - try: - result = [] - for file in expanded_file_list: - if not isinstance(file, BlobFile): - file = BlobFile(path=file, file_name=file.name) - print("Uploading", file.file_name) - if not file.uploaded: - file.upload_to_blobstorage(self) - result.append(file) - - return ActionObject.from_obj(result).send(self) - except Exception as err: - logger.debug("upload_files: Error creating action_object: {}", err) - return SyftError(message=f"Failed to upload files: {err}") - - def connect_to_gateway( - self, - via_client: SyftClient | None = None, - url: str | None = None, - port: int | None = None, - handle: NodeHandle | None = None, # noqa: F821 - email: str | None = None, - password: str | None = None, - protocol: str | SyftProtocol = SyftProtocol.HTTP, - ) -> SyftSuccess | SyftError | None: - if isinstance(protocol, str): - protocol = SyftProtocol(protocol) - - if via_client is not None: - client = via_client - elif handle is not None: - client = handle.client - else: - client = ( - login_as_guest(url=url, port=port) - if email is None - else login(url=url, port=port, email=email, password=password) - ) - if isinstance(client, SyftError): - return client - - res = self.exchange_route(client, protocol=protocol) - if isinstance(res, SyftSuccess): - if self.metadata: - return SyftSuccess( - message=f"Connected {self.metadata.node_type} to {client.name} gateway" - ) - else: - return SyftSuccess(message=f"Connected to {client.name} gateway") - return res - - @property - def data_subject_registry(self) -> APIModule | None: - if self.api.has_service("data_subject"): - return self.api.services.data_subject - return None - - @property - def code(self) -> APIModule | None: - # if self.api.refresh_api_callback is not None: - # self.api.refresh_api_callback() - if self.api.has_service("code"): - return self.api.services.code - return None - - @property - def worker(self) -> APIModule | None: - if self.api.has_service("worker"): - return self.api.services.worker - return None - - @property - def requests(self) -> APIModule | None: - if self.api.has_service("request"): - return self.api.services.request - return None - - @property - def datasets(self) -> APIModule | None: - if self.api.has_service("dataset"): - return self.api.services.dataset - return None - - @property - def projects(self) -> APIModule | None: - if self.api.has_service("project"): - return self.api.services.project - return None - - @property - def code_history_service(self) -> APIModule | None: - if self.api is not None and self.api.has_service("code_history"): - return self.api.services.code_history - return None - - @property - def code_history(self) -> CodeHistoriesDict: - return self.api.services.code_history.get_history() - - @property - def code_histories(self) -> UsersCodeHistoriesDict: - return self.api.services.code_history.get_histories() - - @property - def images(self) -> APIModule | 
None: - if self.api.has_service("worker_image"): - return self.api.services.worker_image - return None - - @property - def worker_pools(self) -> APIModule | None: - if self.api.has_service("worker_pool"): - return self.api.services.worker_pool - return None - - @property - def worker_images(self) -> APIModule | None: - if self.api.has_service("worker_image"): - return self.api.services.worker_image - return None - - @property - def sync(self) -> APIModule | None: - if self.api.has_service("sync"): - return self.api.services.sync - return None - - @property - def code_status(self) -> APIModule | None: - if self.api.has_service("code_status"): - return self.api.services.code_status - return None - - @property - def output(self) -> APIModule | None: - if self.api.has_service("output"): - return self.api.services.output - return None - - def get_project( - self, - name: str | None = None, - uid: UID | None = None, - ) -> Project | None: - """Get project by name or UID""" - - if not self.api.has_service("project"): - return None - - if name: - return self.api.services.project.get_by_name(name) - - elif uid: - return self.api.services.project.get_by_uid(uid) - - return self.api.services.project.get_all() - - def _repr_html_(self) -> str: - guest_commands = """ -
  • <your_client>.datasets - list datasets
  • <your_client>.code - list code
  • <your_client>.login - list projects
  • <your_client>.code.submit? - display function signature
  • """ - ds_commands = """ -
  • <your_client>.datasets - list datasets
  • <your_client>.code - list code
  • <your_client>.projects - list projects
  • <your_client>.code.submit? - display function signature
  • """ - - do_commands = """ -
  • <your_client>.projects - list projects
  • <your_client>.requests - list requests
  • <your_client>.users - list users
  • <your_client>.requests.submit? - display function signature
  • """ - - # TODO: how to select ds/do commands based on self.__user_role - - if ( - self.user_role.value == ServiceRole.NONE.value - or self.user_role.value == ServiceRole.GUEST.value - ): - commands = guest_commands - elif ( - self.user_role is not None - and self.user_role.value == ServiceRole.DATA_SCIENTIST.value - ): - commands = ds_commands - elif ( - self.user_role is not None - and self.user_role.value >= ServiceRole.DATA_OWNER.value - ): - commands = do_commands - - command_list = f""" -
      - {commands} -
    - """ - - small_grid_symbol_logo = base64read("small-grid-symbol-logo.png") - - url = getattr(self.connection, "url", None) - node_details = f"URL: {url}
    " if url else "" - if self.metadata is not None: - node_details += f"Node Type: {self.metadata.node_type.capitalize()}
    " - node_side_type = ( - "Low Side" - if self.metadata.node_side_type == NodeSideType.LOW_SIDE.value - else "High Side" - ) - node_details += f"Node Side Type: {node_side_type}
    " - node_details += ( - f"Syft Version: {self.metadata.syft_version}
    " - ) - - return f""" - -
    Logo
    Welcome to {self.name}
    {node_details}
    ⓘ This domain is run by the library PySyft to learn more about how it works visit github.com/OpenMined/PySyft.
    Commands to Get Started
    {command_list}
    - """ diff --git a/packages/syft/src/syft/client/enclave_client.py b/packages/syft/src/syft/client/enclave_client.py index dc9c7c15de1..8505e26203e 100644 --- a/packages/syft/src/syft/client/enclave_client.py +++ b/packages/syft/src/syft/client/enclave_client.py @@ -2,25 +2,18 @@ from __future__ import annotations # stdlib -from typing import Any from typing import TYPE_CHECKING -# third party -from hagrid.orchestra import NodeHandle - # relative -from ..abstract_node import NodeSideType -from ..client.api import APIRegistry -from ..img.base64 import base64read +from ..abstract_server import ServerSideType from ..serde.serializable import serializable -from ..service.metadata.node_metadata import NodeMetadataJSON -from ..service.network.routes import NodeRouteType -from ..service.response import SyftError +from ..service.metadata.server_metadata import ServerMetadataJSON +from ..service.network.routes import ServerRouteType from ..service.response import SyftSuccess -from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from ..types.syft_object import SyftObject -from ..types.uid import UID -from ..util.fonts import fonts_css +from ..util.assets import load_png_base64 +from ..util.notebook_ui.styles import FONT_CSS from .api import APIModule from .client import SyftClient from .client import login @@ -29,18 +22,18 @@ if TYPE_CHECKING: # relative - from ..service.code.user_code import SubmitUserCode + from ..orchestra import ServerHandle @serializable() class EnclaveMetadata(SyftObject): __canonical_name__ = "EnclaveMetadata" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 - route: NodeRouteType + route: ServerRouteType -@serializable() +@serializable(canonical_name="EnclaveClient", version=1) class EnclaveClient(SyftClient): # TODO: add widget repr for enclave client @@ -70,11 +63,11 @@ def connect_to_gateway( via_client: SyftClient | None = None, url: str | None = None, port: int | None = None, - handle: NodeHandle | None = None, # noqa: F821 + handle: ServerHandle | None = None, # noqa: F821 email: str | None = None, password: str | None = None, protocol: str | SyftProtocol = SyftProtocol.HTTP, - ) -> SyftSuccess | SyftError | None: + ) -> SyftSuccess | None: if isinstance(protocol, str): protocol = SyftProtocol(protocol) @@ -88,70 +81,23 @@ def connect_to_gateway( if email is None else login(url=url, port=port, email=email, password=password) ) - if isinstance(client, SyftError): - return client - self.metadata: NodeMetadataJSON = self.metadata + self.metadata: ServerMetadataJSON = self.metadata res = self.exchange_route(client, protocol=protocol) - - if isinstance(res, SyftSuccess): + if self.metadata: return SyftSuccess( - message=f"Connected {self.metadata.node_type} to {client.name} gateway" + message=( + f"Connected {self.metadata.server_type} " + f"'{self.metadata.name}' to gateway '{client.name}'. 
" + f"{res.message}" + ) ) - - return res + else: + return SyftSuccess(message=f"Connected to '{client.name}' gateway") def get_enclave_metadata(self) -> EnclaveMetadata: return EnclaveMetadata(route=self.connection.route) - def request_code_execution(self, code: SubmitUserCode) -> Any | SyftError: - # relative - from ..service.code.user_code_service import SubmitUserCode - - if not isinstance(code, SubmitUserCode): - raise Exception( - f"The input code should be of type: {SubmitUserCode} got:{type(code)}" - ) - if code.input_policy_init_kwargs is None: - raise ValueError(f"code {code}'s input_policy_init_kwargs is None") - - enclave_metadata = self.get_enclave_metadata() - - code_id = UID() - code.id = code_id - code.enclave_metadata = enclave_metadata - - apis = [] - for k, v in code.input_policy_init_kwargs.items(): - # We would need the verify key of the data scientist to be able to index the correct client - # Since we do not want the data scientist to pass in the clients to the enclave client - # from a UX perspecitve. - # we will use the recent node id to find the correct client - # assuming that it is the correct client - # Warning: This could lead to inconsistent results, when we have multiple clients - # in the same node pointing to the same node. - # One way, by which we could solve this in the long term, - # by forcing the user to pass only assets to the sy.ExactMatch, - # by which we could extract the verify key of the data scientist - # as each object comes with a verify key and node_uid - # the asset object would contain the verify key of the data scientist. - api = APIRegistry.get_by_recent_node_uid(k.node_id) - if api is None: - raise ValueError(f"could not find client for input {v}") - else: - apis += [api] - - for api in apis: - res = api.services.code.request_code_execution(code=code) - if isinstance(res, SyftError): - return res - - # we are using the real method here, see the .code property getter - _ = self.code - res = self._request_code_execution(code=code) - - return res - def _repr_html_(self) -> str: commands = """
  • <your_client> @@ -164,25 +110,27 @@ def _repr_html_(self) -> str: """ - small_grid_symbol_logo = base64read("small-grid-symbol-logo.png") + small_server_symbol_logo = load_png_base64("small-syft-symbol-logo.png") url = getattr(self.connection, "url", None) - node_details = f"URL: {url}
    " if url else "" + server_details = f"URL: {url}
    " if url else "" if self.metadata: - node_details += f"Node Type: {self.metadata.node_type.capitalize()}
    " - node_side_type = ( + server_details += f"Server Type: {self.metadata.server_type.capitalize()}
    " + server_side_type = ( "Low Side" - if self.metadata.node_side_type == NodeSideType.LOW_SIDE.value + if self.metadata.server_side_type == ServerSideType.LOW_SIDE.value else "High Side" ) - node_details += f"Node Side Type: {node_side_type}
    " - node_details += ( + server_details += ( + f"Server Side Type: {server_side_type}
    " + ) + server_details += ( f"Syft Version: {self.metadata.syft_version}
    " ) return f"""
    - Logo

    Welcome to {self.name}

    - {node_details} + {server_details}
    ⓘ  - This node is run by the library PySyft to learn more about how it works visit + This server is run by the library PySyft to learn more about how it works visit github.com/OpenMined/PySyft.

    Commands to Get Started
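Illustrative usage sketch for the reworked connect_to_gateway() in the enclave_client.py hunk above (not part of the diff). The URL, ports, and credentials are hypothetical placeholders, and it assumes the datasite client keeps the same keyword arguments as the enclave client signature shown here.

import syft as sy

# Guest session against a running gateway (placeholder port).
gateway = sy.login_as_guest(url="http://localhost", port=8081)

# Admin session on the server being registered (placeholder credentials).
client = sy.login(
    url="http://localhost",
    port=8080,
    email="info@openmined.org",
    password="changethis",
)

# Exchange routes with the gateway; on success a SyftSuccess is returned whose
# message names the connected server and the gateway (see the f-string above).
result = client.connect_to_gateway(via_client=gateway)
print(result.message)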

    diff --git a/packages/syft/src/syft/client/errors.py b/packages/syft/src/syft/client/errors.py new file mode 100644 index 00000000000..197bd6c04b8 --- /dev/null +++ b/packages/syft/src/syft/client/errors.py @@ -0,0 +1,9 @@ +# relative +from ..types.errors import SyftException + + +class SyftClientError(SyftException): + public_message = "Unknown client error." + + +class SyftAPINotFoundException(SyftClientError): ... diff --git a/packages/syft/src/syft/client/gateway_client.py b/packages/syft/src/syft/client/gateway_client.py index aa37fd19387..e5b09f2bfa1 100644 --- a/packages/syft/src/syft/client/gateway_client.py +++ b/packages/syft/src/syft/client/gateway_client.py @@ -2,38 +2,41 @@ from typing import Any # relative -from ..abstract_node import NodeSideType -from ..abstract_node import NodeType -from ..img.base64 import base64read -from ..node.credentials import SyftSigningKey +from ..abstract_server import ServerSideType +from ..abstract_server import ServerType from ..serde.serializable import serializable -from ..service.network.node_peer import NodePeer -from ..service.response import SyftError -from ..service.response import SyftException -from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..server.credentials import SyftSigningKey +from ..service.metadata.server_metadata import ServerMetadataJSON +from ..service.network.server_peer import ServerPeer +from ..types.errors import SyftException +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from ..types.syft_object import SyftObject -from ..util.fonts import fonts_css +from ..util.assets import load_png_base64 +from ..util.notebook_ui.styles import FONT_CSS from .client import SyftClient +from .connection import ServerConnection -@serializable() +@serializable(canonical_name="GatewayClient", version=1) class GatewayClient(SyftClient): # TODO: add widget repr for gateway client def proxy_to(self, peer: Any) -> SyftClient: # relative - from .domain_client import DomainClient + from .datasite_client import DatasiteClient from .enclave_client import EnclaveClient - connection = self.connection.with_proxy(peer.id) - metadata = connection.get_node_metadata(credentials=SyftSigningKey.generate()) - if metadata.node_type == NodeType.DOMAIN.value: - client_type: type[SyftClient] = DomainClient - elif metadata.node_type == NodeType.ENCLAVE.value: + connection: type[ServerConnection] = self.connection.with_proxy(peer.id) + metadata: ServerMetadataJSON = connection.get_server_metadata( + credentials=SyftSigningKey.generate() + ) + if metadata.server_type == ServerType.DATASITE.value: + client_type: type[SyftClient] = DatasiteClient + elif metadata.server_type == ServerType.ENCLAVE.value: client_type = EnclaveClient else: raise SyftException( - f"Unknown node type {metadata.node_type} to create proxy client" + public_message=f"Unknown server type {metadata.server_type} to create proxy client" ) client = client_type( @@ -53,30 +56,30 @@ def proxy_client_for( if self.api.has_service("network"): peer = self.api.services.network.get_peer_by_name(name=name) if peer is None: - return SyftError(message=f"No domain with name {name}") + raise SyftException(public_message=f"No datasite with name {name}") res = self.proxy_to(peer) if email and password: res = res.login(email=email, password=password, **kwargs) return res @property - def peers(self) -> list[NodePeer] | SyftError | None: + def peers(self) -> list[ServerPeer] | None: return ProxyClient(routing_client=self) @property - def domains(self) -> list[NodePeer] | SyftError | None: 
- return ProxyClient(routing_client=self, node_type=NodeType.DOMAIN) + def datasites(self) -> list[ServerPeer] | None: + return ProxyClient(routing_client=self, server_type=ServerType.DATASITE) @property - def enclaves(self) -> list[NodePeer] | SyftError | None: - return ProxyClient(routing_client=self, node_type=NodeType.ENCLAVE) + def enclaves(self) -> list[ServerPeer] | None: + return ProxyClient(routing_client=self, server_type=ServerType.ENCLAVE) def _repr_html_(self) -> str: commands = """
  • <your_client> - .domains - list domains connected to this gateway
  • + .datasites - list datasites connected to this gateway
  • <your_client> - .proxy_client_for - get a connection to a listed domain
  • + .proxy_client_for - get a connection to a listed datasite
  • <your_client> .login - log into the gateway
  • """ @@ -87,25 +90,27 @@ def _repr_html_(self) -> str: """ - small_grid_symbol_logo = base64read("small-grid-symbol-logo.png") + small_server_symbol_logo = load_png_base64("small-syft-symbol-logo.png") url = getattr(self.connection, "url", None) - node_details = f"URL: {url}
    " if url else "" + server_details = f"URL: {url}
    " if url else "" if self.metadata: - node_details += f"Node Type: {self.metadata.node_type.capitalize()}
    " - node_side_type = ( + server_details += f"Server Type: {self.metadata.server_type.capitalize()}
    " + server_side_type = ( "Low Side" - if self.metadata.node_side_type == NodeSideType.LOW_SIDE.value + if self.metadata.server_side_type == ServerSideType.LOW_SIDE.value else "High Side" ) - node_details += f"Node Side Type: {node_side_type}
    " - node_details += ( + server_details += ( + f"Server Side Type: {server_side_type}
    " + ) + server_details += ( f"Syft Version: {self.metadata.syft_version}
    " ) return f"""
    - Logo

    Welcome to {self.name}

    - {node_details} + {server_details}
    ⓘ  - This node is run by the library PySyft to learn more about how it works visit + This server is run by the library PySyft to learn more about how it works visit github.com/OpenMined/PySyft.

    Commands to Get Started
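Illustrative sketch of the datasite-proxy helpers renamed above (not part of the diff). The gateway address and datasite name are hypothetical placeholders, and it assumes the guest login resolves to the GatewayClient defined in this file.

import syft as sy

# Guest session against a gateway (placeholder address).
gateway = sy.login_as_guest(url="http://localhost", port=8081)

# ProxyClient enumerating the datasites registered with this gateway.
datasites = gateway.datasites
print(len(datasites))

# Index into the listing for a proxied SyftClient to one peer...
first_client = datasites[0]

# ...or proxy by name, optionally logging in on the proxied datasite.
named_client = gateway.proxy_client_for(name="my-datasite")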

    @@ -147,37 +152,39 @@ def _repr_html_(self) -> str: class ProxyClient(SyftObject): __canonical_name__ = "ProxyClient" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 routing_client: GatewayClient - node_type: NodeType | None = None + server_type: ServerType | None = None - def retrieve_nodes(self) -> list[NodePeer]: - if self.node_type in [NodeType.DOMAIN, NodeType.ENCLAVE]: + def retrieve_servers(self) -> list[ServerPeer]: + if self.server_type in [ServerType.DATASITE, ServerType.ENCLAVE]: return self.routing_client.api.services.network.get_peers_by_type( - node_type=self.node_type + server_type=self.server_type ) - elif self.node_type is None: - # if node type is None, return all nodes + elif self.server_type is None: + # if server type is None, return all servers return self.routing_client.api.services.network.get_all_peers() else: raise SyftException( - f"Unknown node type {self.node_type} to retrieve proxy client" + public_message=f"Unknown server type {self.server_type} to retrieve proxy client" ) def _repr_html_(self) -> str: - return self.retrieve_nodes()._repr_html_() + return self.retrieve_servers()._repr_html_() def __len__(self) -> int: - return len(self.retrieve_nodes()) + return len(self.retrieve_servers()) def __getitem__(self, key: int | str) -> SyftClient: if not isinstance(key, int): - raise SyftException(f"Key: {key} must be an integer") + raise SyftException(public_message=f"Key: {key} must be an integer") - nodes = self.retrieve_nodes() + servers = self.retrieve_servers() - if key >= len(nodes): - raise SyftException(f"Index {key} out of range for retrieved nodes") + if key >= len(servers): + raise SyftException( + public_message=f"Index {key} out of range for retrieved servers" + ) - return self.routing_client.proxy_to(nodes[key]) + return self.routing_client.proxy_to(servers[key]) diff --git a/packages/syft/src/syft/client/protocol.py b/packages/syft/src/syft/client/protocol.py index e969d59ca5d..ffb23c5880e 100644 --- a/packages/syft/src/syft/client/protocol.py +++ b/packages/syft/src/syft/client/protocol.py @@ -6,7 +6,6 @@ class SyftProtocol(Enum): """Enum class to represent the different Syft protocols.""" HTTP = "http" - VEILID = "veilid" def all(self) -> list: return [p.value for p in SyftProtocol] diff --git a/packages/syft/src/syft/client/registry.py b/packages/syft/src/syft/client/registry.py index 52100e64831..e793f57be11 100644 --- a/packages/syft/src/syft/client/registry.py +++ b/packages/syft/src/syft/client/registry.py @@ -3,6 +3,9 @@ # stdlib from concurrent import futures +import json +import logging +import os from typing import Any # third party @@ -10,32 +13,66 @@ import requests # relative -from ..service.metadata.node_metadata import NodeMetadataJSON -from ..service.network.network_service import NodePeer -from ..service.response import SyftException -from ..types.grid_url import GridURL +from ..service.metadata.server_metadata import ServerMetadataJSON +from ..service.network.server_peer import ServerPeer +from ..service.network.server_peer import ServerPeerConnectionStatus +from ..types.errors import SyftException +from ..types.server_url import ServerURL +from ..types.syft_object import SyftObject from ..util.constants import DEFAULT_TIMEOUT -from ..util.logger import error -from ..util.logger import warning from .client import SyftClient as Client +logger = logging.getLogger(__name__) NETWORK_REGISTRY_URL = ( "https://raw.githubusercontent.com/OpenMined/NetworkRegistry/main/gateways.json" ) + 
NETWORK_REGISTRY_REPO = "https://github.com/OpenMined/NetworkRegistry" +DATASITE_REGISTRY_URL = ( + "https://raw.githubusercontent.com/OpenMined/NetworkRegistry/main/datasites.json" +) + + +def _get_all_networks(network_json: dict, version: str) -> list[dict]: + return network_json.get(version, {}).get("gateways", []) + class NetworkRegistry: def __init__(self) -> None: self.all_networks: list[dict] = [] + try: - response = requests.get(NETWORK_REGISTRY_URL) # nosec - network_json = response.json() - self.all_networks = network_json["2.0.0"]["gateways"] + network_json = self.load_network_registry_json() + self.all_networks = _get_all_networks( + network_json=network_json, version="2.0.0" + ) except Exception as e: - warning( - f"Failed to get Network Registry, go checkout: {NETWORK_REGISTRY_REPO}. {e}" + logger.warning( + f"Failed to get Network Registry, go checkout: {NETWORK_REGISTRY_REPO}. Exception: {e}" + ) + + @staticmethod + def load_network_registry_json() -> dict: + try: + # Get the environment variable + network_registry_json: str | None = os.getenv("NETWORK_REGISTRY_JSON") + # If the environment variable exists, use it + if network_registry_json is not None: + network_json: dict = json.loads(network_registry_json) + else: + # Load the network registry from the NETWORK_REGISTRY_URL + response = requests.get(NETWORK_REGISTRY_URL, timeout=30) # nosec + response.raise_for_status() # raise an exception if the HTTP request returns an error + network_json = response.json() + + return network_json + + except Exception as e: + logger.warning( + f"Failed to get Network Registry from {NETWORK_REGISTRY_REPO}. Exception: {e}" ) + return {} @property def online_networks(self) -> list[dict]: @@ -45,14 +82,14 @@ def check_network(network: dict) -> dict[Any, Any] | None: url = "http://" + network["host_or_ip"] + ":" + str(network["port"]) + "/" try: res = requests.get(url, timeout=DEFAULT_TIMEOUT) # nosec - online = "This is a PyGrid Network node." in res.text + online = "This is a Syft Gateway server." 
in res.text except Exception: online = False - # networks without frontend have a /ping route in 0.7.0 + # networks without frontend if not online: try: - ping_url = url + "ping" + ping_url = url + "api/v2/" res = requests.get(ping_url, timeout=DEFAULT_TIMEOUT) # nosec online = res.status_code == 200 except Exception: @@ -83,23 +120,46 @@ def check_network(network: dict) -> dict[Any, Any] | None: executor.map(lambda network: check_network(network), networks) ) - online_networks = [] - for each in _online_networks: - if each is not None: - online_networks.append(each) - return online_networks + return [network for network in _online_networks if network is not None] def _repr_html_(self) -> str: on = self.online_networks if len(on) == 0: - return "(no gateways online - try syft.gateways.all_gateways to see offline gateways)" - return pd.DataFrame(on)._repr_html_() # type: ignore + return "(no gateways online - try syft.gateways.all_networks to see offline gateways)" + df = pd.DataFrame(on) + total_df = pd.DataFrame( + [ + [ + f"{len(on)} / {len(self.all_networks)} (online networks / all networks)" + ] + + [""] * (len(df.columns) - 1) + ], + columns=df.columns, + index=["Total"], + ) + df = pd.concat([df, total_df]) + return df._repr_html_() # type: ignore def __repr__(self) -> str: on = self.online_networks if len(on) == 0: - return "(no gateways online - try syft.gateways.all_gateways to see offline gateways)" - return pd.DataFrame(on).to_string() + return "(no gateways online - try syft.gateways.all_networks to see offline gateways)" + df = pd.DataFrame(on) + total_df = pd.DataFrame( + [ + [ + f"{len(on)} / {len(self.all_networks)} (online networks / all networks)" + ] + + [""] * (len(df.columns) - 1) + ], + columns=df.columns, + index=["Total"], + ) + df = pd.concat([df, total_df]) + return df.to_string() + + def __len__(self) -> int: + return len(self.all_networks) @staticmethod def create_client(network: dict[str, Any]) -> Client: @@ -110,12 +170,11 @@ def create_client(network: dict[str, Any]) -> Client: port = int(network["port"]) protocol = network["protocol"] host_or_ip = network["host_or_ip"] - grid_url = GridURL(port=port, protocol=protocol, host_or_ip=host_or_ip) - client = connect(url=str(grid_url)) + server_url = ServerURL(port=port, protocol=protocol, host_or_ip=host_or_ip) + client = connect(url=str(server_url)) return client.guest() except Exception as e: - error(f"Failed to login with: {network}. {e}") - raise SyftException(f"Failed to login with: {network}. {e}") + raise SyftException(public_message=f"Failed to login with: {network}. 
{e}") def __getitem__(self, key: str | int) -> Client: if isinstance(key, int): @@ -128,19 +187,169 @@ def __getitem__(self, key: str | int) -> Client: raise KeyError(f"Invalid key: {key} for {on}") -class DomainRegistry: +class Datasite(SyftObject): + __canonical_name__ = "ServerMetadata" + # __version__ = SYFT_OBJECT_VERSION_1 + + name: str + host_or_ip: str + version: str + protocol: str + admin_email: str + website: str + slack: str + slack_channel: str + + __attr_searchable__ = [ + "name", + "host_or_ip", + "version", + "port", + "admin_email", + "website", + "slack", + "slack_channel", + "protocol", + ] + __attr_unique__ = [ + "name", + "host_or_ip", + "version", + "port", + "admin_email", + "website", + "slack", + "slack_channel", + "protocol", + ] + __repr_attrs__ = [ + "name", + "host_or_ip", + "version", + "port", + "admin_email", + "website", + "slack", + "slack_channel", + "protocol", + ] + __table_sort_attr__ = "name" + + +class DatasiteRegistry: + def __init__(self) -> None: + self.all_datasites: list[dict] = [] + try: + response = requests.get(DATASITE_REGISTRY_URL) # nosec + datasites_json = response.json() + self.all_datasites = datasites_json["datasites"] + except Exception as e: + logger.warning( + f"Failed to get Datasite Registry, go checkout: {DATASITE_REGISTRY_URL}. {e}" + ) + + @property + def online_datasites(self) -> list[dict]: + datasites = self.all_datasites + + def check_datasite(datasite: dict) -> dict[Any, Any] | None: + url = "http://" + datasite["host_or_ip"] + ":" + str(datasite["port"]) + "/" + try: + res = requests.get(url, timeout=DEFAULT_TIMEOUT) # nosec + if "status" in res.json(): + online = res.json()["status"] == "ok" + elif "detail" in res.json(): + online = True + except Exception: + online = False + if online: + version = datasite.get("version", None) + # Check if syft version was described in DatasiteRegistry + # If it's unknown, try to update it to an available version. 
+ if not version or version == "unknown": + # If not defined, try to ask in /syft/version endpoint (supported by 0.7.0) + try: + version_url = url + "api/v2/metadata" + res = requests.get(version_url, timeout=DEFAULT_TIMEOUT) # nosec + if res.status_code == 200: + datasite["version"] = res.json()["syft_version"] + else: + datasite["version"] = "unknown" + except Exception: + datasite["version"] = "unknown" + return datasite + return None + + # We can use a with statement to ensure threads are cleaned up promptly + with futures.ThreadPoolExecutor(max_workers=20) as executor: + # map + _online_datasites = list( + executor.map(lambda datasite: check_datasite(datasite), datasites) + ) + + online_datasites = [each for each in _online_datasites if each is not None] + return online_datasites + + def _repr_html_(self) -> str: + on = self.online_datasites + if len(on) == 0: + return "(no gateways online - try syft.gateways.all_networks to see offline gateways)" + + # df = pd.DataFrame(on) + print( + "Add your datasite to this list: https://github.com/OpenMined/NetworkRegistry/" + ) + # return df._repr_html_() # type: ignore + return ([Datasite(**ds) for ds in on])._repr_html_() + + @staticmethod + def create_client(datasite: dict[str, Any]) -> Client: + # relative + from .client import connect + + try: + port = int(datasite["port"]) + protocol = datasite["protocol"] + host_or_ip = datasite["host_or_ip"] + server_url = ServerURL(port=port, protocol=protocol, host_or_ip=host_or_ip) + client = connect(url=str(server_url)) + return client.guest() + except Exception as e: + raise SyftException(public_message=f"Failed to login with: {datasite}. {e}") + + def __getitem__(self, key: str | int) -> Client: + if isinstance(key, int): + return self.create_client(datasite=self.online_datasites[key]) + else: + on = self.online_datasites + for datasite in on: + if datasite["name"] == key: + return self.create_client(datasite=datasite) + raise KeyError(f"Invalid key: {key} for {on}") + + +class NetworksOfDatasitesRegistry: def __init__(self) -> None: self.all_networks: list[dict] = [] - self.all_domains: list = [] + self.all_datasites: dict[str, ServerPeer] = {} try: - response = requests.get(NETWORK_REGISTRY_URL) # nosec - network_json = response.json() - self.all_networks = network_json["2.0.0"]["gateways"] + network_json = NetworkRegistry.load_network_registry_json() + self.all_networks = _get_all_networks( + network_json=network_json, version="2.0.0" + ) + self._get_all_datasites() except Exception as e: - warning( + logger.warning( f"Failed to get Network Registry, go checkout: {NETWORK_REGISTRY_REPO}. {e}" ) + def _get_all_datasites(self) -> None: + for network in self.all_networks: + network_client = NetworkRegistry.create_client(network) + datasites: list[ServerPeer] = network_client.datasites.retrieve_servers() + for datasite in datasites: + self.all_datasites[str(datasite.id)] = datasite + @property def online_networks(self) -> list[dict]: networks = self.all_networks @@ -149,14 +358,14 @@ def check_network(network: dict) -> dict[Any, Any] | None: url = "http://" + network["host_or_ip"] + ":" + str(network["port"]) + "/" try: res = requests.get(url, timeout=DEFAULT_TIMEOUT) - online = "This is a PyGrid Network node." in res.text + online = "This is a Syft Gateway server." 
in res.text except Exception: online = False - # networks without frontend have a /ping route in 0.7.0 + # networks without frontend if not online: try: - ping_url = url + "ping" + ping_url = url + "api/v2/" res = requests.get(ping_url, timeout=DEFAULT_TIMEOUT) online = res.status_code == 200 except Exception: @@ -178,6 +387,7 @@ def check_network(network: dict) -> dict[Any, Any] | None: except Exception: network["version"] = "unknown" return network + return None # We can use a with statement to ensure threads are cleaned up promptly @@ -187,94 +397,103 @@ def check_network(network: dict) -> dict[Any, Any] | None: executor.map(lambda network: check_network(network), networks) ) - online_networks = [] - for each in _online_networks: - if each is not None: - online_networks.append(each) - return online_networks + return [network for network in _online_networks if network is not None] @property - def online_domains(self) -> list[tuple[NodePeer, NodeMetadataJSON | None]]: - def check_domain( - peer: NodePeer, - ) -> tuple[NodePeer, NodeMetadataJSON | None] | None: - try: - guest_client = peer.guest_client - metadata = guest_client.metadata - return peer, metadata - except Exception: # nosec - pass - return None - + def online_datasites(self) -> list[tuple[ServerPeer, ServerMetadataJSON | None]]: networks = self.online_networks - self.all_domains = [] - # We can use a with statement to ensure threads are cleaned up promptly - with futures.ThreadPoolExecutor(max_workers=20) as executor: - # map - _all_online_domains = [] - for network in networks: + _all_online_datasites = [] + for network in networks: + try: network_client = NetworkRegistry.create_client(network) - domains = network_client.domains - self.all_domains += domains - _online_domains = list( - executor.map(lambda domain: check_domain(domain), domains) - ) - _all_online_domains += _online_domains - - online_domains = [] - for each in _all_online_domains: - if each is not None: - online_domains.append(each) - return online_domains + except Exception as e: + logger.error(f"Error in creating network client {e}") + continue + + datasites: list[ServerPeer] = network_client.datasites.retrieve_servers() + for datasite in datasites: + self.all_datasites[str(datasite.id)] = datasite + + _all_online_datasites += [ + (datasite, datasite.guest_client.metadata) + for datasite in datasites + if datasite.ping_status == ServerPeerConnectionStatus.ACTIVE + ] + + return [datasite for datasite in _all_online_datasites if datasite is not None] def __make_dict__(self) -> list[dict[str, Any]]: - on = self.online_domains - domains = [] - domain_dict: dict[str, Any] = {} - for domain, metadata in on: - domain_dict["name"] = domain.name + on = self.online_datasites + datasites: list[dict[str, Any]] = [] + for datasite, metadata in on: + datasite_dict: dict[str, Any] = {} + datasite_dict["name"] = datasite.name if metadata is not None: - domain_dict["organization"] = metadata.organization - domain_dict["version"] = metadata.syft_version + datasite_dict["organization"] = metadata.organization + datasite_dict["version"] = metadata.syft_version route = None - if len(domain.node_routes) > 0: - route = domain.pick_highest_priority_route() - domain_dict["host_or_ip"] = route.host_or_ip if route else "-" - domain_dict["protocol"] = route.protocol if route else "-" - domain_dict["port"] = route.port if route else "-" - domain_dict["id"] = domain.id - domains.append(domain_dict) - return domains + if len(datasite.server_routes) > 0: + route = 
datasite.pick_highest_priority_route() + datasite_dict["host_or_ip"] = route.host_or_ip if route else "-" + datasite_dict["protocol"] = route.protocol if route else "-" + datasite_dict["port"] = route.port if route else "-" + datasite_dict["id"] = datasite.id + datasites.append(datasite_dict) + + return datasites def _repr_html_(self) -> str: - on = self.__make_dict__() + on: list[dict[str, Any]] = self.__make_dict__() if len(on) == 0: - return "(no domains online - try syft.domains.all_domains to see offline domains)" - return pd.DataFrame(on)._repr_html_() # type: ignore + return "(no datasites online - try syft.datasites.all_datasites to see offline datasites)" + df = pd.DataFrame(on) + total_df = pd.DataFrame( + [ + [ + f"{len(on)} / {len(self.all_datasites)} (online datasites / all datasites)" + ] + + [""] * (len(df.columns) - 1) + ], + columns=df.columns, + index=["Total"], + ) + df = pd.concat([df, total_df]) + return df._repr_html_() # type: ignore def __repr__(self) -> str: - on = self.__make_dict__() + on: list[dict[str, Any]] = self.__make_dict__() if len(on) == 0: - return "(no domains online - try syft.domains.all_domains to see offline domains)" - return pd.DataFrame(on).to_string() - - def create_client(self, peer: NodePeer) -> Client: + return "(no datasites online - try syft.datasites.all_datasites to see offline datasites)" + df = pd.DataFrame(on) + total_df = pd.DataFrame( + [ + [ + f"{len(on)} / {len(self.all_datasites)} (online datasites / all datasites)" + ] + + [""] * (len(df.columns) - 1) + ], + columns=df.columns, + index=["Total"], + ) + df = pd.concat([df, total_df]) + return df._repr_html_() # type: ignore + + def create_client(self, peer: ServerPeer) -> Client: try: return peer.guest_client except Exception as e: - error(f"Failed to login to: {peer}. {e}") - raise SyftException(f"Failed to login to: {peer}. {e}") + raise SyftException(public_message=f"Failed to login to: {peer}. {e}") def __getitem__(self, key: str | int) -> Client: if isinstance(key, int): - return self.create_client(self.online_domains[key][0]) + return self.create_client(self.online_datasites[key][0]) else: - on = self.online_domains + on = self.online_datasites count = 0 - for domain, _ in on: - if domain.name == key: - return self.create_client(self.online_domains[count][0]) + for datasite, _ in on: + if datasite.name == key: + return self.create_client(self.online_datasites[count][0]) count += 1 raise KeyError(f"Invalid key: {key} for {on}") @@ -293,7 +512,7 @@ def __init__(self) -> None: enclaves_json = response.json() self.all_enclaves = enclaves_json["2.0.0"]["enclaves"] except Exception as e: - warning( + logger.warning( f"Failed to get Enclave Registry, go checkout: {ENCLAVE_REGISTRY_REPO}. 
{e}" ) @@ -305,7 +524,7 @@ def check_enclave(enclave: dict) -> dict[Any, Any] | None: url = "http://" + enclave["host_or_ip"] + ":" + str(enclave["port"]) + "/" try: res = requests.get(url, timeout=DEFAULT_TIMEOUT) # nosec - online = "OpenMined Enclave Node Running" in res.text + online = "OpenMined Enclave Server Running" in res.text except Exception: online = False @@ -334,10 +553,7 @@ def check_enclave(enclave: dict) -> dict[Any, Any] | None: executor.map(lambda enclave: check_enclave(enclave), enclaves) ) - online_enclaves = [] - for each in _online_enclaves: - if each is not None: - online_enclaves.append(each) + online_enclaves = [each for each in _online_enclaves if each is not None] return online_enclaves def _repr_html_(self) -> str: @@ -361,12 +577,11 @@ def create_client(enclave: dict[str, Any]) -> Client: port = int(enclave["port"]) protocol = enclave["protocol"] host_or_ip = enclave["host_or_ip"] - grid_url = GridURL(port=port, protocol=protocol, host_or_ip=host_or_ip) - client = connect(url=str(grid_url)) + server_url = ServerURL(port=port, protocol=protocol, host_or_ip=host_or_ip) + client = connect(url=str(server_url)) return client.guest() except Exception as e: - error(f"Failed to login with: {enclave}. {e}") - raise SyftException(f"Failed to login with: {enclave}. {e}") + raise SyftException(public_message=f"Failed to login with: {enclave}. {e}") def __getitem__(self, key: str | int) -> Client: if isinstance(key, int): diff --git a/packages/syft/src/syft/client/search.py b/packages/syft/src/syft/client/search.py index 37e3af2c488..f9e62354321 100644 --- a/packages/syft/src/syft/client/search.py +++ b/packages/syft/src/syft/client/search.py @@ -1,12 +1,11 @@ # stdlib +# third party + # relative from ..service.dataset.dataset import Dataset -from ..service.metadata.node_metadata import NodeMetadataJSON -from ..service.network.network_service import NodePeer from ..types.uid import UID from .client import SyftClient -from .registry import DomainRegistry class SearchResults: @@ -48,31 +47,56 @@ def client_for(self, key: Dataset | int | str | UID) -> SyftClient: dataset = self.__getitem__(key) return self._dataset_client[dataset.id] + def __len__(self) -> int: + return len(self._datasets) + + +# class Search: +# def __init__(self, datasites: DatasiteRegistry) -> None: +# self.datasites: list[tuple[ServerPeer, ServerMetadataJSON | None]] = ( +# datasites.online_datasites +# ) + +# @staticmethod +# def __search_one_server( +# peer_tuple: tuple[ServerPeer, ServerMetadataJSON], name: str +# ) -> tuple[SyftClient | None, list[Dataset]]: +# try: +# peer, server_metadata = peer_tuple +# client = peer.guest_client +# results = client.api.services.dataset.search(name=name) +# return (client, results) +# except Exception as e: # noqa +# warning = SyftWarning( +# message=f"Got exception {e} at server {server_metadata.name}" +# ) +# display(warning) +# return (None, []) + + +# def __search(self, name: str) -> list[tuple[SyftClient, list[Dataset]]]: +# with ThreadPoolExecutor(max_workers=20) as executor: +# # results: list[tuple[SyftClient | None, list[Dataset]]] = [ +# # self.__search_one_server(peer_tuple, name) for peer_tuple in self.datasites +# # ] +# results: list[tuple[SyftClient | None, list[Dataset]]] = list( +# executor.map( +# lambda peer_tuple: self.__search_one_server(peer_tuple, name), +# self.datasites, +# ) +# ) +# filtered = [(client, result) for client, result in results if client and result] + +# return filtered + +# def search(self, name: str) -> SearchResults: +# """ 
+# Searches for a specific dataset by name. + +# Args: +# name (str): The name of the dataset to search for. -class Search: - def __init__(self, domains: DomainRegistry): - self.domains = domains.online_domains - - @staticmethod - def __search_one_node( - peer_tuple: tuple[NodePeer, NodeMetadataJSON], name: str - ) -> tuple[SyftClient | None, list[Dataset]]: - try: - peer, _ = peer_tuple - client = peer.guest_client - results = client.api.services.dataset.search(name=name) - return (client, results) - except: # noqa - return (None, []) - - def __search(self, name: str) -> list[tuple[SyftClient, list[Dataset]]]: - results = [ - self.__search_one_node(peer_tuple, name) for peer_tuple in self.domains - ] - - # filter out SyftError - filtered = [(client, result) for client, result in results if client and result] - return filtered - - def search(self, name: str) -> SearchResults: - return SearchResults(self.__search(name)) +# Returns: +# SearchResults: An object containing the search results. +# """ +# return SearchResults(self.__search(name)) diff --git a/packages/syft/src/syft/client/sync_decision.py b/packages/syft/src/syft/client/sync_decision.py new file mode 100644 index 00000000000..30f0098bab7 --- /dev/null +++ b/packages/syft/src/syft/client/sync_decision.py @@ -0,0 +1,26 @@ +# stdlib +from enum import Enum + +# relative +from ..serde.serializable import serializable + + +@serializable(canonical_name="SyncDirection", version=1) +class SyncDirection(str, Enum): + LOW_TO_HIGH = "low_to_high" + HIGH_TO_LOW = "high_to_low" + + def to_sync_decision(self) -> "SyncDecision": + if self == SyncDirection.LOW_TO_HIGH: + return SyncDecision.LOW + elif self == SyncDirection.HIGH_TO_LOW: + return SyncDecision.HIGH + else: + raise ValueError("Invalid SyncDirection") + + +class SyncDecision(Enum): + LOW = "low" + HIGH = "high" + SKIP = "skip" + IGNORE = "ignore" diff --git a/packages/syft/src/syft/client/syncing.py b/packages/syft/src/syft/client/syncing.py index e42fa4671e5..4fd6494a8a5 100644 --- a/packages/syft/src/syft/client/syncing.py +++ b/packages/syft/src/syft/client/syncing.py @@ -1,257 +1,331 @@ # stdlib -from time import sleep + +# stdlib +from collections.abc import Collection +import logging + +# third party +from IPython.display import display # relative -from ..service.action.action_permissions import ActionObjectPermission -from ..service.action.action_permissions import ActionPermission -from ..service.action.action_permissions import StoragePermission -from ..service.code.user_code import UserCode -from ..service.job.job_stash import Job -from ..service.sync.diff_state import NodeDiff -from ..service.sync.diff_state import ObjectDiff +from ..abstract_server import ServerSideType +from ..server.credentials import SyftVerifyKey +from ..service.response import SyftSuccess +from ..service.response import SyftWarning from ..service.sync.diff_state import ObjectDiffBatch -from ..service.sync.diff_state import ResolvedSyncState -from ..service.sync.diff_state import SyncDecision +from ..service.sync.diff_state import ServerDiff +from ..service.sync.diff_state import SyncInstruction +from ..service.sync.resolve_widget import PaginatedResolveWidget +from ..service.sync.resolve_widget import ResolveWidget from ..service.sync.sync_state import SyncState +from ..types.errors import SyftException +from ..types.uid import UID +from ..util.decorators import deprecated +from ..util.util import prompt_warning_message +from .datasite_client import DatasiteClient +from .sync_decision import 
SyncDecision +from .sync_decision import SyncDirection + +logger = logging.getLogger(__name__) + + +def sync( + from_client: DatasiteClient, + to_client: DatasiteClient, + include_ignored: bool = False, + include_same: bool = False, + filter_by_email: str | None = None, + include_types: Collection[str | type] | None = None, + exclude_types: Collection[str | type] | None = None, + hide_usercode: bool = True, +) -> PaginatedResolveWidget | SyftSuccess: + diff = compare_clients( + from_client=from_client, + to_client=to_client, + include_ignored=include_ignored, + include_same=include_same, + filter_by_email=filter_by_email, + include_types=include_types, + exclude_types=exclude_types, + hide_usercode=hide_usercode, + ) + if diff.low_state.errors: + error_list_text = "
    ".join("- " + x for x in diff.low_state.errors.values()) + warning = SyftWarning( + message=( + f"Server {to_client.name} had the following errors while trying to retrieve its sync state. " + + f"Objects corresponding to these errors will be ignored in comparison.
    {error_list_text}" + ) + ) + display(warning) + if diff.high_state.errors: + error_list_text = "
    ".join("- " + x for x in diff.high_state.errors.values()) + warning = SyftWarning( + message=( + f"Server {to_client.name} had the following errors while trying to retrieve its sync state. " + + f"Objects corresponding to these errors will be ignored in comparison.
    {error_list_text}" + ) + ) + display(warning) + + return diff.resolve() + + +def compare_states( + from_state: SyncState, + to_state: SyncState, + include_ignored: bool = False, + include_same: bool = False, + filter_by_email: str | None = None, + include_types: Collection[str | type] | None = None, + exclude_types: Collection[str | type] | None = None, + hide_usercode: bool = True, +) -> ServerDiff: + # ServerDiff + if ( + from_state.server_side_type == ServerSideType.LOW_SIDE + and to_state.server_side_type == ServerSideType.HIGH_SIDE + ): + low_state = from_state + high_state = to_state + direction = SyncDirection.LOW_TO_HIGH + elif ( + from_state.server_side_type == ServerSideType.HIGH_SIDE + and to_state.server_side_type == ServerSideType.LOW_SIDE + ): + low_state = to_state + high_state = from_state + direction = SyncDirection.HIGH_TO_LOW + else: + raise SyftException( + public_message="Invalid server side types: can only compare a high and low server" + ) -def compare_states(low_state: SyncState, high_state: SyncState) -> NodeDiff: - return NodeDiff.from_sync_state(low_state=low_state, high_state=high_state) - - -def get_user_input_for_resolve() -> str | None: - print( - "Do you want to keep the low state or the high state for these objects? choose 'low' or 'high'" + if hide_usercode: + prompt_warning_message( + "UserCodes are hidden by default, and are part of the Requests." + " If you want to include them as separate objects, set `hide_usercode=False`" + ) + exclude_types = exclude_types or [] + exclude_types.append("usercode") + + return ServerDiff.from_sync_state( + low_state=low_state, + high_state=high_state, + direction=direction, + include_ignored=include_ignored, + include_same=include_same, + filter_by_email=filter_by_email, + include_types=include_types, + exclude_types=exclude_types, ) - while True: - decision = input() - decision = decision.lower() - if decision in ["low", "high"]: - return decision - else: - print("Please choose between `low` or `high`") +def compare_clients( + from_client: DatasiteClient, + to_client: DatasiteClient, + include_ignored: bool = False, + include_same: bool = False, + filter_by_email: str | None = None, + include_types: Collection[str | type] | None = None, + exclude_types: Collection[str | type] | None = None, + hide_usercode: bool = True, +) -> ServerDiff: + from_state = from_client.get_sync_state() + to_state = to_client.get_sync_state() + return compare_states( + from_state=from_state, + to_state=to_state, + include_ignored=include_ignored, + include_same=include_same, + filter_by_email=filter_by_email, + include_types=include_types, + exclude_types=exclude_types, + hide_usercode=hide_usercode, + ) def resolve( - state: NodeDiff, - decision: str | None = None, - share_private_objects: bool = False, - ask_for_input: bool = True, -) -> tuple[ResolvedSyncState, ResolvedSyncState]: - # TODO: only add permissions for objects where we manually give permission - # Maybe default read permission for some objects (high -> low) - resolved_state_low = ResolvedSyncState(node_uid=state.low_node_uid, alias="low") - resolved_state_high = ResolvedSyncState(node_uid=state.high_node_uid, alias="high") - - for batch_diff in state.hierarchies: - batch_decision = decision - if all(diff.status == "SAME" for diff in batch_diff.diffs): - # Hierarchy has no diffs - continue - - print(batch_diff.__repr__()) + obj: ObjectDiffBatch | ServerDiff, +) -> ResolveWidget | PaginatedResolveWidget | SyftSuccess: + if not isinstance(obj, ObjectDiffBatch | ServerDiff): 
+ raise ValueError( + f"Invalid type: could not resolve object with type {type(obj).__qualname__}" + ) + return obj.resolve() + + +@deprecated(reason="resolve_single has been renamed to resolve", return_syfterror=True) +def resolve_single( + obj_diff_batch: ObjectDiffBatch, +) -> ResolveWidget | PaginatedResolveWidget | SyftSuccess: + return resolve(obj_diff_batch) + + +def handle_sync_batch( + obj_diff_batch: ObjectDiffBatch, + share_private_data: dict[UID, bool], + mockify: dict[UID, bool], +) -> SyftSuccess: + # Infer SyncDecision + sync_direction = obj_diff_batch.sync_direction + if sync_direction is None: + raise SyftException( + message="Cannot sync an object without a specified sync direction." + ) - if batch_decision is None: - batch_decision = get_user_input_for_resolve() + decision = sync_direction.to_sync_decision() - sync_decisions: list[SyncDecision] = get_sync_decisions_for_batch_items( - batch_diff, - batch_decision, - share_private_objects=share_private_objects, - ask_for_input=ask_for_input, + # Validate decision + if decision not in [SyncDecision.LOW, SyncDecision.HIGH]: + raise SyftException(public_message="Invalid sync decision") + elif obj_diff_batch.is_unchanged: + return SyftSuccess(message="No changes to sync") + elif obj_diff_batch.decision is SyncDecision.IGNORE: + raise SyftException( + public_message="Attempted to sync an ignored object, please unignore first" + ) + elif obj_diff_batch.decision is not None: + raise SyftException( + public_message="Attempted to sync an object that has already been synced" ) - print(f"Decision: Syncing {len(batch_diff)} objects from {batch_decision} side") + src_client = obj_diff_batch.source_client + tgt_client = obj_diff_batch.target_client + src_resolved_state, tgt_resolved_state = obj_diff_batch.create_new_resolved_states() - for sync_decision in sync_decisions: - resolved_state_low.add_sync_decision(sync_decision) - resolved_state_high.add_sync_decision(sync_decision) + obj_diff_batch.decision = decision - print() - print("=" * 100) - print() + sync_instructions = [] + for diff in obj_diff_batch.get_dependencies(include_roots=True): + # figure out the right verify key to share to + # in case of a job with user code, share to user code owner + # without user code, share to job owner + share_to_user: SyftVerifyKey | None = ( + getattr(obj_diff_batch.user_code_high, "user_verify_key", None) + or obj_diff_batch.user_verify_key_high + ) + share_private_data_for_diff = share_private_data[diff.object_id] + mockify_for_diff = mockify[diff.object_id] + instruction = SyncInstruction.from_batch_decision( + diff=diff, + share_private_data=share_private_data_for_diff, + mockify=mockify_for_diff, + sync_direction=sync_direction, + decision=decision, + share_to_user=share_to_user, + ) + sync_instructions.append(instruction) + + logger.debug(f"Decision: Syncing {len(sync_instructions)} objects") + + # Apply sync instructions to target side + for sync_instruction in sync_instructions: + tgt_resolved_state.add_sync_instruction(sync_instruction) + src_resolved_state.add_sync_instruction(sync_instruction) + # Apply empty state to source side to signal that we are done syncing + # We also add permissions for users from the low side to mark L0 request as approved + src_client.apply_state(src_resolved_state) + return tgt_client.apply_state(tgt_resolved_state) + + +def handle_ignore_batch( + obj_diff_batch: ObjectDiffBatch, + all_batches: list[ObjectDiffBatch], +) -> SyftSuccess: + if obj_diff_batch.decision is SyncDecision.IGNORE: + return 
SyftSuccess(message="This batch is already ignored") + elif obj_diff_batch.decision is not None: + raise SyftException( + message="Attempted to sync an object that has already been synced" + ) - return resolved_state_low, resolved_state_high + obj_diff_batch.decision = SyncDecision.IGNORE + other_batches = [b for b in all_batches if b is not obj_diff_batch] + other_ignore_batches = get_other_ignore_batches(obj_diff_batch, other_batches) + for other_batch in other_ignore_batches: + other_batch.decision = SyncDecision.IGNORE + logger.debug(f"Ignoring other batch with root {other_batch.root_type.__name__}") -def get_sync_decisions_for_batch_items( - batch_diff: ObjectDiffBatch, - decision: str, - share_private_objects: bool = False, - ask_for_input: bool = True, -) -> list[SyncDecision]: - sync_decisions: list[SyncDecision] = [] + src_client = obj_diff_batch.source_client + tgt_client = obj_diff_batch.target_client + src_resolved_state, tgt_resolved_state = obj_diff_batch.create_new_resolved_states() - unpublished_private_high_diffs: list[ObjectDiff] = [] - for diff in batch_diff.diffs: - is_high_private_object = ( - diff.high_obj is not None and diff.high_obj._has_private_sync_attrs() - ) - is_low_published_object = diff.low_node_uid in diff.low_storage_permissions - if is_high_private_object and not is_low_published_object: - unpublished_private_high_diffs.append(diff) - - user_codes_high: list[UserCode] = [ - diff.high_obj - for diff in batch_diff.diffs - if isinstance(diff.high_obj, UserCode) - ] - if len(user_codes_high) > 1: - raise ValueError("too many user codes") - if len(user_codes_high) == 0: - user_code_high = None - else: - user_code_high = user_codes_high[0] + for batch in [obj_diff_batch] + other_ignore_batches: + src_resolved_state.add_ignored(batch) + tgt_resolved_state.add_ignored(batch) - if user_code_high is None and len(unpublished_private_high_diffs): - raise ValueError("Found unpublished private objects without user code") + src_client.apply_state(src_resolved_state) + return tgt_client.apply_state(tgt_resolved_state) - if share_private_objects: - private_high_diffs_to_share = unpublished_private_high_diffs - elif ask_for_input: - private_high_diffs_to_share = ask_user_input_permission( - user_code_high, unpublished_private_high_diffs - ) - else: - private_high_diffs_to_share = [] - - for diff in batch_diff.diffs: - is_unpublished_private_diff = diff in unpublished_private_high_diffs - has_share_decision = diff in private_high_diffs_to_share - - if isinstance(diff.high_obj, Job): - if user_code_high is None: - raise ValueError("Job without user code") - # Jobs are always shared - new_permissions_low_side = [ - ActionObjectPermission( - uid=diff.object_id, - permission=ActionPermission.READ, - credentials=user_code_high.user_verify_key, - ) - ] - mockify = False - - elif is_unpublished_private_diff and has_share_decision: - # private + want to share - new_permissions_low_side = [ - ActionObjectPermission( - uid=diff.object_id, - permission=ActionPermission.READ, - credentials=user_code_high.user_verify_key, # type: ignore - ) - ] - mockify = False - - elif is_unpublished_private_diff and not has_share_decision: - # private + do not share - new_permissions_low_side = [] - mockify = True - - else: - # any other object is shared - new_permissions_low_side = [] - mockify = False - - new_storage_permissions_lowside = [] - if not mockify: - new_storage_permissions_lowside = [ - StoragePermission(uid=diff.object_id, node_uid=diff.low_node_uid) - ] - - # Always share to 
high_side - if diff.status == "NEW" and diff.high_obj is None: - new_storage_permissions_highside = [ - StoragePermission(uid=diff.object_id, node_uid=diff.high_node_uid) - ] - else: - new_storage_permissions_highside = [] - - sync_decisions.append( - SyncDecision( - diff=diff, - decision=decision, - new_permissions_lowside=new_permissions_low_side, - new_storage_permissions_lowside=new_storage_permissions_lowside, - new_storage_permissions_highside=new_storage_permissions_highside, - mockify=mockify, - ) - ) - return sync_decisions +def handle_unignore_batch( + obj_diff_batch: ObjectDiffBatch, + all_batches: list[ObjectDiffBatch], +) -> SyftSuccess: + src_client = obj_diff_batch.source_client + tgt_client = obj_diff_batch.target_client + src_resolved_state, tgt_resolved_state = obj_diff_batch.create_new_resolved_states() + obj_diff_batch.decision = None + src_resolved_state.add_unignored(obj_diff_batch.root_id) + tgt_resolved_state.add_unignored(obj_diff_batch.root_id) -QUESTION_SHARE_PRIVATE_OBJS = """You currently have the following private objects: + # Unignore dependencies + other_batches = [b for b in all_batches if b is not obj_diff_batch] + other_unignore_batches = get_other_unignore_batches(obj_diff_batch, other_batches) + for other_batch in other_unignore_batches: + logger.debug(f"Ignoring other batch with root {other_batch.root_type.__name__}") + other_batch.decision = None + src_resolved_state.add_unignored(other_batch.root_id) + tgt_resolved_state.add_unignored(other_batch.root_id) -{objects_str} + src_client.apply_state(src_resolved_state) + return tgt_client.apply_state(tgt_resolved_state) -Do you want to share some of these private objects? If so type the first 3 characters of the id e.g. 'abc'. -If you dont want to share any more private objects, type "no" -""" -CONFIRMATION_SHARE_PRIVATE_OBJ = """Setting permissions for {object_type} #{object_id} to share with {user_verify_key}, -this will become effective when you call client.apply_state()) -""" +def get_other_unignore_batches( + batch: ObjectDiffBatch, + other_batches: list[ObjectDiffBatch], +) -> list[ObjectDiffBatch]: + if batch.decision is not None: + return [] + other_unignore_batches = [] + required_dependencies = { + d.object_id for d in batch.get_dependencies(include_roots=True) + } -def ask_user_input_permission( - user_code: UserCode, all_private_high_diffs: list[ObjectDiff] -) -> list[ObjectDiff]: - if len(all_private_high_diffs) == 0: + for other_batch in other_batches: + if other_batch == batch: + continue + elif ( + other_batch.decision == SyncDecision.IGNORE + and other_batch.root_id in required_dependencies + ): + other_unignore_batches.append(other_batch) + return other_unignore_batches + + +def get_other_ignore_batches( + batch: ObjectDiffBatch, + other_batches: list[ObjectDiffBatch], +) -> list[ObjectDiffBatch]: + if batch.decision != SyncDecision.IGNORE: return [] - user_verify_key = user_code.user_verify_key - private_high_diffs_to_share = [] - print( - f"""This batch of updates contains new private objects on the high side that you may want \ - to share with user {user_verify_key}.""" - ) - - remaining_private_high_diffs = all_private_high_diffs[:] - while len(remaining_private_high_diffs): - objects_str = "\n".join( - [ - f"{diff.object_type} #{diff.object_id}" - for diff in remaining_private_high_diffs - ] - ) - print(QUESTION_SHARE_PRIVATE_OBJS.format(objects_str=objects_str), flush=True) - - sleep(0.1) - res = input() - if res == "no": - break - elif len(res) >= 3: - matches = [ - diff - for 
diff in remaining_private_high_diffs - if str(diff.object_id).startswith(res) - ] - if len(matches) == 0: - print("Invalid input") - continue - elif len(matches) == 1: - diff = matches[0] - print() - print("=" * 100) - print() - print( - CONFIRMATION_SHARE_PRIVATE_OBJ.format( - object_type=diff.object_type, - object_id=diff.object_id, - user_verify_key=user_verify_key, - ) - ) - - remaining_private_high_diffs.remove(diff) - private_high_diffs_to_share.append(diff) - - else: - print("Found multiple matches for provided id, exiting") - break - else: - print("invalid input") - - return private_high_diffs_to_share + other_ignore_batches = [] + ignored_ids = {x.object_id for x in batch.get_dependents(include_roots=False)} + for other_batch in other_batches: + if other_batch.decision != SyncDecision.IGNORE: + # Currently, this is not recursive, in the future it might be + other_batch_ids = { + d.object_id for d in other_batch.get_dependencies(include_roots=True) + } + if len(other_batch_ids & ignored_ids) != 0: + other_ignore_batches.append(other_batch) + ignored_ids.update(other_batch_ids) + + return other_ignore_batches diff --git a/packages/syft/src/syft/custom_worker/builder.py b/packages/syft/src/syft/custom_worker/builder.py index 1df2506e5db..e47f341a27f 100644 --- a/packages/syft/src/syft/custom_worker/builder.py +++ b/packages/syft/src/syft/custom_worker/builder.py @@ -22,8 +22,8 @@ class CustomWorkerBuilder: TYPE_CPU = "cpu" TYPE_GPU = "gpu" - TEMPLATE_DIR_PROD = os.path.expandvars("$APPDIR/grid/") - TEMPLATE_DIR_DEV = "../../../../../grid/backend/" + TEMPLATE_DIR_PROD = os.path.expandvars("$APPDIR/grid/images/") + TEMPLATE_DIR_DEV = "../../../../../grid/backend/grid/images/" CUSTOM_IMAGE_PREFIX = "custom-worker" @@ -102,7 +102,7 @@ def _build_template( type = self.TYPE_GPU if config.build.gpu else self.TYPE_CPU - dockerfile_path = self._find_template_path(type) + dockerfile_path = self.find_worker_image(type) imgtag = config.get_signature()[:8] @@ -119,11 +119,11 @@ def _build_template( buildargs=build_args, ) - def _find_template_path(self, type: str) -> Path: + def find_worker_image(self, type: str) -> Path: """ Find the Worker Dockerfile and it's context path - PROD will be in `$APPDIR/grid/` - - DEV will be in `packages/grid/backend` + - DEV will be in `packages/grid/backend/grid/images` - In both the cases context dir does not matter (unless we're calling COPY) Args: diff --git a/packages/syft/src/syft/custom_worker/builder_docker.py b/packages/syft/src/syft/custom_worker/builder_docker.py index d08ee824e49..2e544f3842f 100644 --- a/packages/syft/src/syft/custom_worker/builder_docker.py +++ b/packages/syft/src/syft/custom_worker/builder_docker.py @@ -41,6 +41,8 @@ def build_image( tag=tag, timeout=BUILD_IMAGE_TIMEOUT_SEC, buildargs=buildargs, + rm=True, + labels={"orgs.openmined.syft": f"Build image {tag}"}, **kwargs, ) return ImageBuildResult( @@ -53,9 +55,9 @@ def build_image( def push_image( self, tag: str, - username: str, - password: str, registry_url: str, + username: str | None = None, + password: str | None = None, **kwargs: Any, ) -> ImagePushResult: with contextlib.closing(docker.from_env()) as client: diff --git a/packages/syft/src/syft/custom_worker/builder_k8s.py b/packages/syft/src/syft/custom_worker/builder_k8s.py index 4deeb309751..0d72e55decf 100644 --- a/packages/syft/src/syft/custom_worker/builder_k8s.py +++ b/packages/syft/src/syft/custom_worker/builder_k8s.py @@ -1,6 +1,7 @@ # stdlib from hashlib import sha256 from pathlib import Path +from secrets import 
token_hex from typing import Any # third party @@ -9,28 +10,33 @@ from kr8s.objects import Secret # relative +from ..types.errors import SyftException from .builder_types import BUILD_IMAGE_TIMEOUT_SEC from .builder_types import BuilderBase from .builder_types import ImageBuildResult from .builder_types import ImagePushResult from .builder_types import PUSH_IMAGE_TIMEOUT_SEC from .k8s import INTERNAL_REGISTRY_HOST -from .k8s import JOB_COMPLETION_TTL +from .k8s import KANIKO_VERSION from .k8s import KUBERNETES_NAMESPACE from .k8s import KubeUtils +from .k8s import USE_INTERNAL_REGISTRY from .k8s import get_kr8s_client from .utils import ImageUtils __all__ = ["KubernetesBuilder"] -class BuildFailed(Exception): - pass - - class KubernetesBuilder(BuilderBase): + # app.kubernetes.io/component COMPONENT = "builder" + # service account for the Job, useful for workload identity + SERVICE_ACCOUNT = "builder-service-account" + + # Time after which Job will be deleted + JOB_COMPLETION_TTL = 60 + def __init__(self) -> None: self.client = get_kr8s_client() @@ -46,12 +52,23 @@ def build_image( logs = None config = None job_id = self._new_job_id(tag) + kaniko_extra_args = None if dockerfile: pass elif dockerfile_path: dockerfile = dockerfile_path.read_text() + if USE_INTERNAL_REGISTRY: + # if we are using internal registry, we tweak the tag to point to it + tag = ImageUtils.change_registry(tag, registry=INTERNAL_REGISTRY_HOST) + + # and let kaniko know about the internal registry + kaniko_extra_args = [ + f"--insecure-registry={INTERNAL_REGISTRY_HOST}", + f"--skip-tls-verify-registry={INTERNAL_REGISTRY_HOST}", + ] + try: # Create a ConfigMap with the Dockerfile config = self._create_build_config(job_id, dockerfile) @@ -63,6 +80,7 @@ def build_image( tag=tag, job_config=config, build_args=buildargs, + kaniko_extra_args=kaniko_extra_args, ) # wait for job to complete/fail @@ -77,10 +95,12 @@ def build_image( image_digest = self._get_image_digest(job) if not image_digest: exit_code = self._get_exit_code(job) - raise BuildFailed( - "Failed to build the image. " - f"Kaniko exit code={exit_code}. " - f"Logs={logs}" + raise SyftException( + public_message=( + "Failed to build the image." + f" Kaniko exit code={exit_code}." 
+ f" Logs={logs}" + ) ) except Exception: @@ -97,22 +117,33 @@ def build_image( def push_image( self, tag: str, - username: str, - password: str, registry_url: str, + username: str | None = None, + password: str | None = None, **kwargs: Any, ) -> ImagePushResult: exit_code = 1 logs = None job_id = self._new_job_id(tag) push_secret = None + registry_auths = [] + + if USE_INTERNAL_REGISTRY: + # local registry auth can be anything + registry_auths.append((INTERNAL_REGISTRY_HOST, "admin", token_hex(4))) + else: + # kaniko has already pushed the image directly + return ImagePushResult(logs="Already pushed", exit_code=0) + + # if we have external registry credentials, add them to the list + # elsea leave it for workload identity + if username and password: + registry_auths.append((registry_url, username, password)) try: push_secret = self._create_push_secret( - id=job_id, - url=registry_url, - username=username, - password=password, + job_id=job_id, + registry_auths=registry_auths, ) push_secret.refresh() @@ -142,17 +173,17 @@ def _get_tag_hash(self, tag: str) -> str: return sha256(tag.encode()).hexdigest() def _get_image_digest(self, job: Job) -> str | None: - selector = {"batch.kubernetes.io/job-name": job.metadata.name} + selector = {"job-name": job.metadata.name} pods = self.client.get("pods", label_selector=selector) return KubeUtils.get_container_exit_message(pods) def _get_exit_code(self, job: Job) -> list[int]: - selector = {"batch.kubernetes.io/job-name": job.metadata.name} + selector = {"job-name": job.metadata.name} pods = self.client.get("pods", label_selector=selector) return KubeUtils.get_container_exit_code(pods) def _get_logs(self, job: Job) -> str: - selector = {"batch.kubernetes.io/job-name": job.metadata.name} + selector = {"job-name": job.metadata.name} pods = self.client.get("pods", label_selector=selector) return KubeUtils.get_logs(pods) @@ -180,13 +211,13 @@ def _create_kaniko_build_job( tag: str, job_config: ConfigMap, build_args: dict | None = None, + kaniko_extra_args: list[str] | None = None, ) -> Job: # for push build_args = build_args or {} + kaniko_extra_args = kaniko_extra_args or [] build_args_list = [] - internal_tag = ImageUtils.change_registry(tag, registry=INTERNAL_REGISTRY_HOST) - for k, v in build_args.items(): build_args_list.append(f'--build-arg="{k}={v}"') @@ -202,35 +233,34 @@ def _create_kaniko_build_job( }, "spec": { "backoffLimit": 0, - "ttlSecondsAfterFinished": JOB_COMPLETION_TTL, + "ttlSecondsAfterFinished": KubernetesBuilder.JOB_COMPLETION_TTL, "template": { "spec": { "restartPolicy": "Never", + "serviceAccountName": KubernetesBuilder.SERVICE_ACCOUNT, "containers": [ { "name": "kaniko", - "image": "gcr.io/kaniko-project/executor:latest", + "image": f"gcr.io/kaniko-project/executor:{KANIKO_VERSION}", "args": [ "--dockerfile=Dockerfile", "--context=dir:///workspace", - f"--destination={internal_tag}", + f"--destination={tag}", # Disabling --reproducible because it eats up a lot of CPU+RAM # https://github.com/GoogleContainerTools/kaniko/issues/1960 # https://github.com/GoogleContainerTools/kaniko/pull/2477 # "--reproducible", - # cache args + # Cache "--cache=true", "--cache-copy-layers", "--cache-run-layers", - f"--cache-repo={INTERNAL_REGISTRY_HOST}/builder-cache", # outputs args "--digest-file=/dev/termination-log", # other kaniko conf - f"--insecure-registry={INTERNAL_REGISTRY_HOST}", - f"--skip-tls-verify-registry={INTERNAL_REGISTRY_HOST}", "--log-format=text", "--verbosity=info", ] + + kaniko_extra_args + build_args_list, "volumeMounts": [ { 
@@ -238,17 +268,6 @@ def _create_kaniko_build_job( "mountPath": "/workspace", }, ], - "resources": { - "requests": { - "memory": "4Gi", - "cpu": "2", - }, - "limits": { - "memory": "16Gi", - "cpu": "4", - }, - "ephemeral-storage": "10Gi", - }, } ], "volumes": [ @@ -279,11 +298,11 @@ def _create_push_job( run_cmds = [ # push with credentials "echo Pushing image...", - f"crane copy {internal_tag} {tag}", + f"krane copy {internal_tag} {tag}", # cleanup image from internal registry "echo Cleaning up...", - f"IMG_DIGEST=$(crane digest {internal_tag})", - f"crane delete {internal_reg}/{internal_repo}@$IMG_DIGEST; echo Done", + f"IMG_DIGEST=$(krane digest {internal_tag})", + f"krane delete {internal_reg}/{internal_repo}@$IMG_DIGEST; echo Done", ] job = Job( @@ -299,15 +318,16 @@ def _create_push_job( }, "spec": { "backoffLimit": 0, - "ttlSecondsAfterFinished": JOB_COMPLETION_TTL, + "ttlSecondsAfterFinished": KubernetesBuilder.JOB_COMPLETION_TTL, "template": { "spec": { "restartPolicy": "Never", + "serviceAccountName": KubernetesBuilder.SERVICE_ACCOUNT, "containers": [ { "name": "crane", # debug is needed for "sh" to be available - "image": "gcr.io/go-containerregistry/crane:debug", + "image": "gcr.io/go-containerregistry/krane:debug", "command": ["sh"], "args": ["-c", " && ".join(run_cmds)], "volumeMounts": [ @@ -318,17 +338,6 @@ def _create_push_job( "readOnly": True, }, ], - "resources": { - "requests": { - "memory": "2Gi", - "cpu": "1", - }, - "limits": { - "memory": "4Gi", - "cpu": "2", - "ephemeral-storage": "1Gi", - }, - }, } ], "volumes": [ @@ -353,14 +362,12 @@ def _create_push_job( return KubeUtils.create_or_get(job) def _create_push_secret( - self, id: str, url: str, username: str, password: str + self, + job_id: str, + registry_auths: list[tuple[str, str, str]], ) -> Secret: return KubeUtils.create_dockerconfig_secret( - secret_name=f"push-secret-{id}", + secret_name=f"push-secret-{job_id}", component=KubernetesBuilder.COMPONENT, - registries=[ - # TODO: authorize internal registry? 
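# A sketch of how a caller might drive the builder above. Assumptions: this
# runs where kr8s can reach the Kubernetes API, and the image tag, registry
# URL and Dockerfile below are illustrative, not fixed by this module.
from syft.custom_worker.builder_k8s import KubernetesBuilder

builder = KubernetesBuilder()

tag = "registry.example.com/openmined/custom-worker:0.9.6"  # hypothetical tag
dockerfile = "FROM openmined/syft-backend:0.9.6\nRUN pip install pandas\n"

# With USE_INTERNAL_REGISTRY=true, Kaniko pushes to INTERNAL_REGISTRY_HOST and
# push_image() later copies the digest out with krane; with it disabled,
# Kaniko pushes straight to `tag` and push_image() returns early.
build_result = builder.build_image(tag=tag, dockerfile=dockerfile)
print(build_result)

push_result = builder.push_image(
    tag=tag,
    registry_url="registry.example.com",
    username=None,  # None/None -> rely on workload identity, per the comment above
    password=None,
)
if push_result.exit_code != 0:
    raise RuntimeError(push_result.logs)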
- (INTERNAL_REGISTRY_HOST, "username", id), - (url, username, password), - ], + registries=registry_auths, ) diff --git a/packages/syft/src/syft/custom_worker/builder_types.py b/packages/syft/src/syft/custom_worker/builder_types.py index 386e0c5539b..c7f34ec395d 100644 --- a/packages/syft/src/syft/custom_worker/builder_types.py +++ b/packages/syft/src/syft/custom_worker/builder_types.py @@ -29,6 +29,10 @@ class ImagePushResult(BaseModel): logs: str exit_code: int + @property + def has_failed(self) -> bool: + return self.exit_code != 0 + class BuilderBase(ABC): @abstractmethod @@ -46,9 +50,9 @@ def build_image( def push_image( self, tag: str, - username: str, - password: str, registry_url: str, + username: str | None = None, + password: str | None = None, **kwargs: Any, ) -> ImagePushResult: pass diff --git a/packages/syft/src/syft/custom_worker/config.py b/packages/syft/src/syft/custom_worker/config.py index 5e9522c2b88..6410c990eac 100644 --- a/packages/syft/src/syft/custom_worker/config.py +++ b/packages/syft/src/syft/custom_worker/config.py @@ -14,9 +14,10 @@ # relative from ..serde.serializable import serializable -from ..service.response import SyftError +from ..serde.serialize import _serialize from ..service.response import SyftSuccess from ..types.base import SyftBaseModel +from ..types.errors import SyftException from .utils import iterator_to_string PYTHON_DEFAULT_VER = "3.12" @@ -79,11 +80,16 @@ def merged_custom_cmds(self, sep: str = ";") -> str: return sep.join(self.custom_cmds) +@serializable(canonical_name="WorkerConfig", version=1) class WorkerConfig(SyftBaseModel): pass + def hash(self) -> str: + _bytes = _serialize(self, to_bytes=True, for_hashing=True) + return sha256(_bytes).digest().hex() -@serializable() + +@serializable(canonical_name="CustomWorkerConfig", version=1) class CustomWorkerConfig(WorkerConfig): build: CustomBuildConfig version: str = "1" @@ -107,7 +113,7 @@ def get_signature(self) -> str: return sha256(self.json(sort_keys=True).encode()).hexdigest() -@serializable() +@serializable(canonical_name="PrebuiltWorkerConfig", version=1) class PrebuiltWorkerConfig(WorkerConfig): # tag that is already built and pushed in some registry tag: str @@ -122,8 +128,11 @@ def __str__(self) -> str: def set_description(self, description_text: str) -> None: self.description = description_text + def __hash__(self) -> int: + return hash(self.tag) + -@serializable() +@serializable(canonical_name="DockerWorkerConfig", version=1) class DockerWorkerConfig(WorkerConfig): dockerfile: str file_name: str | None = None @@ -164,19 +173,26 @@ def __str__(self) -> str: def set_description(self, description_text: str) -> None: self.description = description_text - def test_image_build(self, tag: str, **kwargs: Any) -> SyftSuccess | SyftError: + def test_image_build(self, tag: str, **kwargs: Any) -> SyftSuccess: try: with contextlib.closing(docker.from_env()) as client: if not client.ping(): - return SyftError( + raise SyftException( "Cannot reach docker server. Please check if docker is running." 
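# Small sketch of the config hashing added above: WorkerConfig.hash() gives a
# stable fingerprint of the serialized config (handy for de-duplication),
# while PrebuiltWorkerConfig also hashes by its image tag. The tag and
# description below are illustrative.
import syft as sy

prebuilt = sy.PrebuiltWorkerConfig(
    tag="docker.io/openmined/syft-backend:0.9.6",
    description="prebuilt backend image",
)
print(prebuilt.hash())  # sha256 hex digest of the serialized config
print(hash(prebuilt))   # __hash__ delegates to hash(self.tag)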
) kwargs["fileobj"] = io.BytesIO(self.dockerfile.encode("utf-8")) _, logs = client.images.build( tag=tag, + rm=True, + labels={"orgs.openmined.syft": "Test image build"}, **kwargs, ) return SyftSuccess(message=iterator_to_string(iterator=logs)) except Exception as e: - return SyftError(message=f"Failed to build: {e}") + # stdlib + import traceback + + raise SyftException( + public_message=f"Failed to build: {e} {traceback.format_exc()}" + ) diff --git a/packages/syft/src/syft/custom_worker/k8s.py b/packages/syft/src/syft/custom_worker/k8s.py index 54224456e58..319b691db50 100644 --- a/packages/syft/src/syft/custom_worker/k8s.py +++ b/packages/syft/src/syft/custom_worker/k8s.py @@ -9,20 +9,25 @@ # third party import kr8s from kr8s.objects import APIObject +from kr8s.objects import ConfigMap from kr8s.objects import Pod from kr8s.objects import Secret +from kr8s.objects import Service from pydantic import BaseModel from typing_extensions import Self -# Time after which Job will be deleted -JOB_COMPLETION_TTL = 60 - # Kubernetes namespace KUBERNETES_NAMESPACE = os.getenv("K8S_NAMESPACE", "syft") # Kubernetes runtime flag IN_KUBERNETES = os.getenv("CONTAINER_HOST") == "k8s" +# skip pushing to internal registry +USE_INTERNAL_REGISTRY = os.getenv("USE_INTERNAL_REGISTRY", "true").lower() == "true" + +# Kaniko version +KANIKO_VERSION = os.getenv("KANIKO_VERSION", "latest") + # Internal registry URL DEFAULT_INTERNAL_REGISTRY = f"registry.{KUBERNETES_NAMESPACE}.svc.cluster.local" INTERNAL_REGISTRY_HOST = os.getenv("INTERNAL_REGISTRY_HOST", DEFAULT_INTERNAL_REGISTRY) @@ -120,14 +125,11 @@ def resolve_pod(client: kr8s.Api, pod: str | Pod) -> Pod | None: @staticmethod def get_logs(pods: list[Pod]) -> str: - """Combine and return logs for all the pods as string""" - logs = [] - for pod in pods: - logs.append(f"----------Logs for pod={pod.metadata.name}----------") - for log in pod.logs(): - logs.append(log) - - return "\n".join(logs) + """Combine and return logs for all the pods as a single string.""" + return "\n".join( + f"----------Logs for pod={pod.metadata.name}----------\n{''.join(pod.logs())}" + for pod in pods + ) @staticmethod def get_pod_status(pod: Pod) -> PodStatus | None: @@ -150,11 +152,11 @@ def get_pod_env(pod: Pod) -> list[dict] | None: @staticmethod def get_container_exit_code(pods: list[Pod]) -> list[int]: """Return the exit codes of all the containers in the given pods.""" - exit_codes = [] - for pod in pods: - for container_status in pod.status.containerStatuses: - exit_codes.append(container_status.state.terminated.exitCode) - return exit_codes + return [ + container_status.state.terminated.exitCode + for pod in pods + for container_status in pod.status.containerStatuses + ] @staticmethod def get_container_exit_message(pods: list[Pod]) -> str | None: @@ -171,6 +173,25 @@ def b64encode_secret(data: str) -> str: """Convert the data to base64 encoded string for Secret.""" return base64.b64encode(data.encode()).decode() + @staticmethod + def get_configmap(client: kr8s.Api, name: str) -> ConfigMap | None: + config_map = client.get("configmaps", name) + return config_map[0] if config_map else None + + @staticmethod + def get_service(client: kr8s.Api, name: str) -> Service | None: + service = client.get("services", name) + return service[0] if service else None + + @staticmethod + def update_configmap( + config_map: ConfigMap, + patch: dict, + ) -> None: + existing_data = config_map.raw + existing_data.update(patch) + config_map.patch(patch=existing_data) + @staticmethod def 
create_dockerconfig_secret( secret_name: str, diff --git a/packages/syft/src/syft/custom_worker/runner_k8s.py b/packages/syft/src/syft/custom_worker/runner_k8s.py index 81f18c02983..e320bae8e94 100644 --- a/packages/syft/src/syft/custom_worker/runner_k8s.py +++ b/packages/syft/src/syft/custom_worker/runner_k8s.py @@ -13,7 +13,7 @@ from .k8s import get_kr8s_client JSONPATH_AVAILABLE_REPLICAS = "{.status.availableReplicas}" -CREATE_POOL_TIMEOUT_SEC = 60 +CREATE_POOL_TIMEOUT_SEC = 380 SCALE_POOL_TIMEOUT_SEC = 60 @@ -28,19 +28,21 @@ def create_pool( replicas: int = 1, env_vars: list[dict] | None = None, mount_secrets: dict | None = None, - reg_username: str | None = None, - reg_password: str | None = None, + registry_username: str | None = None, + registry_password: str | None = None, reg_url: str | None = None, + pod_annotations: dict[str, str] | None = None, + pod_labels: dict[str, str] | None = None, **kwargs: Any, ) -> StatefulSet: try: # create pull secret if registry credentials are passed pull_secret = None - if reg_username and reg_password and reg_url: + if registry_username and registry_password and reg_url: pull_secret = self._create_image_pull_secret( pool_name, - reg_username, - reg_password, + registry_username, + registry_password, reg_url, ) @@ -52,6 +54,8 @@ def create_pool( env_vars=env_vars, mount_secrets=mount_secrets, pull_secret=pull_secret, + pod_annotations=pod_annotations, + pod_labels=pod_labels, **kwargs, ) @@ -60,8 +64,6 @@ def create_pool( f"jsonpath='{JSONPATH_AVAILABLE_REPLICAS}'={replicas}", timeout=CREATE_POOL_TIMEOUT_SEC, ) - except Exception: - raise finally: if pull_secret: pull_secret.delete(propagation_policy="Foreground") @@ -71,12 +73,13 @@ def create_pool( def scale_pool(self, pool_name: str, replicas: int) -> StatefulSet | None: deployment = self.get_pool(pool_name) + timeout = max(SCALE_POOL_TIMEOUT_SEC * replicas, SCALE_POOL_TIMEOUT_SEC) if not deployment: return None deployment.scale(replicas) deployment.wait( f"jsonpath='{JSONPATH_AVAILABLE_REPLICAS}'={replicas}", - timeout=SCALE_POOL_TIMEOUT_SEC, + timeout=timeout, ) return deployment @@ -93,9 +96,11 @@ def delete_pool(self, pool_name: str) -> bool: selector = {"app.kubernetes.io/component": pool_name} for _set in self.client.get("statefulsets", label_selector=selector): _set.delete(propagation_policy="Foreground") + _set.wait(conditions="delete") for _secret in self.client.get("secrets", label_selector=selector): _secret.delete(propagation_policy="Foreground") + _secret.wait(conditions="delete") return True @@ -128,8 +133,8 @@ def get_pod_env_vars(self, pod: str | Pod) -> list[dict] | None: def _create_image_pull_secret( self, pool_name: str, - reg_username: str, - reg_password: str, + registry_username: str, + registry_password: str, reg_url: str, **kwargs: Any, ) -> Secret: @@ -137,7 +142,7 @@ def _create_image_pull_secret( secret_name=f"pull-secret-{pool_name}", component=pool_name, registries=[ - (reg_url, reg_username, reg_password), + (reg_url, registry_username, registry_password), ], ) @@ -149,6 +154,8 @@ def _create_stateful_set( env_vars: list[dict] | None = None, mount_secrets: dict | None = None, pull_secret: Secret | None = None, + pod_annotations: dict[str, str] | None = None, + pod_labels: dict[str, str] | None = None, **kwargs: Any, ) -> StatefulSet: """Create a stateful set for a pool""" @@ -184,6 +191,16 @@ def _create_stateful_set( } ] + default_pod_labels = { + "app.kubernetes.io/name": KUBERNETES_NAMESPACE, + "app.kubernetes.io/component": pool_name, + } + + if 
isinstance(pod_labels, dict): + pod_labels = {**default_pod_labels, **pod_labels} + else: + pod_labels = default_pod_labels + stateful_set = StatefulSet( { "metadata": { @@ -203,12 +220,12 @@ def _create_stateful_set( }, "template": { "metadata": { - "labels": { - "app.kubernetes.io/name": KUBERNETES_NAMESPACE, - "app.kubernetes.io/component": pool_name, - } + "labels": pod_labels, + "annotations": pod_annotations, }, "spec": { + # TODO: make this configurable + "serviceAccountName": "backend-service-account", "containers": [ { "name": pool_name, @@ -216,6 +233,23 @@ def _create_stateful_set( "image": tag, "env": env_vars, "volumeMounts": volume_mounts, + "livenessProbe": { + "httpGet": { + "path": "/api/v2/metadata?probe=livenessProbe", + "port": 80, + }, + "periodSeconds": 15, + "timeoutSeconds": 5, + "failureThreshold": 3, + }, + "startupProbe": { + "httpGet": { + "path": "/api/v2/metadata?probe=startupProbe", + "port": 80, + }, + "failureThreshold": 30, + "periodSeconds": 10, + }, } ], "volumes": volumes, diff --git a/packages/syft/src/syft/custom_worker/workerpool_upgrade_utils.py b/packages/syft/src/syft/custom_worker/workerpool_upgrade_utils.py new file mode 100644 index 00000000000..b1c3ae3308a --- /dev/null +++ b/packages/syft/src/syft/custom_worker/workerpool_upgrade_utils.py @@ -0,0 +1,236 @@ +# stdlib +from pathlib import Path +from typing import cast + +# third party +from IPython.display import display + +# syft absolute +import syft as sy + +# relative +from ..client.client import SyftClient +from ..service.migration.object_migration_state import MigrationData +from ..service.worker.image_identifier import SyftWorkerImageIdentifier +from ..service.worker.worker_image import SyftWorkerImage +from ..service.worker.worker_pool import WorkerPool + + +def upgrade_custom_workerpools( + client: SyftClient, + migration_data: str | Path | MigrationData, + mode: str = "manual", +) -> None: + """Upgrade custom workerpools to the new syft version + + Args: + client (SyftClient): Admin client to upgrade workerpools with + migration_data (str | Path | MigrationData): Path to migration data or MigrationData object + mode (str, optional): if "auto" the upgrade will be done automatically. "auto" assumes + all images and tags use Syft versioning. Defaults to "manual". 
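        Example:
            A minimal sketch, assuming an admin client for the upgraded
            datasite and a migration blob exported from the previous
            deployment (the URL, credentials and file name are illustrative,
            not part of this API):

                import syft as sy

                admin_client = sy.login(
                    url="http://localhost:8080",
                    email="info@openmined.org",
                    password="changethis",
                )
                upgrade_custom_workerpools(
                    client=admin_client,
                    migration_data="./migration.blob",
                    mode="auto",
                )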
+ + Raises: + ValueError: if mode is not "manual" or "auto" + """ + print("This is a utility to upgrade workerpools to the new syft version") + print("If an upgrade fails, it is always possible to start the workerpool manually") + + if mode not in ["manual", "auto"]: + raise ValueError("mode must be either 'manual' or 'auto'") + + if isinstance(migration_data, str | Path): + print("loading migration data...") + migration_data = MigrationData.from_file(migration_data) + + # mypy does not recognize instance check for str | Path + migration_data = cast(MigrationData, migration_data) + worker_pools = migration_data.get_items_by_canonical_name( + WorkerPool.__canonical_name__ + ) + num_upgraded = 0 + for pool in worker_pools: + is_upgraded = upgrade_workerpool(client, pool, migration_data, mode) + if is_upgraded: + num_upgraded += 1 + print() + + print(f"Upgraded {num_upgraded} workerpools to the new syft version") + print("Please verify your upgraded pools with `client.worker_pools`") + + +def upgrade_workerpool( + client: SyftClient, + pool: WorkerPool, + migration_data: MigrationData, + mode: str = "manual", +) -> bool: + if pool.name == migration_data.default_pool_name: + print("Skipping default pool, this pool has already been upgraded") + return False + + print(f"Upgrading workerpool {pool.name}") + + images = migration_data.get_items_by_canonical_name( + SyftWorkerImage.__canonical_name__ + ) + image_id = pool.image_id + old_image: SyftWorkerImage = [img for img in images if img.id == image_id][0] + + if old_image.is_prebuilt: + new_image = upgrade_prebuilt_image(client, old_image, mode) + else: + new_image = upgrade_syft_image(client, old_image, mode) + + if not new_image: + print(f"Failed to upgrade workerpool {pool.name}, could not build new image") + return False + + print(f"starting new pool `{pool.name}` with {pool.max_count} workers") + try: + result = client.api.services.worker_pool.launch( + pool_name=pool.name, + image_uid=new_image.id, + num_workers=pool.max_count, + ) + display(result) + return True + except Exception as e: + display(e) + print(f"failed to start workerpool {pool.name}, please start the pool manually") + return False + + +def upgrade_prebuilt_image( + client: SyftClient, + old_image: SyftWorkerImage, + mode: str = "manual", +) -> SyftWorkerImage | None: + print(f"Found outdated prebuilt worker image `{old_image.image_identifier}`") + if mode == "auto": + new_syft_version = client.metadata.syft_version # type: ignore + new_identifier = upgrade_image_identifier( + old_image.image_identifier, new_syft_version + ) + new_image_tag = new_identifier.full_name_with_tag + else: + new_image_tag_or_none = get_tag_from_input() + if not new_image_tag_or_none: + return None + new_image_tag = new_image_tag_or_none + + new_config = sy.PrebuiltWorkerConfig( + tag=new_image_tag, description=old_image.config.description + ) + + print("submitting new prebuilt image...") + try: + result = client.api.services.worker_image.submit(worker_config=new_config) + display(result) + return result.value + except Exception as e: + print("could not submit new image") + display(e) + return None + + +def upgrade_syft_image( + client: SyftClient, + old_image: SyftWorkerImage, + mode: str = "manual", +) -> SyftWorkerImage | None: + old_identifier = old_image.image_identifier + old_config = old_image.config + new_syft_version = client.metadata.syft_version # type: ignore + + if old_identifier is None: + raise ValueError("old image does not have an image identifier") + + print(f"Found outdated 
custom worker image `{old_image.image_identifier}`") + + new_dockerfile = update_dockerfile_baseimage_tag( + old_config.dockerfile, new_syft_version + ) + + if mode == "manual": + confirm = confirm_dockerfile_update(old_config.dockerfile, new_dockerfile) + if not confirm: + return None + + # NOTE do not copy filename, it does not match the new dockerfile + new_config = sy.DockerWorkerConfig( + dockerfile=new_dockerfile, description=old_config.description, file_name=None + ) + new_identifier = upgrade_image_identifier(old_identifier, new_syft_version) + print( + f"Updating image tag from {old_identifier.repo_with_tag} to {new_identifier.repo_with_tag}" + ) + + print("submitting new image...") + try: + submit_result = client.api.services.worker_image.submit( + worker_config=new_config + ) + custom_image = submit_result.value + except Exception as e: + print("could not submit new image") + display(e) + return None + + print("building new image...") + try: + client.api.services.worker_image.build( + image_uid=custom_image.id, + tag=new_identifier.repo_with_tag, + ) + except Exception as e: + print("could not build new image") + display(e) + return None + + return custom_image + + +def get_tag_from_input() -> str | None: + new_image_tag = input( + "Please enter the tag for the upgraded image. Type 'skip' to skip upgrading this workerpool" + ) + if new_image_tag.lower() == "skip": + return None + return new_image_tag + + +def update_dockerfile_baseimage_tag(old_dockerfile: str, new_tag: str) -> str: + is_updated = False + new_dockerfile_ = [] + for line in old_dockerfile.splitlines(): + if line.startswith("FROM openmined/syft-backend:"): + updated_line = f"FROM openmined/syft-backend:{new_tag}" + new_dockerfile_.append(updated_line) + is_updated = True + else: + new_dockerfile_.append(line) + + if not is_updated: + raise ValueError("Could not update baseimage") + return "\n".join(new_dockerfile_) + + +def confirm_dockerfile_update(old_dockerfile: str, new_dockerfile: str) -> bool: + print("updated your dockerfile baseimage:") + print("- Old dockerfile ----") + print(old_dockerfile) + print("- New dockerfile ----") + print(new_dockerfile) + print("---------------------") + confirmation = input("is this correct? 
[y/n]") + if confirmation.lower() not in ["y", "n"]: + return confirm_dockerfile_update(old_dockerfile, new_dockerfile) + return confirmation.lower() == "y" + + +def upgrade_image_identifier( + old_identifier: SyftWorkerImageIdentifier, new_tag: str +) -> SyftWorkerImageIdentifier: + return SyftWorkerImageIdentifier( + registry=old_identifier.registry, repo=old_identifier.repo, tag=new_tag + ) diff --git a/packages/syft/src/syft/deployment_type.py b/packages/syft/src/syft/deployment_type.py new file mode 100644 index 00000000000..be17763780d --- /dev/null +++ b/packages/syft/src/syft/deployment_type.py @@ -0,0 +1,21 @@ +# Can also be specified by the environment variable +# ORCHESTRA_DEPLOYMENT_TYPE +# stdlib +from enum import Enum + +# relative +from .serde.serializable import serializable +from .types.syft_object import SYFT_OBJECT_VERSION_1 + + +@serializable() +class DeploymentType(str, Enum): + __canonical_name__ = "DeploymentType" + __version__ = SYFT_OBJECT_VERSION_1 + + PYTHON = "python" + REMOTE = "remote" + + def __str__(self) -> str: + # Use values when transforming ServerType to str + return self.value diff --git a/packages/syft/src/syft/dev/__init__.py b/packages/syft/src/syft/dev/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft/src/syft/dev/prof.py b/packages/syft/src/syft/dev/prof.py new file mode 100644 index 00000000000..a6aeffc5780 --- /dev/null +++ b/packages/syft/src/syft/dev/prof.py @@ -0,0 +1,47 @@ +# stdlib +import contextlib +import os +import signal +import subprocess # nosec +import tempfile +import time + + +@contextlib.contextmanager +def pyspy() -> None: # type: ignore + """Profile a block of code using py-spy. Intended for development purposes only. + + Example: + ``` + with pyspy(): + # do some work + a = [i for i in range(1000000)] + ``` + """ + fd, fname = tempfile.mkstemp(".svg") + os.close(fd) + + command = [ + "sudo", + "-S", + "py-spy", + "record", + "-r", + "100", + "-o", + fname, + "--pid", + str(os.getpid()), + ] + process = subprocess.Popen(command, preexec_fn=os.setsid) # nosec + + start_time = time.time() + yield process + end_time = time.time() + + print(f"Execution time: {end_time - start_time}") + try: + os.killpg(os.getpgid(process.pid), signal.SIGINT) + os.chmod(fname, 0o444) + except Exception as e: + print(f"Error: {e}") diff --git a/packages/syft/src/syft/exceptions/exception.py b/packages/syft/src/syft/exceptions/exception.py deleted file mode 100644 index bad097bdb81..00000000000 --- a/packages/syft/src/syft/exceptions/exception.py +++ /dev/null @@ -1,28 +0,0 @@ -# stdlib - -# third party -from typing_extensions import Self - -# relative -from ..service.context import NodeServiceContext -from ..service.response import SyftError -from ..service.user.user_roles import ServiceRole - - -class PySyftException(Exception): - """Base class for all PySyft exceptions.""" - - def __init__(self, message: str, roles: list[ServiceRole] | None = None): - super().__init__(message) - self.message = message - self.roles = roles if roles else [ServiceRole.ADMIN] - - def raise_with_context(self, context: NodeServiceContext) -> Self: - self.context = context - return self - - def handle(self) -> SyftError: - # if self.context and self.context.role in self.roles: - return SyftError(message=self.message) - # else: - # return SyftError(message="Access denied to exception message.") diff --git a/packages/syft/src/syft/exceptions/user.py b/packages/syft/src/syft/exceptions/user.py deleted file mode 100644 index 
59147e29522..00000000000 --- a/packages/syft/src/syft/exceptions/user.py +++ /dev/null @@ -1,9 +0,0 @@ -# stdlib - -# relative -from ..service.user.user_roles import ServiceRole -from .exception import PySyftException - -UserAlreadyExistsException = PySyftException( - message="User already exists", roles=[ServiceRole.ADMIN] -) diff --git a/packages/syft/src/syft/external/__init__.py b/packages/syft/src/syft/external/__init__.py deleted file mode 100644 index b03c6594322..00000000000 --- a/packages/syft/src/syft/external/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -"""This module contains all the external libraries that Syft supports. -We lazy load the external libraries when they are needed. -""" - -# stdlib -import importlib -import os -from typing import Any - -# relative -from ..service.response import SyftError -from ..service.response import SyftSuccess -from ..service.service import AbstractService -from ..util.util import str_to_bool - -# Contains all the external libraries that Syft supports. -# Used to check if a library is supported -# if the external library is not installed, we prompt the user -# to install it with the pip package name. - -OBLV_ENABLED = str_to_bool(os.getenv("OBLV_ENABLED", "false")) - -EXTERNAL_LIBS = { - "oblv": { - "pip_package_name": "oblv-ctl", - "module_name": "oblv_ctl", - } -} - - -def OblvServiceProvider(*args: Any, **kwargs: Any) -> type[AbstractService] | None: - if OBLV_ENABLED: - # relative - from .oblv.oblv_service import OblvService - - return OblvService(*args, **kwargs) - return None - - -def package_exists(package_name: str) -> bool: - try: - importlib.import_module(package_name) - return True - except ImportError: - return False - - -def enable_external_lib(lib_name: str) -> SyftSuccess | SyftError: - if lib_name in EXTERNAL_LIBS: - syft_module_name = f"syft.external.{lib_name}" - pip_package_name = EXTERNAL_LIBS[lib_name]["pip_package_name"] - if not package_exists(EXTERNAL_LIBS[lib_name]["module_name"]): - return SyftError( - message=f"Package: {pip_package_name} for library: {lib_name} not installed.\n" - + f"Kindly install it with 'pip install {pip_package_name}'" - ) - - importlib.import_module(syft_module_name) - return SyftSuccess(message=f"Successfully enabled external library: {lib_name}") - else: - return SyftError( - message=f"External library {lib_name} not supported. 
\n" - + f"Supported external libraries are: {list(EXTERNAL_LIBS.keys())}" - ) diff --git a/packages/syft/src/syft/external/oblv/__init__.py b/packages/syft/src/syft/external/oblv/__init__.py deleted file mode 100644 index 1fa96bd120e..00000000000 --- a/packages/syft/src/syft/external/oblv/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# relative -from ...serde.deserialize import _deserialize -from ...serde.serializable import recursive_serde_register -from ...serde.serialize import _serialize -from .auth import login # noqa: F401 -from .deployment import create_deployment # noqa: F401 -from .oblv_proxy import check_oblv_proxy_installation_status # noqa: F401 -from .oblv_proxy import create_oblv_key_pair # noqa: F401 -from .oblv_proxy import get_oblv_public_key # noqa: F401 -from .oblv_proxy import install_oblv_proxy # noqa: F401 - -try: - # third party - from oblv_ctl.oblv_client import OblvClient - - # Oblivious Client serde - recursive_serde_register( - OblvClient, - serialize=lambda x: _serialize([x.token, x.oblivious_user_id], to_bytes=True), - deserialize=lambda x: OblvClient(*_deserialize(x, from_bytes=True)), - ) - -except Exception: # nosec - pass diff --git a/packages/syft/src/syft/external/oblv/auth.py b/packages/syft/src/syft/external/oblv/auth.py deleted file mode 100644 index 0bb6b9aec78..00000000000 --- a/packages/syft/src/syft/external/oblv/auth.py +++ /dev/null @@ -1,13 +0,0 @@ -# stdlib -from getpass import getpass -from typing import Any - -# third party -from oblv_ctl import authenticate - - -def login(apikey: str | None = None) -> Any: - if apikey is None: - apikey = getpass("Please provide your oblv API_KEY to login:") - - return authenticate(apikey) diff --git a/packages/syft/src/syft/external/oblv/constants.py b/packages/syft/src/syft/external/oblv/constants.py deleted file mode 100644 index 444c915f986..00000000000 --- a/packages/syft/src/syft/external/oblv/constants.py +++ /dev/null @@ -1,10 +0,0 @@ -INFRA = "m5.4xlarge" -REPO_OWNER = "OpenMined" -REPO_NAME = "syft-enclave" -REF = "dev" -REGION = "us-west-2" -VCS = "github" -VISIBILITY = "private" -REF_TYPE = "branch" -LOCAL_MODE = True -OBLV_LOCALHOST_PORT = 3030 diff --git a/packages/syft/src/syft/external/oblv/deployment.py b/packages/syft/src/syft/external/oblv/deployment.py deleted file mode 100644 index 23750e28577..00000000000 --- a/packages/syft/src/syft/external/oblv/deployment.py +++ /dev/null @@ -1,140 +0,0 @@ -# stdlib -from typing import Any - -# third party -from oblv_ctl import OblvClient -from oblv_ctl.models import CreateDeploymentInput -import yaml - -# relative -from ...util.util import bcolors -from .auth import login -from .constants import INFRA -from .constants import REF -from .constants import REF_TYPE -from .constants import REGION -from .constants import REPO_NAME -from .constants import REPO_OWNER -from .constants import VCS -from .constants import VISIBILITY -from .deployment_client import DeploymentClient -from .exceptions import OblvKeyNotFoundError -from .oblv_proxy import create_oblv_key_pair -from .oblv_proxy import get_oblv_public_key - -SUPPORTED_REGION_LIST = ["us-east-1", "us-west-2", "eu-central-1", "eu-west-2"] - -SUPPORTED_INFRA = [ - "c5.xlarge", - "m5.xlarge", - "r5.xlarge", - "c5.2xlarge", - "m5.2xlarge", - "m5.4xlarge", -] - - -def create_deployment( - domain_clients: list, - deployment_name: str | None = None, - key_name: str | None = None, - oblv_client: OblvClient | None = None, - infra: str = INFRA, - region: str = REGION, -) -> DeploymentClient: - """Creates a new 
deployment with predefined codebase - Args: - client : Oblivious Client. - domain_clients: List of domain_clients. - deployment_name: Unique name for the deployment. - key_name: User's key to be used for deployment creation. - infra: Represent the AWS infrastructure to be used. Default is "m5.2xlarge". The available options are\n - - "c5.xlarge" {'CPU':4, 'RAM':8, 'Total/hr':0.68}\n - - "m5.xlarge" {'CPU':4, 'RAM':16, 'Total/hr':0.768}\n - - "r5.xlarge" {'CPU':4, 'RAM':32, 'Total/hr':1.008}\n - - "c5.2xlarge" {'CPU':8, 'RAM':16, 'Total/hr':1.36}\n - - "m5.2xlarge" {'CPU':8, 'RAM':32, 'Total/hr':1.536}\n - As of now, PySyft only works with RAM >= 32 - region: AWS Region to be deployed in. Default is "us-east-1". The available options are \n - - "us-east-1" : "US East (N. Virginia)",\n - - "us-west-2" : "US West (Oregon)",\n - - "eu-central-1" : "Europe (Frankfurt)",\n - - "eu-west-2" : "Europe (London)" - - Returns: - resp: Deployment Client Object - """ - if not oblv_client: - oblv_client = login() - if deployment_name is None: - deployment_name = input("Kindly provide deployment name") - if key_name is None: - key_name = input("Please provide your key name") - - while not SUPPORTED_INFRA.__contains__(infra): - infra = input(f"Provide infra from one of the following - {SUPPORTED_INFRA}") - - while not SUPPORTED_REGION_LIST.__contains__(region): - region = input( - f"Provide region from one of the following - {SUPPORTED_REGION_LIST}" - ) - - try: - user_public_key = get_oblv_public_key(key_name) - except FileNotFoundError: - user_public_key = create_oblv_key_pair(key_name) - print( - bcolors.green(bcolors.bold("Created")) - + f" a new public/private key pair with key_name: {key_name}" - ) - except Exception as e: - raise Exception(e) - build_args: dict[str, Any] = { - "auth": {}, - "users": {"domain": [], "user": []}, - "additional_args": {}, - "infra_reqs": infra, - "runtime_args": "", - } - users = [] - runtime_args: list[str] = [] - for domain_client in domain_clients: - try: - users.append( - { - "user_name": domain_client.name, - "public key": domain_client.api.services.oblv.get_public_key(), - } - ) - except OblvKeyNotFoundError: - raise OblvKeyNotFoundError( - f"Oblv Public Key not found for {domain_client.name}" - ) - - build_args["runtime_args"] = yaml.dump({"outbound": runtime_args}) - build_args["users"]["domain"] = users - profile = oblv_client.user_profile() - users = [{"user_name": profile.oblivious_login, "public key": user_public_key}] - build_args["users"]["user"] = users - depl_input = CreateDeploymentInput( - owner=REPO_OWNER, - repo=REPO_NAME, - account_type=VCS, - ref=REF, - ref_type=REF_TYPE, - region_name=region, - deployment_name=deployment_name, - visibility=VISIBILITY, - is_dev_env=True, - tags=[], - build_args=build_args, - ) - # By default the deployment is in PROD mode - res = oblv_client.create_deployment(depl_input) - result = DeploymentClient( - deployment_id=res.deployment_id, - oblv_client=oblv_client, - domain_clients=domain_clients, - key_name=key_name, - ) - return result diff --git a/packages/syft/src/syft/external/oblv/deployment_client.py b/packages/syft/src/syft/external/oblv/deployment_client.py deleted file mode 100644 index 4ea10db2602..00000000000 --- a/packages/syft/src/syft/external/oblv/deployment_client.py +++ /dev/null @@ -1,374 +0,0 @@ -# future -from __future__ import annotations - -# stdlib -from collections.abc import Callable -from datetime import datetime -import os -from signal import SIGTERM -import subprocess # nosec -import sys 
-import time -from typing import Any -from typing import TYPE_CHECKING - -# third party -from oblv_ctl import OblvClient -from pydantic import field_validator -import requests - -# relative -from ...client.api import SyftAPI -from ...client.client import SyftClient -from ...client.client import login -from ...client.client import login_as_guest -from ...client.enclave_client import EnclaveMetadata -from ...node.credentials import SyftSigningKey -from ...serde.serializable import serializable -from ...service.response import SyftError -from ...types.uid import UID -from ...util.util import bcolors -from .constants import LOCAL_MODE -from .exceptions import OblvEnclaveError -from .exceptions import OblvUnAuthorizedError -from .oblv_proxy import check_oblv_proxy_installation_status - -if TYPE_CHECKING: - # relative - from ...service.code.user_code import SubmitUserCode - - -@serializable() -class OblvMetadata(EnclaveMetadata): - """Contains Metadata to connect to Oblivious Enclave""" - - deployment_id: str | None = None - oblv_client: OblvClient | None = None - - @field_validator("deployment_id") - @classmethod - def check_valid_deployment_id(cls, deployment_id: str) -> str: - if not deployment_id and not LOCAL_MODE: - raise ValueError( - f"Deployment ID should be a valid string: {deployment_id}" - + "in cloud deployment of enclave" - + "For testing set the LOCAL_MODE variable in constants.py" - ) - return deployment_id - - @field_validator("oblv_client") - @classmethod - def check_valid_oblv_client(cls, oblv_client: OblvClient) -> OblvClient: - if not oblv_client and not LOCAL_MODE: - raise ValueError( - f"Oblivious Client should be a valid client: {oblv_client}" - + "in cloud deployment of enclave" - + "For testing set the LOCAL_MODE variable in constants.py" - ) - return oblv_client - - -class DeploymentClient: - deployment_id: str - key_name: str - domain_clients: list[SyftClient] # List of domain client objects - oblv_client: OblvClient = None - __conn_string: str - __logs: Any - __process: Any - __enclave_client: SyftClient | None - - def __init__( - self, - domain_clients: list[SyftClient], - deployment_id: str, - oblv_client: OblvClient | None = None, - key_name: str | None = None, - api: SyftAPI | None = None, - ): - if not domain_clients: - raise Exception( - "domain_clients should be populated with valid domain nodes" - ) - self.deployment_id = deployment_id - self.key_name: str | None = key_name - self.oblv_client = oblv_client - self.domain_clients = domain_clients - self.__conn_string = "" - self.__process = None - self.__logs = None - self._api = api - self.__enclave_client: SyftClient | None = None - - def make_request_to_enclave( - self, - request_method: Callable, - connection_string: str, - params: dict | None = None, - files: dict | None = None, - data: dict | None = None, - json: dict | None = None, - ) -> Any: - header = {} - if LOCAL_MODE: - header["x-oblv-user-name"] = "enclave_test" - header["x-oblv-user-role"] = "user" - else: - depl = self.oblv_client.deployment_info(self.deployment_id) - if depl.is_deleted: - raise Exception( - "User cannot connect to this deployment, as it is no longer available." 
- ) - return request_method( - connection_string, - headers=header, - params=params, - files=files, - data=data, - json=json, - ) - - def set_conn_string(self, url: str) -> None: - self.__conn_string = url - - def initiate_connection(self, connection_port: int = 3030) -> None: - if LOCAL_MODE: - self.__conn_string = f"http://127.0.0.1:{connection_port}" - return - check_oblv_proxy_installation_status() - self.close_connection() # To close any existing connections - public_file_name = os.path.join( - os.path.expanduser("~"), - ".ssh", - self.key_name, - self.key_name + "_public.der", - ) - private_file_name = os.path.join( - os.path.expanduser("~"), - ".ssh", - self.key_name, - self.key_name + "_private.der", - ) - log_file_name = os.path.join( - os.path.expanduser("~"), - ".oblv_syft_logs", - "proxy_logs_" + datetime.now().strftime("%d_%m_%Y_%H_%M_%S") + ".log", - ) - # Creating directory if not exist - os.makedirs(os.path.dirname(log_file_name), exist_ok=True) - log_file = open(log_file_name, "wb") - depl = self.oblv_client.deployment_info(self.deployment_id) - if depl.is_deleted: - raise Exception( - "User cannot connect to this deployment, as it is no longer available." - ) - try: - if depl.is_dev_env: - process = subprocess.Popen( # nosec - [ - "oblv", - "connect", - "--private-key", - private_file_name, - "--public-key", - public_file_name, - "--url", - depl.instance.service_url, - "--pcr0", - depl.pcr_codes[0], - "--pcr1", - depl.pcr_codes[1], - "--pcr2", - depl.pcr_codes[2], - "--port", - "443", - "--lport", - str(connection_port), - "--disable-pcr-check", - ], - stdout=log_file, - stderr=log_file, - ) - else: - process = subprocess.Popen( # nosec - [ - "oblv", - "connect", - "--private-key", - private_file_name, - "--public-key", - public_file_name, - "--url", - depl.instance.service_url, - "--pcr0", - depl.pcr_codes[0], - "--pcr1", - depl.pcr_codes[1], - "--pcr2", - depl.pcr_codes[2], - "--port", - "443", - "--lport", - str(connection_port), - ], - stdout=log_file, - stderr=log_file, - ) - with open(log_file_name) as log_file_read: - while True: - log_line = log_file_read.readline() - if "Error: Invalid PCR Values" in log_line: - raise Exception("PCR Validation Failed") - if "Only one usage of each socket address" in log_line: - raise Exception( - "Another oblv proxy instance running. Either close that connection" - + "or change the *connection_port*" - ) - elif "error" in log_line.lower(): - raise Exception(log_line) - elif "listening on" in log_line: - break - except Exception as e: - raise e - else: - print( - f"Successfully connected to proxy on port {connection_port}. The logs can be found at {log_file_name}" - ) - self.__conn_string = f"http://127.0.0.1:{connection_port}" - self.__logs = log_file_name - self.__process = process - return - - def register( - self, - name: str, - email: str, - password: str, - institution: str | None = None, - website: str | None = None, - ) -> SyftError | SyftSigningKey | None: - self.check_connection_string() - guest_client = login_as_guest(url=self.__conn_string) - return guest_client.register( - name=name, - email=email, - password=password, - institution=institution, - website=website, - ) - - def login( - self, - email: str, - password: str, - ) -> None: - self.check_connection_string() - self.__enclave_client = login( - url=self.__conn_string, email=email, password=password - ) - - def check_connection_string(self) -> None: - if not self.__conn_string: - raise Exception( - "Either proxy not running or not initiated using syft." 
- + " Run the method initiate_connection to initiate the proxy connection" - ) - - def sanity_check_oblv_response(self, req: requests.Response) -> str: - if req.status_code == 401: - raise OblvUnAuthorizedError() - elif req.status_code == 400: - raise OblvEnclaveError(req.json()["detail"]) - elif req.status_code == 422: - print(req.text) - # ToDo - Update here - elif req.status_code != 200: - raise OblvEnclaveError( - f"Failed to perform the operation with status {req.status_code}, {req.content!r}" - ) - return "Failed" - - def request_code_execution(self, code: SubmitUserCode) -> Any: - # relative - from ...service.code.user_code import SubmitUserCode - - if not isinstance(code, SubmitUserCode): - raise Exception( - f"The input code should be of type: {SubmitUserCode} got:{type(code)}" - ) - - enclave_metadata = OblvMetadata( - deployment_id=self.deployment_id, oblv_client=self.oblv_client - ) - - code_id = UID() - code.id = code_id - code.enclave_metadata = enclave_metadata - - for domain_client in self.domain_clients: - domain_client.code.request_code_execution(code=code) - print(f"Sent code execution request to {domain_client.name}") - - res = self.api.services.code.request_code_execution(code=code) - print(f"Execution will be done on {self.__enclave_client.name}") - - return res - - @property - def api(self) -> SyftAPI: - if not self.__enclave_client: - raise Exception("Kindly login or register with the enclave") - - return self.__enclave_client.api - - def close_connection(self) -> str | None: - if self.check_proxy_running(): - os.kill(self.__process.pid, SIGTERM) - return None - else: - return "No Proxy Connection Running" - - def check_proxy_running(self) -> bool: - if self.__process is not None: - if self.__process.poll() is not None: - return False - else: - return True - return False - - def fetch_current_proxy_logs( - self, follow: bool = False, tail: bool = False - ) -> None: - """Returns the logs of the running enclave instance - - Args: - follow (bool, optional): To follow the logs as they grow. Defaults to False. - tail (bool, optional): Only show the new generated logs. - To be used only when follow is True. Defaults to False. - """ - if self.__logs is None: - print( - bcolors.RED - + bcolors.BOLD - + "Exception" - + bcolors.BLACK - + bcolors.ENDC - + ": Logs not initiated", - file=sys.stderr, - ) - log_file = open(self.__logs) - if not follow: - print(log_file.read()) - else: - if tail: - log_file.seek(0, 2) - while True: - line = log_file.readline() - if not line: - time.sleep(0.1) - continue - print(line) - - -# Todo - Method to check if proxy is running -# Todo diff --git a/packages/syft/src/syft/external/oblv/exceptions.py b/packages/syft/src/syft/external/oblv/exceptions.py deleted file mode 100644 index 05acc2e47cf..00000000000 --- a/packages/syft/src/syft/external/oblv/exceptions.py +++ /dev/null @@ -1,54 +0,0 @@ -class OblvProxyConnectPCRError(Exception): - def __init__(self, message: str = "") -> None: - if not message: - message = "Failed to connect to enclave. Unauthorized deployment provided." 
- super().__init__(message) - - -class OblvEnclaveUnAuthorizedError(Exception): - def __init__(self, message: str = "") -> None: - if not message: - message = "Domain unauthorized to perform this action in enclave" - super().__init__(message) - - -class OblvEnclaveError(Exception): - def __init__(self, message: str = "") -> None: - if not message: - message = "Failed to connect to the enclave" - super().__init__(message) - - -class OblvError(Exception): - def __init__(self, message: str = "") -> None: - super().__init__(message) - - -class OblvUnAuthorizedError(Exception): - def __init__(self, message: str = "") -> None: - if not message: - message = "User unauthorized to perform this action in enclave" - super().__init__(message) - - -class OblvKeyAlreadyExistsError(Exception): - def __init__(self, message: str = "") -> None: - if not message: - message = "Currently each domain node could have only one oblv public/private key pair" - super().__init__(message) - - -class OblvLocalEnclaveError(Exception): - def __init__(self, message: str = "") -> None: - if not message: - message = ( - "Failed to connect to locally deployed FastAPI based enclave services." - ) - super().__init__(message) - - -class OblvKeyNotFoundError(Exception): - def __init__(self, message: str = "") -> None: - if not message: - message = "Oblivious public key not found. Kindly request admin to create a new one" - super().__init__(message) diff --git a/packages/syft/src/syft/external/oblv/oblv_keys.py b/packages/syft/src/syft/external/oblv/oblv_keys.py deleted file mode 100644 index 040d41e1824..00000000000 --- a/packages/syft/src/syft/external/oblv/oblv_keys.py +++ /dev/null @@ -1,19 +0,0 @@ -# relative -from ...serde.serializable import serializable -from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SyftObject - - -@serializable() -class OblvKeys(SyftObject): - # version - __canonical_name__ = "OblvKeys" - __version__ = SYFT_OBJECT_VERSION_2 - - # fields - public_key: bytes - private_key: bytes - - # serde / storage rules - __attr_searchable__ = ["private_key", "public_key"] - __attr_unique__ = ["private_key", "public_key"] diff --git a/packages/syft/src/syft/external/oblv/oblv_keys_stash.py b/packages/syft/src/syft/external/oblv/oblv_keys_stash.py deleted file mode 100644 index 8d4ba434418..00000000000 --- a/packages/syft/src/syft/external/oblv/oblv_keys_stash.py +++ /dev/null @@ -1,68 +0,0 @@ -# stdlib -from typing import Any - -# third party -from result import Err -from result import Ok -from result import Result - -# relative -from ...node.credentials import SyftVerifyKey -from ...serde.serializable import serializable -from ...service.response import SyftError -from ...store.document_store import BaseStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ...store.document_store import UIDPartitionKey -from ...types.uid import UID -from .oblv_keys import OblvKeys - - -@serializable() -class OblvKeysStash(BaseStash): - object_type = OblvKeys - settings: PartitionSettings = PartitionSettings( - name=OblvKeys.__canonical_name__, object_type=OblvKeys, db_name="app" - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) - - def check_type(self, obj: Any, type_: type) -> Result[Any, str]: - return ( - Ok(obj) - if isinstance(obj, type_) - else Err(f"{type(obj)} does not match required type: {type_}") - ) - - def set( - self, credentials: 
SyftVerifyKey, oblv_keys: OblvKeys - ) -> Result[OblvKeys, Err]: - if not len(self): - valid = self.check_type(oblv_keys, self.object_type) - if valid.is_err(): - return SyftError(message=valid.err()) - - return super().set(credentials, oblv_keys) - else: - return Err("Domain Node already has an existing public/private key pair") - - def get_by_uid( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[OblvKeys | None, str]: - qks = QueryKeys(qks=[UIDPartitionKey.with_obj(uid)]) - return Ok(self.query_one(credentials=credentials, qks=qks)) - - def delete_by_uid(self, credentials: SyftVerifyKey, uid: UID) -> Result[bool, str]: - qk = UIDPartitionKey.with_obj(uid) - return super().delete(qk=qk) - - def update( - self, credentials: SyftVerifyKey, task: OblvKeys - ) -> Result[OblvKeys, str]: - valid = self.check_type(task, self.object_type) - if valid.is_err(): - return SyftError(message=valid.err()) - - return super().update(credentials, task) diff --git a/packages/syft/src/syft/external/oblv/oblv_proxy.py b/packages/syft/src/syft/external/oblv/oblv_proxy.py deleted file mode 100644 index c8f6ff9d7b1..00000000000 --- a/packages/syft/src/syft/external/oblv/oblv_proxy.py +++ /dev/null @@ -1,214 +0,0 @@ -# stdlib -import base64 -import os -import platform -import subprocess # nosec -import sys -import tarfile -import zipfile - -# third party -import requests - -# relative -from ...util.util import bcolors - - -def check_oblv_proxy_installation_status(): - try: - result = subprocess.run(["oblv", "-V"], capture_output=True, text=True) # nosec - if result.stderr: - raise subprocess.CalledProcessError( # nosec - returncode=result.returncode, cmd=result.args, stderr=result.stderr - ) - result = result.stdout.strip() - return result - except Exception as e: - if e.__class__ == FileNotFoundError: - system_name = platform.system() - result = "Oblv Proxy Not Installed. Call the method install_oblv_proxy " - if system_name == "Windows": - result += ( - "to install the proxy for this session. If you already have the proxy installed," - " add it to your PATH." - ) - elif system_name == "Linux": - result += ( - "to install the proxy globally. If you already have the proxy installed," - " create a link to the installation as /usr/local/bin/oblv" - ) - - print( - bcolors.RED - + bcolors.BOLD - + "Exception" - + bcolors.BLACK - + bcolors.ENDC - + ": " - + result, - file=sys.stderr, - ) - else: - raise Exception(e) - - -def install_oblv_proxy(with_package: bool = False): - """Oblivious Proxy Installation - - Args: - with_package (bool, optional): Only available for .msi, .deb, .rpm. Defaults to False. 
- """ - system_name = platform.system() - if system_name == "Windows": - windows_proxy_installation(with_package) - elif system_name == "Linux": - linux_proxy_installation(with_package) - elif system_name == "Darwin": - darwin_proxy_installation() - - -def windows_proxy_installation(with_package: bool = False): - try: - if with_package: - url = "https://api.oblivious.ai/oblv-ccli/0.4.0/packages/oblv-0.4.0-x86_64.msi" - res = requests.get(url) # nosec - path = os.path.join(os.path.expanduser("~"), "oblv-0.4.0-x86_64.msi") - with open(path, "wb") as f: - f.write(res.content) - os.system(f"msiexec /I {path} /quiet /QB-!") # nosec - else: - url = "https://api.oblivious.ai/oblv-ccli/0.4.0/oblv-ccli-0.4.0-x86_64-pc-windows-msvc.zip" - res = requests.get(url) # nosec - path = ( - os.getcwd().replace("\\", "/") - + "/oblv-ccli-0.4.0-x86_64-pc-windows-msvc.zip" - ) - with open(path, "wb") as f: - f.write(res.content) - with zipfile.ZipFile(path, "r") as zipObj: # nosec - zipObj.extractall() # nosec - os.environ["PATH"] += ( - ";" + os.getcwd() + "\\oblv-ccli-0.4.0-x86_64-pc-windows-msvc;" - ) - except Exception as e: - print( - bcolors.RED - + bcolors.BOLD - + "Exception" - + bcolors.BLACK - + bcolors.ENDC - + ": " - + e.__cause__, - file=sys.stderr, - ) - - -def linux_proxy_installation(with_package: bool = False): - try: - if with_package: - try: - os.system("dpkg") # nosec - except Exception: - url = "https://api.oblivious.ai/oblv-ccli/0.4.0/packages/oblv-0.4.0-1.x86_64.rpm" - res = requests.get(url) # nosec - path = os.path.join(os.path.expanduser("~"), "oblv-0.4.0-1.x86_64.rpm") - with open(path, "wb") as f: - f.write(res.content) - os.system(f"rpm -i {path}") # nosec - else: - url = "https://api.oblivious.ai/oblv-ccli/0.4.0/packages/oblv_0.4.0_amd64.deb" - res = requests.get(url) # nosec - path = os.path.join(os.path.expanduser("~"), "oblv_0.4.0_amd64.deb") - with open(path, "wb") as f: - f.write(res.content) - os.system(f"dpkg -i {path}") # nosec - else: - url = "https://api.oblivious.ai/oblv-ccli/0.4.0/oblv-ccli-0.4.0-x86_64-unknown-linux-musl.tar.gz" - file_name = "oblv-ccli-0.4.0-x86_64-unknown-linux-musl.tar.gz" - res = requests.get(url, stream=True) # nosec - if res.status_code == 200: - with open(file_name, "wb") as f: - f.write(res.raw.read()) - path = os.getcwd() + "/oblv-ccli-0.4.0-x86_64-unknown-linux-musl" - file = tarfile.open(file_name) # nosec - file.extractall(path) # nosec - - os.symlink( - "/usr/local/bin/oblv", - os.getcwd() + "/oblv-ccli-0.4.0-x86_64-unknown-linux-musl/oblv", - ) - print( - bcolors.green(bcolors.bold("Successfully")) + " installed Oblivous CLI" - ) - except Exception as e: - print( - bcolors.RED - + bcolors.BOLD - + "Exception" - + bcolors.BLACK - + bcolors.ENDC - + ": " - + e.__cause__, - file=sys.stderr, - ) - - -def darwin_proxy_installation(): - url = "https://api.oblivious.ai/oblv-ccli/0.4.0/oblv-ccli-0.4.0-x86_64-apple-darwin.tar.gz" - file_name = "oblv-ccli-0.4.0-x86_64-apple-darwin.tar.gz" - res = requests.get(url, stream=True) # nosec - if res.status_code == 200: - with open(file_name, "wb") as f: - f.write(res.raw.read()) - path = os.getcwd() + "/oblv-ccli-0.4.0-x86_64-apple-darwin" - file = tarfile.open(file_name) - file.extractall(path) # nosec - - os.symlink( - "/usr/local/bin/oblv", os.getcwd() + "/oblv-ccli-0.4.0-x86_64-apple-darwin/oblv" - ) - print(bcolors.green(bcolors.bold("Successfully")) + " installed Oblivous CLI") - - -def create_oblv_key_pair(key_name): - if check_oblv_proxy_installation_status() is None: - return - try: - file_path 
= os.path.join(os.path.expanduser("~"), ".ssh", key_name) - result = subprocess.run( # nosec - ["oblv", "keygen", "--key-name", key_name, "--output", file_path], - capture_output=True, - ) - if result.stderr: - raise subprocess.CalledProcessError( # nosec - returncode=result.returncode, cmd=result.args, stderr=result.stderr - ) - result = result.stdout.strip() - return get_oblv_public_key(key_name) - except Exception as e: - raise Exception(e) - - -def get_oblv_public_key(key_name): - try: - filepath = os.path.join( - os.path.expanduser("~"), ".ssh", key_name, key_name + "_public.der" - ) - with open(filepath, "rb") as f: - public_key = f.read() - public_key = base64.encodebytes(public_key).decode("UTF-8").replace("\n", "") - return public_key - except FileNotFoundError: - print( - bcolors.RED - + bcolors.BOLD - + "Exception" - + bcolors.BLACK - + bcolors.ENDC - + ": " - + "No key found with given name", - file=sys.stderr, - ) - raise FileNotFoundError - except Exception as e: - raise Exception(e) diff --git a/packages/syft/src/syft/external/oblv/oblv_service.py b/packages/syft/src/syft/external/oblv/oblv_service.py deleted file mode 100644 index f72efb532df..00000000000 --- a/packages/syft/src/syft/external/oblv/oblv_service.py +++ /dev/null @@ -1,405 +0,0 @@ -# stdlib -from base64 import encodebytes -from collections.abc import Callable -import os -import random -import subprocess # nosec -from typing import Any -from typing import cast - -# third party -from oblv_ctl import OblvClient -import requests -from result import Err -from result import Ok -from result import Result - -# relative -from ...client.api import SyftAPI -from ...client.client import HTTPConnection -from ...client.client import Routes -from ...node.credentials import SyftSigningKey -from ...node.credentials import SyftVerifyKey -from ...serde.deserialize import _deserialize as deserialize -from ...serde.serializable import serializable -from ...service.action.action_object import ActionObject -from ...service.code.user_code import UserCodeStatus -from ...service.context import AuthedServiceContext -from ...service.response import SyftError -from ...service.service import AbstractService -from ...service.service import service_method -from ...service.user.user_roles import GUEST_ROLE_LEVEL -from ...store.document_store import DocumentStore -from ...types.uid import UID -from ...util.util import find_available_port -from .constants import LOCAL_MODE -from .constants import OBLV_LOCALHOST_PORT -from .deployment_client import OblvMetadata -from .exceptions import OblvEnclaveError -from .exceptions import OblvProxyConnectPCRError -from .oblv_keys import OblvKeys -from .oblv_keys_stash import OblvKeysStash - -# caches the connection to Enclave using the deployment ID -OBLV_PROCESS_CACHE: dict[str, list] = {} - - -def connect_to_enclave( - oblv_keys_stash: OblvKeysStash, - verify_key: SyftVerifyKey, - oblv_client: OblvClient, - deployment_id: str, - connection_port: int, - oblv_key_name: str, -) -> subprocess.Popen | None: - global OBLV_PROCESS_CACHE - if deployment_id in OBLV_PROCESS_CACHE: - process = OBLV_PROCESS_CACHE[deployment_id][0] - if process.poll() is None: - return process - # If the process has been terminated create a new connection - del OBLV_PROCESS_CACHE[deployment_id] - - # Always create key file each time, which ensures consistency when there is key change in database - create_keys_from_db( - oblv_keys_stash=oblv_keys_stash, - verify_key=verify_key, - oblv_key_name=oblv_key_name, - ) - oblv_key_path = 
os.path.expanduser(os.getenv("OBLV_KEY_PATH", "~/.oblv")) - - public_file_name = oblv_key_path + "/" + oblv_key_name + "_public.der" - private_file_name = oblv_key_path + "/" + oblv_key_name + "_private.der" - - depl = oblv_client.deployment_info(deployment_id) - if depl.is_deleted: - raise OblvEnclaveError( - "User cannot connect to this deployment, as it is no longer available." - ) - if depl.is_dev_env: - process = subprocess.Popen( # nosec - [ - "oblv", - "connect", - "--private-key", - private_file_name, - "--public-key", - public_file_name, - "--url", - depl.instance.service_url, - "--pcr0", - depl.pcr_codes[0], - "--pcr1", - depl.pcr_codes[1], - "--pcr2", - depl.pcr_codes[2], - "--port", - "443", - "--lport", - str(connection_port), - "--disable-pcr-check", - ], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - else: - process = subprocess.Popen( # nosec - [ - "oblv", - "connect", - "--private-key", - private_file_name, - "--public-key", - public_file_name, - "--url", - depl.instance.service_url, - "--pcr0", - depl.pcr_codes[0], - "--pcr1", - depl.pcr_codes[1], - "--pcr2", - depl.pcr_codes[2], - "--port", - "443", - "--lport", - str(connection_port), - ], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - while process.poll() is None: - log_line = process.stderr.readline().decode() - if log_line.__contains__("Error: Invalid PCR Values"): - raise OblvProxyConnectPCRError() - elif log_line.lower().__contains__("error"): - raise OblvEnclaveError(message=log_line) - elif log_line.__contains__("listening on"): - break - - OBLV_PROCESS_CACHE[deployment_id] = [process, connection_port] - return None - - -def make_request_to_enclave( - oblv_keys_stash: OblvKeysStash, - verify_key: SyftVerifyKey, - deployment_id: str, - oblv_client: OblvClient, - request_method: Callable, - connection_string: str, - connection_port: int, - oblv_key_name: str, - params: dict | None = None, - files: dict | None = None, - data: dict | None = None, - json: dict | None = None, -) -> Any: - if not LOCAL_MODE: - _ = connect_to_enclave( - oblv_keys_stash=oblv_keys_stash, - verify_key=verify_key, - oblv_client=oblv_client, - deployment_id=deployment_id, - connection_port=connection_port, - oblv_key_name=oblv_key_name, - ) - req = request_method( - connection_string, - params=params, - files=files, - data=data, - json=json, - ) - - return req - else: - headers = {"x-oblv-user-name": "enclave-test", "x-oblv-user-role": "domain"} - - return request_method( - connection_string, - headers=headers, - params=params, - files=files, - data=data, - json=json, - ) - - -def create_keys_from_db( - oblv_keys_stash: OblvKeysStash, verify_key: SyftVerifyKey, oblv_key_name: str -) -> None: - oblv_key_path = os.path.expanduser(os.getenv("OBLV_KEY_PATH", "~/.oblv")) - - os.makedirs(oblv_key_path, exist_ok=True) - # Temporary new key name for the new service - - keys = oblv_keys_stash.get_all(verify_key) - if keys.is_ok(): - keys = keys.ok()[0] - else: - return keys.err() - - f_private = open(oblv_key_path + "/" + oblv_key_name + "_private.der", "w+b") - f_private.write(keys.private_key) - f_private.close() - f_public = open(oblv_key_path + "/" + oblv_key_name + "_public.der", "w+b") - f_public.write(keys.public_key) - f_public.close() - - -def generate_oblv_key(oblv_key_name: str) -> tuple[bytes, bytes]: - oblv_key_path = os.path.expanduser(os.getenv("OBLV_KEY_PATH", "~/.oblv")) - os.makedirs(oblv_key_path, exist_ok=True) - - result = subprocess.run( # nosec - [ - "oblv", - "keygen", - "--key-name", - oblv_key_name, - 
"--output", - oblv_key_path, - ], - capture_output=True, - ) - - if result.stderr: - raise Err( - subprocess.CalledProcessError( # nosec - returncode=result.returncode, cmd=result.args, stderr=result.stderr - ) - ) - f_private = open(oblv_key_path + "/" + oblv_key_name + "_private.der", "rb") - private_key = f_private.read() - f_private.close() - f_public = open(oblv_key_path + "/" + oblv_key_name + "_public.der", "rb") - public_key = f_public.read() - f_public.close() - - return (public_key, private_key) - - -@serializable() -class OblvService(AbstractService): - store: DocumentStore - oblv_keys_stash: OblvKeysStash - - def __init__(self, store: DocumentStore) -> None: - self.store = store - self.oblv_keys_stash = OblvKeysStash(store=store) - - @service_method(path="oblv.create_key", name="create_key", roles=GUEST_ROLE_LEVEL) - def create_key( - self, - context: AuthedServiceContext, - oblv_key_name: str, - override_existing_key: bool = False, - ) -> Result[Ok, Err]: - """Domain Public/Private Key pair creation""" - # TODO 🟣 Check for permission after it is fully integrated - public_key, private_key = generate_oblv_key(oblv_key_name) - - if override_existing_key: - self.oblv_keys_stash.clear() - oblv_keys = OblvKeys(public_key=public_key, private_key=private_key) - - res = self.oblv_keys_stash.set(context.credentials, oblv_keys) - - if res.is_ok(): - return Ok( - "Successfully created a new public/private RSA key-pair on the domain node" - ) - return res.err() - - @service_method( - path="oblv.get_public_key", name="get_public_key", roles=GUEST_ROLE_LEVEL - ) - def get_public_key( - self, - context: AuthedServiceContext, - ) -> Result[Ok, Err]: - "Retrieves the public key present on the Domain Node." - - if len(self.oblv_keys_stash): - # retrieve the public key from the stash using the node's verify key - # as the public should be accessible to all the users - oblv_keys = self.oblv_keys_stash.get_all(context.node.verify_key) - if oblv_keys.is_ok(): - oblv_keys = oblv_keys.ok()[0] - else: - return oblv_keys.err() - - public_key_str = ( - encodebytes(oblv_keys.public_key).decode("UTF-8").replace("\n", "") - ) - return Ok(public_key_str) - - return Err( - "Public Key not present for the domain node, Kindly request the admin to create a new one" - ) - - def get_api_for( - self, - enclave_metadata: OblvMetadata, - signing_key: SyftSigningKey, - worker_name: str, - ) -> SyftAPI: - deployment_id = enclave_metadata.deployment_id - oblv_client = enclave_metadata.oblv_client - if not LOCAL_MODE: - if ( - deployment_id in OBLV_PROCESS_CACHE - and OBLV_PROCESS_CACHE[deployment_id][0].poll() is None - ): - port = OBLV_PROCESS_CACHE[deployment_id][1] - else: - # randomized port staring point, to quickly find free port - port_start = 3000 + random.randint(1, 10_000) # nosec - port = find_available_port( - host="127.0.0.1", port=port_start, search=True - ) - connection_string = f"http://127.0.0.1:{port}" - else: - port = os.getenv("OBLV_LOCALHOST_PORT", OBLV_LOCALHOST_PORT) - connection_string = f"http://127.0.0.1:{port}" - - # To identify if we are in docker container - if "CONTAINER_HOST" in os.environ: - connection_string = connection_string.replace( - "127.0.0.1", "host.docker.internal" - ) - - params = {"verify_key": str(signing_key.verify_key)} - req = make_request_to_enclave( - connection_string=connection_string + Routes.ROUTE_API.value, - deployment_id=deployment_id, - oblv_client=oblv_client, - oblv_keys_stash=self.oblv_keys_stash, - verify_key=signing_key.verify_key, - 
request_method=requests.get, - connection_port=port, - oblv_key_name=worker_name, - params=params, - ) - - obj = deserialize(req.content, from_bytes=True) - # TODO 🟣 Retrieve of signing key of user after permission is fully integrated - obj.signing_key = signing_key - obj.connection = HTTPConnection(url=connection_string) - return cast(SyftAPI, obj) - - @service_method( - path="oblv.send_user_code_inputs_to_enclave", - name="send_user_code_inputs_to_enclave", - roles=GUEST_ROLE_LEVEL, - ) - def send_user_code_inputs_to_enclave( - self, - context: AuthedServiceContext, - user_code_id: UID, - inputs: dict, - node_name: str, - ) -> Result[Ok, Err]: - if not context.node or not context.node.signing_key: - return Err(f"{type(context)} has no node") - - user_code_service = context.node.get_service("usercodeservice") - action_service = context.node.get_service("actionservice") - user_code = user_code_service.stash.get_by_uid( - context.node.signing_key.verify_key, uid=user_code_id - ) - if user_code.is_err(): - return SyftError( - message=f"Unable to find {user_code_id} in {type(user_code_service)}" - ) - user_code = user_code.ok() - reason: str = context.extra_kwargs.get("reason", "") - res = user_code.status.mutate( - value=(UserCodeStatus.APPROVED, reason), - node_name=node_name, - verify_key=context.credentials, - ) - if res.is_err(): - return res - user_code.status = res.ok() - user_code_service.update_code_state(context=context, code_item=user_code) - - root_context = context.as_root_context() - - if not action_service.exists(context=context, obj_id=user_code_id): - dict_object = ActionObject.from_obj({}) - dict_object.id = user_code_id - dict_object[str(context.credentials)] = inputs - root_context.extra_kwargs = {"has_result_read_permission": True} - action_service.set(root_context, dict_object) - - else: - res = action_service.get(uid=user_code_id, context=context) - if res.is_ok(): - dict_object = res.ok() - dict_object[str(context.credentials)] = inputs - action_service.set(root_context, dict_object) - else: - return res - - return Ok(Ok(True)) diff --git a/packages/syft/src/syft/gevent_patch.py b/packages/syft/src/syft/gevent_patch.py deleted file mode 100644 index c74b10a45b6..00000000000 --- a/packages/syft/src/syft/gevent_patch.py +++ /dev/null @@ -1,37 +0,0 @@ -# stdlib -import os - - -def str_to_bool(bool_str: str | None) -> bool: - result = False - bool_str = str(bool_str).lower() - if bool_str == "true" or bool_str == "1": - result = True - return result - - -GEVENT_MONKEYPATCH = str_to_bool(os.environ.get("GEVENT_MONKEYPATCH", "False")) - -# 🟡 TODO 30: Move this to where we manage the different concurrency modes later -# make sure its stable in containers and other run targets -# if GEVENT_MONKEYPATCH: -# monkey.patch_all(ssl=False) - - -def is_notebook() -> bool: - # third party - from IPython import get_ipython - - try: - shell = get_ipython().__class__.__name__ - if shell == "ZMQInteractiveShell": - return True # Jupyter notebook or qtconsole - elif shell == "TerminalInteractiveShell": - return False # Terminal running IPython - else: - return False # Other type (?) 
- except NameError: - return False # Probably standard Python interpreter - - -jupyter_notebook = is_notebook() diff --git a/packages/syft/src/syft/img/base64.py b/packages/syft/src/syft/img/base64.py deleted file mode 100644 index 9ebcbe6df29..00000000000 --- a/packages/syft/src/syft/img/base64.py +++ /dev/null @@ -1,12 +0,0 @@ -# stdlib -import base64 - - -# allows images to work with offline mode -def base64read(fname: str) -> str: - # relative - from .. import SYFT_PATH - - with open(SYFT_PATH / "img" / fname, "rb") as file: - res = base64.b64encode(file.read()) - return f"data:image/png;base64,{res.decode('utf-8')}" diff --git a/packages/syft/src/syft/img/small-grid-symbol-logo.png b/packages/syft/src/syft/img/small-grid-symbol-logo.png deleted file mode 100644 index b8956c2adec..00000000000 Binary files a/packages/syft/src/syft/img/small-grid-symbol-logo.png and /dev/null differ diff --git a/packages/syft/src/syft/node/domain.py b/packages/syft/src/syft/node/domain.py deleted file mode 100644 index 7070e2de756..00000000000 --- a/packages/syft/src/syft/node/domain.py +++ /dev/null @@ -1,8 +0,0 @@ -# relative -from ..serde.serializable import serializable -from .node import Node - - -@serializable(without=["queue_manager"]) -class Domain(Node): - pass diff --git a/packages/syft/src/syft/node/enclave.py b/packages/syft/src/syft/node/enclave.py deleted file mode 100644 index 7ef3505ae93..00000000000 --- a/packages/syft/src/syft/node/enclave.py +++ /dev/null @@ -1,11 +0,0 @@ -# relative -from ..abstract_node import NodeType -from ..serde.serializable import serializable -from .node import Node - - -@serializable() -class Enclave(Node): - def post_init(self) -> None: - self.node_type = NodeType.ENCLAVE - super().post_init() diff --git a/packages/syft/src/syft/node/gateway.py b/packages/syft/src/syft/node/gateway.py deleted file mode 100644 index fc8be3c975e..00000000000 --- a/packages/syft/src/syft/node/gateway.py +++ /dev/null @@ -1,11 +0,0 @@ -# relative -from ..abstract_node import NodeType -from ..serde.serializable import serializable -from .node import Node - - -@serializable() -class Gateway(Node): - def post_init(self) -> None: - self.node_type = NodeType.GATEWAY - super().post_init() diff --git a/packages/syft/src/syft/node/node.py b/packages/syft/src/syft/node/node.py deleted file mode 100644 index ec1f9b198aa..00000000000 --- a/packages/syft/src/syft/node/node.py +++ /dev/null @@ -1,1657 +0,0 @@ -# future -from __future__ import annotations - -# stdlib -from collections import OrderedDict -from collections.abc import Callable -from datetime import datetime -from functools import partial -import hashlib -import os -from pathlib import Path -import shutil -import subprocess # nosec -import tempfile -from time import sleep -import traceback -from typing import Any - -# third party -from nacl.signing import SigningKey -from result import Err -from result import Result -from typing_extensions import Self - -# relative -from .. 
import __version__ -from ..abstract_node import AbstractNode -from ..abstract_node import NodeSideType -from ..abstract_node import NodeType -from ..client.api import SignedSyftAPICall -from ..client.api import SyftAPI -from ..client.api import SyftAPICall -from ..client.api import SyftAPIData -from ..client.api import debox_signed_syftapicall_response -from ..client.client import SyftClient -from ..exceptions.exception import PySyftException -from ..external import OblvServiceProvider -from ..protocol.data_protocol import PROTOCOL_TYPE -from ..protocol.data_protocol import get_data_protocol -from ..service.action.action_object import Action -from ..service.action.action_object import ActionObject -from ..service.action.action_service import ActionService -from ..service.action.action_store import ActionStore -from ..service.action.action_store import DictActionStore -from ..service.action.action_store import MongoActionStore -from ..service.action.action_store import SQLiteActionStore -from ..service.blob_storage.service import BlobStorageService -from ..service.code.status_service import UserCodeStatusService -from ..service.code.user_code_service import UserCodeService -from ..service.code.user_code_stash import UserCodeStash -from ..service.code_history.code_history_service import CodeHistoryService -from ..service.context import AuthedServiceContext -from ..service.context import NodeServiceContext -from ..service.context import UnauthedServiceContext -from ..service.context import UserLoginCredentials -from ..service.data_subject.data_subject_member_service import DataSubjectMemberService -from ..service.data_subject.data_subject_service import DataSubjectService -from ..service.dataset.dataset_service import DatasetService -from ..service.enclave.enclave_service import EnclaveService -from ..service.job.job_service import JobService -from ..service.job.job_stash import Job -from ..service.job.job_stash import JobStash -from ..service.log.log_service import LogService -from ..service.metadata.metadata_service import MetadataService -from ..service.metadata.node_metadata import NodeMetadataV3 -from ..service.network.network_service import NetworkService -from ..service.notification.notification_service import NotificationService -from ..service.notifier.notifier_service import NotifierService -from ..service.object_search.migration_state_service import MigrateStateService -from ..service.output.output_service import OutputService -from ..service.policy.policy_service import PolicyService -from ..service.project.project_service import ProjectService -from ..service.queue.base_queue import AbstractMessageHandler -from ..service.queue.base_queue import QueueConsumer -from ..service.queue.base_queue import QueueProducer -from ..service.queue.queue import APICallMessageHandler -from ..service.queue.queue import QueueManager -from ..service.queue.queue_service import QueueService -from ..service.queue.queue_stash import ActionQueueItem -from ..service.queue.queue_stash import QueueItem -from ..service.queue.queue_stash import QueueStash -from ..service.queue.zmq_queue import QueueConfig -from ..service.queue.zmq_queue import ZMQClientConfig -from ..service.queue.zmq_queue import ZMQQueueConfig -from ..service.request.request_service import RequestService -from ..service.response import SyftError -from ..service.service import AbstractService -from ..service.service import ServiceConfigRegistry -from ..service.service import UserServiceConfigRegistry -from ..service.settings.settings import 
NodeSettingsV2 -from ..service.settings.settings_service import SettingsService -from ..service.settings.settings_stash import SettingsStash -from ..service.sync.sync_service import SyncService -from ..service.user.user import User -from ..service.user.user import UserCreate -from ..service.user.user_roles import ServiceRole -from ..service.user.user_service import UserService -from ..service.user.user_stash import UserStash -from ..service.veilid import VeilidServiceProvider -from ..service.worker.image_registry_service import SyftImageRegistryService -from ..service.worker.utils import DEFAULT_WORKER_IMAGE_TAG -from ..service.worker.utils import DEFAULT_WORKER_POOL_NAME -from ..service.worker.utils import create_default_image -from ..service.worker.worker_image_service import SyftWorkerImageService -from ..service.worker.worker_pool import WorkerPool -from ..service.worker.worker_pool_service import SyftWorkerPoolService -from ..service.worker.worker_pool_stash import SyftWorkerPoolStash -from ..service.worker.worker_service import WorkerService -from ..service.worker.worker_stash import WorkerStash -from ..store.blob_storage import BlobStorageConfig -from ..store.blob_storage.on_disk import OnDiskBlobStorageClientConfig -from ..store.blob_storage.on_disk import OnDiskBlobStorageConfig -from ..store.dict_document_store import DictStoreConfig -from ..store.document_store import StoreConfig -from ..store.linked_obj import LinkedObject -from ..store.mongo_document_store import MongoStoreConfig -from ..store.sqlite_document_store import SQLiteStoreClientConfig -from ..store.sqlite_document_store import SQLiteStoreConfig -from ..types.syft_object import SYFT_OBJECT_VERSION_2 -from ..types.syft_object import SyftObject -from ..types.uid import UID -from ..util.experimental_flags import flags -from ..util.telemetry import instrument -from ..util.util import get_env -from ..util.util import get_queue_address -from ..util.util import random_name -from ..util.util import str_to_bool -from ..util.util import thread_ident -from .credentials import SyftSigningKey -from .credentials import SyftVerifyKey -from .worker_settings import WorkerSettings - -# if user code needs to be serded and its not available we can call this to refresh -# the code for a specific node UID and thread -CODE_RELOADER: dict[int, Callable] = {} - - -NODE_PRIVATE_KEY = "NODE_PRIVATE_KEY" -NODE_UID = "NODE_UID" -NODE_TYPE = "NODE_TYPE" -NODE_NAME = "NODE_NAME" -NODE_SIDE_TYPE = "NODE_SIDE_TYPE" - -DEFAULT_ROOT_EMAIL = "DEFAULT_ROOT_EMAIL" -DEFAULT_ROOT_USERNAME = "DEFAULT_ROOT_USERNAME" -DEFAULT_ROOT_PASSWORD = "DEFAULT_ROOT_PASSWORD" # nosec - - -def get_private_key_env() -> str | None: - return get_env(NODE_PRIVATE_KEY) - - -def get_node_type() -> str | None: - return get_env(NODE_TYPE, "domain") - - -def get_node_name() -> str | None: - return get_env(NODE_NAME, None) - - -def get_node_side_type() -> str | None: - return get_env(NODE_SIDE_TYPE, "high") - - -def get_node_uid_env() -> str | None: - return get_env(NODE_UID) - - -def get_default_root_email() -> str | None: - return get_env(DEFAULT_ROOT_EMAIL, "info@openmined.org") - - -def get_default_root_username() -> str | None: - return get_env(DEFAULT_ROOT_USERNAME, "Jane Doe") - - -def get_default_root_password() -> str | None: - return get_env(DEFAULT_ROOT_PASSWORD, "changethis") # nosec - - -def get_dev_mode() -> bool: - return str_to_bool(get_env("DEV_MODE", "False")) - - -def get_enable_warnings() -> bool: - return str_to_bool(get_env("ENABLE_WARNINGS", "False")) - - 
-def get_container_host() -> str | None: - return get_env("CONTAINER_HOST") - - -def get_default_worker_image() -> str | None: - return get_env("DEFAULT_WORKER_POOL_IMAGE") - - -def get_default_worker_pool_name() -> str | None: - return get_env("DEFAULT_WORKER_POOL_NAME", DEFAULT_WORKER_POOL_NAME) - - -def get_default_worker_pool_count(node: Node) -> int: - return int( - get_env( - "DEFAULT_WORKER_POOL_COUNT", node.queue_config.client_config.n_consumers - ) - ) - - -def in_kubernetes() -> bool: - return get_container_host() == "k8s" - - -def get_venv_packages() -> str: - res = subprocess.getoutput( - "pip list --format=freeze", - ) - return res - - -def get_syft_worker() -> bool: - return str_to_bool(get_env("SYFT_WORKER", "false")) - - -def get_k8s_pod_name() -> str | None: - return get_env("K8S_POD_NAME") - - -def get_syft_worker_uid() -> str | None: - is_worker = get_syft_worker() - pod_name = get_k8s_pod_name() - uid = get_env("SYFT_WORKER_UID") - # if uid is empty is a K8S worker, generate a uid from the pod name - if (not uid) and is_worker and pod_name: - uid = str(UID.with_seed(pod_name)) - return uid - - -signing_key_env = get_private_key_env() -node_uid_env = get_node_uid_env() - -default_root_email = get_default_root_email() -default_root_username = get_default_root_username() -default_root_password = get_default_root_password() - - -class AuthNodeContextRegistry: - __node_context_registry__: dict[str, NodeServiceContext] = OrderedDict() - - @classmethod - def set_node_context( - cls, - node_uid: UID | str, - context: NodeServiceContext, - user_verify_key: SyftVerifyKey | str, - ) -> None: - if isinstance(node_uid, str): - node_uid = UID.from_string(node_uid) - - if isinstance(user_verify_key, str): - user_verify_key = SyftVerifyKey.from_string(user_verify_key) - - key = cls._get_key(node_uid=node_uid, user_verify_key=user_verify_key) - - cls.__node_context_registry__[key] = context - - @staticmethod - def _get_key(node_uid: UID, user_verify_key: SyftVerifyKey) -> str: - return "-".join(str(x) for x in (node_uid, user_verify_key)) - - @classmethod - def auth_context_for_user( - cls, - node_uid: UID, - user_verify_key: SyftVerifyKey, - ) -> AuthedServiceContext | None: - key = cls._get_key(node_uid=node_uid, user_verify_key=user_verify_key) - return cls.__node_context_registry__.get(key) - - -@instrument -class Node(AbstractNode): - signing_key: SyftSigningKey | None - required_signed_calls: bool = True - packages: str - - def __init__( - self, - *, # Trasterisk - name: str | None = None, - id: UID | None = None, - signing_key: SyftSigningKey | SigningKey | None = None, - action_store_config: StoreConfig | None = None, - document_store_config: StoreConfig | None = None, - root_email: str | None = default_root_email, - root_username: str | None = default_root_username, - root_password: str | None = default_root_password, - processes: int = 0, - is_subprocess: bool = False, - node_type: str | NodeType = NodeType.DOMAIN, - local_db: bool = False, - reset: bool = False, - blob_storage_config: BlobStorageConfig | None = None, - queue_config: QueueConfig | None = None, - queue_port: int | None = None, - n_consumers: int = 0, - create_producer: bool = False, - thread_workers: bool = False, - node_side_type: str | NodeSideType = NodeSideType.HIGH_SIDE, - enable_warnings: bool = False, - dev_mode: bool = False, - migrate: bool = False, - in_memory_workers: bool = True, - smtp_username: str | None = None, - smtp_password: str | None = None, - email_sender: str | None = None, - smtp_port: 
int | None = None, - smtp_host: str | None = None, - ): - # 🟡 TODO 22: change our ENV variable format and default init args to make this - # less horrible or add some convenience functions - self.dev_mode = dev_mode or get_dev_mode() - self.id = UID.from_string(node_uid_env) if node_uid_env else (id or UID()) - self.packages = "" - self.processes = processes - self.is_subprocess = is_subprocess - self.name = name or random_name() - self.enable_warnings = enable_warnings - self.in_memory_workers = in_memory_workers - self.node_type = NodeType(node_type) - self.node_side_type = NodeSideType(node_side_type) - self.client_cache: dict = {} - self.peer_client_cache: dict = {} - - if isinstance(node_type, str): - node_type = NodeType(node_type) - self.node_type = node_type - - if isinstance(node_side_type, str): - node_side_type = NodeSideType(node_side_type) - self.node_side_type = node_side_type - - skey = None - if signing_key_env: - skey = SyftSigningKey.from_string(signing_key_env) - elif isinstance(signing_key, SigningKey): - skey = SyftSigningKey(signing_key=signing_key) - else: - skey = signing_key - self.signing_key = skey or SyftSigningKey.generate() - - self.queue_config = self.create_queue_config( - n_consumers=n_consumers, - create_producer=create_producer, - thread_workers=thread_workers, - queue_port=queue_port, - queue_config=queue_config, - ) - - # must call before initializing stores - if reset: - self.remove_temp_dir() - - use_sqlite = local_db or (processes > 0 and not is_subprocess) - document_store_config = document_store_config or self.get_default_store( - use_sqlite=use_sqlite - ) - action_store_config = action_store_config or self.get_default_store( - use_sqlite=use_sqlite - ) - self.init_stores( - action_store_config=action_store_config, - document_store_config=document_store_config, - ) - - # construct services only after init stores - self._construct_services() - - create_admin_new( # nosec B106 - name=root_username, - email=root_email, - password=root_password, - node=self, - ) - - NotifierService.init_notifier( - node=self, - email_password=smtp_password, - email_username=smtp_username, - email_sender=email_sender, - smtp_port=smtp_port, - smtp_host=smtp_host, - ) - - self.post_init() - - self.create_initial_settings(admin_email=root_email) - - self.init_queue_manager(queue_config=self.queue_config) - - self.init_blob_storage(config=blob_storage_config) - - # Migrate data before any operation on db - if migrate: - self.find_and_migrate_data() - - NodeRegistry.set_node_for(self.id, self) - - @property - def runs_in_docker(self) -> bool: - path = "/proc/self/cgroup" - return ( - os.path.exists("/.dockerenv") - or os.path.isfile(path) - and any("docker" in line for line in open(path)) - ) - - def get_default_store(self, use_sqlite: bool) -> StoreConfig: - if use_sqlite: - return SQLiteStoreConfig( - client_config=SQLiteStoreClientConfig( - filename=f"{self.id}.sqlite", - path=self.get_temp_dir("db"), - ) - ) - return DictStoreConfig() - - def init_blob_storage(self, config: BlobStorageConfig | None = None) -> None: - if config is None: - client_config = OnDiskBlobStorageClientConfig( - base_directory=self.get_temp_dir("blob") - ) - config_ = OnDiskBlobStorageConfig(client_config=client_config) - else: - config_ = config - self.blob_store_config = config_ - self.blob_storage_client = config_.client_type(config=config_.client_config) - - # relative - from ..store.blob_storage.seaweedfs import SeaweedFSConfig - - if isinstance(config, SeaweedFSConfig) and self.signing_key: 
- blob_storage_service = self.get_service(BlobStorageService) - remote_profiles = blob_storage_service.remote_profile_stash.get_all( - credentials=self.signing_key.verify_key, has_permission=True - ).ok() - for remote_profile in remote_profiles: - self.blob_store_config.client_config.remote_profiles[ - remote_profile.profile_name - ] = remote_profile - - def stop(self) -> None: - for consumer_list in self.queue_manager.consumers.values(): - for c in consumer_list: - c.close() - for p in self.queue_manager.producers.values(): - p.close() - - NodeRegistry.remove_node(self.id) - - def close(self) -> None: - self.stop() - - def cleanup(self) -> None: - self.stop() - self.remove_temp_dir() - - def create_queue_config( - self, - n_consumers: int, - create_producer: bool, - thread_workers: bool, - queue_port: int | None, - queue_config: QueueConfig | None, - ) -> QueueConfig: - if queue_config: - queue_config_ = queue_config - elif queue_port is not None or n_consumers > 0 or create_producer: - if not create_producer and queue_port is None: - print("No queue port defined to bind consumers.") - queue_config_ = ZMQQueueConfig( - client_config=ZMQClientConfig( - create_producer=create_producer, - queue_port=queue_port, - n_consumers=n_consumers, - ), - thread_workers=thread_workers, - ) - else: - queue_config_ = ZMQQueueConfig() - - return queue_config_ - - def init_queue_manager(self, queue_config: QueueConfig) -> None: - MessageHandlers = [APICallMessageHandler] - if self.is_subprocess: - return None - - self.queue_manager = QueueManager(config=queue_config) - for message_handler in MessageHandlers: - queue_name = message_handler.queue_name - # client config - if getattr(queue_config.client_config, "create_producer", True): - context = AuthedServiceContext( - node=self, - credentials=self.verify_key, - role=ServiceRole.ADMIN, - ) - producer: QueueProducer = self.queue_manager.create_producer( - queue_name=queue_name, - queue_stash=self.queue_stash, - context=context, - worker_stash=self.worker_stash, - ) - producer.run() - address = producer.address - else: - port = queue_config.client_config.queue_port - if port is not None: - address = get_queue_address(port) - else: - address = None - - if address is None and queue_config.client_config.n_consumers > 0: - raise ValueError("address unknown for consumers") - - service_name = queue_config.client_config.consumer_service - - if not service_name: - # Create consumers for default worker pool - create_default_worker_pool(self) - else: - # Create consumer for given worker pool - syft_worker_uid = get_syft_worker_uid() - print( - f"Running as consumer with uid={syft_worker_uid} service={service_name}" - ) - - if syft_worker_uid: - self.add_consumer_for_service( - service_name=service_name, - syft_worker_id=UID(syft_worker_uid), - address=address, - message_handler=message_handler, - ) - - def add_consumer_for_service( - self, - service_name: str, - syft_worker_id: UID, - address: str, - message_handler: type[AbstractMessageHandler] = APICallMessageHandler, - ) -> None: - consumer: QueueConsumer = self.queue_manager.create_consumer( - message_handler, - address=address, - service_name=service_name, - worker_stash=self.worker_stash, - syft_worker_id=syft_worker_id, - ) - consumer.run() - - @classmethod - def named( - cls, - *, # Trasterisk - name: str, - processes: int = 0, - reset: bool = False, - local_db: bool = False, - node_type: str | NodeType = NodeType.DOMAIN, - node_side_type: str | NodeSideType = NodeSideType.HIGH_SIDE, - enable_warnings: bool = 
False, - n_consumers: int = 0, - thread_workers: bool = False, - create_producer: bool = False, - queue_port: int | None = None, - dev_mode: bool = False, - migrate: bool = False, - in_memory_workers: bool = True, - ) -> Self: - uid = UID.with_seed(name) - name_hash = hashlib.sha256(name.encode("utf8")).digest() - key = SyftSigningKey(signing_key=SigningKey(name_hash)) - blob_storage_config = None - - node_type = NodeType(node_type) - node_side_type = NodeSideType(node_side_type) - - return cls( - name=name, - id=uid, - signing_key=key, - processes=processes, - local_db=local_db, - node_type=node_type, - node_side_type=node_side_type, - enable_warnings=enable_warnings, - blob_storage_config=blob_storage_config, - queue_port=queue_port, - n_consumers=n_consumers, - thread_workers=thread_workers, - create_producer=create_producer, - dev_mode=dev_mode, - migrate=migrate, - in_memory_workers=in_memory_workers, - reset=reset, - ) - - def is_root(self, credentials: SyftVerifyKey) -> bool: - return credentials == self.verify_key - - @property - def root_client(self) -> SyftClient: - # relative - from ..client.client import PythonConnection - - connection = PythonConnection(node=self) - client_type = connection.get_client_type() - if isinstance(client_type, SyftError): - return client_type - root_client = client_type(connection=connection, credentials=self.signing_key) - if root_client.api.refresh_api_callback is not None: - root_client.api.refresh_api_callback() - return root_client - - def _find_klasses_pending_for_migration( - self, object_types: list[SyftObject] - ) -> list[SyftObject]: - context = AuthedServiceContext( - node=self, - credentials=self.verify_key, - role=ServiceRole.ADMIN, - ) - migration_state_service = self.get_service(MigrateStateService) - - klasses_to_be_migrated = [] - - for object_type in object_types: - canonical_name = object_type.__canonical_name__ - object_version = object_type.__version__ - - migration_state = migration_state_service.get_state(context, canonical_name) - if isinstance(migration_state, SyftError): - raise Exception( - f"Failed to get migration state for {canonical_name}. 
Error: {migration_state}" - ) - if ( - migration_state is not None - and migration_state.current_version != migration_state.latest_version - ): - klasses_to_be_migrated.append(object_type) - else: - migration_state_service.register_migration_state( - context, - current_version=object_version, - canonical_name=canonical_name, - ) - - return klasses_to_be_migrated - - def find_and_migrate_data(self) -> None: - # Track all object type that need migration for document store - context = AuthedServiceContext( - node=self, - credentials=self.verify_key, - role=ServiceRole.ADMIN, - ) - document_store_object_types = [ - partition.settings.object_type - for partition in self.document_store.partitions.values() - ] - - object_pending_migration = self._find_klasses_pending_for_migration( - object_types=document_store_object_types - ) - - if object_pending_migration: - print( - "Object in Document Store that needs migration: ", - object_pending_migration, - ) - - # Migrate data for objects in document store - for object_type in object_pending_migration: - canonical_name = object_type.__canonical_name__ - object_partition = self.document_store.partitions.get(canonical_name) - if object_partition is None: - continue - - print(f"Migrating data for: {canonical_name} table.") - migration_status = object_partition.migrate_data( - to_klass=object_type, context=context - ) - if migration_status.is_err(): - raise Exception( - f"Failed to migrate data for {canonical_name}. Error: {migration_status.err()}" - ) - - # Track all object types from action store - action_object_types = [Action, ActionObject] - action_object_types.extend(ActionObject.__subclasses__()) - action_object_pending_migration = self._find_klasses_pending_for_migration( - action_object_types - ) - - if action_object_pending_migration: - print( - "Object in Action Store that needs migration: ", - action_object_pending_migration, - ) - - # Migrate data for objects in action store - for object_type in action_object_pending_migration: - canonical_name = object_type.__canonical_name__ - - migration_status = self.action_store.migrate_data( - to_klass=object_type, credentials=self.verify_key - ) - if migration_status.is_err(): - raise Exception( - f"Failed to migrate data for {canonical_name}. 
Error: {migration_status.err()}" - ) - print("Data Migrated to latest version !!!") - - @property - def guest_client(self) -> SyftClient: - return self.get_guest_client() - - @property - def current_protocol(self) -> str | int: - data_protocol = get_data_protocol() - return data_protocol.latest_version - - def get_guest_client(self, verbose: bool = True) -> SyftClient: - # relative - from ..client.client import PythonConnection - - connection = PythonConnection(node=self) - if verbose and self.node_side_type: - message: str = ( - f"Logged into <{self.name}: {self.node_side_type.value.capitalize()} " - ) - if self.node_type: - message += f"side {self.node_type.value.capitalize()} > as GUEST" - print(message) - - client_type = connection.get_client_type() - if isinstance(client_type, SyftError): - return client_type - - guest_client = client_type( - connection=connection, credentials=SyftSigningKey.generate() - ) - if guest_client.api.refresh_api_callback is not None: - guest_client.api.refresh_api_callback() - return guest_client - - def __repr__(self) -> str: - service_string = "" - if not self.is_subprocess: - services = [] - for service in self.services: - services.append(service.__name__) - service_string = ", ".join(sorted(services)) - service_string = f"\n\nServices:\n{service_string}" - return f"{type(self).__name__}: {self.name} - {self.id} - {self.node_type}{service_string}" - - def post_init(self) -> None: - context = AuthedServiceContext( - node=self, credentials=self.verify_key, role=ServiceRole.ADMIN - ) - AuthNodeContextRegistry.set_node_context( - node_uid=self.id, user_verify_key=self.verify_key, context=context - ) - - if "usercodeservice" in self.service_path_map: - user_code_service = self.get_service(UserCodeService) - user_code_service.load_user_code(context=context) - - def reload_user_code() -> None: - user_code_service.load_user_code(context=context) - - ti = thread_ident() - if ti is not None: - CODE_RELOADER[ti] = reload_user_code - - def init_stores( - self, - document_store_config: StoreConfig, - action_store_config: StoreConfig, - ) -> None: - # We add the python id of the current node in order - # to create one connection per Node object in MongoClientCache - # so that we avoid closing the connection from a - # different thread through the garbage collection - if isinstance(document_store_config, MongoStoreConfig): - document_store_config.client_config.node_obj_python_id = id(self) - - self.document_store_config = document_store_config - self.document_store = document_store_config.store_type( - node_uid=self.id, - root_verify_key=self.verify_key, - store_config=document_store_config, - ) - - if isinstance(action_store_config, SQLiteStoreConfig): - self.action_store: ActionStore = SQLiteActionStore( - node_uid=self.id, - store_config=action_store_config, - root_verify_key=self.verify_key, - ) - elif isinstance(action_store_config, MongoStoreConfig): - # We add the python id of the current node in order - # to create one connection per Node object in MongoClientCache - # so that we avoid closing the connection from a - # different thread through the garbage collection - action_store_config.client_config.node_obj_python_id = id(self) - - self.action_store = MongoActionStore( - node_uid=self.id, - root_verify_key=self.verify_key, - store_config=action_store_config, - ) - else: - self.action_store = DictActionStore( - node_uid=self.id, - root_verify_key=self.verify_key, - ) - - self.action_store_config = action_store_config - self.queue_stash = 
QueueStash(store=self.document_store) - - @property - def job_stash(self) -> JobStash: - return self.get_service("jobservice").stash - - @property - def worker_stash(self) -> WorkerStash: - return self.get_service("workerservice").stash - - def _construct_services(self) -> None: - service_path_map: dict[str, AbstractService] = {} - initialized_services: list[AbstractService] = [] - - # A dict of service and init kwargs. - # - "svc" expects a callable (class or function) - # - The callable must return AbstractService or None - # - "store" expects a store type - # - By default all services get the document store - # - Pass a custom "store" to override this - default_services: list[dict] = [ - {"svc": ActionService, "store": self.action_store}, - {"svc": UserService}, - {"svc": WorkerService}, - {"svc": SettingsService}, - {"svc": DatasetService}, - {"svc": UserCodeService}, - {"svc": LogService}, - {"svc": RequestService}, - {"svc": QueueService}, - {"svc": JobService}, - {"svc": DataSubjectService}, - {"svc": NetworkService}, - {"svc": PolicyService}, - {"svc": NotifierService}, - {"svc": NotificationService}, - {"svc": DataSubjectMemberService}, - {"svc": ProjectService}, - {"svc": EnclaveService}, - {"svc": CodeHistoryService}, - {"svc": MetadataService}, - {"svc": BlobStorageService}, - {"svc": MigrateStateService}, - {"svc": SyftWorkerImageService}, - {"svc": SyftWorkerPoolService}, - {"svc": SyftImageRegistryService}, - {"svc": SyncService}, - {"svc": OutputService}, - {"svc": UserCodeStatusService}, - {"svc": VeilidServiceProvider}, # this is lazy - {"svc": OblvServiceProvider}, # this is lazy - ] - - for svc_kwargs in default_services: - ServiceCls = svc_kwargs.pop("svc") - svc_kwargs.setdefault("store", self.document_store) - - svc_instance = ServiceCls(**svc_kwargs) - if not svc_instance: - continue - elif not isinstance(svc_instance, AbstractService): - raise ValueError( - f"Service {ServiceCls.__name__} must be an instance of AbstractService" - ) - - service_path_map[ServiceCls.__name__.lower()] = svc_instance - initialized_services.append(ServiceCls) - - self.services = initialized_services - self.service_path_map = service_path_map - - def get_service_method(self, path_or_func: str | Callable) -> Callable: - if callable(path_or_func): - path_or_func = path_or_func.__qualname__ - return self._get_service_method_from_path(path_or_func) - - def get_service(self, path_or_func: str | Callable) -> AbstractService: - if callable(path_or_func): - path_or_func = path_or_func.__qualname__ - return self._get_service_from_path(path_or_func) - - def _get_service_from_path(self, path: str) -> AbstractService: - path_list = path.split(".") - if len(path_list) > 1: - _ = path_list.pop() - service_name = path_list.pop() - return self.service_path_map[service_name.lower()] - - def _get_service_method_from_path(self, path: str) -> Callable: - path_list = path.split(".") - method_name = path_list.pop() - service_obj = self._get_service_from_path(path=path) - - return getattr(service_obj, method_name) - - def get_temp_dir(self, dir_name: str = "") -> Path: - """ - Get a temporary directory unique to the node. - Provide all dbs, blob dirs, and locks using this directory. - """ - root = os.getenv("SYFT_TEMP_ROOT", "syft") - p = Path(tempfile.gettempdir(), root, str(self.id), dir_name) - p.mkdir(parents=True, exist_ok=True) - return p - - def remove_temp_dir(self) -> None: - """ - Remove the temporary directory for this node. 
- """ - rootdir = self.get_temp_dir() - if rootdir.exists(): - shutil.rmtree(rootdir, ignore_errors=True) - - @property - def settings(self) -> NodeSettingsV2: - settings_stash = SettingsStash(store=self.document_store) - if self.signing_key is None: - raise ValueError(f"{self} has no signing key") - settings = settings_stash.get_all(self.signing_key.verify_key) - if settings.is_ok() and len(settings.ok()) > 0: - settings_data = settings.ok()[0] - return settings_data - - @property - def metadata(self) -> NodeMetadataV3: - name = "" - organization = "" - description = "" - show_warnings = self.enable_warnings - settings_data = self.settings - name = settings_data.name - organization = settings_data.organization - description = settings_data.description - show_warnings = settings_data.show_warnings - node_type = self.node_type.value if self.node_type else "" - node_side_type = self.node_side_type.value if self.node_side_type else "" - - return NodeMetadataV3( - name=name, - id=self.id, - verify_key=self.verify_key, - highest_version=SYFT_OBJECT_VERSION_2, - lowest_version=SYFT_OBJECT_VERSION_2, - syft_version=__version__, - description=description, - organization=organization, - node_type=node_type, - node_side_type=node_side_type, - show_warnings=show_warnings, - ) - - @property - def icon(self) -> str: - return "🦾" - - @property - def verify_key(self) -> SyftVerifyKey: - if self.signing_key is None: - raise ValueError(f"{self} has no signing key") - return self.signing_key.verify_key - - def __hash__(self) -> int: - return hash(self.id) - - def __eq__(self, other: Any) -> bool: - if not isinstance(other, type(self)): - return False - - if self.id != other.id: - return False - - return True - - def await_future( - self, credentials: SyftVerifyKey, uid: UID - ) -> QueueItem | None | SyftError: - # stdlib - - # relative - from ..service.queue.queue import Status - - while True: - result = self.queue_stash.pop_on_complete(credentials, uid) - if not result.is_ok(): - return result.err() - else: - res = result.ok() - if res.status == Status.COMPLETED: - return res - sleep(0.1) - - def resolve_future( - self, credentials: SyftVerifyKey, uid: UID - ) -> QueueItem | None | SyftError: - result = self.queue_stash.pop_on_complete(credentials, uid) - - if result.is_ok(): - queue_obj = result.ok() - queue_obj._set_obj_location_( - node_uid=self.id, - credentials=credentials, - ) - return queue_obj - return result.err() - - def forward_message( - self, api_call: SyftAPICall | SignedSyftAPICall - ) -> Result[QueueItem | SyftObject, Err]: - node_uid = api_call.message.node_uid - if "networkservice" not in self.service_path_map: - return SyftError( - message=( - "Node has no network service so we can't " - f"forward this message to {node_uid}" - ) - ) - - client = None - - network_service = self.get_service(NetworkService) - peer = network_service.stash.get_by_uid(self.verify_key, node_uid) - - if peer.is_ok() and peer.ok(): - peer = peer.ok() - - # Since we have several routes to a peer - # we need to cache the client for a given node_uid along with the route - peer_cache_key = hash(node_uid) + hash(peer.pick_highest_priority_route()) - - if peer_cache_key in self.peer_client_cache: - client = self.peer_client_cache[peer_cache_key] - else: - context = AuthedServiceContext( - node=self, credentials=api_call.credentials - ) - client = peer.client_with_context(context=context) - self.peer_client_cache[peer_cache_key] = client - - if client: - message: SyftAPICall = api_call.message - if message.path == 
"metadata": - result = client.metadata - elif message.path == "login": - result = client.connection.login(**message.kwargs) - elif message.path == "register": - result = client.connection.register(**message.kwargs) - elif message.path == "api": - result = client.connection.get_api(**message.kwargs) - else: - signed_result = client.connection.make_call(api_call) - result = debox_signed_syftapicall_response(signed_result=signed_result) - - # relative - from ..store.blob_storage import BlobRetrievalByURL - - # In the case of blob storage, the gateway downloads the result and then passes it to - # the proxy client - if isinstance(result, BlobRetrievalByURL): - blob_route = client.api.connection.to_blob_route( - result.url.url_path - ) - result.url = blob_route - final_res = result.read() - return final_res - - return result - - return SyftError(message=(f"Node has no route to {node_uid}")) - - def get_role_for_credentials(self, credentials: SyftVerifyKey) -> ServiceRole: - role = self.get_service("userservice").get_role_for_credentials( - credentials=credentials - ) - return role - - def handle_api_call( - self, - api_call: SyftAPICall | SignedSyftAPICall, - job_id: UID | None = None, - check_call_location: bool = True, - ) -> Result[SignedSyftAPICall, Err]: - # Get the result - result = self.handle_api_call_with_unsigned_result( - api_call, job_id=job_id, check_call_location=check_call_location - ) - # Sign the result - signed_result = SyftAPIData(data=result).sign(self.signing_key) - - return signed_result - - def handle_api_call_with_unsigned_result( - self, - api_call: SyftAPICall | SignedSyftAPICall, - job_id: UID | None = None, - check_call_location: bool = True, - ) -> Result | QueueItem | SyftObject | SyftError: - if self.required_signed_calls and isinstance(api_call, SyftAPICall): - return SyftError( - message=f"You sent a {type(api_call)}. This node requires SignedSyftAPICall." - ) - else: - if not api_call.is_valid: - return SyftError(message="Your message signature is invalid") - - if api_call.message.node_uid != self.id and check_call_location: - return self.forward_message(api_call=api_call) - if api_call.message.path == "queue": - return self.resolve_future( - credentials=api_call.credentials, uid=api_call.message.kwargs["uid"] - ) - - if api_call.message.path == "metadata": - return self.metadata - - result = None - is_blocking = api_call.message.blocking - - if is_blocking or self.is_subprocess: - credentials: SyftVerifyKey = api_call.credentials - api_call = api_call.message - - role = self.get_role_for_credentials(credentials=credentials) - context = AuthedServiceContext( - node=self, credentials=credentials, role=role, job_id=job_id - ) - AuthNodeContextRegistry.set_node_context(self.id, context, credentials) - - user_config_registry = UserServiceConfigRegistry.from_role(role) - - if api_call.path not in user_config_registry: - if ServiceConfigRegistry.path_exists(api_call.path): - return SyftError( - message=f"As a `{role}`, " - f"you have no access to: {api_call.path}" - ) - else: - return SyftError( - message=f"API call not in registered services: {api_call.path}" - ) - - _private_api_path = user_config_registry.private_path_for(api_call.path) - method = self.get_service_method(_private_api_path) - try: - result = method(context, *api_call.args, **api_call.kwargs) - except PySyftException as e: - return e.handle() - except Exception: - result = SyftError( - message=f"Exception calling {api_call.path}. 
{traceback.format_exc()}" - ) - else: - return self.add_api_call_to_queue(api_call) - return result - - def add_action_to_queue( - self, - action: Action, - credentials: SyftVerifyKey, - parent_job_id: UID | None = None, - has_execute_permissions: bool = False, - worker_pool_name: str | None = None, - ) -> Job | SyftError: - job_id = UID() - task_uid = UID() - worker_settings = WorkerSettings.from_node(node=self) - - # Extract worker pool id from user code - if action.user_code_id is not None: - result = self.user_code_stash.get_by_uid( - credentials=credentials, uid=action.user_code_id - ) - - # If result is Ok, then user code object exists - if result.is_ok() and result.ok() is not None: - user_code = result.ok() - worker_pool_name = user_code.worker_pool_name - - # If worker pool id is not set, then use default worker pool - # Else, get the worker pool for given uid - if worker_pool_name is None: - worker_pool = self.get_default_worker_pool() - else: - result = self.pool_stash.get_by_name(credentials, worker_pool_name) - if result.is_err(): - return SyftError(message=f"{result.err()}") - worker_pool = result.ok() - - # Create a Worker pool reference object - worker_pool_ref = LinkedObject.from_obj( - worker_pool, - service_type=SyftWorkerPoolService, - node_uid=self.id, - ) - - queue_item = ActionQueueItem( - id=task_uid, - node_uid=self.id, - syft_client_verify_key=credentials, - syft_node_location=self.id, - job_id=job_id, - worker_settings=worker_settings, - args=[], - kwargs={"action": action}, - has_execute_permissions=has_execute_permissions, - worker_pool=worker_pool_ref, # set worker pool reference as part of queue item - ) - return self.add_queueitem_to_queue( - queue_item, credentials, action, parent_job_id - ) - - def add_queueitem_to_queue( - self, - queue_item: QueueItem, - credentials: SyftVerifyKey, - action: Action | None = None, - parent_job_id: UID | None = None, - ) -> Job | SyftError: - log_id = UID() - role = self.get_role_for_credentials(credentials=credentials) - context = AuthedServiceContext(node=self, credentials=credentials, role=role) - - result_obj = ActionObject.empty() - if action is not None: - result_obj = ActionObject.obj_not_ready(id=action.result_id) - result_obj.id = action.result_id - result_obj.syft_resolved = False - result_obj.syft_node_location = self.id - result_obj.syft_client_verify_key = credentials - - action_service = self.get_service("actionservice") - - if not action_service.store.exists(uid=action.result_id): - result = action_service.set_result_to_store( - result_action_object=result_obj, - context=context, - ) - if result.is_err(): - return result.err() - - job = Job( - id=queue_item.job_id, - result=result_obj, - node_uid=self.id, - syft_client_verify_key=credentials, - syft_node_location=self.id, - log_id=log_id, - parent_job_id=parent_job_id, - action=action, - ) - - # 🟡 TODO 36: Needs distributed lock - self.queue_stash.set_placeholder(credentials, queue_item) - self.job_stash.set(credentials, job) - - log_service = self.get_service("logservice") - - result = log_service.add(context, log_id) - if isinstance(result, SyftError): - return result - return job - - def _get_existing_user_code_jobs( - self, context: AuthedServiceContext, user_code_id: UID - ) -> list[Job] | SyftError: - job_service = self.get_service("jobservice") - return job_service.get_by_user_code_id( - context=context, user_code_id=user_code_id - ) - - def _is_usercode_call_on_owned_kwargs( - self, context: AuthedServiceContext, api_call: SyftAPICall - ) -> bool: - 
if api_call.path != "code.call": - return False - user_code_service = self.get_service("usercodeservice") - return user_code_service.is_execution_on_owned_args(api_call.kwargs, context) - - def add_api_call_to_queue( - self, api_call: SyftAPICall, parent_job_id: UID | None = None - ) -> Job | SyftError: - unsigned_call = api_call - if isinstance(api_call, SignedSyftAPICall): - unsigned_call = api_call.message - - credentials = api_call.credentials - context = AuthedServiceContext( - node=self, - credentials=credentials, - role=self.get_role_for_credentials(credentials=credentials), - ) - - is_user_code = unsigned_call.path == "code.call" - - service_str, method_str = unsigned_call.path.split(".") - - action = None - if is_user_code: - action = Action.from_api_call(unsigned_call) - - is_usercode_call_on_owned_kwargs = self._is_usercode_call_on_owned_kwargs( - context, unsigned_call - ) - # Low side does not execute jobs, unless this is a mock execution - if ( - not is_usercode_call_on_owned_kwargs - and self.node_side_type == NodeSideType.LOW_SIDE - ): - existing_jobs = self._get_existing_user_code_jobs( - context, action.user_code_id - ) - if isinstance(existing_jobs, SyftError): - return existing_jobs - elif len(existing_jobs) > 0: - # Print warning if there are existing jobs for this user code - # relative - from ..util.util import prompt_warning_message - - prompt_warning_message( - "There are existing jobs for this user code, returning the latest one" - ) - return existing_jobs[-1] - else: - return SyftError( - message="Please wait for the admin to allow the execution of this code" - ) - - return self.add_action_to_queue( - action, api_call.credentials, parent_job_id=parent_job_id - ) - - else: - worker_settings = WorkerSettings.from_node(node=self) - default_worker_pool = self.get_default_worker_pool() - worker_pool = LinkedObject.from_obj( - default_worker_pool, - service_type=SyftWorkerPoolService, - node_uid=self.id, - ) - queue_item = QueueItem( - id=UID(), - node_uid=self.id, - syft_client_verify_key=api_call.credentials, - syft_node_location=self.id, - job_id=UID(), - worker_settings=worker_settings, - service=service_str, - method=method_str, - args=unsigned_call.args, - kwargs=unsigned_call.kwargs, - worker_pool=worker_pool, - ) - return self.add_queueitem_to_queue( - queue_item, - api_call.credentials, - action=None, - parent_job_id=parent_job_id, - ) - - @property - def pool_stash(self) -> SyftWorkerPoolStash: - return self.get_service(SyftWorkerPoolService).stash - - @property - def user_code_stash(self) -> UserCodeStash: - return self.get_service(UserCodeService).stash - - def get_default_worker_pool(self) -> WorkerPool | None | SyftError: - result = self.pool_stash.get_by_name( - credentials=self.verify_key, - pool_name=get_default_worker_pool_name(), - ) - if result.is_err(): - return SyftError(message=f"{result.err()}") - worker_pool = result.ok() - return worker_pool - - def get_api( - self, - for_user: SyftVerifyKey | None = None, - communication_protocol: PROTOCOL_TYPE | None = None, - ) -> SyftAPI: - return SyftAPI.for_user( - node=self, - user_verify_key=for_user, - communication_protocol=communication_protocol, - ) - - def get_method_with_context( - self, function: Callable, context: NodeServiceContext - ) -> Callable: - method = self.get_service_method(function) - return partial(method, context=context) - - def get_unauthed_context( - self, login_credentials: UserLoginCredentials - ) -> NodeServiceContext: - return UnauthedServiceContext(node=self, 
login_credentials=login_credentials) - - def create_initial_settings(self, admin_email: str) -> NodeSettingsV2 | None: - if self.name is None: - self.name = random_name() - try: - settings_stash = SettingsStash(store=self.document_store) - if self.signing_key is None: - print("create_initial_settings failed as there is no signing key") - return None - settings_exists = settings_stash.get_all(self.signing_key.verify_key).ok() - if settings_exists: - self.name = settings_exists[0].name - return None - else: - # Currently we allow automatic user registration on enclaves, - # as enclaves do not have superusers - if self.node_type == NodeType.ENCLAVE: - flags.CAN_REGISTER = True - new_settings = NodeSettingsV2( - id=self.id, - name=self.name, - verify_key=self.verify_key, - node_type=self.node_type, - deployed_on=datetime.now().date().strftime("%m/%d/%Y"), - signup_enabled=flags.CAN_REGISTER, - admin_email=admin_email, - node_side_type=self.node_side_type.value, # type: ignore - show_warnings=self.enable_warnings, - ) - result = settings_stash.set( - credentials=self.signing_key.verify_key, settings=new_settings - ) - if result.is_ok(): - return result.ok() - return None - except Exception as e: - print(f"create_initial_settings failed with error {e}") - return None - - -def create_admin_new( - name: str, - email: str, - password: str, - node: AbstractNode, -) -> User | None: - try: - user_stash = UserStash(store=node.document_store) - row_exists = user_stash.get_by_email( - credentials=node.signing_key.verify_key, email=email - ).ok() - if row_exists: - return None - else: - create_user = UserCreate( - name=name, - email=email, - password=password, - password_verify=password, - role=ServiceRole.ADMIN, - ) - # New User Initialization - # 🟡 TODO: change later but for now this gives the main user super user automatically - user = create_user.to(User) - user.signing_key = node.signing_key - user.verify_key = user.signing_key.verify_key - result = user_stash.set( - credentials=node.signing_key.verify_key, - user=user, - ignore_duplicates=True, - ) - if result.is_ok(): - return result.ok() - else: - raise Exception(f"Could not create user: {result}") - except Exception as e: - print("Unable to create new admin", e) - - return None - - -# def create_oblv_key_pair( -# worker: Node, -# ) -> str | None: -# try: -# # relative -# from ..external.oblv.oblv_keys_stash import OblvKeys -# from ..external.oblv.oblv_keys_stash import OblvKeysStash -# from ..external.oblv.oblv_service import generate_oblv_key - -# oblv_keys_stash = OblvKeysStash(store=worker.document_store) - -# if not len(oblv_keys_stash) and worker.signing_key: -# public_key, private_key = generate_oblv_key(oblv_key_name=worker.name) -# oblv_keys = OblvKeys(public_key=public_key, private_key=private_key) -# res = oblv_keys_stash.set(worker.signing_key.verify_key, oblv_keys) -# if res.is_ok(): -# print("Successfully generated Oblv Key pair at startup") -# return res.err() -# else: -# print(f"Using Existing Public/Private Key pair: {len(oblv_keys_stash)}") -# except Exception as e: -# print("Unable to create Oblv Keys.", e) -# return None - -# return None - - -class NodeRegistry: - __node_registry__: dict[UID, Node] = {} - - @classmethod - def set_node_for( - cls, - node_uid: UID | str, - node: Node, - ) -> None: - if isinstance(node_uid, str): - node_uid = UID.from_string(node_uid) - - cls.__node_registry__[node_uid] = node - - @classmethod - def node_for(cls, node_uid: UID) -> Node: - return cls.__node_registry__.get(node_uid, None) - - 
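For reviewers: the in-memory NodeRegistry being removed here is a plain class-level dict keyed by node UID. A minimal usage sketch, assuming an already constructed `node` object that exposes a UID `id` (the object itself is illustrative, not a specific Syft call):

# Illustrative use of the removed NodeRegistry; `node` is any object with a UID `id`.
NodeRegistry.set_node_for(node.id, node)     # a UID or its string form is accepted
same_node = NodeRegistry.node_for(node.id)   # plain dict .get() lookup, None if absent
assert same_node is node
NodeRegistry.remove_node(node.id)            # drop the entry again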
@classmethod - def get_all_nodes(cls) -> list[Node]: - return list(cls.__node_registry__.values()) - - @classmethod - def remove_node(cls, node_uid: UID) -> None: - if node_uid in cls.__node_registry__: - del cls.__node_registry__[node_uid] - - -def get_default_worker_tag_by_env(dev_mode: bool = False) -> str | None: - if in_kubernetes(): - return get_default_worker_image() - elif dev_mode: - return "local-dev" - else: - return __version__ - - -def create_default_worker_pool(node: Node) -> SyftError | None: - credentials = node.verify_key - pull_image = not node.dev_mode - image_stash = node.get_service(SyftWorkerImageService).stash - default_pool_name = get_default_worker_pool_name() - default_worker_pool = node.get_default_worker_pool() - default_worker_tag = get_default_worker_tag_by_env(node.dev_mode) - worker_count = get_default_worker_pool_count(node) - context = AuthedServiceContext( - node=node, - credentials=credentials, - role=ServiceRole.ADMIN, - ) - - print(f"Creating default worker image with tag='{default_worker_tag}'") - # Get/Create a default worker SyftWorkerImage - default_image = create_default_image( - credentials=credentials, - image_stash=image_stash, - tag=default_worker_tag, - in_kubernetes=in_kubernetes(), - ) - if isinstance(default_image, SyftError): - print("Failed to create default worker image: ", default_image.message) - return default_image - - if not default_image.is_built: - print(f"Building default worker image with tag={default_worker_tag}") - image_build_method = node.get_service_method(SyftWorkerImageService.build) - # Build the Image for given tag - result = image_build_method( - context, - image_uid=default_image.id, - tag=DEFAULT_WORKER_IMAGE_TAG, - pull=pull_image, - ) - - if isinstance(result, SyftError): - print("Failed to build default worker image: ", result.message) - return None - - # Create worker pool if it doesn't exists - print( - "Setting up worker pool" - f"name={default_pool_name} " - f"workers={worker_count} " - f"image_uid={default_image.id} " - f"in_memory={node.in_memory_workers}" - ) - if default_worker_pool is None: - worker_to_add_ = worker_count - create_pool_method = node.get_service_method(SyftWorkerPoolService.launch) - result = create_pool_method( - context, - name=default_pool_name, - image_uid=default_image.id, - num_workers=worker_count, - ) - else: - # Else add a worker to existing worker pool - worker_to_add_ = max(default_worker_pool.max_count, worker_count) - len( - default_worker_pool.worker_list - ) - add_worker_method = node.get_service_method(SyftWorkerPoolService.add_workers) - result = add_worker_method( - context=context, - number=worker_to_add_, - pool_name=default_pool_name, - ) - - if isinstance(result, SyftError): - print(f"Default worker pool error. 
{result.message}") - return None - - for n in range(worker_to_add_): - container_status = result[n] - if container_status.error: - print( - f"Failed to create container: Worker: {container_status.worker}," - f"Error: {container_status.error}" - ) - return None - - print("Created default worker pool.") - return None diff --git a/packages/syft/src/syft/node/routes.py b/packages/syft/src/syft/node/routes.py deleted file mode 100644 index b141ff145b2..00000000000 --- a/packages/syft/src/syft/node/routes.py +++ /dev/null @@ -1,203 +0,0 @@ -# stdlib - -# stdlib -from typing import Annotated - -# third party -from fastapi import APIRouter -from fastapi import Body -from fastapi import Depends -from fastapi import Request -from fastapi import Response -from fastapi.responses import JSONResponse -from loguru import logger -from pydantic import ValidationError - -# relative -from ..abstract_node import AbstractNode -from ..protocol.data_protocol import PROTOCOL_TYPE -from ..serde.deserialize import _deserialize as deserialize -from ..serde.serialize import _serialize as serialize -from ..service.context import NodeServiceContext -from ..service.context import UnauthedServiceContext -from ..service.metadata.node_metadata import NodeMetadataJSON -from ..service.response import SyftError -from ..service.user.user import UserCreate -from ..service.user.user import UserPrivateKey -from ..service.user.user_service import UserService -from ..util.telemetry import TRACE_MODE -from .credentials import SyftVerifyKey -from .credentials import UserLoginCredentials -from .worker import Worker - - -def make_routes(worker: Worker) -> APIRouter: - if TRACE_MODE: - # third party - try: - # third party - from opentelemetry import trace - from opentelemetry.propagate import extract - except Exception: - print("Failed to import opentelemetry") - - router = APIRouter() - - async def get_body(request: Request) -> bytes: - return await request.body() - - @router.get( - "/", - name="healthcheck", - status_code=200, - response_class=JSONResponse, - ) - def root() -> dict[str, str]: - """ - Currently, all service backends must satisfy either of the following requirements to - pass the HTTP health checks sent to it from the GCE loadbalancer: 1. Respond with a - 200 on '/'. The content does not matter. 2. Expose an arbitrary url as a readiness - probe on the pods backing the Service. 
- """ - return {"status": "ok"} - - # provide information about the node in JSON - @router.get("/metadata", response_class=JSONResponse) - def syft_metadata() -> JSONResponse: - return worker.metadata.to(NodeMetadataJSON) - - @router.get("/metadata_capnp") - def syft_metadata_capnp() -> Response: - result = worker.metadata - return Response( - serialize(result, to_bytes=True), - media_type="application/octet-stream", - ) - - def handle_syft_new_api( - user_verify_key: SyftVerifyKey, communication_protocol: PROTOCOL_TYPE - ) -> Response: - return Response( - serialize( - worker.get_api(user_verify_key, communication_protocol), to_bytes=True - ), - media_type="application/octet-stream", - ) - - # get the SyftAPI object - @router.get("/api") - def syft_new_api( - request: Request, verify_key: str, communication_protocol: PROTOCOL_TYPE - ) -> Response: - user_verify_key: SyftVerifyKey = SyftVerifyKey.from_string(verify_key) - if TRACE_MODE: - with trace.get_tracer(syft_new_api.__module__).start_as_current_span( - syft_new_api.__qualname__, - context=extract(request.headers), - kind=trace.SpanKind.SERVER, - ): - return handle_syft_new_api(user_verify_key, communication_protocol) - else: - return handle_syft_new_api(user_verify_key, communication_protocol) - - def handle_new_api_call(data: bytes) -> Response: - obj_msg = deserialize(blob=data, from_bytes=True) - result = worker.handle_api_call(api_call=obj_msg) - return Response( - serialize(result, to_bytes=True), - media_type="application/octet-stream", - ) - - # make a request to the SyftAPI - @router.post("/api_call") - def syft_new_api_call( - request: Request, data: Annotated[bytes, Depends(get_body)] - ) -> Response: - if TRACE_MODE: - with trace.get_tracer(syft_new_api_call.__module__).start_as_current_span( - syft_new_api_call.__qualname__, - context=extract(request.headers), - kind=trace.SpanKind.SERVER, - ): - return handle_new_api_call(data) - else: - return handle_new_api_call(data) - - def handle_login(email: str, password: str, node: AbstractNode) -> Response: - try: - login_credentials = UserLoginCredentials(email=email, password=password) - except ValidationError as e: - return {"Error": e.json()} - - method = node.get_service_method(UserService.exchange_credentials) - context = UnauthedServiceContext(node=node, login_credentials=login_credentials) - result = method(context=context) - - if isinstance(result, SyftError): - logger.bind(payload={"email": email}).error(result.message) - response = result - else: - user_private_key = result - if not isinstance(user_private_key, UserPrivateKey): - raise Exception(f"Incorrect return type: {type(user_private_key)}") - response = user_private_key - - return Response( - serialize(response, to_bytes=True), - media_type="application/octet-stream", - ) - - def handle_register(data: bytes, node: AbstractNode) -> Response: - user_create = deserialize(data, from_bytes=True) - - if not isinstance(user_create, UserCreate): - raise Exception(f"Incorrect type received: {user_create}") - - context = NodeServiceContext(node=node) - method = node.get_method_with_context(UserService.register, context) - - result = method(new_user=user_create) - - if isinstance(result, SyftError): - logger.bind(payload={"user": user_create}).error(result.message) - response = SyftError(message=f"{result.message}") - else: - response = result - - return Response( - serialize(response, to_bytes=True), - media_type="application/octet-stream", - ) - - # exchange email and password for a SyftSigningKey - 
@router.post("/login", name="login", status_code=200) - def login( - request: Request, - email: Annotated[str, Body(example="info@openmined.org")], - password: Annotated[str, Body(example="changethis")], - ) -> Response: - if TRACE_MODE: - with trace.get_tracer(login.__module__).start_as_current_span( - login.__qualname__, - context=extract(request.headers), - kind=trace.SpanKind.SERVER, - ): - return handle_login(email, password, worker) - else: - return handle_login(email, password, worker) - - @router.post("/register", name="register", status_code=200) - def register( - request: Request, data: Annotated[bytes, Depends(get_body)] - ) -> Response: - if TRACE_MODE: - with trace.get_tracer(register.__module__).start_as_current_span( - register.__qualname__, - context=extract(request.headers), - kind=trace.SpanKind.SERVER, - ): - return handle_register(data, worker) - else: - return handle_register(data, worker) - - return router diff --git a/packages/syft/src/syft/node/run.py b/packages/syft/src/syft/node/run.py deleted file mode 100644 index d82d88c9a97..00000000000 --- a/packages/syft/src/syft/node/run.py +++ /dev/null @@ -1,107 +0,0 @@ -# stdlib -import argparse - -# third party -from hagrid.orchestra import NodeHandle - -# relative -from ..client.deploy import Orchestra - - -def str_to_bool(bool_str: str | None) -> bool: - result = False - bool_str = str(bool_str).lower() - if bool_str == "true" or bool_str == "1": - result = True - return result - - -def run() -> NodeHandle | None: - parser = argparse.ArgumentParser() - parser.add_argument("command", help="command: launch", type=str, default="none") - parser.add_argument( - "--name", help="node name", type=str, default="syft-node", dest="name" - ) - parser.add_argument( - "--node-type", help="node type", type=str, default="python", dest="node_type" - ) - parser.add_argument( - "--host", - help="host for binding", - type=str, - default="0.0.0.0", # nosec - dest="host", - ) - - parser.add_argument( - "--port", help="port for binding", type=int, default=8080, dest="port" - ) - parser.add_argument( - "--dev-mode", - help="developer mode", - type=str, - default="True", - dest="dev_mode", - ) - parser.add_argument( - "--reset", - help="reset", - type=str, - default="True", - dest="reset", - ) - parser.add_argument( - "--local-db", - help="reset", - type=str, - default="False", - dest="local_db", - ) - parser.add_argument( - "--processes", - help="processing mode", - type=int, - default=0, - dest="processes", - ) - parser.add_argument( - "--tail", - help="tail mode", - type=str, - default="True", - dest="tail", - ) - parser.add_argument( - "--cmd", - help="cmd mode", - type=str, - default="False", - dest="cmd", - ) - - args = parser.parse_args() - - if args.command != "launch": - print("syft launch is the only command currently supported") - - args.dev_mode = str_to_bool(args.dev_mode) - args.reset = str_to_bool(args.reset) - args.local_db = str_to_bool(args.local_db) - args.tail = str_to_bool(args.tail) - args.cmd = str_to_bool(args.cmd) - - node = Orchestra.launch( - name=args.name, - node_type=args.node_type, - host=args.host, - port=args.port, - dev_mode=args.dev_mode, - reset=args.reset, - local_db=args.local_db, - processes=args.processes, - tail=args.tail, - cmd=args.cmd, - ) - if not args.tail: - return node - return None diff --git a/packages/syft/src/syft/node/server.py b/packages/syft/src/syft/node/server.py deleted file mode 100644 index 855197ba637..00000000000 --- a/packages/syft/src/syft/node/server.py +++ /dev/null @@ -1,310 
+0,0 @@ -# stdlib -import asyncio -from collections.abc import Callable -from enum import Enum -import logging -import multiprocessing -import os -import platform -import signal -import subprocess # nosec -import time - -# third party -from fastapi import APIRouter -from fastapi import FastAPI -import requests -from starlette.middleware.cors import CORSMiddleware -import uvicorn - -# relative -from ..abstract_node import NodeSideType -from ..client.client import API_PATH -from ..util.constants import DEFAULT_TIMEOUT -from ..util.util import os_name -from .domain import Domain -from .enclave import Enclave -from .gateway import Gateway -from .node import NodeType -from .routes import make_routes - -if os_name() == "macOS": - # needed on MacOS to prevent [__NSCFConstantString initialize] may have been in - # progress in another thread when fork() was called. - multiprocessing.set_start_method("spawn", True) - -WAIT_TIME_SECONDS = 20 - - -def make_app(name: str, router: APIRouter) -> FastAPI: - app = FastAPI( - title=name, - ) - - api_router = APIRouter() - - api_router.include_router(router) - app.include_router(api_router, prefix="/api/v2") - - app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], - ) - - return app - - -worker_classes = { - NodeType.DOMAIN: Domain, - NodeType.GATEWAY: Gateway, - NodeType.ENCLAVE: Enclave, -} - - -def run_uvicorn( - name: str, - node_type: Enum, - host: str, - port: int, - processes: int, - reset: bool, - dev_mode: bool, - node_side_type: str, - enable_warnings: bool, - in_memory_workers: bool, - queue_port: int | None, - create_producer: bool, - n_consumers: int, -) -> None: - async def _run_uvicorn( - name: str, - node_type: NodeType, - host: str, - port: int, - reset: bool, - dev_mode: bool, - node_side_type: Enum, - ) -> None: - if node_type not in worker_classes: - raise NotImplementedError(f"node_type: {node_type} is not supported") - worker_class = worker_classes[node_type] - if dev_mode: - print( - f"\nWARNING: private key is based on node name: {name} in dev_mode. " - "Don't run this in production." 
- ) - - worker = worker_class.named( - name=name, - processes=processes, - reset=reset, - local_db=True, - node_type=node_type, - node_side_type=node_side_type, - enable_warnings=enable_warnings, - migrate=True, - in_memory_workers=in_memory_workers, - queue_port=queue_port, - create_producer=create_producer, - n_consumers=n_consumers, - ) - else: - worker = worker_class( - name=name, - processes=processes, - local_db=True, - node_type=node_type, - node_side_type=node_side_type, - enable_warnings=enable_warnings, - migrate=True, - in_memory_workers=in_memory_workers, - queue_port=queue_port, - create_producer=create_producer, - n_consumers=n_consumers, - ) - router = make_routes(worker=worker) - app = make_app(worker.name, router=router) - - if reset: - try: - python_pids = find_python_processes_on_port(port) - for pid in python_pids: - print(f"Stopping process on port: {port}") - kill_process(pid) - time.sleep(1) - except Exception: # nosec - print(f"Failed to kill python process on port: {port}") - - log_level = "critical" - if dev_mode: - log_level = "info" - logging.getLogger("uvicorn").setLevel(logging.CRITICAL) - logging.getLogger("uvicorn.access").setLevel(logging.CRITICAL) - config = uvicorn.Config( - app, host=host, port=port, log_level=log_level, reload=dev_mode - ) - server = uvicorn.Server(config) - - await server.serve() - asyncio.get_running_loop().stop() - - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop.run_until_complete( - _run_uvicorn( - name, - node_type, - host, - port, - reset, - dev_mode, - node_side_type, - ) - ) - loop.close() - - -def serve_node( - name: str, - node_type: NodeType = NodeType.DOMAIN, - node_side_type: NodeSideType = NodeSideType.HIGH_SIDE, - host: str = "0.0.0.0", # nosec - port: int = 8080, - processes: int = 1, - reset: bool = False, - dev_mode: bool = False, - tail: bool = False, - enable_warnings: bool = False, - in_memory_workers: bool = True, - queue_port: int | None = None, - create_producer: bool = False, - n_consumers: int = 0, -) -> tuple[Callable, Callable]: - server_process = multiprocessing.Process( - target=run_uvicorn, - args=( - name, - node_type, - host, - port, - processes, - reset, - dev_mode, - node_side_type, - enable_warnings, - in_memory_workers, - queue_port, - create_producer, - n_consumers, - ), - ) - - def stop() -> None: - print(f"Stopping {name}") - server_process.terminate() - server_process.join(3) - if server_process.is_alive(): - # this is needed because often the process is still alive - server_process.kill() - print("killed") - - def start() -> None: - print(f"Starting {name} server on {host}:{port}") - server_process.start() - - if tail: - try: - while True: - time.sleep(1) - except KeyboardInterrupt: - try: - stop() - except SystemExit: - os._exit(130) - else: - for i in range(WAIT_TIME_SECONDS): - try: - req = requests.get( - f"http://{host}:{port}{API_PATH}/metadata", - timeout=DEFAULT_TIMEOUT, - ) - if req.status_code == 200: - print(" Done.") - break - except Exception: - time.sleep(1) - if i == 0: - print("Waiting for server to start", end="") - else: - print(".", end="") - - return start, stop - - -def find_python_processes_on_port(port: int) -> list[int]: - system = platform.system() - - if system == "Windows": - command = f"netstat -ano | findstr :{port}" - process = subprocess.Popen( # nosec - command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - ) - output, _ = process.communicate() - pids = [ - int(line.strip().split()[-1]) for line in 
output.split("\n") if line.strip() - ] - - else: # Linux and MacOS - command = f"lsof -i :{port} -sTCP:LISTEN -t" - process = subprocess.Popen( # nosec - command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - ) - output, _ = process.communicate() - pids = [int(pid.strip()) for pid in output.split("\n") if pid.strip()] - - python_pids = [] - for pid in pids: - try: - if system == "Windows": - command = ( - f"wmic process where (ProcessId='{pid}') get ProcessId,CommandLine" - ) - else: - command = f"ps -p {pid} -o pid,command" - - process = subprocess.Popen( # nosec - command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - ) - output, _ = process.communicate() - lines = output.strip().split("\n") - - if len(lines) > 1 and "python" in lines[1].lower(): - python_pids.append(pid) - - except Exception as e: - print(f"Error checking process {pid}: {e}") - - return python_pids - - -def kill_process(pid: int) -> None: - try: - os.kill(pid, signal.SIGTERM) - print(f"Process {pid} terminated.") - except Exception as e: - print(f"Error killing process {pid}: {e}") diff --git a/packages/syft/src/syft/node/worker.py b/packages/syft/src/syft/node/worker.py deleted file mode 100644 index 60d3bd61682..00000000000 --- a/packages/syft/src/syft/node/worker.py +++ /dev/null @@ -1,8 +0,0 @@ -# relative -from ..serde.serializable import serializable -from .node import Node - - -@serializable() -class Worker(Node): - pass diff --git a/packages/syft/src/syft/node/worker_settings.py b/packages/syft/src/syft/node/worker_settings.py deleted file mode 100644 index c3b8954a3e8..00000000000 --- a/packages/syft/src/syft/node/worker_settings.py +++ /dev/null @@ -1,52 +0,0 @@ -# future -from __future__ import annotations - -# third party -from typing_extensions import Self - -# relative -from ..abstract_node import AbstractNode -from ..abstract_node import NodeSideType -from ..abstract_node import NodeType -from ..node.credentials import SyftSigningKey -from ..serde.serializable import serializable -from ..service.queue.base_queue import QueueConfig -from ..store.blob_storage import BlobStorageConfig -from ..store.document_store import StoreConfig -from ..types.syft_object import SYFT_OBJECT_VERSION_3 -from ..types.syft_object import SyftObject -from ..types.uid import UID - - -@serializable() -class WorkerSettings(SyftObject): - __canonical_name__ = "WorkerSettings" - __version__ = SYFT_OBJECT_VERSION_3 - - id: UID - name: str - node_type: NodeType - node_side_type: NodeSideType - signing_key: SyftSigningKey - document_store_config: StoreConfig - action_store_config: StoreConfig - blob_store_config: BlobStorageConfig | None = None - queue_config: QueueConfig | None = None - - @classmethod - def from_node(cls, node: AbstractNode) -> Self: - if node.node_side_type: - node_side_type: str = node.node_side_type.value - else: - node_side_type = NodeSideType.HIGH_SIDE - return cls( - id=node.id, - name=node.name, - node_type=node.node_type, - signing_key=node.signing_key, - document_store_config=node.document_store_config, - action_store_config=node.action_store_config, - node_side_type=node_side_type, - blob_store_config=node.blob_store_config, - queue_config=node.queue_config, - ) diff --git a/packages/syft/src/syft/orchestra.py b/packages/syft/src/syft/orchestra.py new file mode 100644 index 00000000000..c129afcc5c8 --- /dev/null +++ b/packages/syft/src/syft/orchestra.py @@ -0,0 +1,401 @@ +"""Python Level API to launch Syft services.""" + +# future +from 
__future__ import annotations + +# stdlib +from collections.abc import Callable +import getpass +import inspect +import json +import logging +import os +from pathlib import Path +import sys +from typing import Any + +# third party +from IPython.display import display + +# relative +from .abstract_server import ServerSideType +from .abstract_server import ServerType +from .client.client import login as sy_login +from .client.client import login_as_guest as sy_login_as_guest +from .deployment_type import DeploymentType +from .protocol.data_protocol import stage_protocol_changes +from .server.datasite import Datasite +from .server.enclave import Enclave +from .server.gateway import Gateway +from .server.uvicorn import serve_server +from .service.queue.queue import ConsumerType +from .service.response import SyftInfo +from .types.errors import SyftException +from .util.util import get_random_available_port + +logger = logging.getLogger(__name__) + +DEFAULT_PORT = 8080 +DEFAULT_URL = "http://localhost" + +ClientAlias = Any # we don't want to import Client in case it changes + + +def get_server_type(server_type: str | ServerType | None) -> ServerType | None: + if server_type is None: + server_type = os.environ.get("ORCHESTRA_SERVER_TYPE", ServerType.DATASITE) + try: + return ServerType(server_type) + except ValueError: + print(f"server_type: {server_type} is not a valid ServerType: {ServerType}") + return None + + +def get_deployment_type(deployment_type: str | None) -> DeploymentType | None: + if deployment_type is None: + deployment_type = os.environ.get( + "ORCHESTRA_DEPLOYMENT_TYPE", DeploymentType.PYTHON + ) + + try: + return DeploymentType(deployment_type) + except ValueError: + print( + f"deployment_type: {deployment_type} is not a valid DeploymentType: {DeploymentType}" + ) + return None + + +class ServerHandle: + def __init__( + self, + server_type: ServerType, + deployment_type: DeploymentType, + server_side_type: ServerSideType, + name: str, + port: int | None = None, + url: str | None = None, + python_server: Any | None = None, + shutdown: Callable | None = None, + ) -> None: + self.server_type = server_type + self.name = name + self.port = port + self.url = url + self.python_server = python_server + self.shutdown = shutdown + self.deployment_type = deployment_type + self.server_side_type = server_side_type + + @property + def client(self) -> Any: + if self.port: + return sy_login_as_guest(url=self.url, port=self.port) # type: ignore + elif self.deployment_type == DeploymentType.PYTHON: + return self.python_server.get_guest_client(verbose=False) # type: ignore + else: + raise NotImplementedError( + f"client not implemented for the deployment type:{self.deployment_type}" + ) + + def login_as_guest(self, **kwargs: Any) -> ClientAlias: + return self.client.login_as_guest(**kwargs) + + def login( + self, email: str | None = None, password: str | None = None, **kwargs: Any + ) -> ClientAlias: + if not email: + email = input("Email: ") + + if not password: + password = getpass.getpass("Password: ") + + if self.port: + return sy_login( + email=email, password=password, url=self.url, port=self.port + ) # type: ignore + elif self.deployment_type == DeploymentType.PYTHON: + guest_client = self.python_server.get_guest_client(verbose=False) # type: ignore + return guest_client.login(email=email, password=password, **kwargs) # type: ignore + else: + raise NotImplementedError( + f"client not implemented for the deployment type:{self.deployment_type}" + ) + + def register( + self, + name: str, + 
email: str | None = None, + password: str | None = None, + password_verify: str | None = None, + institution: str | None = None, + website: str | None = None, + ) -> Any: + if not email: + email = input("Email: ") + if not password: + password = getpass.getpass("Password: ") + if not password_verify: + password_verify = getpass.getpass("Confirm Password: ") + if password != password_verify: + raise SyftException(public_message="Passwords do not match") + + client = self.client + return client.register( + name=name, + email=email, + password=password, + institution=institution, + password_verify=password_verify, + website=website, + ) + + def land(self) -> None: + if self.deployment_type == DeploymentType.PYTHON: + if self.shutdown: + self.shutdown() + else: + print( + f"Shutdown not implemented for the deployment type:{self.deployment_type}", + file=sys.stderr, + ) + + +def deploy_to_python( + server_type_enum: ServerType, + deployment_type_enum: DeploymentType, + port: int | str, + name: str, + host: str, + reset: bool, + tail: bool, + dev_mode: bool, + processes: int, + server_side_type: ServerSideType, + enable_warnings: bool, + n_consumers: int, + thread_workers: bool, + create_producer: bool = False, + queue_port: int | None = None, + association_request_auto_approval: bool = False, + background_tasks: bool = False, + log_level: str | int | None = None, + debug: bool = False, + migrate: bool = False, + consumer_type: ConsumerType | None = None, + db_url: str | None = None, +) -> ServerHandle: + worker_classes = { + ServerType.DATASITE: Datasite, + ServerType.GATEWAY: Gateway, + ServerType.ENCLAVE: Enclave, + } + + if dev_mode: + logger.debug("Staging Protocol Changes...") + stage_protocol_changes() + + kwargs = { + "name": name, + "host": host, + "port": port, + "reset": reset, + "processes": processes, + "dev_mode": dev_mode, + "tail": tail, + "server_type": server_type_enum, + "server_side_type": server_side_type, + "enable_warnings": enable_warnings, + "queue_port": queue_port, + "n_consumers": n_consumers, + "create_producer": create_producer, + "association_request_auto_approval": association_request_auto_approval, + "log_level": log_level, + "background_tasks": background_tasks, + "debug": debug, + "migrate": migrate, + "deployment_type": deployment_type_enum, + "consumer_type": consumer_type, + "db_url": db_url, + } + + if port: + kwargs["in_memory_workers"] = True + if port == "auto": + port = get_random_available_port() + else: + try: + port = int(port) + except ValueError: + raise ValueError( + f"port must be either 'auto' or a valid int not: {port}" + ) + kwargs["port"] = port + + sig = inspect.signature(serve_server) + supported_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters} + + start, stop = serve_server(**supported_kwargs) + start() + return ServerHandle( + server_type=server_type_enum, + deployment_type=deployment_type_enum, + name=name, + port=port, + url="http://localhost", + shutdown=stop, + server_side_type=server_side_type, + ) + else: + kwargs["thread_workers"] = thread_workers + if server_type_enum in worker_classes: + worker_class = worker_classes[server_type_enum] + sig = inspect.signature(worker_class.named) + supported_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters} + if "server_type" in sig.parameters.keys() and "migrate" in sig.parameters: + supported_kwargs["migrate"] = migrate + worker = worker_class.named(**supported_kwargs) + else: + raise NotImplementedError( + f"server_type: {server_type_enum} is not supported" + ) 
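Taken together, `deploy_to_python` and the `Orchestra.launch` entry point below replace the deleted `run.py`/`server.py` launch path. A minimal in-process usage sketch, using the development credentials that appear elsewhere in this diff (everything else is illustrative):

# Sketch: launching an in-process datasite through the new orchestra module.
from syft.orchestra import Orchestra

handle = Orchestra.launch(name="test-datasite", dev_mode=True, reset=True)
client = handle.login(email="info@openmined.org", password="changethis")
# ... use `client` against the local server ...
handle.land()  # shuts the in-process server down again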
+ + def stop() -> None: + worker.stop() + + return ServerHandle( + server_type=server_type_enum, + deployment_type=deployment_type_enum, + name=name, + python_server=worker, + server_side_type=server_side_type, + shutdown=stop, + ) + + +def deploy_to_remote( + server_type_enum: ServerType, + deployment_type_enum: DeploymentType, + name: str, + server_side_type: ServerSideType, + host: str | None = None, + port: int | None = None, + migrate: bool = False, +) -> ServerHandle: + if migrate: + raise ValueError("Cannot migrate via orchestra on remote server") + + # Preference order: Environment Variable > Argument > Default + server_url = os.getenv("SERVER_URL") or host or DEFAULT_URL + server_port = os.getenv("SERVER_PORT") or port or DEFAULT_PORT + if server_port == "auto": + raise ValueError("Cannot use auto port on remote server") + + return ServerHandle( + server_type=server_type_enum, + deployment_type=deployment_type_enum, + name=name, + server_side_type=server_side_type, + url=server_url, + port=int(server_port), + ) + + +class Orchestra: + @staticmethod + def launch( + # server information and deployment + name: str | None = None, + server_type: str | ServerType | None = None, + deploy_to: str | None = None, + server_side_type: str | None = None, + # worker related inputs + port: int | str | None = None, + processes: int = 1, # temporary work around for jax in subprocess + dev_mode: bool = False, + reset: bool = False, + log_level: str | int | None = None, + tail: bool = False, + host: str | None = "0.0.0.0", # nosec + enable_warnings: bool = False, + n_consumers: int = 0, + thread_workers: bool = False, + create_producer: bool = False, + queue_port: int | None = None, + association_request_auto_approval: bool = False, + background_tasks: bool = False, + debug: bool = False, + migrate: bool = False, + from_state_folder: str | Path | None = None, + consumer_type: ConsumerType | None = None, + db_url: str | None = None, + ) -> ServerHandle: + if from_state_folder is not None: + with open(f"{from_state_folder}/config.json") as f: + kwargs = json.load(f) + server_handle = Orchestra.launch(**kwargs) + client = server_handle.login( # nosec + email="info@openmined.org", password="changethis" + ) + client.load_migration_data(f"{from_state_folder}/migration.blob") + return server_handle + if dev_mode is True: + thread_workers = True + os.environ["DEV_MODE"] = str(dev_mode) + + server_type_enum: ServerType | None = get_server_type(server_type=server_type) + server_side_type_enum = ( + ServerSideType.HIGH_SIDE + if server_side_type is None + else ServerSideType(server_side_type) + ) + + deployment_type_enum: DeploymentType | None = get_deployment_type( + deployment_type=deploy_to + ) + + if deployment_type_enum == DeploymentType.PYTHON: + server_handle = deploy_to_python( + server_type_enum=server_type_enum, + deployment_type_enum=deployment_type_enum, + port=port, + name=name, + host=host, + reset=reset, + tail=tail, + dev_mode=dev_mode, + processes=processes, + server_side_type=server_side_type_enum, + enable_warnings=enable_warnings, + log_level=log_level, + n_consumers=n_consumers, + thread_workers=thread_workers, + create_producer=create_producer, + queue_port=queue_port, + association_request_auto_approval=association_request_auto_approval, + background_tasks=background_tasks, + debug=debug, + migrate=migrate, + consumer_type=consumer_type, + db_url=db_url, + ) + display( + SyftInfo( + message=f"You have launched a development server at http://{host}:{server_handle.port}." 
+ + " It is intended only for local use." + ) + ) + return server_handle + elif deployment_type_enum == DeploymentType.REMOTE: + return deploy_to_remote( + server_type_enum=server_type_enum, + deployment_type_enum=deployment_type_enum, + name=name, + host=host, + port=port, + server_side_type=server_side_type_enum, + migrate=migrate, + ) + raise NotImplementedError( + f"deployment_type: {deployment_type_enum} is not supported" + ) diff --git a/packages/syft/src/syft/protocol/data_protocol.py b/packages/syft/src/syft/protocol/data_protocol.py index cf9a4837642..e4e9804603a 100644 --- a/packages/syft/src/syft/protocol/data_protocol.py +++ b/packages/syft/src/syft/protocol/data_protocol.py @@ -3,31 +3,45 @@ from collections.abc import Iterable from collections.abc import MutableMapping from collections.abc import MutableSequence +from functools import cache import hashlib import json from operator import itemgetter import os from pathlib import Path import re +from types import UnionType +import typing from typing import Any +import warnings # third party from packaging.version import parse -from result import OkErr -from result import Result + +# syft absolute +from syft.types.result import Err +from syft.types.result import Ok +from syft.util.util import get_dev_mode # relative from .. import __version__ -from ..serde.recursive import TYPE_BANK -from ..service.response import SyftError -from ..service.response import SyftException from ..service.response import SyftSuccess from ..types.dicttuple import DictTuple +from ..types.errors import SyftException from ..types.syft_object import SyftBaseObject +from ..types.syft_object_registry import SyftObjectRegistry PROTOCOL_STATE_FILENAME = "protocol_version.json" PROTOCOL_TYPE = str | int +IGNORE_TYPES = [ + "mock_type", + "MockWrapper", + "base_stash_mock_object_type", + "MockObjectFromSyftBaseObj", + "MockObjectToSyftBaseObj", +] + def natural_key(key: PROTOCOL_TYPE) -> list[int | str | Any]: """Define key for natural ordering of strings.""" @@ -53,9 +67,42 @@ def protocol_release_dir() -> Path: return data_protocol_dir() / "releases" +def handle_union_type_klass_name(type_klass_name: str) -> str: + if type_klass_name == typing.Union.__name__: + return UnionType.__name__ + return type_klass_name + + +def handle_annotation_repr_(annotation: type) -> str: + """Handle typing representation.""" + origin = typing.get_origin(annotation) + args = typing.get_args(annotation) + + def get_annotation_repr_for_arg(arg: type) -> str: + if hasattr(arg, "__canonical_name__"): + return arg.__canonical_name__ + return getattr(arg, "__name__", str(arg)) + + if origin and args: + args_repr = ", ".join(get_annotation_repr_for_arg(arg) for arg in args) + origin_repr = getattr(origin, "__name__", str(origin)) + + # Handle typing.Union and types.UnionType + origin_repr = handle_union_type_klass_name(origin_repr) + return f"{origin_repr}: [{args_repr}]" + elif args: + args_repr = ", ".join( + getattr(arg, "__name__", str(arg)) for arg in sorted(args) + ) + return args_repr + else: + return repr(annotation) + + class DataProtocol: - def __init__(self, filename: str) -> None: + def __init__(self, filename: str, raise_exception: bool = False) -> None: self.file_path = data_protocol_dir() / filename + self.raise_exception = raise_exception self.load_state() def load_state(self) -> None: @@ -67,8 +114,12 @@ def load_state(self) -> None: @staticmethod def _calculate_object_hash(klass: type[SyftBaseObject]) -> str: # TODO: this depends on what is marked as serde + + # 
Rebuild the model to ensure that the fields are up to date + # and any ForwardRef are resolved + klass.model_rebuild() field_data = { - field: repr(field_info.annotation) + field: handle_annotation_repr_(field_info.rebuild_annotation()) for field, field_info in sorted( klass.model_fields.items(), key=itemgetter(0) ) @@ -104,12 +155,13 @@ def read_history(self) -> dict: return protocol_history def save_history(self, history: dict) -> None: - for file_path in protocol_release_dir().iterdir(): - for version in self.read_json(file_path): - # Skip adding file if the version is not part of the history - if version not in history.keys(): - continue - history[version] = {"release_name": file_path.name} + if os.path.exists(protocol_release_dir()): + for file_path in protocol_release_dir().iterdir(): + for version in self.read_json(file_path): + # Skip adding file if the version is not part of the history + if version not in history.keys(): + continue + history[version] = {"release_name": file_path.name} self.file_path.write_text(json.dumps(history, indent=2) + "\n") @property @@ -140,9 +192,9 @@ def build_state(self, stop_key: str | None = None) -> dict: or hash_str in state_version_hashes ): raise Exception( - f"Can't add {object_metadata} already in state {versions}" + f"Can't add {object_metadata} for protocol {protocol_number} already in state {versions}" ) - elif action == "remove" and ( + if action == "remove" and ( str(version) not in state_versions.keys() and hash_str not in state_version_hashes ): @@ -161,24 +213,29 @@ def build_state(self, stop_key: str | None = None) -> dict: return state_dict return state_dict + @staticmethod + def obj_json(version: str | int, _hash: str, action: str = "add") -> dict: + return { + "version": int(version), + "hash": _hash, + "action": action, + } + def diff_state(self, state: dict) -> tuple[dict, dict]: compare_dict: dict = defaultdict(dict) # what versions are in the latest code object_diff: dict = defaultdict(dict) # diff in latest code with saved json - for k in TYPE_BANK: - ( - nonrecursive, - serialize, - deserialize, - attribute_list, - exclude_attrs_list, - serde_overrides, - hash_exclude_attrs, - cls, - attribute_types, - version, - ) = TYPE_BANK[k] + all_serde_propeties = [ + serde_properties + for version_dict in SyftObjectRegistry.__object_serialization_registry__.values() + for serde_properties in version_dict.values() + ] + for serde_properties in all_serde_propeties: + cls, version = serde_properties[7], serde_properties[9] if issubclass(cls, SyftBaseObject): canonical_name = cls.__canonical_name__ + if canonical_name in IGNORE_TYPES: + continue + hash_str = DataProtocol._calculate_object_hash(cls) # build this up for later @@ -186,10 +243,8 @@ def diff_state(self, state: dict) -> tuple[dict, dict]: if canonical_name not in state: # new object so its an add - object_diff[canonical_name][str(version)] = {} - object_diff[canonical_name][str(version)]["version"] = int(version) - object_diff[canonical_name][str(version)]["hash"] = hash_str - object_diff[canonical_name][str(version)]["action"] = "add" + obj_to_add = self.obj_json(int(version), hash_str) + object_diff[canonical_name][str(version)] = obj_to_add continue versions = state[canonical_name] @@ -203,28 +258,25 @@ def diff_state(self, state: dict) -> tuple[dict, dict]: is_protocol_dev = versions[str(version)][1] == "dev" if is_protocol_dev: # force overwrite existing object so its an add - object_diff[canonical_name][str(version)] = {} - 
object_diff[canonical_name][str(version)]["version"] = int( - version - ) - object_diff[canonical_name][str(version)]["hash"] = hash_str - object_diff[canonical_name][str(version)]["action"] = "add" + obj_to_add = self.obj_json(int(version), hash_str) + object_diff[canonical_name][str(version)] = obj_to_add continue - - raise Exception( - f"{canonical_name} for class {cls.__name__} fqn {cls} " - + f"version {version} hash has changed. " - + f"{hash_str} not in {versions.values()}. " - + "Is a unique __canonical_name__ for this subclass missing? " - + "If the class has changed you will need to define a new class with the changes, " - + "with same __canonical_name__ and bump the __version__ number." - ) + error_msg = f"""{canonical_name} for class {cls.__name__} fqn {cls}\ +version {version} hash has changed. {hash_str} not in {versions.values()}. \ +Is a unique __canonical_name__ for this subclass missing? +If the class has changed you will need to define a new class with the changes, \ +with same __canonical_name__ and bump the __version__ number. {cls.model_fields} +""" + + if get_dev_mode() or self.raise_exception: + raise Exception(error_msg) + else: + warnings.warn(error_msg, stacklevel=1, category=UserWarning) + break else: # new object so its an add - object_diff[canonical_name][str(version)] = {} - object_diff[canonical_name][str(version)]["version"] = int(version) - object_diff[canonical_name][str(version)]["hash"] = hash_str - object_diff[canonical_name][str(version)]["action"] = "add" + obj_to_add = self.obj_json(int(version), hash_str) + object_diff[canonical_name][str(version)] = obj_to_add continue # now check for remove actions @@ -232,22 +284,18 @@ def diff_state(self, state: dict) -> tuple[dict, dict]: for version, (hash_str, _) in state[canonical_name].items(): if canonical_name not in compare_dict: # missing so its a remove - object_diff[canonical_name][str(version)] = {} - object_diff[canonical_name][str(version)]["version"] = int(version) - object_diff[canonical_name][str(version)]["hash"] = hash_str - object_diff[canonical_name][str(version)]["action"] = "remove" + obj_to_remove = self.obj_json(int(version), hash_str, "remove") + object_diff[canonical_name][str(version)] = obj_to_remove continue versions = compare_dict[canonical_name] if str(version) not in versions.keys(): # missing so its a remove - object_diff[canonical_name][str(version)] = {} - object_diff[canonical_name][str(version)]["version"] = int(version) - object_diff[canonical_name][str(version)]["hash"] = hash_str - object_diff[canonical_name][str(version)]["action"] = "remove" + obj_to_remove = self.obj_json(int(version), hash_str, "remove") + object_diff[canonical_name][str(version)] = obj_to_remove continue return object_diff, compare_dict - def stage_protocol_changes(self) -> Result[SyftSuccess, SyftError]: + def stage_protocol_changes(self) -> SyftSuccess: change_count = 0 current_history = self.protocol_history if "dev" not in current_history: @@ -291,18 +339,18 @@ def stage_protocol_changes(self) -> Result[SyftSuccess, SyftError]: self.load_state() return SyftSuccess(message=f"{change_count} Protocol Updates Staged to dev") - def bump_protocol_version(self) -> Result[SyftSuccess, SyftError]: + def bump_protocol_version(self) -> SyftSuccess: if len(self.diff): - raise Exception( - "You can't bump the protocol version with unstaged changes." + raise SyftException( + public_message="You can't bump the protocol version with unstaged changes." 
) keys = self.protocol_history.keys() if "dev" not in keys: self.validate_release() print("You can't bump the protocol if there are no staged changes.") - return SyftError( - message="Failed to bump version as there are no staged changes." + raise SyftException( + public_message="Failed to bump version as there are no staged changes." ) highest_protocol = 0 @@ -387,7 +435,7 @@ def validate_release(self) -> None: # Reload protocol self.read_history() - def revert_latest_protocol(self) -> Result[SyftSuccess, SyftError]: + def revert_latest_protocol(self) -> None: """Revert latest protocol changes to dev""" # Get current protocol history @@ -401,7 +449,9 @@ def revert_latest_protocol(self) -> Result[SyftSuccess, SyftError]: # If current protocol is dev, skip revert if latest_protocol is None or latest_protocol == "dev": - return SyftError(message="Revert skipped !! Already running dev protocol.") + raise SyftException( + public_message="Revert skipped !! Already running dev protocol." + ) # Read the current released protocol release_name = protocol_history[latest_protocol]["release_name"] @@ -416,14 +466,15 @@ def revert_latest_protocol(self) -> Result[SyftSuccess, SyftError]: # Save history self.save_history(protocol_history) + self.load_state() - def check_protocol(self) -> Result[SyftSuccess, SyftError]: + def check_protocol(self) -> SyftSuccess: if len(self.diff) != 0: - return SyftError(message="Protocol Changes Unstaged") + raise SyftException(public_message="Protocol Changes Unstaged") else: return SyftSuccess(message="Protocol Stable") - def check_or_stage_protocol(self) -> Result[SyftSuccess, SyftError]: + def check_or_stage_protocol(self) -> SyftSuccess: if not self.check_protocol(): self.stage_protocol_changes() result = self.check_protocol() @@ -447,7 +498,7 @@ def calculate_supported_protocols(self) -> dict: # we assume its supported until we prove otherwise protocol_supported[v] = True # iterate through each object - for canonical_name, _ in version_data["object_versions"].items(): + for canonical_name in version_data["object_versions"].keys(): if canonical_name not in self.state: protocol_supported[v] = False break @@ -462,22 +513,38 @@ def has_dev(self) -> bool: return True return False + def reset_dev_protocol(self) -> None: + if self.has_dev: + del self.protocol_history["dev"] + self.save_history(self.protocol_history) -def get_data_protocol() -> DataProtocol: - return DataProtocol(filename=data_protocol_file_name()) +def get_data_protocol(raise_exception: bool = False) -> DataProtocol: + return _get_data_protocol( + filename=data_protocol_file_name(), + raise_exception=raise_exception, + ) -def stage_protocol_changes() -> Result[SyftSuccess, SyftError]: - data_protocol = get_data_protocol() + +@cache +def _get_data_protocol(filename: str, raise_exception: bool = False) -> DataProtocol: + return DataProtocol( + filename=filename, + raise_exception=raise_exception, + ) + + +def stage_protocol_changes() -> SyftSuccess: + data_protocol = get_data_protocol(raise_exception=True) return data_protocol.stage_protocol_changes() -def bump_protocol_version() -> Result[SyftSuccess, SyftError]: - data_protocol = get_data_protocol() +def bump_protocol_version() -> SyftSuccess: + data_protocol = get_data_protocol(raise_exception=True) return data_protocol.bump_protocol_version() -def check_or_stage_protocol() -> Result[SyftSuccess, SyftError]: +def check_or_stage_protocol() -> SyftSuccess: data_protocol = get_data_protocol() return data_protocol.check_or_stage_protocol() @@ -489,7 +556,7 
@@ def debox_arg_and_migrate(arg: Any, protocol_state: dict) -> Any: single_entity = False - if isinstance(arg, OkErr): + if isinstance(arg, Ok) or isinstance(arg, Err): constructor = type(arg) arg = arg.value diff --git a/packages/syft/src/syft/protocol/protocol_version.json b/packages/syft/src/syft/protocol/protocol_version.json index 64a5a72dc6c..b30071525df 100644 --- a/packages/syft/src/syft/protocol/protocol_version.json +++ b/packages/syft/src/syft/protocol/protocol_version.json @@ -1,14 +1,14 @@ { "1": { - "release_name": "0.8.2.json" + "release_name": "0.9.1.json" }, "2": { - "release_name": "0.8.3.json" + "release_name": "0.9.2.json" }, "3": { - "release_name": "0.8.4.json" + "release_name": "0.9.3.json" }, "4": { - "release_name": "0.8.5.json" + "release_name": "0.9.5.json" } } diff --git a/packages/syft/src/syft/protocol/releases/.empty b/packages/syft/src/syft/protocol/releases/.empty new file mode 100644 index 00000000000..417fe0de28b --- /dev/null +++ b/packages/syft/src/syft/protocol/releases/.empty @@ -0,0 +1 @@ +# remove this file as soon as we have a release again \ No newline at end of file diff --git a/packages/syft/src/syft/protocol/releases/.gitignore b/packages/syft/src/syft/protocol/releases/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft/src/syft/protocol/releases/0.8.2.json b/packages/syft/src/syft/protocol/releases/0.8.2.json deleted file mode 100644 index 0ea2060243e..00000000000 --- a/packages/syft/src/syft/protocol/releases/0.8.2.json +++ /dev/null @@ -1,763 +0,0 @@ -{ - "1": { - "object_versions": { - "PartialSyftObject": { - "1": { - "version": 1, - "hash": "008917584d8e1c09015cdbef02f59c0622f48e0618877c1b44425c8846befc13", - "action": "add" - } - }, - "NodeMetadataUpdate": { - "1": { - "version": 1, - "hash": "569d124c23590360bda240c19b53314ccc6204c5d1ab0d2898976a028e002191", - "action": "add" - } - }, - "NodeMetadata": { - "1": { - "version": 1, - "hash": "6bee018894dfdf697ea624740d0bf051750e0b0d8470ced59646f6d8812068ac", - "action": "add" - }, - "2": { - "version": 2, - "hash": "f856169fea72486cd436875ce4411ef935da11eb7c5af48121adfa00d4c0cdb6", - "action": "add" - }, - "3": { - "version": 3, - "hash": "3cc67abf394a805066a88aef0bea15bde609b9ecbe7ec15172eac5e7a0b7ef7c", - "action": "add" - } - }, - "StoreConfig": { - "1": { - "version": 1, - "hash": "17de8875cf590311ddb042140347ffc79d4a85028e504dad178ca4e1237ec861", - "action": "add" - } - }, - "MongoDict": { - "1": { - "version": 1, - "hash": "640734396edae801e1601fe7777710e67685e552acb0244ad8b4f689599baca9", - "action": "add" - } - }, - "MongoStoreConfig": { - "1": { - "version": 1, - "hash": "e52aa382e300b0b69aaa2d80aadb4e3a9a3c02b3c741b71d56f959c4d3891ce5", - "action": "add" - } - }, - "LinkedObject": { - "1": { - "version": 1, - "hash": "824567c6933c095d0e2f6995c8de3581c0fbd2e9e4ead35c8159f7964709c28e", - "action": "add" - } - }, - "BaseConfig": { - "1": { - "version": 1, - "hash": "4e5257080ce615aa4122b02bad8487e4c7d6d0f171ff77abbc9e8cd3e33df89a", - "action": "add" - } - }, - "ServiceConfig": { - "1": { - "version": 1, - "hash": "ca91f59bf045d949d82860f7d52655bfbede4cf6bdc5bae8f847f08a16f05d74", - "action": "add" - } - }, - "LibConfig": { - "1": { - "version": 1, - "hash": "c6ff229aea16874c5d9ae4d1f9e500d13f5cf984bbcee7abd16c5841707a2f78", - "action": "add" - } - }, - "APIEndpoint": { - "1": { - "version": 1, - "hash": "c0e83867b107113e6fed06364ba364c24b2f4af35b15a3869b176318d3be7989", - "action": "add" - } - }, - "LibEndpoint": { - "1": { - "version": 1, - 
"hash": "153eac6d8990774eebfffaa75a9895e7c4e1a0e09465d5da0baf4c3a3b03369d", - "action": "add" - } - }, - "SignedSyftAPICall": { - "1": { - "version": 1, - "hash": "e66a116de2fa44ebdd0d4c2d7d5a047dedb555fd201a0f431cd8017d9d33a61d", - "action": "add" - } - }, - "SyftAPICall": { - "1": { - "version": 1, - "hash": "014bd1d0933f6070888a313edba239170759de24eae49bf2374c1be4dbe2b4d7", - "action": "add" - } - }, - "SyftAPIData": { - "1": { - "version": 1, - "hash": "db101a75227e34750d7056785a1e87bb2e8ad6604f19c372d0cb6aa437243bf5", - "action": "add" - } - }, - "SyftAPI": { - "1": { - "version": 1, - "hash": "2bba1d9fcf677a58e35bf903de3da22ee4913af138aa3012af9c46b3609579cd", - "action": "add" - } - }, - "User": { - "1": { - "version": 1, - "hash": "078636e64f737e60245b39cf348d30fb006531e80c12b70aa7cf98254e1bb37a", - "action": "add" - } - }, - "UserUpdate": { - "1": { - "version": 1, - "hash": "839dd90aeb611e1dc471c8fd6daf230e913465c0625c6a297079cb7f0a271195", - "action": "add" - } - }, - "UserCreate": { - "1": { - "version": 1, - "hash": "dab78b63544ae91c09f9843c323cb237c0a6fcfeb71c1acf5f738e2fcf5c277f", - "action": "add" - } - }, - "UserSearch": { - "1": { - "version": 1, - "hash": "69d1e10b81c8a4143cf70e4f911d8562732af2458ebbc455ca64542f11373dd1", - "action": "add" - } - }, - "UserView": { - "1": { - "version": 1, - "hash": "63289383fe7e7584652f242a4362ce6e2f0ade52f6416ab6149b326a506b0675", - "action": "add" - } - }, - "UserViewPage": { - "1": { - "version": 1, - "hash": "16dac6209b19a934d286ef1efa874379e0040c324e71023c57d1bc6d2d367171", - "action": "add" - } - }, - "UserPrivateKey": { - "1": { - "version": 1, - "hash": "7cb196587887f0f3bffb298dd9f3b88509e9b2748792bf8dc03bdd0d6b98714a", - "action": "add" - } - }, - "NodeSettingsUpdate": { - "1": { - "version": 1, - "hash": "b6ddc66ff270a3c2c4760e31e1a55d72ed04ccae2d0115ebe2fba6f2bf9bd119", - "action": "add" - } - }, - "NodeSettings": { - "1": { - "version": 1, - "hash": "b662047bb278f4f5db77c102f94b733c3a929839271b3d6b82ea174a60e2aaf0", - "action": "add" - }, - "2": { - "version": 2, - "hash": "29a82afcb006a044b6ae04c6ea8a067d145d28b4210bb038ea9fa86ebde108c8", - "action": "add" - } - }, - "HTTPConnection": { - "1": { - "version": 1, - "hash": "5ee19eaf55ecbe7945ea45924c036ec0f500114a2f64176620961a8c2ec94cdb", - "action": "add" - } - }, - "PythonConnection": { - "1": { - "version": 1, - "hash": "011946fc9af0a6987f5c7bc9b0208b2fae9d65217531430bced7ba542788da1a", - "action": "add" - } - }, - "DateTime": { - "1": { - "version": 1, - "hash": "7e9d89309a10d2110a7ae4f97d8f25a7914853269e8fa0c531630790c1253f17", - "action": "add" - } - }, - "BlobFile": { - "1": { - "version": 1, - "hash": "47ed55183d619c6c624e35412360a41de42833e2c24223c1de1ad12a84fdafc2", - "action": "add" - } - }, - "SecureFilePathLocation": { - "1": { - "version": 1, - "hash": "7febc066e2ee5a3a4a891720afede3f5c155cacc0557662ac4d04bf67b964c6d", - "action": "add" - } - }, - "SeaweedSecureFilePathLocation": { - "1": { - "version": 1, - "hash": "5724a38b1a92b8a55da3d9cc34a720365a6d0c32683acda630fc44067173e201", - "action": "add" - } - }, - "BlobStorageEntry": { - "1": { - "version": 1, - "hash": "9f1b027cce390ee6f71c7a81e7420bb71a477b29c6c62ba74e781a97bc5434e6", - "action": "add" - } - }, - "BlobStorageMetadata": { - "1": { - "version": 1, - "hash": "6888943be3f97186190dd26d7eefbdf29b15c6f2fa459e13608065ebcdb799e2", - "action": "add" - } - }, - "CreateBlobStorageEntry": { - "1": { - "version": 1, - "hash": "61a373336e83645f1b6d78a320323d9ea4ee91b3d87b730cb0608fbfa0072262", - "action": "add" 
- } - }, - "BlobRetrieval": { - "1": { - "version": 1, - "hash": "a8d7e1d6483e7a9b5a130e837fa398862aa6cbb316cc5f4470450d835755fdd9", - "action": "add" - } - }, - "SyftObjectRetrieval": { - "1": { - "version": 1, - "hash": "7ccc62d5b434d2d438b3df661b4d753b0c7c8d593d451d8b86d364da83998c89", - "action": "add" - } - }, - "BlobRetrievalByURL": { - "1": { - "version": 1, - "hash": "18fd860cb9de296532fc9ff075932e6a4377cc8f043dd88ed4f620517321077d", - "action": "add" - } - }, - "BlobDeposit": { - "1": { - "version": 1, - "hash": "c98e6da658a3be01ead4ea6ee6a4c10046879f0ce0f5fc5f946346671579b229", - "action": "add" - } - }, - "WorkerSettings": { - "1": { - "version": 1, - "hash": "0dcd95422ec8a7c74e45ee68a125084c08f898dc94a13d25fe5a5fd0e4fc5027", - "action": "add" - } - }, - "HTTPNodeRoute": { - "1": { - "version": 1, - "hash": "1901b9f53f9970ce2bd8307ba9f7cafc0e7eba1d2ec82e4014c6120e605e3741", - "action": "add" - } - }, - "PythonNodeRoute": { - "1": { - "version": 1, - "hash": "15711e6e7a1ef726c8e8b5c35a6cb2d30b56ba5213cba489524bf63489e136cf", - "action": "add" - } - }, - "EnclaveMetadata": { - "1": { - "version": 1, - "hash": "39f85e475015e6f860ddcc5fea819423eba2db8f4b7d8e004c05a44d6f8444c6", - "action": "add" - } - }, - "DataSubject": { - "1": { - "version": 1, - "hash": "0b8b049d4627727b444c419f5d6a97b7cb97a433088ebf744c854b6a470dadf1", - "action": "add" - } - }, - "DataSubjectCreate": { - "1": { - "version": 1, - "hash": "5a94f9fcba75c50d78d71222f0235c5fd4d8003ae0db4d74bdbc4d56a99de3aa", - "action": "add" - } - }, - "DataSubjectMemberRelationship": { - "1": { - "version": 1, - "hash": "0a820edc9f1a87387acc3c611fe852752fcb3dab7608058f2bc48211be7bfbd2", - "action": "add" - } - }, - "Contributor": { - "1": { - "version": 1, - "hash": "d1d4f25bb87e59c0414501d3335097de66815c164c9ed5a7850ff8bec69fbcdc", - "action": "add" - } - }, - "MarkdownDescription": { - "1": { - "version": 1, - "hash": "519328a3952049f57004013e4fb00840695b24b8575cad983056412c9c9d9ba6", - "action": "add" - } - }, - "Asset": { - "1": { - "version": 1, - "hash": "24350b8d9597df49999918ad42e0eece1328ea30389311f1e0a420be8f39b8a1", - "action": "add" - } - }, - "CreateAsset": { - "1": { - "version": 1, - "hash": "1b4c71569b8da64258672483bd36dc4aa99a32d4cb519659241d15bc898041a6", - "action": "add" - } - }, - "Dataset": { - "1": { - "version": 1, - "hash": "99ca2fa3e46fd9810222d269fac6accb546f632e94d5d57529016ba5e55af5a8", - "action": "add" - } - }, - "DatasetPageView": { - "1": { - "version": 1, - "hash": "b1de14bb9b6a259648dfc59b6a48fa526116afe50a689c24b8bb36fd0e6a97f8", - "action": "add" - } - }, - "CreateDataset": { - "1": { - "version": 1, - "hash": "3b020d9b8928cbd7e91f41c749ab4c932e19520696a183f2c7cd1312ebb640d1", - "action": "add" - } - }, - "ActionDataEmpty": { - "1": { - "version": 1, - "hash": "89b5912fe5416f922051b8068be6071a03c87a4ab264959de524f1b86e95f028", - "action": "add" - } - }, - "ActionFileData": { - "1": { - "version": 1, - "hash": "1f32d94b75b0a6b4e86cec93d94aa905738219e3e7e75f51dd335ee832a6ed3e", - "action": "add" - } - }, - "Action": { - "1": { - "version": 1, - "hash": "5cf71ee35097f17fbb1dd05096f875211d71cf07161205d7f6a9c11fd49d5272", - "action": "add" - } - }, - "ActionObject": { - "1": { - "version": 1, - "hash": "632446f1415102490c93fafb56dd9eb29d79623bcc5e9f2e6e37c4f63c2c51c3", - "action": "add" - } - }, - "AnyActionObject": { - "1": { - "version": 1, - "hash": "bcb31f847907edc9c95d2d120dc5427854604f40940e3f41cd0474a1820ac65e", - "action": "add" - } - }, - "TwinObject": { - "1": { - "version": 1, - "hash": 
"c42455586b43724a7421becd99122b787a129798daf6081e96954ecaea228099", - "action": "add" - } - }, - "ExactMatch": { - "1": { - "version": 1, - "hash": "e497e2e2380db72766c5e219e8afd13136d8953933d6f1eaf83b14001e887cde", - "action": "add" - } - }, - "OutputHistory": { - "1": { - "version": 1, - "hash": "4ec6e6efd86a972b474251885151bdfe4ef262562174605e8ab6a8abba1aa867", - "action": "add" - } - }, - "OutputPolicyExecuteCount": { - "1": { - "version": 1, - "hash": "6bb24b3b35e19564c43b838ca3f46ccdeadb6596511917f2d220681a378e439d", - "action": "add" - } - }, - "OutputPolicyExecuteOnce": { - "1": { - "version": 1, - "hash": "32a40fc9966b277528eebc61c01041f3a5447417731954abdaffbb14dabc76bb", - "action": "add" - } - }, - "UserPolicy": { - "1": { - "version": 1, - "hash": "c69b17b1d96cace8b45da6d9639165f2da4aa7ff156b6fd922ac217bf7856d8a", - "action": "add" - } - }, - "SubmitUserPolicy": { - "1": { - "version": 1, - "hash": "96f7f39279fadc70c569b8d48ed4d6420a8132db51e37466d272fda19953554b", - "action": "add" - } - }, - "UserCode": { - "1": { - "version": 1, - "hash": "e14c22686cdc7d1fb2b0d01c0aebdea37e62a61b051677c1d30234214f05cd42", - "action": "add" - } - }, - "SubmitUserCode": { - "1": { - "version": 1, - "hash": "f572d32350d09e25b29572c591029d37a216818618c383094404f84bc9c15dd6", - "action": "add" - } - }, - "UserCodeExecutionResult": { - "1": { - "version": 1, - "hash": "49c32e85e78b7b189a7f13b7e26115ef94fcb0b60b578adcbe2b95e289f63a6e", - "action": "add" - } - }, - "CodeHistory": { - "1": { - "version": 1, - "hash": "a7baae93862ae0aa67675f1617574e31aafb15a9ebff633eb817278a3a867161", - "action": "add" - } - }, - "CodeHistoryView": { - "1": { - "version": 1, - "hash": "0ed1a2a04a962ecbcfa38b0b8a03c1e51e8946a4b80f6bf2557148ce658671ce", - "action": "add" - } - }, - "CodeHistoriesDict": { - "1": { - "version": 1, - "hash": "95288411cd5843834f3273a2fd66a7df2e603e980f4ab1d329f9ab17d5d2f643", - "action": "add" - } - }, - "UsersCodeHistoriesDict": { - "1": { - "version": 1, - "hash": "5e1f389c4565ee8558386dd5c934d81e0c68ab1434f86bb9065976b587ef44d1", - "action": "add" - } - }, - "NodePeer": { - "1": { - "version": 1, - "hash": "7b88de7e38490e2d69f31295137673e7ddabc16ab0e2272ff491f6cea1835d63", - "action": "add" - } - }, - "OnDiskBlobDeposit": { - "1": { - "version": 1, - "hash": "5efc230c1ee65c4626d334aa69ed458c796c45265e546a333844c6c2bcd0e6b0", - "action": "add" - } - }, - "SeaweedFSBlobDeposit": { - "1": { - "version": 1, - "hash": "382a9ac178deed2a9591e1ebbb39f265cbe67027fb93a420d473a4c26b7fda11", - "action": "add" - } - }, - "DictStoreConfig": { - "1": { - "version": 1, - "hash": "256e9c623ce0becd555ddd2a55a0c15514e162786b1549388cef98a92a9b18c9", - "action": "add" - } - }, - "NumpyArrayObject": { - "1": { - "version": 1, - "hash": "dcc7b44fa5ad22ae0bc576948f856c172dac1e9de2bc8e2a302e428f3309a278", - "action": "add" - } - }, - "NumpyScalarObject": { - "1": { - "version": 1, - "hash": "5c1b6b6e8ba88bc79e76646d621489b889fe8f9b9fd59f117d594be18a409633", - "action": "add" - } - }, - "NumpyBoolObject": { - "1": { - "version": 1, - "hash": "a5c822a6a3ca9eefd6a2b68f7fd0bc614fba7995f6bcc30bdc9dc882296b9b16", - "action": "add" - } - }, - "PandasDataframeObject": { - "1": { - "version": 1, - "hash": "35058924b3de2e0a604a92f91f4dd2e3cc0dac80c219d34f360e7cedd52f5f4c", - "action": "add" - } - }, - "PandasSeriesObject": { - "1": { - "version": 1, - "hash": "2a0d8a55f1c27bd8fccd276cbe01bf272c40cab10417d7027273983fed423caa", - "action": "add" - } - }, - "ReplyNotification": { - "1": { - "version": 1, - "hash": 
"34b2ad522f7406c2486573467d9c7acef5c1063a0d9f2177c3bda2d8c4f87572", - "action": "add" - } - }, - "Notification": { - "1": { - "version": 1, - "hash": "d13981f721fe2b3e2717640ee07dc716c596e4ecd442461665c3fdab0b85bf0e", - "action": "add" - } - }, - "CreateNotification": { - "1": { - "version": 1, - "hash": "b1f459de374fe674f873a4a5f3fb8a8aabe0d83faad84a933f0a77dd1141159a", - "action": "add" - } - }, - "Change": { - "1": { - "version": 1, - "hash": "aefebd1601cf5bfd4817b0db75300a78299cc4949ead735a90873cbd22c8d4bc", - "action": "add" - } - }, - "ChangeStatus": { - "1": { - "version": 1, - "hash": "627f6f8e42cc285336aa6fd4916285d796140f4ff901487b7cb3907ef0f116a6", - "action": "add" - } - }, - "ActionStoreChange": { - "1": { - "version": 1, - "hash": "17b865e75eb3fb2693924fb00ba87a25260be45d55a4eb2184c4ead22d787cbe", - "action": "add" - } - }, - "Request": { - "1": { - "version": 1, - "hash": "e054307eeb7f13683cde9ce7613d5ca2925a13fff7c345b1c9f729a12c955f90", - "action": "add" - } - }, - "RequestInfo": { - "1": { - "version": 1, - "hash": "b76075c138afc0563ce9ac7f6b1131f048951f7486cd516c02736dc1a2a23639", - "action": "add" - } - }, - "RequestInfoFilter": { - "1": { - "version": 1, - "hash": "7103abdc464ae71bb746410f5730f55dd8ed82268aa32bbb0a69e0070488a669", - "action": "add" - } - }, - "SubmitRequest": { - "1": { - "version": 1, - "hash": "96b4ec12beafd9d8a7c97399cb8a23dade4db16d8f521be3fe7b8fec99db5161", - "action": "add" - } - }, - "ObjectMutation": { - "1": { - "version": 1, - "hash": "0ee3dd38d6df0fe9a19d848e8f3aaaf13a6ba86afe3406c239caed6da185651a", - "action": "add" - } - }, - "EnumMutation": { - "1": { - "version": 1, - "hash": "4c02f956ec9b973064972cc57fc8dd9c525e683f93f804642b4e1bfee1b62e57", - "action": "add" - } - }, - "UserCodeStatusChange": { - "1": { - "version": 1, - "hash": "4f5b405cc2b3976ed8f7018df82e873435d9187dff15fa5a23bc85a738969f3f", - "action": "add" - } - }, - "SyftObjectMigrationState": { - "1": { - "version": 1, - "hash": "d3c8126bc15dae4dd243bb035530e3f56cd9e433d403dd6b5f3b45face6d281f", - "action": "add" - } - }, - "ProjectThreadMessage": { - "1": { - "version": 1, - "hash": "1118e935792e8e54103dbf91fa33edbf192a7767d2b1d4526dfa7d4a643cde2e", - "action": "add" - } - }, - "ProjectMessage": { - "1": { - "version": 1, - "hash": "55a3a5171b6949372b4125cc461bf39bc998565e07703804fca6c7ef99695ae4", - "action": "add" - } - }, - "ProjectRequestResponse": { - "1": { - "version": 1, - "hash": "d4c360e845697a0b24695143d0781626cd344cfde43162c90ae90fe67e00ae21", - "action": "add" - } - }, - "ProjectRequest": { - "1": { - "version": 1, - "hash": "514d189df335c68869eea36befcdcafec74bdc682eaf18871fe879e26da4dbb6", - "action": "add" - } - }, - "AnswerProjectPoll": { - "1": { - "version": 1, - "hash": "ff2e1ac7bb764c99d646b96eb3ebfbf9311599b7e3be07aa4a4eb4810bb6dd12", - "action": "add" - } - }, - "ProjectPoll": { - "1": { - "version": 1, - "hash": "b0ac8f1d9c06997374ddbc33fdf1d0af0da15fdb6899f52d91a8574106558964", - "action": "add" - } - }, - "Project": { - "1": { - "version": 1, - "hash": "ec5b7ac1c92808e266f06b175c6ebcd50be81777ad120c02ce8c6074d0004788", - "action": "add" - } - }, - "ProjectSubmit": { - "1": { - "version": 1, - "hash": "0374b37779497d7e0b2ffeabc38d35bfbae2ee762a7674a5a8af75e7c5545e61", - "action": "add" - } - }, - "QueueItem": { - "1": { - "version": 1, - "hash": "5aa94681d9d0715d5b605f9625a54e114927271378cf2ea7245f85c488035e0b", - "action": "add" - } - }, - "ZMQClientConfig": { - "1": { - "version": 1, - "hash": 
"e6054969b495791569caaf33239039beae3d116e1fe74e9575467c48b9007c45", - "action": "add" - } - }, - "SQLiteStoreConfig": { - "1": { - "version": 1, - "hash": "b656b26c14cf4e97aba702dd62a0927aec7f860c12eed512c2c688e1b7109aa5", - "action": "add" - } - }, - "Plan": { - "1": { - "version": 1, - "hash": "a0bba2b7792c9e08c453e9e256f0ac6e6185610726566bcd50b057ae83b42d9a", - "action": "add" - } - } - } - } -} diff --git a/packages/syft/src/syft/protocol/releases/0.8.3.json b/packages/syft/src/syft/protocol/releases/0.8.3.json deleted file mode 100644 index 0c74b349c3f..00000000000 --- a/packages/syft/src/syft/protocol/releases/0.8.3.json +++ /dev/null @@ -1,194 +0,0 @@ -{ - "2": { - "object_versions": { - "Action": { - "2": { - "version": 2, - "hash": "a13b50c4d23bd6deb7896e394f2a20e6cef4c33c5e6f4ee30f19eaffab708f21", - "action": "add" - } - }, - "ActionObject": { - "2": { - "version": 2, - "hash": "577aa1f010b90194958a18ec38ee21db3718bd96d9e036501c6ddeefabedf432", - "action": "add" - } - }, - "AnyActionObject": { - "2": { - "version": 2, - "hash": "002d8be821140befebbc0503e6bc1ef8779094e24e46305e5da5af6eecb56b13", - "action": "add" - } - }, - "BlobFile": { - "2": { - "version": 2, - "hash": "f2b29d28fe81a04bf5e946c819010283a9f98a97d50519358bead773865a2e09", - "action": "add" - } - }, - "BlobFileOBject": { - "1": { - "version": 1, - "hash": "8da2c80ced4f0414c671313c4b63d05846df1e397c763d99d803be86c29755bb", - "action": "add" - } - }, - "BlobStorageEntry": { - "2": { - "version": 2, - "hash": "5472bdd5bdce6d0b561543a6bac70d47bf0c05c141a21450751460cc538d6b55", - "action": "add" - } - }, - "BlobStorageMetadata": { - "2": { - "version": 2, - "hash": "674f4c52a8444289d5ef389b919008860e2b0e7acbaafa774d58e492d5b6741a", - "action": "add" - } - }, - "BlobRetrieval": { - "2": { - "version": 2, - "hash": "4c4fbdb6df5bb9fcbe914a9890bd1c1b6a1b3f382a04cbc8752a5a1b03130111", - "action": "add" - } - }, - "SyftObjectRetrieval": { - "2": { - "version": 2, - "hash": "d9d7a7e1b8843145c9687fd013c9223700285886073547734267e91ac53e0996", - "action": "add" - } - }, - "BlobRetrievalByURL": { - "1": { - "version": 1, - "hash": "18fd860cb9de296532fc9ff075932e6a4377cc8f043dd88ed4f620517321077d", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "8059ee03016c4d74e408dad9529e877f91829672e0cc42d8cfff9c8e14058adc", - "action": "add" - } - }, - "WorkerSettings": { - "2": { - "version": 2, - "hash": "d623a8a0d6c83b26ba49686bd8be10eccb126f54626fef334a85396c3b8a8ed6", - "action": "add" - } - }, - "QueueItem": { - "2": { - "version": 2, - "hash": "9503b878de4b5b7a1793580301353523b7d6219ebd27d38abe598061979b7570", - "action": "add" - } - }, - "ActionQueueItem": { - "1": { - "version": 1, - "hash": "11a43caf9164eb2a5a21f4bcb0ca361d0a5d134bf3c60173f2c502d0d80219de", - "action": "add" - } - }, - "ZMQClientConfig": { - "2": { - "version": 2, - "hash": "0f9bc88d56cd6eed6fc75459d1f914aed840c66e1195b9e41cc501b488fef2ed", - "action": "add" - } - }, - "JobItem": { - "1": { - "version": 1, - "hash": "7b8723861837b0b7e948b2cf9244159d232185f3407dd6bef108346f941ddf6e", - "action": "add" - }, - "2": { - "version": 2, - "hash": "e99cf5a78c6dd3a0adc37af3472c7c21570a9e747985dff540a2b06d24de6446", - "action": "add" - } - }, - "UserCode": { - "2": { - "version": 2, - "hash": "660e1abc15034f525e91ffdd820c2a2179bfddf83b7b9e3ce7823b2efc515c69", - "action": "add" - } - }, - "SubmitUserCode": { - "1": { - "version": 1, - "hash": "f572d32350d09e25b29572c591029d37a216818618c383094404f84bc9c15dd6", - "action": "remove" - }, - "2": { - "version": 2, - 
"hash": "9b29e060973a3de8d3564a2b7d2bb5c53745aa445bf257576994b613505d7194", - "action": "add" - } - }, - "NumpyArrayObject": { - "2": { - "version": 2, - "hash": "2c631121d9211006edab5620b214dea83e2398bee92244d822227ee316647e22", - "action": "add" - } - }, - "NumpyScalarObject": { - "2": { - "version": 2, - "hash": "0d5d81b9d45c140f6e07b43ed68d31e0ef060d6b4d0431c9b4795997bb35c69d", - "action": "add" - } - }, - "NumpyBoolObject": { - "2": { - "version": 2, - "hash": "24839ba1c88ed833a134124750d5f299abcdf318670315028ed87b254f4578b3", - "action": "add" - } - }, - "PandasDataframeObject": { - "2": { - "version": 2, - "hash": "66729d4ba7a92210d45c5a5c24fbdb4c8e58138a515a7bdb71ac8f6e8b868544", - "action": "add" - } - }, - "PandasSeriesObject": { - "2": { - "version": 2, - "hash": "cb05a714f75b1140a943f56a3622fcc0477b3a1f504cd545a98510959ffe1528", - "action": "add" - } - }, - "UserCodeStatusChange": { - "2": { - "version": 2, - "hash": "d83e0905ae882c824ba8fbbf455cd3881906bf8b2ebbfff07bcf471ef869cedc", - "action": "add" - } - }, - "SyftLog": { - "1": { - "version": 1, - "hash": "bd3f62b8fe4b2718a6380c8f05a93c5c40169fc4ab174db291929298e588429e", - "action": "add" - }, - "2": { - "version": 2, - "hash": "d3ce45794da2e6c4b0cef63b98a553525af50c5d9db42d3d64caef3e7d22b4a9", - "action": "add" - } - } - } - } -} diff --git a/packages/syft/src/syft/protocol/releases/0.8.4.json b/packages/syft/src/syft/protocol/releases/0.8.4.json deleted file mode 100644 index b1581fef20b..00000000000 --- a/packages/syft/src/syft/protocol/releases/0.8.4.json +++ /dev/null @@ -1,242 +0,0 @@ -{ - "3": { - "object_versions": { - "SyftWorkerImage": { - "1": { - "version": 1, - "hash": "2a9585b6a286e24f1a9f3f943d0128730cf853edc549184dc1809d19e1eec54b", - "action": "add" - } - }, - "ActionDataLink": { - "1": { - "version": 1, - "hash": "10bf94e99637695f1ba283f0b10e70743a4ebcb9ee75aefb1a05e6d6e1d21a71", - "action": "add" - } - }, - "ObjectNotReady": { - "1": { - "version": 1, - "hash": "88207988639b11eaca686b6e079616d9caecc3dbc2a8112258e0f39ee5c3e113", - "action": "add" - } - }, - "JobItem": { - "3": { - "version": 3, - "hash": "5b93a59e28574691339d22826d5650969336a2e930b93d6b3fe6d5409ca0cfc4", - "action": "add" - } - }, - "SeaweedSecureFilePathLocation": { - "2": { - "version": 2, - "hash": "5fd63fed2a4efba8c2b6c7a7b5e9b5939181781c331230896aa130b6fd558739", - "action": "add" - } - }, - "AzureSecureFilePathLocation": { - "1": { - "version": 1, - "hash": "1bb15f3f9d7082779f1c9f58de94011487924cb8a8c9c2ec18fd7c161c27fd0e", - "action": "add" - } - }, - "RemoteConfig": { - "1": { - "version": 1, - "hash": "ad7bc4780a8ad52e14ce68601852c93d2fe07bda489809cad7cae786d2461754", - "action": "add" - } - }, - "AzureRemoteConfig": { - "1": { - "version": 1, - "hash": "c05c6caa27db4e385c642536d4b0ecabc0c71e91220d2e6ce21a2761ca68a673", - "action": "add" - } - }, - "BlobRetrievalByURL": { - "2": { - "version": 2, - "hash": "8059ee03016c4d74e408dad9529e877f91829672e0cc42d8cfff9c8e14058adc", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "0b664100ea08413ca4ef04665ca910c2cf9535539617ea4ba33687d05cdfe747", - "action": "add" - } - }, - "QueueItem": { - "3": { - "version": 3, - "hash": "3495f406d2c97050ce86be80c230f49b6b846c63b9a9230cbd6631952f2bad0f", - "action": "add" - } - }, - "ActionQueueItem": { - "2": { - "version": 2, - "hash": "6413ed01e949cac169299a43ce40651f9bf8053e408b6942853f8afa8a693b3d", - "action": "add" - } - }, - "ZMQClientConfig": { - "2": { - "version": 2, - "hash": 
"0f9bc88d56cd6eed6fc75459d1f914aed840c66e1195b9e41cc501b488fef2ed", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "91ce5953cced58e12c576aa5174d5ca0c91981b01cf42edd5283d347baa3390b", - "action": "add" - } - }, - "SyftWorker": { - "1": { - "version": 1, - "hash": "0d5b367162f3ce55ab090cc1b49bd30e50d4eb144e8431eadc679bd0e743aa70", - "action": "add" - } - }, - "WorkerPool": { - "1": { - "version": 1, - "hash": "250699eb4c452fc427995353d5c5ad6245fb3e9fdac8814f8348784816a0733b", - "action": "add" - } - }, - "SyftImageRegistry": { - "1": { - "version": 1, - "hash": "dc83910c91947e3d9eaa3e6f8592237448f0408668c7cca80450b5fcd54722e1", - "action": "add" - } - }, - "UserCode": { - "3": { - "version": 3, - "hash": "90fcae0f556f375ba1e91d2e345f57241660695c6e2b84c8e311df89d09e6c66", - "action": "add" - } - }, - "SubmitUserCode": { - "3": { - "version": 3, - "hash": "a29160c16d2e2620800d42cdcd9f3637d063a570c477a5d05217a2e64b4bb396", - "action": "add" - } - }, - "CreateCustomImageChange": { - "1": { - "version": 1, - "hash": "bc09dca7995938f3b3a2bd9c8b3c2feffc8484df466144a425cb69cadb2ab635", - "action": "add" - } - }, - "CreateCustomWorkerPoolChange": { - "1": { - "version": 1, - "hash": "86894f8ccc037de61f44f9698fd113ba02c3cf3870a3048c00a46e15dcd1941c", - "action": "add" - } - }, - "JobInfo": { - "1": { - "version": 1, - "hash": "cf26eeac3d9254dfa439917493b816341f8a379a77d182bbecba3b7ed2c1d00a", - "action": "add" - } - }, - "User": { - "1": { - "version": 1, - "hash": "078636e64f737e60245b39cf348d30fb006531e80c12b70aa7cf98254e1bb37a", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "ded970c92f202716ed33a2117cf541789f35fad66bd4b1db39da5026b1d7d0e7", - "action": "add" - } - }, - "UserUpdate": { - "1": { - "version": 1, - "hash": "839dd90aeb611e1dc471c8fd6daf230e913465c0625c6a297079cb7f0a271195", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "32cba8fbd786c575f92e26c31384d282e68e3ebfe5c4b0a0e793820b1228d246", - "action": "add" - } - }, - "UserCreate": { - "1": { - "version": 1, - "hash": "dab78b63544ae91c09f9843c323cb237c0a6fcfeb71c1acf5f738e2fcf5c277f", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "2540188c5aaea866914dccff459df6e0f4727108a503414bb1567ff6297d4646", - "action": "add" - } - }, - "UserView": { - "1": { - "version": 1, - "hash": "63289383fe7e7584652f242a4362ce6e2f0ade52f6416ab6149b326a506b0675", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "e410de583bb15bc5af57acef7be55ea5fc56b5b0fc169daa3869f4203c4d7473", - "action": "add" - } - }, - "BlobFile": { - "2": { - "version": 2, - "hash": "f2b29d28fe81a04bf5e946c819010283a9f98a97d50519358bead773865a2e09", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "8f1710c754bb3b39f546b97fd69c4826291398b247976bbc41fa873af431bca9", - "action": "add" - } - }, - "SyftObjectRetrieval": { - "1": { - "version": 1, - "hash": "7ccc62d5b434d2d438b3df661b4d753b0c7c8d593d451d8b86d364da83998c89", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "952958e9afae007bef3cb89aa15be95dddc4c310e3a8ce4191576f90ac6fcbc8", - "action": "add" - } - }, - "ActionFileData": { - "1": { - "version": 1, - "hash": "1f32d94b75b0a6b4e86cec93d94aa905738219e3e7e75f51dd335ee832a6ed3e", - "action": "remove" - } - }, - "SeaweedFSBlobDeposit": { - "2": { - "version": 2, - "hash": "07d84a95324d95d9c868cd7d1c33c908f77aa468671d76c144586aab672bcbb5", - "action": "add" - } - } - } - } -} diff --git a/packages/syft/src/syft/protocol/releases/0.8.5.json b/packages/syft/src/syft/protocol/releases/0.8.5.json 
deleted file mode 100644 index 40288b126ae..00000000000 --- a/packages/syft/src/syft/protocol/releases/0.8.5.json +++ /dev/null @@ -1,1671 +0,0 @@ -{ - "4": { - "object_versions": { - "ActionObject": { - "1": { - "version": 1, - "hash": "632446f1415102490c93fafb56dd9eb29d79623bcc5e9f2e6e37c4f63c2c51c3", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "577aa1f010b90194958a18ec38ee21db3718bd96d9e036501c6ddeefabedf432", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "37bb8f0f87b1da2525da8f6873e6257dff4a732f2dba293b62931ad0b85ef9e2", - "action": "add" - } - }, - "AnyActionObject": { - "1": { - "version": 1, - "hash": "bcb31f847907edc9c95d2d120dc5427854604f40940e3f41cd0474a1820ac65e", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "002d8be821140befebbc0503e6bc1ef8779094e24e46305e5da5af6eecb56b13", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "7c55461e3c6ba36ff999c64eb1b97a65b5a1f27193a973b1355ee2675f14c313", - "action": "add" - } - }, - "BlobFileOBject": { - "1": { - "version": 1, - "hash": "8da2c80ced4f0414c671313c4b63d05846df1e397c763d99d803be86c29755bb", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "1ab941c7669572a41067a17e0e3f2d9c7056f7a4df8f899e87ae2358d9113b02", - "action": "add" - } - }, - "JobInfo": { - "1": { - "version": 1, - "hash": "cf26eeac3d9254dfa439917493b816341f8a379a77d182bbecba3b7ed2c1d00a", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "058a7fc0c63e0bcb399088e7fcde9b8522522e269b00cee2d093d1c890550ce8", - "action": "add" - } - }, - "ExecutionOutput": { - "1": { - "version": 1, - "hash": "201c8abcb6595a64140ad0c3b058557229c7790a25fb55ed229ae0efcb63ad07", - "action": "add" - } - }, - "OutputPolicyExecuteCount": { - "1": { - "version": 1, - "hash": "6bb24b3b35e19564c43b838ca3f46ccdeadb6596511917f2d220681a378e439d", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "5bce0120ba3b7cbbe08b28bb92bf035215e66232c36899637b8a3f84300747e3", - "action": "add" - } - }, - "OutputPolicyExecuteOnce": { - "1": { - "version": 1, - "hash": "32a40fc9966b277528eebc61c01041f3a5447417731954abdaffbb14dabc76bb", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "11e2ed5f7fc4bfc701c592352c5377911b0496454c42995c428333ca7ce635c5", - "action": "add" - } - }, - "UserCodeStatusCollection": { - "1": { - "version": 1, - "hash": "8d8bae10ee1733464272031e7de6fc783668885206fa448c9f7cd8e8cfc7486a", - "action": "add" - } - }, - "UserCode": { - "1": { - "version": 1, - "hash": "e14c22686cdc7d1fb2b0d01c0aebdea37e62a61b051677c1d30234214f05cd42", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "660e1abc15034f525e91ffdd820c2a2179bfddf83b7b9e3ce7823b2efc515c69", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "90fcae0f556f375ba1e91d2e345f57241660695c6e2b84c8e311df89d09e6c66", - "action": "remove" - }, - "4": { - "version": 4, - "hash": "84ef96946a18e2028d71e125a7a4b8bed2c9cba3c5a2612634509790506e5b9c", - "action": "add" - } - }, - "UserCodeExecutionOutput": { - "1": { - "version": 1, - "hash": "d20e83362df8a5d2d2e7eb26a2c5723739f9cfbe4c0272d3ae7e37a34bbe5317", - "action": "add" - } - }, - "NumpyArrayObject": { - "1": { - "version": 1, - "hash": "dcc7b44fa5ad22ae0bc576948f856c172dac1e9de2bc8e2a302e428f3309a278", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "2c631121d9211006edab5620b214dea83e2398bee92244d822227ee316647e22", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "709dc84a946267444a3f9968acf4a5e9807d6aa5143626c3fb635c9282108cc1", - "action": "add" - } - 
}, - "NumpyScalarObject": { - "1": { - "version": 1, - "hash": "5c1b6b6e8ba88bc79e76646d621489b889fe8f9b9fd59f117d594be18a409633", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "0d5d81b9d45c140f6e07b43ed68d31e0ef060d6b4d0431c9b4795997bb35c69d", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "5e84c9905a1816d51c0dfb1eedbfb4d831095ca6c89956c6fe200c2a193cbb8f", - "action": "add" - } - }, - "NumpyBoolObject": { - "1": { - "version": 1, - "hash": "a5c822a6a3ca9eefd6a2b68f7fd0bc614fba7995f6bcc30bdc9dc882296b9b16", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "24839ba1c88ed833a134124750d5f299abcdf318670315028ed87b254f4578b3", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "bf936c1923ceee4def4cded06d41766998ea472322b0738bade7b85298e469da", - "action": "add" - } - }, - "PandasDataframeObject": { - "1": { - "version": 1, - "hash": "35058924b3de2e0a604a92f91f4dd2e3cc0dac80c219d34f360e7cedd52f5f4c", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "66729d4ba7a92210d45c5a5c24fbdb4c8e58138a515a7bdb71ac8f6e8b868544", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "daf3629fb7d26f41f96cd7f9200d7327a4b74d800b3e02afa75454d11bd47d78", - "action": "add" - } - }, - "PandasSeriesObject": { - "1": { - "version": 1, - "hash": "2a0d8a55f1c27bd8fccd276cbe01bf272c40cab10417d7027273983fed423caa", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "cb05a714f75b1140a943f56a3622fcc0477b3a1f504cd545a98510959ffe1528", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "4747a220d1587e99e6ac076496a2aa7217e2700205ac80fc24fe4768a313da78", - "action": "add" - } - }, - "UserCodeStatusChange": { - "1": { - "version": 1, - "hash": "4f5b405cc2b3976ed8f7018df82e873435d9187dff15fa5a23bc85a738969f3f", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "d83e0905ae882c824ba8fbbf455cd3881906bf8b2ebbfff07bcf471ef869cedc", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "dd79f0f4d8cc7c95120911a0a5d9264cc6e65813bd4ad39f81b756b40c1463e9", - "action": "add" - } - }, - "SyncStateItem": { - "1": { - "version": 1, - "hash": "cde09be2cfeca4246d001f3f28c00d8647a4506641104e5dc647f136a64fd06e", - "action": "add" - } - }, - "SyncState": { - "1": { - "version": 1, - "hash": "b91ed9a9eb8ac7e2fadafd9376d8adefc83845d2f29939b30e95ebe94dc78cd9", - "action": "add" - } - }, - "StoreConfig": { - "1": { - "version": 1, - "hash": "17de8875cf590311ddb042140347ffc79d4a85028e504dad178ca4e1237ec861", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "3f6c9a967a43557bf88caab87e5d1b9b14ea240bfd5bd6a1a313798e4ee2552b", - "action": "add" - } - }, - "MongoStoreConfig": { - "1": { - "version": 1, - "hash": "e52aa382e300b0b69aaa2d80aadb4e3a9a3c02b3c741b71d56f959c4d3891ce5", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "f27e70c1c074de2d921f8f0cca02bec90d359cf0a1f255fe77d84455e5daa966", - "action": "add" - } - }, - "Action": { - "1": { - "version": 1, - "hash": "5cf71ee35097f17fbb1dd05096f875211d71cf07161205d7f6a9c11fd49d5272", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "a13b50c4d23bd6deb7896e394f2a20e6cef4c33c5e6f4ee30f19eaffab708f21", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "18525c0610aea0aa62fe496a739b0ca7fb828617b4fca73840807d3c7b1477a7", - "action": "add" - } - }, - "DataSubjectCreate": { - "1": { - "version": 1, - "hash": "5a94f9fcba75c50d78d71222f0235c5fd4d8003ae0db4d74bdbc4d56a99de3aa", - "action": "remove" - }, - "2": { - "version": 2, - "hash": 
"b35897295822f061fbc70522ca8967cd2be53a5c01b19e24c587cd7b0c4aa3e8", - "action": "add" - } - }, - "Dataset": { - "1": { - "version": 1, - "hash": "99ca2fa3e46fd9810222d269fac6accb546f632e94d5d57529016ba5e55af5a8", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "0bbae6e3665e61e97eeb328400efc678dfb26409616c66bf48f3f34bbf102721", - "action": "add" - } - }, - "CreateDataset": { - "1": { - "version": 1, - "hash": "3b020d9b8928cbd7e91f41c749ab4c932e19520696a183f2c7cd1312ebb640d1", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "83c6142c99da6667260e0d6df258b6e173beb18e399d60209b6ffccb5547f1e7", - "action": "add" - } - }, - "DictStoreConfig": { - "1": { - "version": 1, - "hash": "256e9c623ce0becd555ddd2a55a0c15514e162786b1549388cef98a92a9b18c9", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "6cef5c61f567c75c969827fabaf5bd4f4409a399f33b6b2623fbed3c7a597a41", - "action": "add" - } - }, - "SQLiteStoreConfig": { - "1": { - "version": 1, - "hash": "b656b26c14cf4e97aba702dd62a0927aec7f860c12eed512c2c688e1b7109aa5", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "e2027eacb8db772fadc506e5bbe797a3fd24175c18b98f79f412cc86ee300f2e", - "action": "add" - } - }, - "Plan": { - "1": { - "version": 1, - "hash": "a0bba2b7792c9e08c453e9e256f0ac6e6185610726566bcd50b057ae83b42d9a", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "67be9b8933b5bec20090727a7b1a03216f874dcc254975481ac62a5a1e9c0c1e", - "action": "add" - } - }, - "NodeMetadata": { - "1": { - "version": 1, - "hash": "6bee018894dfdf697ea624740d0bf051750e0b0d8470ced59646f6d8812068ac", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "f856169fea72486cd436875ce4411ef935da11eb7c5af48121adfa00d4c0cdb6", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "3cc67abf394a805066a88aef0bea15bde609b9ecbe7ec15172eac5e7a0b7ef7c", - "action": "remove" - }, - "4": { - "version": 4, - "hash": "9501017d54d67c987bf62a37891e9e2ceaa0f741ff6cc502ea1db7bdf26b98da", - "action": "add" - } - }, - "NodeSettings": { - "1": { - "version": 1, - "hash": "b662047bb278f4f5db77c102f94b733c3a929839271b3d6b82ea174a60e2aaf0", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "29a82afcb006a044b6ae04c6ea8a067d145d28b4210bb038ea9fa86ebde108c8", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "ea0a9336358fc24988e2e157912f1898a9f770d9520b73a34ce2320b0565f99c", - "action": "add" - } - }, - "BlobFile": { - "1": { - "version": 1, - "hash": "47ed55183d619c6c624e35412360a41de42833e2c24223c1de1ad12a84fdafc2", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "8f1710c754bb3b39f546b97fd69c4826291398b247976bbc41fa873af431bca9", - "action": "remove" - }, - "4": { - "version": 4, - "hash": "05ef86582c6b8967499eb0f57d048676e15390ce74891409fada522226563754", - "action": "add" - } - }, - "SeaweedSecureFilePathLocation": { - "1": { - "version": 1, - "hash": "5724a38b1a92b8a55da3d9cc34a720365a6d0c32683acda630fc44067173e201", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "5fd63fed2a4efba8c2b6c7a7b5e9b5939181781c331230896aa130b6fd558739", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "a986f0e990db9c7ada326b2cca828fa146349a303e674fa48ee4b45702bedc14", - "action": "add" - } - }, - "BlobStorageEntry": { - "1": { - "version": 1, - "hash": "9f1b027cce390ee6f71c7a81e7420bb71a477b29c6c62ba74e781a97bc5434e6", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "5472bdd5bdce6d0b561543a6bac70d47bf0c05c141a21450751460cc538d6b55", - "action": "remove" - }, - "3": { - 
"version": 3, - "hash": "136b0fb4908eb0c065a7ba6644ff5377a3c22ce8d97b3e48de1eb241101d4806", - "action": "add" - } - }, - "BlobStorageMetadata": { - "1": { - "version": 1, - "hash": "6888943be3f97186190dd26d7eefbdf29b15c6f2fa459e13608065ebcdb799e2", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "674f4c52a8444289d5ef389b919008860e2b0e7acbaafa774d58e492d5b6741a", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "643065504ecfabd283c736c794cfb41fb85156879940488d6ea851bb2ac3c16a", - "action": "add" - } - }, - "BlobRetrieval": { - "1": { - "version": 1, - "hash": "a8d7e1d6483e7a9b5a130e837fa398862aa6cbb316cc5f4470450d835755fdd9", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "4c4fbdb6df5bb9fcbe914a9890bd1c1b6a1b3f382a04cbc8752a5a1b03130111", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "ab0f1f06c57b3cd8bd362514d662b170a888a2487dbb1e9f880f611ce47a2b2c", - "action": "add" - } - }, - "SyftObjectRetrieval": { - "2": { - "version": 2, - "hash": "d9d7a7e1b8843145c9687fd013c9223700285886073547734267e91ac53e0996", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "952958e9afae007bef3cb89aa15be95dddc4c310e3a8ce4191576f90ac6fcbc8", - "action": "remove" - }, - "4": { - "version": 4, - "hash": "dd6527e200e7d21e5f4166b2874daf6aeb0b41fafeb8f07f96b675c8625d4cf7", - "action": "add" - } - }, - "WorkerSettings": { - "1": { - "version": 1, - "hash": "0dcd95422ec8a7c74e45ee68a125084c08f898dc94a13d25fe5a5fd0e4fc5027", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "d623a8a0d6c83b26ba49686bd8be10eccb126f54626fef334a85396c3b8a8ed6", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "d42ed88ba674e8e1ceefa61b0f9fd76400d965e52ab000b2c7f0ae5f9d26d109", - "action": "add" - } - }, - "SubmitUserCode": { - "2": { - "version": 2, - "hash": "9b29e060973a3de8d3564a2b7d2bb5c53745aa445bf257576994b613505d7194", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "a29160c16d2e2620800d42cdcd9f3637d063a570c477a5d05217a2e64b4bb396", - "action": "remove" - }, - "4": { - "version": 4, - "hash": "755721313ee8a7148c513c1d0b85324cfcbec14297887daf84ac4c0c5f468a4f", - "action": "add" - } - }, - "SeaweedFSBlobDeposit": { - "1": { - "version": 1, - "hash": "382a9ac178deed2a9591e1ebbb39f265cbe67027fb93a420d473a4c26b7fda11", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "07d84a95324d95d9c868cd7d1c33c908f77aa468671d76c144586aab672bcbb5", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "ba3715305ea320413ca5a8780d0d02aeeb5cf3be2445aa274496c539ac787425", - "action": "add" - } - }, - "QueueItem": { - "1": { - "version": 1, - "hash": "5aa94681d9d0715d5b605f9625a54e114927271378cf2ea7245f85c488035e0b", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "9503b878de4b5b7a1793580301353523b7d6219ebd27d38abe598061979b7570", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "3495f406d2c97050ce86be80c230f49b6b846c63b9a9230cbd6631952f2bad0f", - "action": "remove" - }, - "4": { - "version": 4, - "hash": "c37bc1c6303c467050ce4f8faa088a2f66ef1781437ffe34f15aadf5477ac25b", - "action": "add" - } - }, - "ZMQClientConfig": { - "1": { - "version": 1, - "hash": "e6054969b495791569caaf33239039beae3d116e1fe74e9575467c48b9007c45", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "91ce5953cced58e12c576aa5174d5ca0c91981b01cf42edd5283d347baa3390b", - "action": "remove" - }, - "4": { - "version": 4, - "hash": "94f4243442d5aa7d2eb48e661a2cbf9d7c1d6a22035a3783977bdfae4a571142", - "action": "add" - } - }, - 
"ActionQueueItem": { - "1": { - "version": 1, - "hash": "11a43caf9164eb2a5a21f4bcb0ca361d0a5d134bf3c60173f2c502d0d80219de", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "6413ed01e949cac169299a43ce40651f9bf8053e408b6942853f8afa8a693b3d", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "4159d6ea45bc82577828bc19d668196422ff29bb8cc298b84623e6f4f476aaf3", - "action": "add" - } - }, - "JobItem": { - "1": { - "version": 1, - "hash": "7b8723861837b0b7e948b2cf9244159d232185f3407dd6bef108346f941ddf6e", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "e99cf5a78c6dd3a0adc37af3472c7c21570a9e747985dff540a2b06d24de6446", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "5b93a59e28574691339d22826d5650969336a2e930b93d6b3fe6d5409ca0cfc4", - "action": "remove" - }, - "4": { - "version": 4, - "hash": "dae431b87cadacfd30613519b5dd25d2e4ff59d2a971e21a31d56901103b9420", - "action": "add" - } - }, - "SyftLog": { - "1": { - "version": 1, - "hash": "bd3f62b8fe4b2718a6380c8f05a93c5c40169fc4ab174db291929298e588429e", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "d3ce45794da2e6c4b0cef63b98a553525af50c5d9db42d3d64caef3e7d22b4a9", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "6417108288ab4cf090ee2d548fb44b7de7f60b20a33876e5333ab4cabcc5b5df", - "action": "add" - } - }, - "SignedSyftAPICall": { - "1": { - "version": 1, - "hash": "e66a116de2fa44ebdd0d4c2d7d5a047dedb555fd201a0f431cd8017d9d33a61d", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "6cd89ed24027ed94b3e2bb7a07e8932060e07e481ceb35eb7ee4d2d0b6e34f43", - "action": "add" - } - }, - "UserUpdate": { - "2": { - "version": 2, - "hash": "32cba8fbd786c575f92e26c31384d282e68e3ebfe5c4b0a0e793820b1228d246", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "fd73429a86cc4fe4db51198ae380a18b9a7e42885701efad42bc2ef1b28c04de", - "action": "add" - } - }, - "UserCreate": { - "2": { - "version": 2, - "hash": "2540188c5aaea866914dccff459df6e0f4727108a503414bb1567ff6297d4646", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "26f9467d60b9b642e0a754e9fc028c66a139925fa7d9fac52e5a1e9afdf1387b", - "action": "add" - } - }, - "UserSearch": { - "1": { - "version": 1, - "hash": "69d1e10b81c8a4143cf70e4f911d8562732af2458ebbc455ca64542f11373dd1", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "6fd7bc05cfad5724d81b1122ddf70c6ea09e6fa77fa374c0b68e0d42e0781088", - "action": "add" - } - }, - "NodeSettingsUpdate": { - "1": { - "version": 1, - "hash": "b6ddc66ff270a3c2c4760e31e1a55d72ed04ccae2d0115ebe2fba6f2bf9bd119", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "3f66c4c8a21d63b6dba2ad27c452a01aae6b827ca5c161580312dfb850a0d821", - "action": "add" - } - }, - "User": { - "2": { - "version": 2, - "hash": "ded970c92f202716ed33a2117cf541789f35fad66bd4b1db39da5026b1d7d0e7", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "7f5e148674564f2c9c75e19fd2ea17001fbef9e2ba5e49a7e92a8b8b6098f340", - "action": "add" - } - }, - "UserView": { - "2": { - "version": 2, - "hash": "e410de583bb15bc5af57acef7be55ea5fc56b5b0fc169daa3869f4203c4d7473", - "action": "remove" - }, - "3": { - "version": 3, - "hash": "4487e0e96c6cdef771d751bca4e14afac48a17ba7aa03d956521e3d757ab95f5", - "action": "add" - } - }, - "Notification": { - "1": { - "version": 1, - "hash": "d13981f721fe2b3e2717640ee07dc716c596e4ecd442461665c3fdab0b85bf0e", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "3814065d869d10444d7413302101c720bc6dd1a105dd7c29eccf38f32351e322", - "action": 
"add" - } - }, - "CreateNotification": { - "1": { - "version": 1, - "hash": "b1f459de374fe674f873a4a5f3fb8a8aabe0d83faad84a933f0a77dd1141159a", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "32d046bda4d978fb8e839e2c2c4994b86a60843311b74330e307e6e3e422176f", - "action": "add" - } - }, - "NotificationPreferences": { - "1": { - "version": 1, - "hash": "127206b9c72d353d9f1b73fb10d8ecd57f28f9bfbfdc2f7648894cb0d2ad2522", - "action": "add" - } - }, - "NotifierSettings": { - "1": { - "version": 1, - "hash": "8505ded16432d1741ee16b0eada22da7c6e36ae7b414cfb59168ac846f3e9f54", - "action": "add" - } - }, - "PartialSyftObject": { - "1": { - "version": 1, - "hash": "008917584d8e1c09015cdbef02f59c0622f48e0618877c1b44425c8846befc13", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "385ef254e4a0c9e68fd750f2bb47f8f9c46dbd2ac9f00f535f843f19f1cf6032", - "action": "add" - } - }, - "NodeMetadataUpdate": { - "1": { - "version": 1, - "hash": "569d124c23590360bda240c19b53314ccc6204c5d1ab0d2898976a028e002191", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "cfe5400a5440de50e9a413f84c2aa05bad33135f46b16d21496534973145e93c", - "action": "add" - } - }, - "MongoDict": { - "1": { - "version": 1, - "hash": "640734396edae801e1601fe7777710e67685e552acb0244ad8b4f689599baca9", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "c83245be5997362196ee7fe2afd2b7ec7a2cf67aed5efe4bde16c7e83dc530b0", - "action": "add" - } - }, - "LinkedObject": { - "1": { - "version": 1, - "hash": "824567c6933c095d0e2f6995c8de3581c0fbd2e9e4ead35c8159f7964709c28e", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "0c52ad9a259358652f7c78f73ab041185a59b24534cee9f0802313ff4b4d4781", - "action": "add" - } - }, - "BaseConfig": { - "1": { - "version": 1, - "hash": "4e5257080ce615aa4122b02bad8487e4c7d6d0f171ff77abbc9e8cd3e33df89a", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "45e4480e6fbb5183e36cbe3bd18e21d65c43cc5809028a13ab49270e0a565da6", - "action": "add" - } - }, - "ServiceConfig": { - "1": { - "version": 1, - "hash": "ca91f59bf045d949d82860f7d52655bfbede4cf6bdc5bae8f847f08a16f05d74", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "5945f4f7347baeae0a7f5386d71982a16d6be8ab0c1caa2b10c28d282e66b1ea", - "action": "add" - } - }, - "LibConfig": { - "1": { - "version": 1, - "hash": "c6ff229aea16874c5d9ae4d1f9e500d13f5cf984bbcee7abd16c5841707a2f78", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "0fc4586bc939a15426ba2315f2457c77eea262c9d34756f0ee6b0198c001cf47", - "action": "add" - } - }, - "APIEndpoint": { - "1": { - "version": 1, - "hash": "c0e83867b107113e6fed06364ba364c24b2f4af35b15a3869b176318d3be7989", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "1264dca857f7d5c8d1aa92791726a2e17567aba82538b64d357b988d1ae3a8c9", - "action": "add" - } - }, - "LibEndpoint": { - "1": { - "version": 1, - "hash": "153eac6d8990774eebfffaa75a9895e7c4e1a0e09465d5da0baf4c3a3b03369d", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "c845900e729bef87be1a0efe69a7059055199eb5a5b9b9e8bd730dd16e18ed7a", - "action": "add" - } - }, - "SyftAPICall": { - "1": { - "version": 1, - "hash": "014bd1d0933f6070888a313edba239170759de24eae49bf2374c1be4dbe2b4d7", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "bc686b6399e058b21472d61fe56df1f0de0785219f52c7306dd5ab8bae863d89", - "action": "add" - } - }, - "SyftAPIData": { - "1": { - "version": 1, - "hash": "db101a75227e34750d7056785a1e87bb2e8ad6604f19c372d0cb6aa437243bf5", - "action": "remove" - }, 
- "2": { - "version": 2, - "hash": "b303d322c7e6da6e003e5d92a27d86acce512228a9dd62c1ab48824702055bf0", - "action": "add" - } - }, - "SyftAPI": { - "1": { - "version": 1, - "hash": "2bba1d9fcf677a58e35bf903de3da22ee4913af138aa3012af9c46b3609579cd", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "8f3ff426794df07cbeab441ff545fb896f27897df88b11ec949ec05726a41747", - "action": "add" - } - }, - "UserViewPage": { - "1": { - "version": 1, - "hash": "16dac6209b19a934d286ef1efa874379e0040c324e71023c57d1bc6d2d367171", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "0f9d54e606f9a4af73249dd4012baa11fcb7c1e60cce70c01ee48bb63411d6fe", - "action": "add" - } - }, - "UserPrivateKey": { - "1": { - "version": 1, - "hash": "7cb196587887f0f3bffb298dd9f3b88509e9b2748792bf8dc03bdd0d6b98714a", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "0917d22c7cbd3531be6365570952557aed054332d1ec89720213f218e4202ae0", - "action": "add" - } - }, - "DateTime": { - "1": { - "version": 1, - "hash": "7e9d89309a10d2110a7ae4f97d8f25a7914853269e8fa0c531630790c1253f17", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "c353b8edfa13250507942a3134f0ec9db8fb1d85f4f7a029fe4ad5665614bf5a", - "action": "add" - } - }, - "ReplyNotification": { - "1": { - "version": 1, - "hash": "34b2ad522f7406c2486573467d9c7acef5c1063a0d9f2177c3bda2d8c4f87572", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "7bea00170bce350ea1c3a1a16cfb31264e70da9da2fd6f2128852c479e793b60", - "action": "add" - } - }, - "HTTPConnection": { - "1": { - "version": 1, - "hash": "5ee19eaf55ecbe7945ea45924c036ec0f500114a2f64176620961a8c2ec94cdb", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "c05bfaf9ca6b5f47cd20c52fd7961bf9f372196713c2333fc9bfed8e0383acf1", - "action": "add" - } - }, - "PythonConnection": { - "1": { - "version": 1, - "hash": "011946fc9af0a6987f5c7bc9b0208b2fae9d65217531430bced7ba542788da1a", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "b7bb677f60333d3ab1e927d0be44725667ce75620c2861c706cbca022cfae1fc", - "action": "add" - } - }, - "ActionDataEmpty": { - "1": { - "version": 1, - "hash": "89b5912fe5416f922051b8068be6071a03c87a4ab264959de524f1b86e95f028", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "2bea14a344a82a10725a9e933bb1838ffbe2d28771ee4f54f40b4d5663840a7c", - "action": "add" - } - }, - "ObjectNotReady": { - "1": { - "version": 1, - "hash": "88207988639b11eaca686b6e079616d9caecc3dbc2a8112258e0f39ee5c3e113", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "be7001fea1c819ced4c14e6b3a32b59ee11f773d8b23cf42c2f228e782b631b8", - "action": "add" - } - }, - "ActionDataLink": { - "1": { - "version": 1, - "hash": "10bf94e99637695f1ba283f0b10e70743a4ebcb9ee75aefb1a05e6d6e1d21a71", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "4551f22ea68af0d0943f9aa239b4fd468cf9f4da43589b536651fc3d27d99f12", - "action": "add" - } - }, - "SyftImageRegistry": { - "1": { - "version": 1, - "hash": "dc83910c91947e3d9eaa3e6f8592237448f0408668c7cca80450b5fcd54722e1", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "3ceacaa164246323be86ccde0881dd42ee6275684e147095e1d0de7b007ae066", - "action": "add" - } - }, - "SyftWorkerImage": { - "1": { - "version": 1, - "hash": "2a9585b6a286e24f1a9f3f943d0128730cf853edc549184dc1809d19e1eec54b", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "4a6169ba1f50fdb73ac45500dd02b9d164ef239f13800c0da0ed5f8aed7cde1a", - "action": "add" - } - }, - "SyftWorker": { - "1": { - "version": 1, - "hash": 
"0d5b367162f3ce55ab090cc1b49bd30e50d4eb144e8431eadc679bd0e743aa70", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "257395af556b1b2972089150c0e3280479a5ba12779d012651eee2f6870e7133", - "action": "add" - } - }, - "WorkerPool": { - "1": { - "version": 1, - "hash": "250699eb4c452fc427995353d5c5ad6245fb3e9fdac8814f8348784816a0733b", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "3fa999bb789b9557939dea820ddcb6c68224822581971a3c3861da3b781d6c25", - "action": "add" - } - }, - "SecureFilePathLocation": { - "1": { - "version": 1, - "hash": "7febc066e2ee5a3a4a891720afede3f5c155cacc0557662ac4d04bf67b964c6d", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "f1a9510992d60e037c0016574225b8f61433b87bb65bc3320800b1c70e54982c", - "action": "add" - } - }, - "AzureSecureFilePathLocation": { - "1": { - "version": 1, - "hash": "1bb15f3f9d7082779f1c9f58de94011487924cb8a8c9c2ec18fd7c161c27fd0e", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "29a0c01a59d8632037c6d18d6fce1512b651e1aa8493b302746ff294c7bd331d", - "action": "add" - } - }, - "CreateBlobStorageEntry": { - "1": { - "version": 1, - "hash": "61a373336e83645f1b6d78a320323d9ea4ee91b3d87b730cb0608fbfa0072262", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "9046843fba39e5700aeb8c442a7e4ac5e772b12f6ac502367b2e5decbb26761f", - "action": "add" - } - }, - "BlobRetrievalByURL": { - "3": { - "version": 3, - "hash": "0b664100ea08413ca4ef04665ca910c2cf9535539617ea4ba33687d05cdfe747", - "action": "remove" - }, - "4": { - "version": 4, - "hash": "3fadedaf8e4ba97db9d4ddf1cf954338113cbb88d016253c008b11f0dfe19c59", - "action": "add" - } - }, - "BlobDeposit": { - "1": { - "version": 1, - "hash": "c98e6da658a3be01ead4ea6ee6a4c10046879f0ce0f5fc5f946346671579b229", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "87dd601b58f31ccf8e3001e8723d8d251f84bd7ab9a2f87ff7c6cf05b074d41f", - "action": "add" - } - }, - "HTTPNodeRoute": { - "1": { - "version": 1, - "hash": "1901b9f53f9970ce2bd8307ba9f7cafc0e7eba1d2ec82e4014c6120e605e3741", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "b7ee63d7b47d2fab46a62d8e7d8277c03f872524457f4fe128cc9759eac72795", - "action": "add" - } - }, - "PythonNodeRoute": { - "1": { - "version": 1, - "hash": "15711e6e7a1ef726c8e8b5c35a6cb2d30b56ba5213cba489524bf63489e136cf", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "375b36756047fa0e926e5461320960a5c48546ef8cc0c6bb4ff620c7084dc4fc", - "action": "add" - } - }, - "DataSubject": { - "1": { - "version": 1, - "hash": "0b8b049d4627727b444c419f5d6a97b7cb97a433088ebf744c854b6a470dadf1", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "6d9d65d2723aed8cc4cfce9b5ee4a005ab84f8a24372dc47ce856cb6516835a9", - "action": "add" - } - }, - "DataSubjectMemberRelationship": { - "1": { - "version": 1, - "hash": "0a820edc9f1a87387acc3c611fe852752fcb3dab7608058f2bc48211be7bfbd2", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "159d4e4f2463b213a65082b270acbb57ae84c5f0dbc897fda75486290b3148f1", - "action": "add" - } - }, - "Contributor": { - "1": { - "version": 1, - "hash": "d1d4f25bb87e59c0414501d3335097de66815c164c9ed5a7850ff8bec69fbcdc", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "55259f1e4f1b9da4ac83b032adb86eb4a1322a06584790d1300131777212dbaa", - "action": "add" - } - }, - "MarkdownDescription": { - "1": { - "version": 1, - "hash": "519328a3952049f57004013e4fb00840695b24b8575cad983056412c9c9d9ba6", - "action": "remove" - }, - "2": { - "version": 2, - "hash": 
"3416f899b925ba0636edd1ac01bf5c6f4f5533eae4f0a825f112bbf89dcd232a", - "action": "add" - } - }, - "Asset": { - "1": { - "version": 1, - "hash": "24350b8d9597df49999918ad42e0eece1328ea30389311f1e0a420be8f39b8a1", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "64661b3bc84a2df81ce631641a0fe3f0d969618b6855971f5e51e5770c278bba", - "action": "add" - } - }, - "CreateAsset": { - "1": { - "version": 1, - "hash": "1b4c71569b8da64258672483bd36dc4aa99a32d4cb519659241d15bc898041a6", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "93c75b45b9b74c69243cc2f2ef2d661e11eef5c23ecf71692ffdbd467d11efe6", - "action": "add" - } - }, - "DatasetPageView": { - "1": { - "version": 1, - "hash": "b1de14bb9b6a259648dfc59b6a48fa526116afe50a689c24b8bb36fd0e6a97f8", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "c7494afa0ae27326c4521a918eb234ba74eb2c0494ea448255ff310201a16c88", - "action": "add" - } - }, - "TwinObject": { - "1": { - "version": 1, - "hash": "c42455586b43724a7421becd99122b787a129798daf6081e96954ecaea228099", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "937fded2210d9b792cbe7a99879180e396902fe7b684cd6a14a651db8b9ca2c9", - "action": "add" - } - }, - "ExactMatch": { - "1": { - "version": 1, - "hash": "e497e2e2380db72766c5e219e8afd13136d8953933d6f1eaf83b14001e887cde", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "f752dfdec6b30e1c849e483ac88ab6f0c71a286199415e4f7bc33c8c2502fc1f", - "action": "add" - } - }, - "OutputHistory": { - "1": { - "version": 1, - "hash": "4ec6e6efd86a972b474251885151bdfe4ef262562174605e8ab6a8abba1aa867", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "425ad1c14348e51a2ec0eb82f1ef86b8fbc63e282e4c511023d6c2d644e3bd83", - "action": "add" - } - }, - "UserPolicy": { - "1": { - "version": 1, - "hash": "c69b17b1d96cace8b45da6d9639165f2da4aa7ff156b6fd922ac217bf7856d8a", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "6f201caff6457bd036e614a58aedb9fad6a3947b7d4d7965ccfdb788b6385262", - "action": "add" - } - }, - "SubmitUserPolicy": { - "1": { - "version": 1, - "hash": "96f7f39279fadc70c569b8d48ed4d6420a8132db51e37466d272fda19953554b", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "971f4aa69bf68e7a876b0b1cb85ba7d4213212baf7eeaa24bab0a70f18841497", - "action": "add" - } - }, - "UserCodeExecutionResult": { - "1": { - "version": 1, - "hash": "49c32e85e78b7b189a7f13b7e26115ef94fcb0b60b578adcbe2b95e289f63a6e", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "05c457f502f7a257a4d5287633d18bbd3cb4ba565afb6a69ac0822c55408a55e", - "action": "add" - } - }, - "CodeHistory": { - "1": { - "version": 1, - "hash": "a7baae93862ae0aa67675f1617574e31aafb15a9ebff633eb817278a3a867161", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "54793b2909c70303c58fb720e431752547e29e56a616e544b6a103b2bfd2f73b", - "action": "add" - } - }, - "CodeHistoryView": { - "1": { - "version": 1, - "hash": "0ed1a2a04a962ecbcfa38b0b8a03c1e51e8946a4b80f6bf2557148ce658671ce", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "3d5f79f8367c229f163ab746ef8c7069bec5a1478a19812dbac735fc333e41c3", - "action": "add" - } - }, - "CodeHistoriesDict": { - "1": { - "version": 1, - "hash": "95288411cd5843834f3273a2fd66a7df2e603e980f4ab1d329f9ab17d5d2f643", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "36175742343fdb2c9ea54809c08857cf1f30451245ebdca45b13020f6c7c0e2e", - "action": "add" - } - }, - "UsersCodeHistoriesDict": { - "1": { - "version": 1, - "hash": 
"5e1f389c4565ee8558386dd5c934d81e0c68ab1434f86bb9065976b587ef44d1", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "9cb9a7e1e5c5e294cd019bdb9824180fa399810e7d57db285823157c91ee7d76", - "action": "add" - } - }, - "OnDiskBlobDeposit": { - "1": { - "version": 1, - "hash": "5efc230c1ee65c4626d334aa69ed458c796c45265e546a333844c6c2bcd0e6b0", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "adc890e6c70334b46f49fff6b4f22d6aa9f13981b4f6ecd16a0f2910ed69da1b", - "action": "add" - } - }, - "RemoteConfig": { - "1": { - "version": 1, - "hash": "ad7bc4780a8ad52e14ce68601852c93d2fe07bda489809cad7cae786d2461754", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "9d6b8ddb258815b5660f2288164a3a87f68a0e6849493eb48c87da1509b6ab27", - "action": "add" - } - }, - "AzureRemoteConfig": { - "1": { - "version": 1, - "hash": "c05c6caa27db4e385c642536d4b0ecabc0c71e91220d2e6ce21a2761ca68a673", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "2f820aa55e6476b455fec7774346a4c0dad212bde1400f1f53f42c8864b7ded4", - "action": "add" - } - }, - "Change": { - "1": { - "version": 1, - "hash": "aefebd1601cf5bfd4817b0db75300a78299cc4949ead735a90873cbd22c8d4bc", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "b661753ae9187feb92751edb4a38066c9c14aba73e3639d44ac5fe7aee8b2ab9", - "action": "add" - } - }, - "ChangeStatus": { - "1": { - "version": 1, - "hash": "627f6f8e42cc285336aa6fd4916285d796140f4ff901487b7cb3907ef0f116a6", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "8a62d5bcde312e7b9efd1d0b26cab6de7affa1e3ffe9182f8598137340408084", - "action": "add" - } - }, - "ActionStoreChange": { - "1": { - "version": 1, - "hash": "17b865e75eb3fb2693924fb00ba87a25260be45d55a4eb2184c4ead22d787cbe", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "3a1c8f10afb4c4d10a4096a1371e4780b2cb40bb2253193bfced6c250d3e8547", - "action": "add" - } - }, - "CreateCustomImageChange": { - "1": { - "version": 1, - "hash": "bc09dca7995938f3b3a2bd9c8b3c2feffc8484df466144a425cb69cadb2ab635", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "6569fb11bccd100cd4b6050084656e7e7c46b9405ff76589b870402b26a6927b", - "action": "add" - } - }, - "CreateCustomWorkerPoolChange": { - "1": { - "version": 1, - "hash": "86894f8ccc037de61f44f9698fd113ba02c3cf3870a3048c00a46e15dcd1941c", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "e2a223a65461b502f097f06453f878b54175b4055dad3ec9b09c1eb9458a575e", - "action": "add" - } - }, - "Request": { - "1": { - "version": 1, - "hash": "e054307eeb7f13683cde9ce7613d5ca2925a13fff7c345b1c9f729a12c955f90", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "72bb2fcf520d8ca31fc5fd9b1730a8839648b7f446bcc9f2b6d80e4c635feb59", - "action": "add" - } - }, - "RequestInfo": { - "1": { - "version": 1, - "hash": "b76075c138afc0563ce9ac7f6b1131f048951f7486cd516c02736dc1a2a23639", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "fd127bb4f64b4d04122d31b27b46f712a6f3c9518b2e6df0b140247bab115789", - "action": "add" - } - }, - "RequestInfoFilter": { - "1": { - "version": 1, - "hash": "7103abdc464ae71bb746410f5730f55dd8ed82268aa32bbb0a69e0070488a669", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "c8773edca83f068b5a7b7ebe7f5e70ff8df65915564cead695b4528203f750a3", - "action": "add" - } - }, - "SubmitRequest": { - "1": { - "version": 1, - "hash": "96b4ec12beafd9d8a7c97399cb8a23dade4db16d8f521be3fe7b8fec99db5161", - "action": "remove" - }, - "2": { - "version": 2, - "hash": 
"796b297342793995b8dd87e8feb420e8601dee3b704b7a21a93326661b227ea8", - "action": "add" - } - }, - "ObjectMutation": { - "1": { - "version": 1, - "hash": "0ee3dd38d6df0fe9a19d848e8f3aaaf13a6ba86afe3406c239caed6da185651a", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "24b7c302f9821afe073534d4ed02c377bd4f7cb691f66ca92b94c38c92dc78c2", - "action": "add" - } - }, - "EnumMutation": { - "1": { - "version": 1, - "hash": "4c02f956ec9b973064972cc57fc8dd9c525e683f93f804642b4e1bfee1b62e57", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "6d2e2f64c00dcda74a2545c77abbcf1630c56c26014987038feab174d15bd9d7", - "action": "add" - } - }, - "NodePeer": { - "1": { - "version": 1, - "hash": "7b88de7e38490e2d69f31295137673e7ddabc16ab0e2272ff491f6cea1835d63", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "14cf8b9bb7c95c20caec8606ae5dddb882832f00fba2326352e7a0f2444dbc9f", - "action": "add" - } - }, - "SyftObjectMigrationState": { - "1": { - "version": 1, - "hash": "d3c8126bc15dae4dd243bb035530e3f56cd9e433d403dd6b5f3b45face6d281f", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "187e6b6619f56fdaf2fbe150a0ec561b1d6a7dbfbc6132257951844206319c79", - "action": "add" - } - }, - "ProjectThreadMessage": { - "1": { - "version": 1, - "hash": "1118e935792e8e54103dbf91fa33edbf192a7767d2b1d4526dfa7d4a643cde2e", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "319007e1173c1558917cbdf25171da70514fe0afaae49c7d099aca6f2ec87015", - "action": "add" - } - }, - "ProjectMessage": { - "1": { - "version": 1, - "hash": "55a3a5171b6949372b4125cc461bf39bc998565e07703804fca6c7ef99695ae4", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "086513fa450d185b5040b75dc034f4e219c3214677674efa4b4263fda140ce2a", - "action": "add" - } - }, - "ProjectRequestResponse": { - "1": { - "version": 1, - "hash": "d4c360e845697a0b24695143d0781626cd344cfde43162c90ae90fe67e00ae21", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "b29309054cd9f9e6a3f00724453f90510076de0bf03ff300fc83670a1721b272", - "action": "add" - } - }, - "ProjectRequest": { - "1": { - "version": 1, - "hash": "514d189df335c68869eea36befcdcafec74bdc682eaf18871fe879e26da4dbb6", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "7d7f74f39333bef10ac37f49b5783dc9ba9b5783d2bec814d7de2d2025bcce01", - "action": "add" - } - }, - "AnswerProjectPoll": { - "1": { - "version": 1, - "hash": "ff2e1ac7bb764c99d646b96eb3ebfbf9311599b7e3be07aa4a4eb4810bb6dd12", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "fff1a7e5ca30b76132cf8b6225cb576467d9727349b9dc54d4131fede03c10f3", - "action": "add" - } - }, - "ProjectPoll": { - "1": { - "version": 1, - "hash": "b0ac8f1d9c06997374ddbc33fdf1d0af0da15fdb6899f52d91a8574106558964", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "90522301ab056881d79a066d824dcce6d7836f2555ac4182bbafe75bea5a5fa7", - "action": "add" - } - }, - "Project": { - "1": { - "version": 1, - "hash": "ec5b7ac1c92808e266f06b175c6ebcd50be81777ad120c02ce8c6074d0004788", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "4b7f5d0bec9a1ba7863679b85425f1918745e9dad21476078c19f7257d5f38a3", - "action": "add" - } - }, - "ProjectSubmit": { - "1": { - "version": 1, - "hash": "0374b37779497d7e0b2ffeabc38d35bfbae2ee762a7674a5a8af75e7c5545e61", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "0af1abb9ac899c0bc133971f75d17be8260b80a2df9fe191965db431bb6fd910", - "action": "add" - } - }, - "VeilidConnection": { - "1": { - "version": 1, - "hash": 
"c5ed1cfa9b7b146dbce7f1057f6e81e89715b5addfd4d4c4d53c415e450373a5", - "action": "add" - } - }, - "VeilidNodeRoute": { - "1": { - "version": 1, - "hash": "4797413e3144fce7bccc290db64f1750e8c09f75d5e1aba6e19d29f921a21074", - "action": "add" - } - }, - "EnclaveMetadata": { - "1": { - "version": 1, - "hash": "39f85e475015e6f860ddcc5fea819423eba2db8f4b7d8e004c05a44d6f8444c6", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "5103272305abd2bcf23c616bd9014be986a92c40dc37b6238680114036451852", - "action": "add" - } - } - } - } -} diff --git a/packages/syft/src/syft/protocol/releases/0.9.1.json b/packages/syft/src/syft/protocol/releases/0.9.1.json new file mode 100644 index 00000000000..9c33a5d3a88 --- /dev/null +++ b/packages/syft/src/syft/protocol/releases/0.9.1.json @@ -0,0 +1,1178 @@ +{ + "1": { + "object_versions": { + "SyftObjectVersioned": { + "1": { + "version": 1, + "hash": "7c842dcdbb57e2528ffa690ea18c19fff3c8a591811d40cad2b19be3100e2ff4", + "action": "add" + } + }, + "BaseDateTime": { + "1": { + "version": 1, + "hash": "614db484b1950be729902b1861bd3a7b33899176507c61cef11dc0d44611cfd3", + "action": "add" + } + }, + "SyftObject": { + "1": { + "version": 1, + "hash": "bb70d874355988908d3a92a3941d6613a6995a4850be3b6a0147f4d387724406", + "action": "add" + } + }, + "PartialSyftObject": { + "1": { + "version": 1, + "hash": "19a995fcc2833f4fab24584fd99b71a80c2ef1f13c06f83af79e4482846b1656", + "action": "add" + } + }, + "ServerMetadata": { + "1": { + "version": 1, + "hash": "1691c7667eca86b20c4189e90ce4e643dd41fd3682cdb69c6308878f2a6f135c", + "action": "add" + } + }, + "StoreConfig": { + "1": { + "version": 1, + "hash": "a9997fce6a8a0ed2884c58b8eb9382f8554bdd18fff61f8bf0451945bcff12c7", + "action": "add" + } + }, + "MongoDict": { + "1": { + "version": 1, + "hash": "57e36f57eed75e62b29e2bac1295035a9bf2c0e3c56719dac24cb6cc685be00b", + "action": "add" + } + }, + "MongoStoreConfig": { + "1": { + "version": 1, + "hash": "53342b27d34165b7e2699f8e7ad70d13d125875e6a75e8fa18f5796428f41036", + "action": "add" + } + }, + "LinkedObject": { + "1": { + "version": 1, + "hash": "d80f5ac7f51a9383be1a3cb334d56ae50e49733ed3199f3b6b5d6febd9de410b", + "action": "add" + } + }, + "BaseConfig": { + "1": { + "version": 1, + "hash": "10bd7566041d0f0a3aa295367785fdcc2c5bbf0ded984ac9230754f37496a6a7", + "action": "add" + }, + "2": { + "version": 2, + "hash": "890d2879ac44611db9b88ba9334a721130d0ac3aa18a303fa9e4081f14b9b8c7", + "action": "add" + } + }, + "ServiceConfig": { + "1": { + "version": 1, + "hash": "28af8a296f5ff63de50438277eaa1f4380682e6aca9f2ca28320d7a444825e88", + "action": "add" + }, + "2": { + "version": 2, + "hash": "93dfab144e0b0884c602358b3a9ce889bb29ab96e3b4adcfe3cef47a31694a9a", + "action": "add" + } + }, + "LibConfig": { + "1": { + "version": 1, + "hash": "ee8f0e3f6aae81948d72e30226645e8eb5d312a6770411a1edca748168c467c0", + "action": "add" + }, + "2": { + "version": 2, + "hash": "a8a78a8d726ee9e79f95614f3d0fa5b85edc6fce7be7651715208669be93e0e3", + "action": "add" + } + }, + "APIEndpoint": { + "1": { + "version": 1, + "hash": "faa1cf9336a0d1233868c8c57745ff38c0be60399dc1acd0c0e8dd440e405dbd", + "action": "add" + } + }, + "LibEndpoint": { + "1": { + "version": 1, + "hash": "a585c83a33a019d363ae5a0c6d4197193654307c19a4829dfbf8a8cfd2c1842a", + "action": "add" + } + }, + "SignedSyftAPICall": { + "1": { + "version": 1, + "hash": "2f959455f7130f4e59360b8aa58f19785b76eaa0f8a5a9188a6cbf32b31311ca", + "action": "add" + } + }, + "SyftAPICall": { + "1": { + "version": 1, + "hash": 
"59e89e7b9ea30deaed64d1ffd9bc0769b999d3082b305428432c1f5be36c6343", + "action": "add" + } + }, + "SyftAPIData": { + "1": { + "version": 1, + "hash": "820b279c581cafd9bb5009702d4e3db22ec3a3156676426304b9038dad260a24", + "action": "add" + } + }, + "SyftAPI": { + "1": { + "version": 1, + "hash": "cc13ab058ee36748c14b0d4bd9b9e894c7566fff09cfa4170b3eece520169f15", + "action": "add" + } + }, + "User": { + "1": { + "version": 1, + "hash": "2df4b68182c558dba5485a8a6867acf2a5c341b249ad67373a504098aa8c4343", + "action": "add" + }, + "2": { + "version": 2, + "hash": "af6fb5b2e1606e97838f4a60f0536ad95db606d455e94acbd1977df866608a2c", + "action": "add" + } + }, + "UserUpdate": { + "1": { + "version": 1, + "hash": "1bf6707c69b809c804fb939c7c37d787c2f6889508a4bec37d24221af2eb777a", + "action": "add" + } + }, + "UserCreate": { + "1": { + "version": 1, + "hash": "49d6087e2309ba59987f3126e286e74b3a66492a08ad82fa507ea17d52ce78e3", + "action": "add" + } + }, + "UserSearch": { + "1": { + "version": 1, + "hash": "9ac946338cca68d00d1696a57943442f062628ec3daf53077d0bdd3f72cd9fa0", + "action": "add" + } + }, + "UserView": { + "1": { + "version": 1, + "hash": "0b52d758e31d5889c9cd88afb467aae4a74e34a5276924e07012243c34d300fe", + "action": "add" + } + }, + "UserViewPage": { + "1": { + "version": 1, + "hash": "1cd6528d02ec180f080d5c35f0da760d8a59af9da7baaa9c17c1c7cedcc858fa", + "action": "add" + } + }, + "UserPrivateKey": { + "1": { + "version": 1, + "hash": "4817d8147aba94373f320dcd90e65f097cf6e5a2ef353aa8520e23128d522b5d", + "action": "add" + } + }, + "DateTime": { + "1": { + "version": 1, + "hash": "394abb554114ead4d63c36e3fe83ac018dead4b21a8465174009577c46d54c58", + "action": "add" + } + }, + "ReplyNotification": { + "1": { + "version": 1, + "hash": "84102dfc59d711b03c2f3d3a6ecaca000b6835f1bbdd9af801057f7aacb5f1d0", + "action": "add" + } + }, + "Notification": { + "1": { + "version": 1, + "hash": "af4cb232bff390c431e399975f048b34da7e940ace8b23b940a3b398c91c5326", + "action": "add" + } + }, + "CreateNotification": { + "1": { + "version": 1, + "hash": "7e426c946b7d5db6f9427960ec16042f3018091d835ca5966f3568c324a2ab53", + "action": "add" + } + }, + "UserNotificationActivity": { + "1": { + "version": 1, + "hash": "422fd01c6d9af38688a9982abd34e80794a1f6ddd444cca225d77f49189847a9", + "action": "add" + } + }, + "NotificationPreferences": { + "1": { + "version": 1, + "hash": "a42f06b367e7c6cbabcbf3cfcc84d1ca0873e457d972ebd060e87c9d6185f62b", + "action": "add" + } + }, + "NotifierSettings": { + "1": { + "version": 1, + "hash": "65c8ab814d35fac32f68d3000756692592cc59940f30e3af3dcdfa2328755b9d", + "action": "add" + }, + "2": { + "version": 2, + "hash": "be8b52597fc628d1b7cd22b776ee81416e1adbb04a45188778eb0e32ed1416b4", + "action": "add" + } + }, + "SyftImageRegistry": { + "1": { + "version": 1, + "hash": "67e18903e41cba1afe136adf29d404b63ec04fea6e928abb2533ec4fa52b246b", + "action": "add" + } + }, + "SyftWorkerImage": { + "1": { + "version": 1, + "hash": "44da7badfbe573d5403d3ab78c077f17dbefc560b81fdf927b671815be047441", + "action": "add" + } + }, + "SyftWorker": { + "1": { + "version": 1, + "hash": "9d897f6039eabe48dfa8e8d5c5cdcb283b0375b4c64571b457777eaaf3fb1920", + "action": "add" + } + }, + "WorkerPool": { + "1": { + "version": 1, + "hash": "16efc5dd2596ae744fd611c8f46af9eaec1bd5729eb20e85e9fd2f31df402564", + "action": "add" + } + }, + "MarkdownDescription": { + "1": { + "version": 1, + "hash": "31a73f8824cad1636a55d14b6a1074cdb071d0d4e16e86baaa3d4f63a7e80134", + "action": "add" + } + }, + "HTMLObject": { + "1": { + 
"version": 1, + "hash": "97f2e93f5ceaa88015047186f66a17ff13df2a6b7925b41331f9e19d5a515a9f", + "action": "add" + } + }, + "PwdTokenResetConfig": { + "1": { + "version": 1, + "hash": "0415a272428f22add4896c64aa9f29c8c1d35619e2433da6564eb5f1faff39ac", + "action": "add" + } + }, + "ServerSettingsUpdate": { + "1": { + "version": 1, + "hash": "1e4260ad879ae80728c3ffae2cd1d48759abd51f9d0960d4b25855cdbb4c506b", + "action": "add" + }, + "2": { + "version": 2, + "hash": "23b2716e9dceca667e228408e2416c82f11821e322e5bccf1f83406f3d09abdc", + "action": "add" + }, + "3": { + "version": 3, + "hash": "335c7946f2e52d09c7b26f511120cd340717c74c5cca9107e84f839da993c55c", + "action": "add" + }, + "4": { + "version": 4, + "hash": "8d7a41992c39c287fcb46383bed429ce75d3c9524ced8c86b88c26dd0232e2fe", + "action": "add" + } + }, + "ServerSettings": { + "1": { + "version": 1, + "hash": "5a1e7470cbeaaae5b80ac9beecb743734f7e4e42d429a09ea8defa569a5ddff1", + "action": "add" + }, + "2": { + "version": 2, + "hash": "7727ea54e494dc9deaa0d1bd38ac8a6180bc192b74eec5659adbc338a19e21f5", + "action": "add" + }, + "3": { + "version": 3, + "hash": "997667e1cba22d151857aacc2caba6b1ca73c1648adbd03461dc74a0c0c372b3", + "action": "add" + }, + "4": { + "version": 4, + "hash": "b8067777967a0e06733433e179e549caaf501419d62f7e8474ee33b839e3890d", + "action": "add" + } + }, + "HTTPConnection": { + "1": { + "version": 1, + "hash": "bf10f81646c71069c76292b1237b4a3de1e507264392c5c591d067636ce6fb46", + "action": "add" + } + }, + "PythonConnection": { + "1": { + "version": 1, + "hash": "28010778b5e3463ff6960a0e2224818de00bc7b5e6f892192e02e399ccbe18b5", + "action": "add" + } + }, + "ActionDataEmpty": { + "1": { + "version": 1, + "hash": "e0e4a5cf18d05b6b747addc048515c6f2a5f35f0766ebaee96d898cb971e1c5b", + "action": "add" + } + }, + "ObjectNotReady": { + "1": { + "version": 1, + "hash": "8cf471e205cd0893d6aae5f0227d14db7df1c9698da08a3ab991f59132d17fe9", + "action": "add" + } + }, + "ActionDataLink": { + "1": { + "version": 1, + "hash": "3469478343439e411b761c270eec63eb3d533e459ad72d0965158c3a6cdf3b9a", + "action": "add" + } + }, + "Action": { + "1": { + "version": 1, + "hash": "021826d7c6f69bd0283d025d40661f3ffbeba8810ca94de01344f6afbdae62cd", + "action": "add" + } + }, + "ActionObject": { + "1": { + "version": 1, + "hash": "0a5f4bc343cb114a251f06686ecdbb59d74bfb3d29a098b176699deb35a1e683", + "action": "add" + } + }, + "AnyActionObject": { + "1": { + "version": 1, + "hash": "b3c44c7788c59c03fa1baeec656c2ca6e633f4cbd4b23ff7ece6ee94c38449f0", + "action": "add" + } + }, + "CustomEndpointActionObject": { + "1": { + "version": 1, + "hash": "c7addbaf2777707f3e91e5c1e092343476cd22efc4ec8617f39ccf76e61a5a14", + "action": "add" + }, + "2": { + "version": 2, + "hash": "846ba36e8737a1bec16853c9de54c4948450009278e0b76fe7e3355ef9e70089", + "action": "add" + } + }, + "DataSubject": { + "1": { + "version": 1, + "hash": "582cdf9e82b5d6915b7f09f7c0d5f08328b11a2ce9b0198e5083f1672c2e2bf5", + "action": "add" + } + }, + "DataSubjectCreate": { + "1": { + "version": 1, + "hash": "5a8423c2690d55f425bfeecc87cd4a797a75d88ebb5fbda754d4f269b62d2ceb", + "action": "add" + } + }, + "DataSubjectMemberRelationship": { + "1": { + "version": 1, + "hash": "0810483ea76ea10c8f286c6035dc0b2085291f345183be50c179f3a05a577110", + "action": "add" + } + }, + "Contributor": { + "1": { + "version": 1, + "hash": "30c32bd44098f00e0b15496be441763b6e50af8b12d3d2bef33aca6287193876", + "action": "add" + } + }, + "Asset": { + "1": { + "version": 1, + "hash": 
"000abc78719611c106295cf12b1690b7e5411dc1bb9db9d4afd22956da90d1f4", + "action": "add" + } + }, + "CreateAsset": { + "1": { + "version": 1, + "hash": "357d52576cb12b24fb3980342bb49a562b065c0e4419e87d34176340628c7309", + "action": "add" + } + }, + "Dataset": { + "1": { + "version": 1, + "hash": "0ca6b0b4a3aebb2c8f351668075b44951bb20d1e23a779b82109124f334ce3a4", + "action": "add" + } + }, + "DatasetPageView": { + "1": { + "version": 1, + "hash": "aa0dd69637281b80d5523b4409a2c7e89db114c9fe79c858063c6dadff8977d1", + "action": "add" + }, + "2": { + "version": 2, + "hash": "be1ca6dcd0b3aa0481ce5dce737e78432d06a78ad0c701aaf136be407c798352", + "action": "add" + } + }, + "CreateDataset": { + "1": { + "version": 1, + "hash": "7e02dfa89540c3dbebacbb13810d95cdc4e36db31d56cffed7ab54abe25716c9", + "action": "add" + } + }, + "SyftLog": { + "1": { + "version": 1, + "hash": "1bcd71e5bf3f0db3bba0996f33b6b2bde3489b9c71f11e6b30c3495c76a8f53f", + "action": "add" + } + }, + "JobItem": { + "1": { + "version": 1, + "hash": "0b32277b7d3b9bdc14a2a51cc9005f8254e7f7b6ec059ddcccbcd681a807afb6", + "action": "add" + }, + "2": { + "version": 2, + "hash": "b087d0c62b7d304c6ca80e4fb0e8a7f2a444be8f8cba57490dc09aeb98033105", + "action": "add" + } + }, + "ExecutionOutput": { + "1": { + "version": 1, + "hash": "e36c71685edf5276a3427cb6749550486d3a177c1dcf73dd337ab2a73c0ce6b5", + "action": "add" + } + }, + "TwinObject": { + "1": { + "version": 1, + "hash": "4f31243fb348dbb083579afd6f638d75af010cb53d19bfba59b74afff41ccbbb", + "action": "add" + } + }, + "PolicyRule": { + "1": { + "version": 1, + "hash": "44d1ca1db97be46f66558aa1a729ff31bf8e113c6a913b11aedf9d6b6ad5b7b5", + "action": "add" + } + }, + "CreatePolicyRule": { + "1": { + "version": 1, + "hash": "342bb723526d445151a0435f57d251f4c1219f8ae7cca3e8e9fce52e2ee1b8b1", + "action": "add" + } + }, + "CreatePolicyRuleConstant": { + "1": { + "version": 1, + "hash": "78b54832cb0468a87013bc36bc11d4759874ca1b5065a1b711f1e5ef5d94c2df", + "action": "add" + } + }, + "Matches": { + "1": { + "version": 1, + "hash": "dd6d91ddb2ec5eaf60be2b0899ecfdb9a15f7904aa39d2f4d9bb2d7b793040e6", + "action": "add" + } + }, + "PreFill": { + "1": { + "version": 1, + "hash": "c7aefb11dc4c4569dcd1e6988371047a32a8be1b32ad46d12adba419a19769ad", + "action": "add" + } + }, + "UserOwned": { + "1": { + "version": 1, + "hash": "c8738dc3d8c2a5ef461b85a0467c3dff53dab16b54a4d12b44b1477906aef51d", + "action": "add" + } + }, + "MixedInputPolicy": { + "1": { + "version": 1, + "hash": "37bb12d950518d9579c8ec7c4cc22ac731ea82caf8c1370dd0b0a82b46462dde", + "action": "add" + } + }, + "ExactMatch": { + "1": { + "version": 1, + "hash": "5eb37edbf5e451d942e599247f3eaed923c1fe9d91eefdba02bf06503f6cc08d", + "action": "add" + } + }, + "OutputHistory": { + "1": { + "version": 1, + "hash": "9366db79d131f8c65e5a4ff12c90e2aa0c11e302debe06e46eeb93b26e2aaf61", + "action": "add" + } + }, + "OutputPolicyExecuteCount": { + "1": { + "version": 1, + "hash": "2a77e5ed5c7b0391147562651ad4061e20b11745c191fbc34cb549da37ba72dd", + "action": "add" + } + }, + "OutputPolicyExecuteOnce": { + "1": { + "version": 1, + "hash": "5589c00d127d9eb1f5ccf3a16def8219737784d57bb3bf9be5cb6d83325ef436", + "action": "add" + } + }, + "EmptyInputPolicy": { + "1": { + "version": 1, + "hash": "7ef81cfd223be0064600e1503f8b04bafc16385e27730e9319466e68a077c68b", + "action": "add" + } + }, + "UserPolicy": { + "1": { + "version": 1, + "hash": "74373bb71a334f4dcf77623ae10ff5b1c7e5b3006f65f2051ffb1e01f422f982", + "action": "add" + } + }, + "SubmitUserPolicy": { + "1": { + 
"version": 1, + "hash": "ec4e808eb39613bcdbbbf9ffb3267612084a9d99880a2f3bee3ef32d46329c02", + "action": "add" + } + }, + "UserCodeStatusCollection": { + "1": { + "version": 1, + "hash": "735ecf2d4abb1e7d19b2e751d880f32b01ce267ba10e417ef1b440be3d94d8f1", + "action": "add" + } + }, + "UserCode": { + "1": { + "version": 1, + "hash": "3bcd14413b9c4fbde7c5612c2ed713518340280b5cff89cf2aaaf1c77c4037a8", + "action": "add" + } + }, + "SubmitUserCode": { + "1": { + "version": 1, + "hash": "d2bb8cfe12f070b4adafded78ce01900c5409bd83f055f94b1e285745ef65a76", + "action": "add" + } + }, + "UserCodeExecutionResult": { + "1": { + "version": 1, + "hash": "1f4cbc62caac4dd193f427306405dc7a099ae744bea5830cf57149ce71c1e589", + "action": "add" + } + }, + "UserCodeExecutionOutput": { + "1": { + "version": 1, + "hash": "c1d53300a39dbbb437d7d5a1257bd175a067b1065f4099a0938fac7540035258", + "action": "add" + }, + "2": { + "version": 2, + "hash": "3e104e39b4ab53c950e61e4f7e92ce935cf96a5100de301de9bf297eb7e5787e", + "action": "add" + } + }, + "CodeHistory": { + "1": { + "version": 1, + "hash": "e3ef5346f108257828f364d22b12d9311812c9cf843200afef5dc4d9302f9b21", + "action": "add" + } + }, + "CodeHistoryView": { + "1": { + "version": 1, + "hash": "8b8b97d334b51d1ce0a9efab722411ff25caa3f12be319105954497e0a306eb2", + "action": "add" + } + }, + "CodeHistoriesDict": { + "1": { + "version": 1, + "hash": "01d7dcd4b21525a06e4484d8699a4a34a5c84f1f6026ec55e32eb30412742601", + "action": "add" + } + }, + "UsersCodeHistoriesDict": { + "1": { + "version": 1, + "hash": "4ed8b83973258ea19a1f91feb2590ff73b801be86f4296cc3db48f6929ff784c", + "action": "add" + } + }, + "BlobFile": { + "1": { + "version": 1, + "hash": "d99239100f1cb0b73c69b2ad7cab01a06909cc3a4976ba2b3b67cf6fe5e2f516", + "action": "add" + } + }, + "BlobFileOBject": { + "1": { + "version": 1, + "hash": "6c40dab2c8d2220d4fff7cc653d76cc026a856db7e2b5713b6341e255adc7ea2", + "action": "add" + } + }, + "SecureFilePathLocation": { + "1": { + "version": 1, + "hash": "ea5978b98d7773d221665b450454c9130c103a5c850669a0acd620607cd614b7", + "action": "add" + } + }, + "SeaweedSecureFilePathLocation": { + "1": { + "version": 1, + "hash": "3fc9bfc8c1b1cf660c9747e8c1fe3eb2220e78d4e3b5d6b5c5f29a07a77ebf3e", + "action": "add" + } + }, + "AzureSecureFilePathLocation": { + "1": { + "version": 1, + "hash": "090a9e962eeb655586ee966c5651d8996363969818a38f9a486fd64d33047e05", + "action": "add" + } + }, + "BlobStorageEntry": { + "1": { + "version": 1, + "hash": "afdc6a1d8a24b1ee1ed9d3e79f5bac64b4f0d9d36800f07f10be0b896470345f", + "action": "add" + } + }, + "BlobStorageMetadata": { + "1": { + "version": 1, + "hash": "9d4b61ac4ea1910c2f7c767a50a6a52544a24663548f069e79bd906f11b538e4", + "action": "add" + } + }, + "CreateBlobStorageEntry": { + "1": { + "version": 1, + "hash": "ffc3cbfeade67d074dc5bf7d655a1eb8c83630076028a72b3cc4548f3b413e14", + "action": "add" + } + }, + "SyftObjectMigrationState": { + "1": { + "version": 1, + "hash": "ee83315828551f18904bab18e0cac48896493620561215b04cc448e6ce5834af", + "action": "add" + } + }, + "StoreMetadata": { + "1": { + "version": 1, + "hash": "8de9a22a2765ef976bc161cb0704347d30350c085da8c8ffa876065cfca3e5fd", + "action": "add" + } + }, + "MigrationData": { + "1": { + "version": 1, + "hash": "cb96b8c8413609e1224341d1b0dd1efb08387c0ff7b0ff65eba36c0b104c9ed1", + "action": "add" + }, + "2": { + "version": 2, + "hash": "1d1b14c196221ecf6d644d7dcaa32ac9e90361b2687fa83161ff399ebc6df1bd", + "action": "add" + } + }, + "BlobRetrieval": { + "1": { + "version": 1, + "hash": 
"c422c74b89a9349742acaa848566fe18bfef1a83333458b858c074baed37a859", + "action": "add" + } + }, + "SyftObjectRetrieval": { + "1": { + "version": 1, + "hash": "b2b62447445adc4cd0b77ab59d6fa56624dd316fb50281e570daad07556b6db2", + "action": "add" + } + }, + "BlobRetrievalByURL": { + "1": { + "version": 1, + "hash": "4db0e3b7a6334d3835356d8393866711e243e360af25a95f3cc4066f032404b5", + "action": "add" + } + }, + "BlobDeposit": { + "1": { + "version": 1, + "hash": "6eb5cc57dc763126bfc6ec5a2b79d02e77eadf9d9efb1888a5c366b7799c1c24", + "action": "add" + } + }, + "OnDiskBlobDeposit": { + "1": { + "version": 1, + "hash": "817bf1bee4a35bfa1cd25d6779a10d8d180b1b3f1e837952f81f48b9411d1970", + "action": "add" + } + }, + "RemoteConfig": { + "1": { + "version": 1, + "hash": "179d067099a178d748c6d9a0477e8de7c3b55577439669eca7150258f2409567", + "action": "add" + } + }, + "AzureRemoteConfig": { + "1": { + "version": 1, + "hash": "a143811fec0da5fd881e927643ef667c91c78a2c90519cf88da7da20738bd187", + "action": "add" + } + }, + "SeaweedFSBlobDeposit": { + "1": { + "version": 1, + "hash": "febeb2a2ce81aa2c512e4c6b611b582984042aafa0541403d4584662273a166c", + "action": "add" + } + }, + "DictStoreConfig": { + "1": { + "version": 1, + "hash": "2e1365c5535fa51c22eef79f67dd6444789bc829c27881367e3050e06e2ffbfe", + "action": "add" + } + }, + "NumpyArrayObject": { + "1": { + "version": 1, + "hash": "05dd2917b7692b3daf4e7ad083a46fa7ec7a2be8faac8d4a654809189c986443", + "action": "add" + } + }, + "NumpyScalarObject": { + "1": { + "version": 1, + "hash": "8753e5c78270a5cacbf0439447724772f4765351a4a8b58b0a5c416a6b2c8b6e", + "action": "add" + } + }, + "NumpyBoolObject": { + "1": { + "version": 1, + "hash": "331c44f8fa3d0a077f1aaad7313bae2c43b386d04def7b8bedae9fdf7690134d", + "action": "add" + } + }, + "PandasDataframeObject": { + "1": { + "version": 1, + "hash": "5e8018364cea31d5f185a901da4ab89846b02153ee7d041ee8a6d305ece31f90", + "action": "add" + } + }, + "PandasSeriesObject": { + "1": { + "version": 1, + "hash": "b8bd482bf16fc7177e9778292cd42f8835b6ced2ce8dc88908b4b8e6d7c7528f", + "action": "add" + } + }, + "Change": { + "1": { + "version": 1, + "hash": "75fb9a5cd4e76b189ebe130a421d3921a0c251947a48bbb92a2ef1c315dc3c16", + "action": "add" + } + }, + "ChangeStatus": { + "1": { + "version": 1, + "hash": "c914a6f7637b555a51b71e8e197e591f7a2e28121e29b5dd586f87e0383d179d", + "action": "add" + } + }, + "ActionStoreChange": { + "1": { + "version": 1, + "hash": "1a803bb08924b49f3114fd46e0e132f819d4d56be5e03a27e9fe90947ca26e85", + "action": "add" + } + }, + "CreateCustomImageChange": { + "1": { + "version": 1, + "hash": "c3dbea3f49979fdcc517c0d13cd02739ca2fe86b370c42496a224f142ae31562", + "action": "add" + } + }, + "CreateCustomWorkerPoolChange": { + "1": { + "version": 1, + "hash": "0355793dd58b364dcb84fff29714b6a26446bead3ba95c6d75e3200008e580f4", + "action": "add" + } + }, + "Request": { + "1": { + "version": 1, + "hash": "1d69f5f0074114f99aa29c5ee77cb20b9151e5b50e77b026f11c3632a12efadf", + "action": "add" + } + }, + "RequestInfo": { + "1": { + "version": 1, + "hash": "779562547744ebed64548f8021647292604fdf4256bf79685dfa14a1e56cc27b", + "action": "add" + } + }, + "RequestInfoFilter": { + "1": { + "version": 1, + "hash": "bb881a003032f4676321218d7cd09580f4d64fccaa1cf9e118fdcd5c73c3d3a8", + "action": "add" + } + }, + "SubmitRequest": { + "1": { + "version": 1, + "hash": "6c38b6ffd0a6f7442746e68b9ace7b21cb1dca7d2031929db5f9a302a280403f", + "action": "add" + } + }, + "ObjectMutation": { + "1": { + "version": 1, + "hash": 
"ce88096760ce9334599c8194ec97b0a1470651ad680d9d21b8826a0df0af2a36", + "action": "add" + } + }, + "EnumMutation": { + "1": { + "version": 1, + "hash": "5173fda73df17a344eb663b7692cca48bd46bf1773455439836b852cd165448c", + "action": "add" + } + }, + "UserCodeStatusChange": { + "1": { + "version": 1, + "hash": "89aaf7f1368c782e3a1b9e79988877f6eaa05ab84365f7d321b757fde7fe86e7", + "action": "add" + } + }, + "SyncedUserCodeStatusChange": { + "1": { + "version": 1, + "hash": "d9ad2d341eb645bd50d06330cd30fd4c266f93e37b9f5391d58b78365fc440e6", + "action": "add" + } + }, + "TwinAPIContextView": { + "1": { + "version": 1, + "hash": "e099eef32cb3a8a806cbdc54cc7fca96bed3d60344bd571163ec049db407938b", + "action": "add" + } + }, + "CustomAPIView": { + "1": { + "version": 1, + "hash": "769e96bebd05736ab860591670fb6da19406239b0104ddc71bd092a134335146", + "action": "add" + } + }, + "CustomApiEndpoint": { + "1": { + "version": 1, + "hash": "ec4a217585336d1b59c93c18570443a63f4fbb24d2c088fbacf80bcf389d23e8", + "action": "add" + } + }, + "PrivateAPIEndpoint": { + "1": { + "version": 1, + "hash": "6d7d143432c2811c520ab6dade005ba40173b590e5c676be04f5921b970ef938", + "action": "add" + } + }, + "PublicAPIEndpoint": { + "1": { + "version": 1, + "hash": "3bf51fc33aa8feb1abc9d0ef792e8889da31a57050430e0bd8e17f2065ff8734", + "action": "add" + } + }, + "UpdateTwinAPIEndpoint": { + "1": { + "version": 1, + "hash": "851e59412716e73c7f70a696619e0b375ce136b43f6fe2ea784747091caba5d8", + "action": "add" + } + }, + "CreateTwinAPIEndpoint": { + "1": { + "version": 1, + "hash": "3d0b84dae95ebcc6647b5aabe54e65b3c6bf957665fde57d8037806a4aac13be", + "action": "add" + } + }, + "TwinAPIEndpoint": { + "1": { + "version": 1, + "hash": "d1947b8f9c80d6c9b443e5a9f0758afa8849a5f12b9a511feefd7e4f82c374f4", + "action": "add" + } + }, + "SyncState": { + "1": { + "version": 1, + "hash": "9a3f0bb973858b55bc766c9770c4d9abcc817898f797d94a89938650c0c67868", + "action": "add" + } + }, + "WorkerSettings": { + "1": { + "version": 1, + "hash": "dca33003904a71688e5b07db65f8833eb4de8135aade7154076b8eafbb94d26b", + "action": "add" + } + }, + "HTTPServerRoute": { + "1": { + "version": 1, + "hash": "938245604a9c7e50001299afff5b669b2548364e356fed22a22780497831bf81", + "action": "add" + } + }, + "PythonServerRoute": { + "1": { + "version": 1, + "hash": "a068d8f942d55ecb6d45af88a27c6ebf208584275bf589cbc308df3f774ab9a9", + "action": "add" + } + }, + "VeilidServerRoute": { + "1": { + "version": 1, + "hash": "e676bc165601d2ede69707a4b6168ed4674f3f98887026d098a2dd4da4dfd097", + "action": "add" + } + }, + "ServerPeer": { + "1": { + "version": 1, + "hash": "0d5f252018e324ea0d2dcb5c2ad8bd15707220565fce4f14de7f63a8f9e4391b", + "action": "add" + } + }, + "ServerPeerUpdate": { + "1": { + "version": 1, + "hash": "0b854b57db7a18118c1fd8f31495b2ba4eeb9fbe4f24c631ff112418a94570d3", + "action": "add" + } + }, + "AssociationRequestChange": { + "1": { + "version": 1, + "hash": "0134ac0002879c85fc9ddb06bed6306a8905c8434b0a40d3a96ce24a7bd4da90", + "action": "add" + } + }, + "QueueItem": { + "1": { + "version": 1, + "hash": "1db212c46b6c56ccc5579cfe2141b693f0cd9286e2ede71210393e8455379bf1", + "action": "add" + } + }, + "ActionQueueItem": { + "1": { + "version": 1, + "hash": "396d579dfc2e2b36b9fbed2f204bffcca1bea7ee2db7175045dd3328ebf08718", + "action": "add" + } + }, + "APIEndpointQueueItem": { + "1": { + "version": 1, + "hash": "f04b3990a8d29c116d301e70df54d58f188895307a411dc13a666ff764ffd8dd", + "action": "add" + } + }, + "ZMQClientConfig": { + "1": { + "version": 1, + "hash": 
"36ee8f75067d5144f0ed062cdc79466caae16b7a128231d89b6b430174843bde", + "action": "add" + } + }, + "SQLiteStoreConfig": { + "1": { + "version": 1, + "hash": "ad062a5f863ae84683867d2a6a5e1d4420c010a64b88bc7b392106e33d71ac03", + "action": "add" + } + }, + "ProjectEvent": { + "1": { + "version": 1, + "hash": "dc0486c52daebd5e98c2b3b03ffd9a9a14bc3d86d8dc0c23e41ebf6c31fe2ffb", + "action": "add" + } + }, + "ProjectThreadMessage": { + "1": { + "version": 1, + "hash": "99256d7592577d1e37df94a06eabc0a287f2d79e144c51fd719315e278edb46d", + "action": "add" + } + }, + "ProjectMessage": { + "1": { + "version": 1, + "hash": "b5004b6354f71b19c81dd5f4b20bf446e0b959f5608a22707e96b944dd8175b0", + "action": "add" + } + }, + "ProjectRequestResponse": { + "1": { + "version": 1, + "hash": "52162a8a779a4a301d8755691bf4cf994c86b9f650f9e8c8a923b44e635b1bc0", + "action": "add" + } + }, + "ProjectRequest": { + "1": { + "version": 1, + "hash": "dc684135d5a5a48e5fc7988598c1e6e0de76cf1c5995f1c283fcf63d0eb4d24f", + "action": "add" + } + }, + "AnswerProjectPoll": { + "1": { + "version": 1, + "hash": "c83d83a5ba6cc034d5061df200b3f1d029aa770b1e13dbef959bb1790323dc6e", + "action": "add" + } + }, + "ProjectPoll": { + "1": { + "version": 1, + "hash": "ecf69b3b324e0bee9c82295796d44c4e8f796496cdc9db6d4302c2f160566466", + "action": "add" + } + }, + "Project": { + "1": { + "version": 1, + "hash": "de86a1163ddbcd1cc3cc2b1b5dfcb85a8ad9f9d4bbc759c2b1f92a0d0a2ff184", + "action": "add" + } + }, + "ProjectSubmit": { + "1": { + "version": 1, + "hash": "7555ba11ee5a814dcd9c45647300020f7359efc1081559940990cbd745936cac", + "action": "add" + } + }, + "Plan": { + "1": { + "version": 1, + "hash": "ed05cb87aec832098fc464ac36cd6bceaab705463d0d2fa1b2d8e1ccc510018c", + "action": "add" + } + }, + "EnclaveMetadata": { + "1": { + "version": 1, + "hash": "8d2dfafa01ec909c080a790cf15a8fc78e00382d3bfe6207098ceb25a60b9c53", + "action": "add" + } + } + } + } +} diff --git a/packages/syft/src/syft/protocol/releases/0.9.2.json b/packages/syft/src/syft/protocol/releases/0.9.2.json new file mode 100644 index 00000000000..e8dffc4ae4a --- /dev/null +++ b/packages/syft/src/syft/protocol/releases/0.9.2.json @@ -0,0 +1,132 @@ +{ + "2": { + "object_versions": { + "Notification": { + "2": { + "version": 2, + "hash": "812d3a612422fb1cf53caa13ec34a7bdfcf033a7c24b7518f527af144cb45f3c", + "action": "add" + } + }, + "SyftWorkerImage": { + "2": { + "version": 2, + "hash": "afd3a69719cd6d08b1121676ca8d80ca37be96ee5ed5893dc73733fbf47fd035", + "action": "add" + } + }, + "SyftWorker": { + "2": { + "version": 2, + "hash": "e996dabbb8ad4ff0bc5d19528077c11f73b9300d810735d367916e4e5b9149b6", + "action": "add" + } + }, + "WorkerSettings": { + "2": { + "version": 2, + "hash": "91c375dd40d06c81fc6403751ee48cbc94b9877f91e65a7e302303218dfe71fa", + "action": "add" + } + }, + "ApprovalDecision": { + "1": { + "version": 1, + "hash": "ecce7c6e01af68b0c0a73605f0c2226917f0784ecce69e9f64ce004b243252d4", + "action": "add" + } + }, + "UserCodeStatusCollection": { + "2": { + "version": 2, + "hash": "22a1574d4d2d5dcfa26791f2a5007bf3885dae707e175bf8cc20d0803ae54dec", + "action": "add" + } + }, + "UserCode": { + "2": { + "version": 2, + "hash": "726bc406449178029c04b0b21b50f86ea12b18ea5b7dd030ad7dbfc6e60f6909", + "action": "add" + } + }, + "QueueItem": { + "2": { + "version": 2, + "hash": "1d8615f6daabcd2a285b2f36fd7bef1df76cdd119dd49c02069c50fd1b9c3ff4", + "action": "add" + } + }, + "ActionQueueItem": { + "2": { + "version": 2, + "hash": 
"bfda6ef87e4045d663324bb91a215ea06e1f173aec1fb4d9ddd337cdc1f0787f", + "action": "add" + } + }, + "APIEndpointQueueItem": { + "2": { + "version": 2, + "hash": "3a46370205152fa23a7d2bfa47130dbf2e2bc7ef31f6d3fe4c92fd8d683770b5", + "action": "add" + } + }, + "MongoDict": { + "1": { + "version": 1, + "hash": "57e36f57eed75e62b29e2bac1295035a9bf2c0e3c56719dac24cb6cc685be00b", + "action": "remove" + } + }, + "JobItem": { + "1": { + "version": 1, + "hash": "0b32277b7d3b9bdc14a2a51cc9005f8254e7f7b6ec059ddcccbcd681a807afb6", + "action": "remove" + } + }, + "DictStoreConfig": { + "1": { + "version": 1, + "hash": "2e1365c5535fa51c22eef79f67dd6444789bc829c27881367e3050e06e2ffbfe", + "action": "remove" + } + }, + "NotifierSettings": { + "3": { + "version": 3, + "hash": "226c3e0d4de4368ea9eac6689427cfc27860cf51696741b8dda14f939f3d4fbe", + "action": "add" + } + }, + "EmailFrequency": { + "1": { + "version": 1, + "hash": "7659117222a461a959eac7aa1aaf280033c2ca4f1029f97e76051e0474e56759", + "action": "add" + } + }, + "CustomAPIView": { + "2": { + "version": 2, + "hash": "7eb2cd60e9526299c3f989930733b8bfd2e81d4e93a1b82217dec2e0a786ba10", + "action": "add" + } + }, + "CreateTwinAPIEndpoint": { + "2": { + "version": 2, + "hash": "f4048d6cf886ea519df25300af17912c818095288d42f7ef9183372c9c19db79", + "action": "add" + } + }, + "TwinAPIEndpoint": { + "2": { + "version": 2, + "hash": "40229be687cd4290447fe8b409ba3dc1b8d410c5dac37cebb9856fb34d7507cd", + "action": "add" + } + } + } + } +} diff --git a/packages/syft/src/syft/protocol/releases/0.9.3.json b/packages/syft/src/syft/protocol/releases/0.9.3.json new file mode 100644 index 00000000000..e0965da682b --- /dev/null +++ b/packages/syft/src/syft/protocol/releases/0.9.3.json @@ -0,0 +1,5 @@ +{ + "3": { + "object_versions": {} + } +} diff --git a/packages/syft/src/syft/protocol/releases/0.9.5.json b/packages/syft/src/syft/protocol/releases/0.9.5.json new file mode 100644 index 00000000000..8aef0bdc754 --- /dev/null +++ b/packages/syft/src/syft/protocol/releases/0.9.5.json @@ -0,0 +1,5 @@ +{ + "4": { + "object_versions": {} + } +} diff --git a/packages/syft/src/syft/serde/__init__.py b/packages/syft/src/syft/serde/__init__.py index 666be78ca11..00122b4769f 100644 --- a/packages/syft/src/syft/serde/__init__.py +++ b/packages/syft/src/syft/serde/__init__.py @@ -1,4 +1,4 @@ # relative -from .array import NOTHING # noqa: F401 F811 -from .recursive import NOTHING # noqa: F401 F811 -from .third_party import NOTHING # noqa: F401 F811 +from .array import NOTHING # noqa: F811 +from .recursive import NOTHING # noqa: F811 +from .third_party import NOTHING # noqa: F811 diff --git a/packages/syft/src/syft/serde/array.py b/packages/syft/src/syft/serde/array.py index 7d0faf56cc1..3f19e575b97 100644 --- a/packages/syft/src/syft/serde/array.py +++ b/packages/syft/src/syft/serde/array.py @@ -3,6 +3,7 @@ from numpy import frombuffer # relative +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from .arrow import numpy_deserialize from .arrow import numpy_serialize from .recursive import recursive_serde_register @@ -34,11 +35,17 @@ } recursive_serde_register( - np.ndarray, serialize=numpy_serialize, deserialize=numpy_deserialize + np.ndarray, + serialize=numpy_serialize, + deserialize=numpy_deserialize, + canonical_name="numpy_ndarray", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np._globals._NoValueType, + canonical_name="numpy_no_value", + version=SYFT_OBJECT_VERSION_1, ) # serialize=numpy_serialize, deserialize=numpy_deserialize @@ -47,84 +54,120 @@ np.bool_, 
serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.bool_)[0], + canonical_name="numpy_bool", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.int8, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.int8)[0], + canonical_name="numpy_int8", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.int16, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.int16)[0], + canonical_name="numpy_int16", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.int32, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.int32)[0], + canonical_name="numpy_int32", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.int64, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.int64)[0], + canonical_name="numpy_int64", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.uint8, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.uint8)[0], + canonical_name="numpy_uint8", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.uint16, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.uint16)[0], + canonical_name="numpy_uint16", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.uint32, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.uint32)[0], + canonical_name="numpy_uint32", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.uint64, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.uint64)[0], + canonical_name="numpy_uint64", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.single, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.single)[0], + canonical_name="numpy_single", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.double, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.double)[0], + canonical_name="numpy_double", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.float16, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.float16)[0], + canonical_name="numpy_float16", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.float32, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.float32)[0], + canonical_name="numpy_float32", + version=SYFT_OBJECT_VERSION_1, ) recursive_serde_register( np.float64, serialize=lambda x: x.tobytes(), deserialize=lambda buffer: frombuffer(buffer, dtype=np.float64)[0], + canonical_name="numpy_float64", + version=SYFT_OBJECT_VERSION_1, +) + +recursive_serde_register( + np.number, + serialize=lambda x: x.tobytes(), + deserialize=lambda buffer: frombuffer(buffer, dtype=np.number)[0], + canonical_name="numpy_number", + version=SYFT_OBJECT_VERSION_1, ) # TODO: There is an incorrect mapping in looping,which makes it not work. @@ -150,6 +193,5 @@ # deserialize=lambda buffer: frombuffer(buffer, dtype=numpy_scalar_type), # ) - # how else do you import a relative file to execute it? 
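Usage sketch (illustrative, not lines from the upstream change): the numpy registrations above now attach an explicit canonical_name and version, so the serialized wire format is keyed by a stable name rather than the Python class path. The same pattern for a hypothetical user-defined type, assuming the absolute module paths implied by the relative imports in this file (syft.serde.recursive and syft.types.syft_object), might look like:

# stdlib
from dataclasses import dataclass

# syft absolute (paths assumed from the relative imports in this diff)
from syft.serde.recursive import recursive_serde_register
from syft.types.syft_object import SYFT_OBJECT_VERSION_1


@dataclass
class Celsius:
    value: float


# serialize/deserialize consume and produce raw bytes, mirroring the numpy
# scalar registrations above; the canonical_name is a hypothetical example.
recursive_serde_register(
    Celsius,
    serialize=lambda c: str(c.value).encode(),
    deserialize=lambda b: Celsius(float(b.decode())),
    canonical_name="example_celsius",
    version=SYFT_OBJECT_VERSION_1,
)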
NOTHING = None diff --git a/packages/syft/src/syft/serde/arrow.py b/packages/syft/src/syft/serde/arrow.py index ac86a8a58b4..31a5ad1d27c 100644 --- a/packages/syft/src/syft/serde/arrow.py +++ b/packages/syft/src/syft/serde/arrow.py @@ -13,20 +13,24 @@ def arrow_serialize(obj: np.ndarray) -> bytes: - original_dtype = obj.dtype - apache_arrow = pa.Tensor.from_numpy(obj=obj) - sink = pa.BufferOutputStream() - pa.ipc.write_tensor(apache_arrow, sink) - buffer = sink.getvalue() - if flags.APACHE_ARROW_COMPRESSION is ApacheArrowCompression.NONE: - numpy_bytes = buffer.to_pybytes() - else: - numpy_bytes = pa.compress( - buffer, asbytes=True, codec=flags.APACHE_ARROW_COMPRESSION.value - ) - dtype = original_dtype.name - - return cast(bytes, _serialize((numpy_bytes, buffer.size, dtype), to_bytes=True)) + # inner function to make sure variables go out of scope after this + def inner(obj: np.ndarray) -> tuple: + original_dtype = obj.dtype + apache_arrow = pa.Tensor.from_numpy(obj=obj) + sink = pa.BufferOutputStream() + pa.ipc.write_tensor(apache_arrow, sink) + buffer = sink.getvalue() + if flags.APACHE_ARROW_COMPRESSION is ApacheArrowCompression.NONE: + numpy_bytes = buffer.to_pybytes() + else: + numpy_bytes = pa.compress( + buffer, asbytes=True, codec=flags.APACHE_ARROW_COMPRESSION.value + ) + dtype = original_dtype.name + return (numpy_bytes, buffer.size, dtype) + + m = inner(obj) + return cast(bytes, _serialize(m, to_bytes=True)) def arrow_deserialize( diff --git a/packages/syft/src/syft/serde/capnp.py b/packages/syft/src/syft/serde/capnp.py index 1981a76b07b..bfc77bfdf90 100644 --- a/packages/syft/src/syft/serde/capnp.py +++ b/packages/syft/src/syft/serde/capnp.py @@ -1,6 +1,6 @@ # stdlib -import os -from pathlib import Path +from importlib.resources import as_file +from importlib.resources import files # third party import capnp @@ -10,9 +10,6 @@ def get_capnp_schema(schema_file: str) -> type: - here = os.path.dirname(__file__) - root_dir = Path(here) / ".." 
/ "capnp" - capnp_path = os.path.abspath(root_dir / schema_file) - - with std_stream_capture(): - return capnp.load(str(capnp_path)) + with as_file(files("syft.capnp").joinpath(schema_file)) as capnp_path: + with std_stream_capture(): + return capnp.load(str(capnp_path.absolute())) diff --git a/packages/syft/src/syft/serde/json_serde.py b/packages/syft/src/syft/serde/json_serde.py new file mode 100644 index 00000000000..ee86241716c --- /dev/null +++ b/packages/syft/src/syft/serde/json_serde.py @@ -0,0 +1,452 @@ +# stdlib +import base64 +from collections.abc import Callable +from dataclasses import dataclass +from enum import Enum +import json +import typing +from typing import Any +from typing import Generic +from typing import TypeVar +from typing import Union +from typing import get_args +from typing import get_origin + +# third party +import pydantic + +# syft absolute +import syft as sy + +# relative +from ..server.credentials import SyftSigningKey +from ..server.credentials import SyftVerifyKey +from ..types.datetime import DateTime +from ..types.syft_object import BaseDateTime +from ..types.syft_object_registry import SyftObjectRegistry +from ..types.uid import LineageID +from ..types.uid import UID +from .recursive import DEFAULT_EXCLUDE_ATTRS + +T = TypeVar("T") + +JSON_CANONICAL_NAME_FIELD = "__canonical_name__" +JSON_VERSION_FIELD = "__version__" +JSON_DATA_FIELD = "data" + +JsonPrimitive = str | int | float | bool | None +Json = JsonPrimitive | list["Json"] | dict[str, "Json"] + + +def _noop_fn(obj: Any) -> Any: + return obj + + +@dataclass +class JSONSerde(Generic[T]): + klass: type[T] + serialize_fn: Callable[[T], Json] + deserialize_fn: Callable[[Json], T] + + def serialize(self, obj: T) -> Json: + return self.serialize_fn(obj) + + def deserialize(self, obj: Json) -> T: + return self.deserialize_fn(obj) + + +JSON_SERDE_REGISTRY: dict[type[T], JSONSerde[T]] = {} + + +def register_json_serde( + type_: type[T], + serialize: Callable[[T], Json] | None = None, + deserialize: Callable[[Json], T] | None = None, +) -> None: + if type_ in JSON_SERDE_REGISTRY: + raise ValueError(f"Type {type_} is already registered") + + if serialize is None: + serialize = _noop_fn + + if deserialize is None: + deserialize = _noop_fn + + JSON_SERDE_REGISTRY[type_] = JSONSerde( + klass=type_, + serialize_fn=serialize, + deserialize_fn=deserialize, + ) + + +# Standard JSON primitives +register_json_serde(int) +register_json_serde(str) +register_json_serde(bool) +register_json_serde(float) +register_json_serde(type(None)) +register_json_serde(pydantic.EmailStr) + +# Syft primitives +register_json_serde(UID, lambda uid: uid.no_dash, lambda s: UID(s)) +register_json_serde(LineageID, lambda uid: uid.no_dash, lambda s: LineageID(s)) +register_json_serde( + DateTime, lambda dt: dt.utc_timestamp, lambda f: DateTime(utc_timestamp=f) +) +register_json_serde( + BaseDateTime, lambda dt: dt.utc_timestamp, lambda f: BaseDateTime(utc_timestamp=f) +) +register_json_serde(SyftVerifyKey, lambda key: str(key), SyftVerifyKey.from_string) +register_json_serde(SyftSigningKey, lambda key: str(key), SyftSigningKey.from_string) + + +def _validate_json(value: T) -> T: + # Throws TypeError if value is not JSON-serializable + json.dumps(value) + return value + + +def _is_optional_annotation(annotation: Any) -> bool: + try: + return annotation | None == annotation + except TypeError: + return False + + +def _is_annotated_type(annotation: Any) -> bool: + return get_origin(annotation) == typing.Annotated + + +def 
_unwrap_optional_annotation(annotation: Any) -> Any: + """Return the type anntation with None type removed, if it is present. + + Args: + annotation (Any): type annotation + + Returns: + Any: type annotation without None type + """ + if _is_optional_annotation(annotation): + args = get_args(annotation) + return Union[tuple(arg for arg in args if arg is not type(None))] # noqa + return annotation + + +def _unwrap_annotated(annotation: Any) -> Any: + # Convert Annotated[T, ...] to T + return get_args(annotation)[0] + + +def _unwrap_type_annotation(annotation: Any) -> Any: + """ + recursively unwrap type annotations, removing Annotated and Optional types + """ + if _is_annotated_type(annotation): + res = _unwrap_annotated(annotation) + return _unwrap_type_annotation(res) + elif _is_optional_annotation(annotation): + res = _unwrap_optional_annotation(annotation) + return _unwrap_type_annotation(res) + return annotation + + +def _annotation_issubclass(annotation: Any, cls: type) -> bool: + # issubclass throws TypeError if annotation is not a valid type (eg Union) + try: + return issubclass(annotation, cls) + except TypeError: + return False + + +def _serialize_pydantic_to_json(obj: pydantic.BaseModel) -> dict[str, Json]: + canonical_name, version = SyftObjectRegistry.get_canonical_name_version(obj) + serde_attributes = SyftObjectRegistry.get_serde_properties(canonical_name, version) + exclude_attrs = serde_attributes[4] + + result: dict[str, Json] = { + JSON_CANONICAL_NAME_FIELD: canonical_name, + JSON_VERSION_FIELD: version, + } + + all_exclude_attrs = set(exclude_attrs) | DEFAULT_EXCLUDE_ATTRS + + for key, type_ in obj.model_fields.items(): + if key in all_exclude_attrs: + continue + result[key] = serialize_json(getattr(obj, key), type_.annotation) + + result = _add_searchable_and_unique_attrs(obj, result, raise_errors=False) + + return result + + +def get_property_return_type(obj: Any, attr_name: str) -> Any: + """ + Get the return type annotation of a @property. + """ + cls = type(obj) + attr = getattr(cls, attr_name, None) + + if isinstance(attr, property): + return attr.fget.__annotations__.get("return", None) + + return None + + +def _add_searchable_and_unique_attrs( + obj: pydantic.BaseModel, obj_dict: dict[str, Json], raise_errors: bool = True +) -> dict[str, Json]: + """ + Add searchable attrs and unique attrs to the serialized object dict, if they are not already present. + Needed for adding non-field attributes (like @property) + + Args: + obj (pydantic.BaseModel): Object to serialize. + obj_dict (dict[str, Json]): Serialized object dict. Should contain the object's fields. + raise_errors (bool, optional): Raise errors if an attribute cannot be accessed. + If False, the attribute will be skipped. Defaults to True. + + Raises: + Exception: Any exception raised when accessing an attribute. + + Returns: + dict[str, Json]: Serialized object dict including searchable attributes. 
+ """ + searchable_attrs: list[str] = getattr(obj, "__attr_searchable__", []) + unique_attrs: list[str] = getattr(obj, "__attr_unique__", []) + + attrs_to_add = set(searchable_attrs) | set(unique_attrs) + for attr in attrs_to_add: + if attr not in obj_dict: + try: + value = getattr(obj, attr) + except Exception as e: + if raise_errors: + raise e + else: + continue + property_annotation = get_property_return_type(obj, attr) + obj_dict[attr] = serialize_json( + value, validate=False, annotation=property_annotation + ) + + return obj_dict + + +def _deserialize_pydantic_from_json( + obj_dict: dict[str, Json], +) -> pydantic.BaseModel: + try: + canonical_name = obj_dict[JSON_CANONICAL_NAME_FIELD] + version = obj_dict[JSON_VERSION_FIELD] + obj_type = SyftObjectRegistry.get_serde_class(canonical_name, version) + + result = {} + for key, type_ in obj_type.model_fields.items(): + if key not in obj_dict: + continue + result[key] = deserialize_json(obj_dict[key], type_.annotation) + + return obj_type.model_validate(result) + except Exception as e: + print(f"Failed to deserialize Pydantic model: {e}") + print(json.dumps(obj_dict, indent=2)) + raise ValueError(f"Failed to deserialize Pydantic model: {e}") + + +def _is_serializable_iterable(annotation: Any) -> bool: + # we can only serialize typed iterables without Union/Any + # NOTE optional is allowed + + # 1. check if it is an iterable + if get_origin(annotation) not in {list, tuple, set, frozenset}: + return False + + # 2. check if iterable annotation is serializable + args = get_args(annotation) + if len(args) != 1: + return False + + inner_type = _unwrap_type_annotation(args[0]) + return inner_type in JSON_SERDE_REGISTRY or _annotation_issubclass( + inner_type, pydantic.BaseModel + ) + + +def _serialize_iterable_to_json(value: Any, annotation: Any) -> Json: + # No need to validate in recursive calls + return [serialize_json(v, validate=False) for v in value] + + +def _deserialize_iterable_from_json(value: Json, annotation: Any) -> Any: + if not isinstance(value, list): + raise ValueError(f"Cannot deserialize {type(value)} to {annotation}") + + annotation = _unwrap_type_annotation(annotation) + + if not _is_serializable_iterable(annotation): + raise ValueError(f"Cannot deserialize {annotation} from JSON") + + inner_type = _unwrap_type_annotation(get_args(annotation)[0]) + return [deserialize_json(v, inner_type) for v in value] + + +def _is_serializable_mapping(annotation: Any) -> bool: + """ + Mapping is serializable if: + - it is a dict + - the key type is str + - the value type is serializable and not a Union + """ + if get_origin(annotation) != dict: + return False + + args = get_args(annotation) + if len(args) != 2: + return False + + key_type, value_type = args + # JSON only allows string keys + if not isinstance(key_type, str): + return False + + # check if value type is serializable + value_type = _unwrap_type_annotation(value_type) + return value_type in JSON_SERDE_REGISTRY or _annotation_issubclass( + value_type, pydantic.BaseModel + ) + + +def _serialize_mapping_to_json(value: Any, annotation: Any) -> Json: + _, value_type = get_args(annotation) + # No need to validate in recursive calls + return {k: serialize_json(v, value_type, validate=False) for k, v in value.items()} + + +def _deserialize_mapping_from_json(value: Json, annotation: Any) -> Any: + if not isinstance(value, dict): + raise ValueError(f"Cannot deserialize {type(value)} to {annotation}") + + annotation = _unwrap_type_annotation(annotation) + + if not 
_is_serializable_mapping(annotation): + raise ValueError(f"Cannot deserialize {annotation} from JSON") + + _, value_type = get_args(annotation) + return {k: deserialize_json(v, value_type) for k, v in value.items()} + + +def _serialize_to_json_bytes(obj: Any) -> str: + obj_bytes = sy.serialize(obj, to_bytes=True) + return base64.b64encode(obj_bytes).decode("utf-8") + + +def _deserialize_from_json_bytes(obj: str) -> Any: + obj_bytes = base64.b64decode(obj) + return sy.deserialize(obj_bytes, from_bytes=True) + + +def serialize_json(value: Any, annotation: Any = None, validate: bool = True) -> Json: + """ + Serialize a value to a JSON-serializable object, using the schema defined by the + provided annotation. + + Serialization is always done according to the annotation, as the same annotation + is used for deserialization. If the annotation is not provided or is ambiguous, + the JSON serialization will fall back to serializing bytes. Examples: + - int, `list[int]` are strictly typed + - `str | int`, `list`, `list[str | int]`, `list[Any]` are ambiguous and serialized to bytes + - Optional types (like int | None) are serialized to the not-None type + + The function chooses the appropriate serialization method in the following order: + 1. Method registered in `JSON_SERDE_REGISTRY` for the annotation type. + 2. Pydantic model serialization, including all `SyftObjects`. + 3. Iterable serialization, if the annotation is a strict iterable (e.g., `list[int]`). + 4. Mapping serialization, if the annotation is a strictly typed mapping with string keys. + 5. Serialize the object to bytes and encode it as base64. + + Args: + value (Any): Value to serialize. + annotation (Any, optional): Type annotation for the value. Defaults to None. + + Returns: + Json: JSON-serializable object. + """ + if annotation is None: + annotation = type(value) + + if value is None: + return None + + # Remove None type from annotation if it is present. + annotation = _unwrap_type_annotation(annotation) + + if annotation in JSON_SERDE_REGISTRY: + result = JSON_SERDE_REGISTRY[annotation].serialize(value) + elif _annotation_issubclass(annotation, pydantic.BaseModel): + result = _serialize_pydantic_to_json(value) + elif _annotation_issubclass(annotation, Enum): + result = value.name + + # JSON recursive types + # only strictly annotated iterables and mappings are supported + # example: list[int] is supported, but not list[int | str] + elif _is_serializable_iterable(annotation): + result = _serialize_iterable_to_json(value, annotation) + elif _is_serializable_mapping(annotation): + result = _serialize_mapping_to_json(value, annotation) + else: + result = _serialize_to_json_bytes(value) + + if validate: + _validate_json(result) + + return result + + +def deserialize_json(value: Json, annotation: Any = None) -> Any: + """Deserialize a JSON-serializable object to a value, using the schema defined by the + provided annotation. Inverse of `serialize_json`. + + Args: + value (Json): JSON-serializable object. + annotation (Any): Type annotation for the value. + + Returns: + Any: Deserialized value. + """ + if ( + isinstance(value, dict) + and JSON_CANONICAL_NAME_FIELD in value + and JSON_VERSION_FIELD in value + ): + return _deserialize_pydantic_from_json(value) + + if value is None: + return None + + # Remove None type from annotation if it is present. 
+ if annotation is None: + raise ValueError("Annotation is required for deserialization") + + annotation = _unwrap_type_annotation(annotation) + + if annotation in JSON_SERDE_REGISTRY: + return JSON_SERDE_REGISTRY[annotation].deserialize(value) + elif _annotation_issubclass(annotation, pydantic.BaseModel): + return _deserialize_pydantic_from_json(value) + elif _annotation_issubclass(annotation, Enum): + return annotation[value] + elif isinstance(value, list): + return _deserialize_iterable_from_json(value, annotation) + elif isinstance(value, dict): + return _deserialize_mapping_from_json(value, annotation) + elif isinstance(value, str): + return _deserialize_from_json_bytes(value) + else: + raise ValueError(f"Cannot deserialize {value} to {annotation}") + + +def is_json_primitive(value: Any) -> bool: + serialized = serialize_json(value, validate=False) + return isinstance(serialized, JsonPrimitive) # type: ignore diff --git a/packages/syft/src/syft/serde/lib_permissions.py b/packages/syft/src/syft/serde/lib_permissions.py index 98ae4968fa3..751f72df410 100644 --- a/packages/syft/src/syft/serde/lib_permissions.py +++ b/packages/syft/src/syft/serde/lib_permissions.py @@ -6,13 +6,13 @@ from .serializable import serializable -@serializable() +@serializable(canonical_name="CMPCRUDPermission", version=1) class CMPCRUDPermission(Enum): NONE_EXECUTE = 1 ALL_EXECUTE = 2 -@serializable() +@serializable(canonical_name="CMPPermission", version=1) class CMPPermission: @property def permissions_string(self) -> str: @@ -22,7 +22,7 @@ def __repr__(self) -> str: return self.permission_string -@serializable() +@serializable(canonical_name="CMPUserPermission", version=1) class CMPUserPermission(CMPPermission): def __init__(self, user_id: UID, permission: CMPCRUDPermission): self.user_id = user_id @@ -36,7 +36,7 @@ def __repr__(self) -> str: return self.permission_string -@serializable() +@serializable(canonical_name="CMPCompoundPermission", version=1) class CMPCompoundPermission(CMPPermission): def __init__(self, permission: CMPCRUDPermission): self.permissions = permission diff --git a/packages/syft/src/syft/serde/recursive.py b/packages/syft/src/syft/serde/recursive.py index a876b2b57f0..d5694b4efe6 100644 --- a/packages/syft/src/syft/serde/recursive.py +++ b/packages/syft/src/syft/serde/recursive.py @@ -2,7 +2,8 @@ from collections.abc import Callable from enum import Enum from enum import EnumMeta -import sys +import os +import tempfile import types from typing import Any @@ -14,14 +15,18 @@ import syft as sy # relative -from ..util.util import get_fully_qualified_name -from ..util.util import index_syft_by_module_name +from ..types.syft_object_registry import SyftObjectRegistry from .capnp import get_capnp_schema +from .util import compatible_with_large_file_writes_capnp -TYPE_BANK = {} +TYPE_BANK = {} # type: ignore +SYFT_CLASSES_MISSING_CANONICAL_NAME = [] recursive_scheme = get_capnp_schema("recursive_serde.capnp").RecursiveSerde +SPOOLED_FILE_MAX_SIZE_SERDE = 50 * (1024**2) # 50MB +DEFAULT_EXCLUDE_ATTRS: set[str] = {"syft_pre_hooks__", "syft_post_hooks__"} + def get_types(cls: type, keys: list[str] | None = None) -> list[type] | None: if keys is None: @@ -43,7 +48,7 @@ def get_types(cls: type, keys: list[str] | None = None) -> list[type] | None: return types -def check_fqn_alias(cls: object | type) -> tuple | None: +def check_fqn_alias(cls: object | type) -> tuple[str, ...] | None: """Currently, typing.Any has different metaclasses in different versions of Python 🤦‍♂️. 
For Python <=3.10 Any is an instance of typing._SpecialForm @@ -74,6 +79,62 @@ def check_fqn_alias(cls: object | type) -> tuple | None: return None +def has_canonical_name_version( + cls: type, cannonical_name: str | None, version: int | None +) -> bool: + cls_canonical_name = getattr(cls, "__canonical_name__", None) + cls_version = getattr(cls, "__version__", None) + return bool(cls_canonical_name or cannonical_name) and bool(cls_version or version) + + +def validate_cannonical_name_version( + cls: type, canonical_name: str | None, version: int | None +) -> tuple[str, int]: + cls_canonical_name = getattr(cls, "__canonical_name__", None) + cls_version = getattr(cls, "__version__", None) + if cls_canonical_name and canonical_name: + raise ValueError( + "Cannot specify both __canonical_name__ attribute and cannonical_name argument." + ) + if cls_version and version: + raise ValueError( + "Cannot specify both __version__ attribute and version argument." + ) + if cls_canonical_name is None and canonical_name is None: + raise ValueError( + "Must specify either __canonical_name__ attribute or cannonical_name argument." + ) + if cls_version is None and version is None: + raise ValueError( + "Must specify either __version__ attribute or version argument." + ) + + canonical_name = canonical_name or cls_canonical_name + version = version or cls_version + return canonical_name, version # type: ignore + + +def skip_unregistered_class( + cls: type, canonical_name: str | None, version: str | None +) -> bool: + """ + Used to gather all classes that are missing canonical_name and version for development. + + Returns True if the class should be skipped, False otherwise. + """ + + search_unregistered_classes = ( + os.getenv("SYFT_SEARCH_MISSING_CANONICAL_NAME", False) == "true" + ) + if not search_unregistered_classes: + return False + if not has_canonical_name_version(cls, canonical_name, version): + if cls.__module__.startswith("syft."): + SYFT_CLASSES_MISSING_CANONICAL_NAME.append(cls) + return True + return False + + def recursive_serde_register( cls: object | type, serialize: Callable | None = None, @@ -82,14 +143,21 @@ def recursive_serde_register( exclude_attrs: list | None = None, inherit_attrs: bool | None = True, inheritable_attrs: bool | None = True, + canonical_name: str | None = None, + version: int | None = None, ) -> None: pydantic_fields = None base_attrs = None attribute_list: set[str] = set() - alias_fqn = check_fqn_alias(cls) cls = type(cls) if not isinstance(cls, type) else cls - fqn = f"{cls.__module__}.{cls.__name__}" + + if skip_unregistered_class(cls, canonical_name, version): + return + + canonical_name, version = validate_cannonical_name_version( + cls, canonical_name, version + ) nonrecursive = bool(serialize and deserialize) _serialize = serialize if nonrecursive else rs_object2proto @@ -125,9 +193,7 @@ def recursive_serde_register( attribute_list.update(["value"]) exclude_attrs = [] if exclude_attrs is None else exclude_attrs - attribute_list = ( - attribute_list - set(exclude_attrs) - {"syft_pre_hooks__", "syft_post_hooks__"} - ) + attribute_list = attribute_list - set(exclude_attrs) - DEFAULT_EXCLUDE_ATTRS if inheritable_attrs and attribute_list and not is_pydantic: # only set __syft_serializable__ for non-pydantic classes because @@ -137,7 +203,6 @@ def recursive_serde_register( attributes = set(attribute_list) if attribute_list else None attribute_types = get_types(cls, attributes) serde_overrides = getattr(cls, "__serde_overrides__", {}) - version = getattr(cls, 
"__version__", None) # without fqn duplicate class names overwrite serde_attributes = ( @@ -153,24 +218,46 @@ def recursive_serde_register( version, ) - TYPE_BANK[fqn] = serde_attributes + SyftObjectRegistry.register_cls(canonical_name, version, serde_attributes) + alias_fqn = check_fqn_alias(cls) if isinstance(alias_fqn, tuple): for alias in alias_fqn: - TYPE_BANK[alias] = serde_attributes + alias_canonical_name = canonical_name + f"_{alias}" + SyftObjectRegistry.register_cls(alias_canonical_name, 1, serde_attributes) def chunk_bytes( - data: bytes, field_name: str | int, builder: _DynamicStructBuilder + field_obj: Any, + ser_func: Callable, + field_name: str | int, + builder: _DynamicStructBuilder, ) -> None: - CHUNK_SIZE = int(5.12e8) # capnp max for a List(Data) field - list_size = len(data) // CHUNK_SIZE + 1 - data_lst = builder.init(field_name, list_size) - END_INDEX = CHUNK_SIZE - for idx in range(list_size): - START_INDEX = idx * CHUNK_SIZE - END_INDEX = min(START_INDEX + CHUNK_SIZE, len(data)) - data_lst[idx] = data[START_INDEX:END_INDEX] + data = ser_func(field_obj) + size_of_data = len(data) + if compatible_with_large_file_writes_capnp(size_of_data): + with tempfile.TemporaryFile() as tmp_file: + # Write data to a file to save RAM + tmp_file.write(data) + tmp_file.seek(0) + del data + + CHUNK_SIZE = int(5.12e8) # capnp max for a List(Data) field + list_size = size_of_data // CHUNK_SIZE + 1 + data_lst = builder.init(field_name, list_size) + for idx in range(list_size): + bytes_to_read = min(CHUNK_SIZE, size_of_data) + data_lst[idx] = tmp_file.read(bytes_to_read) + size_of_data -= CHUNK_SIZE + else: + CHUNK_SIZE = int(5.12e8) # capnp max for a List(Data) field + list_size = len(data) // CHUNK_SIZE + 1 + data_lst = builder.init(field_name, list_size) + END_INDEX = CHUNK_SIZE + for idx in range(list_size): + START_INDEX = idx * CHUNK_SIZE + END_INDEX = min(START_INDEX + CHUNK_SIZE, len(data)) + data_lst[idx] = data[START_INDEX:END_INDEX] def combine_bytes(capnp_list: list[bytes]) -> bytes: @@ -191,31 +278,38 @@ def rs_object2proto(self: Any, for_hashing: bool = False) -> _DynamicStructBuild is_type = True msg = recursive_scheme.new_message() - fqn = get_fully_qualified_name(self) - if fqn not in TYPE_BANK: + + # todo: rewrite and make sure every object has a canonical name and version + canonical_name, version = SyftObjectRegistry.get_canonical_name_version(self) + + if not SyftObjectRegistry.has_serde_class(canonical_name, version): # third party - raise Exception(f"{fqn} not in TYPE_BANK") + raise Exception( + f"obj2proto: {canonical_name} version {version} not in SyftObjectRegistry" + ) + + msg.canonicalName = canonical_name + msg.version = version - msg.fullyQualifiedName = fqn ( nonrecursive, serialize, - deserialize, + _, attribute_list, exclude_attrs_list, serde_overrides, hash_exclude_attrs, - cls, - attribute_types, - version, - ) = TYPE_BANK[fqn] + _, + _, + _, + ) = SyftObjectRegistry.get_serde_properties(canonical_name, version) if nonrecursive or is_type: if serialize is None: raise Exception( f"Cant serialize {type(self)} nonrecursive without serialize." 
) - chunk_bytes(serialize(self), "nonrecursiveBlob", msg) + chunk_bytes(self, serialize, "nonrecursiveBlob", msg) return msg if attribute_list is None: @@ -248,9 +342,13 @@ def rs_object2proto(self: Any, for_hashing: bool = False) -> _DynamicStructBuild if isinstance(field_obj, types.FunctionType): continue - serialized = sy.serialize(field_obj, to_bytes=True, for_hashing=for_hashing) msg.fieldsName[idx] = attr_name - chunk_bytes(serialized, idx, msg.fieldsData) + chunk_bytes( + field_obj, + lambda x: sy.serialize(x, to_bytes=True, for_hashing=for_hashing), + idx, + msg.fieldsData, + ) return msg @@ -264,35 +362,39 @@ def rs_bytes2object(blob: bytes) -> Any: return rs_proto2object(msg) +def map_fqns_for_backward_compatibility(fqn: str) -> str: + """for backwards compatibility with 0.8.6. Sometimes classes where moved to another file. Which is + exactly why we are implementing it differently""" + mapping = { + "syft.service.dataset.dataset.MarkdownDescription": "syft.util.misc_objs.MarkdownDescription", + # "syft.service.object_search.object_migration_state.SyftObjectMigrationState": "syft.service.migration.object_migration_state.SyftObjectMigrationState", # noqa: E501 + } + if fqn in mapping: + return mapping[fqn] + else: + return fqn + + def rs_proto2object(proto: _DynamicStructBuilder) -> Any: # relative from .deserialize import _deserialize - # clean this mess, Tudor - module_parts = proto.fullyQualifiedName.split(".") - klass = module_parts.pop() class_type: type | Any = type(None) - if klass != "NoneType": - try: - class_type = index_syft_by_module_name(proto.fullyQualifiedName) # type: ignore[assignment,unused-ignore] - except Exception: # nosec - try: - class_type = getattr(sys.modules[".".join(module_parts)], klass) - except Exception: # nosec - if "syft.user" in proto.fullyQualifiedName: - # relative - from ..node.node import CODE_RELOADER - - for _, load_user_code in CODE_RELOADER.items(): - load_user_code() - try: - class_type = getattr(sys.modules[".".join(module_parts)], klass) - except Exception: # nosec - pass - - if proto.fullyQualifiedName not in TYPE_BANK: - raise Exception(f"{proto.fullyQualifiedName} not in TYPE_BANK") + canonical_name = proto.canonicalName + version = getattr(proto, "version", -1) + + if not SyftObjectRegistry.has_serde_class(canonical_name, version): + # relative + from ..server.server import CODE_RELOADER + + for load_user_code in CODE_RELOADER.values(): + load_user_code() + # third party + if not SyftObjectRegistry.has_serde_class(canonical_name, version): + raise Exception( + f"proto2obj: {canonical_name} version {version} not in SyftObjectRegistry" + ) # TODO: 🐉 sort this out, basically sometimes the syft.user classes are not in the # module name space in sub-processes or threads even though they are loaded on start @@ -301,20 +403,18 @@ def rs_proto2object(proto: _DynamicStructBuilder) -> Any: # causes some errors so it seems like we want to get the local one where possible ( nonrecursive, - serialize, + _, deserialize, - attribute_list, - exclude_attrs_list, + _, + _, serde_overrides, - hash_exclude_attrs, + _, cls, - attribute_types, + _, version, - ) = TYPE_BANK[proto.fullyQualifiedName] + ) = SyftObjectRegistry.get_serde_properties(canonical_name, version) - if class_type == type(None): - # yes this looks stupid but it works and the opposite breaks - class_type = cls + class_type = cls if nonrecursive: if deserialize is None: @@ -345,14 +445,15 @@ def rs_proto2object(proto: _DynamicStructBuilder) -> Any: # if we skip the __new__ flow of 
BaseModel we get the error # AttributeError: object has no attribute '__fields_set__' - if "syft.user" in proto.fullyQualifiedName: - # weird issues with pydantic and ForwardRef on user classes being inited - # with custom state args / kwargs - obj = class_type() - for attr_name, attr_value in kwargs.items(): - setattr(obj, attr_name, attr_value) - else: - obj = class_type(**kwargs) + # if "syft.user" in proto.fullyQualifiedName: + # # weird issues with pydantic and ForwardRef on user classes being inited + # # with custom state args / kwargs + # obj = class_type() + # for attr_name, attr_value in kwargs.items(): + # setattr(obj, attr_name, attr_value) + # else: + # obj = class_type(**kwargs) + obj = class_type(**kwargs) else: obj = class_type.__new__(class_type) # type: ignore diff --git a/packages/syft/src/syft/serde/recursive_primitives.py b/packages/syft/src/syft/serde/recursive_primitives.py index fe74dec92ed..38d8281434d 100644 --- a/packages/syft/src/syft/serde/recursive_primitives.py +++ b/packages/syft/src/syft/serde/recursive_primitives.py @@ -1,4 +1,5 @@ # stdlib +from abc import ABCMeta from collections import OrderedDict from collections import defaultdict from collections.abc import Collection @@ -7,11 +8,14 @@ from enum import Enum from enum import EnumMeta import functools +import inspect import pathlib from pathlib import PurePath import sys +import tempfile from types import MappingProxyType from types import UnionType +import typing from typing import Any from typing import GenericAlias from typing import Optional @@ -25,10 +29,12 @@ import weakref # relative +from ..types.syft_object_registry import SyftObjectRegistry from .capnp import get_capnp_schema from .recursive import chunk_bytes from .recursive import combine_bytes from .recursive import recursive_serde_register +from .util import compatible_with_large_file_writes_capnp iterable_schema = get_capnp_schema("iterable.capnp").Iterable kv_iterable_schema = get_capnp_schema("kv_iterable.capnp").KVIterable @@ -43,10 +49,21 @@ def serialize_iterable(iterable: Collection) -> bytes: message.init("values", len(iterable)) for idx, it in enumerate(iterable): - serialized = _serialize(it, to_bytes=True) - chunk_bytes(serialized, idx, message.values) - - return message.to_bytes() + # serialized = _serialize(it, to_bytes=True) + chunk_bytes(it, lambda x: _serialize(x, to_bytes=True), idx, message.values) + + if compatible_with_large_file_writes_capnp(message): + with tempfile.TemporaryFile() as tmp_file: + # Write data to a file to save RAM + message.write(tmp_file) + del message + tmp_file.seek(0) + res = tmp_file.read() + return res + else: + res = message.to_bytes() + del message + return res def deserialize_iterable(iterable_type: type, blob: bytes) -> Collection: @@ -54,13 +71,14 @@ def deserialize_iterable(iterable_type: type, blob: bytes) -> Collection: from .deserialize import _deserialize MAX_TRAVERSAL_LIMIT = 2**64 - 1 - values = [] with iterable_schema.from_bytes( blob, traversal_limit_in_words=MAX_TRAVERSAL_LIMIT ) as msg: - for element in msg.values: - values.append(_deserialize(combine_bytes(element), from_bytes=True)) + values = [ + _deserialize(combine_bytes(element), from_bytes=True) + for element in msg.values + ] return iterable_type(values) @@ -80,8 +98,8 @@ def _serialize_kv_pairs(size: int, kv_pairs: Iterable[tuple[_KT, _VT]]) -> bytes for index, (k, v) in enumerate(kv_pairs): message.keys[index] = _serialize(k, to_bytes=True) - serialized = _serialize(v, to_bytes=True) - chunk_bytes(serialized, index, 
message.values) + # serialized = _serialize(v, to_bytes=True) + chunk_bytes(v, lambda x: _serialize(x, to_bytes=True), index, message.values) return message.to_bytes() @@ -154,22 +172,31 @@ def deserialize_enum(enum_type: type, enum_buf: bytes) -> Enum: return enum_type(enum_value) -def serialize_type(serialized_type: type) -> bytes: +def serialize_type(_type_to_serialize: type) -> bytes: # relative - from ..util.util import full_name_with_qualname + type_to_serialize = typing.get_origin(_type_to_serialize) or _type_to_serialize + canonical_name, version = SyftObjectRegistry.get_identifier_for_type( + type_to_serialize + ) + return f"{canonical_name}:{version}".encode() - fqn = full_name_with_qualname(klass=serialized_type) - module_parts = fqn.split(".") - return ".".join(module_parts).encode() + # from ..util.util import full_name_with_qualname + + # fqn = full_name_with_qualname(klass=serialized_type) + # module_parts = fqn.split(".") + # return ".".join(module_parts).encode() def deserialize_type(type_blob: bytes) -> type: deserialized_type = type_blob.decode() - module_parts = deserialized_type.split(".") - klass = module_parts.pop() - klass = "None" if klass == "NoneType" else klass - exception_type = getattr(sys.modules[".".join(module_parts)], klass) - return exception_type + canonical_name, version = deserialized_type.split(":", 1) + return SyftObjectRegistry.get_serde_class(canonical_name, int(version)) + + # module_parts = deserialized_type.split(".") + # klass = module_parts.pop() + # klass = "None" if klass == "NoneType" else klass + # exception_type = getattr(sys.modules[".".join(module_parts)], klass) + # return exception_type TPath = TypeVar("TPath", bound=PurePath) @@ -195,111 +222,163 @@ def deserialize_path(path_type: type[TPath], buf: bytes) -> TPath: int, serialize=lambda x: x.to_bytes((x.bit_length() + 7) // 8 + 1, "big", signed=True), deserialize=lambda x_bytes: int.from_bytes(x_bytes, "big", signed=True), + canonical_name="int", + version=1, ) recursive_serde_register( float, serialize=lambda x: x.hex().encode(), deserialize=lambda x: float.fromhex(x.decode()), + canonical_name="float", + version=1, ) -recursive_serde_register(bytes, serialize=lambda x: x, deserialize=lambda x: x) +recursive_serde_register( + bytes, + serialize=lambda x: x, + deserialize=lambda x: x, + canonical_name="bytes", + version=1, +) recursive_serde_register( - str, serialize=lambda x: x.encode(), deserialize=lambda x: x.decode() + str, + serialize=lambda x: x.encode(), + deserialize=lambda x: x.decode(), + canonical_name="str", + version=1, ) recursive_serde_register( list, serialize=serialize_iterable, deserialize=functools.partial(deserialize_iterable, list), + canonical_name="list", + version=1, ) recursive_serde_register( tuple, serialize=serialize_iterable, deserialize=functools.partial(deserialize_iterable, tuple), + canonical_name="tuple", + version=1, ) recursive_serde_register( - dict, serialize=serialize_kv, deserialize=functools.partial(deserialize_kv, dict) + dict, + serialize=serialize_kv, + deserialize=functools.partial(deserialize_kv, dict), + canonical_name="dict", + version=1, ) recursive_serde_register( defaultdict, serialize=serialize_defaultdict, deserialize=deserialize_defaultdict, + canonical_name="defaultdict", + version=1, ) recursive_serde_register( OrderedDict, serialize=serialize_kv, deserialize=functools.partial(deserialize_kv, OrderedDict), + canonical_name="OrderedDict", + version=1, ) recursive_serde_register( - type(None), serialize=lambda _: b"1", 
deserialize=lambda _: None + type(None), + serialize=lambda _: b"1", + deserialize=lambda _: None, + canonical_name="NoneType", + version=1, ) recursive_serde_register( bool, serialize=lambda x: b"1" if x else b"0", deserialize=lambda x: False if x == b"0" else True, + canonical_name="bool", + version=1, ) recursive_serde_register( set, serialize=serialize_iterable, deserialize=functools.partial(deserialize_iterable, set), + canonical_name="set", + version=1, ) recursive_serde_register( weakref.WeakSet, serialize=serialize_iterable, deserialize=functools.partial(deserialize_iterable, weakref.WeakSet), + canonical_name="WeakSet", + version=1, ) recursive_serde_register( frozenset, serialize=serialize_iterable, deserialize=functools.partial(deserialize_iterable, frozenset), + canonical_name="frozenset", + version=1, ) recursive_serde_register( complex, serialize=lambda x: serialize_iterable((x.real, x.imag)), deserialize=lambda x: complex(*deserialize_iterable(tuple, x)), + canonical_name="complex", + version=1, ) recursive_serde_register( range, serialize=lambda x: serialize_iterable((x.start, x.stop, x.step)), deserialize=lambda x: range(*deserialize_iterable(tuple, x)), + canonical_name="range", + version=1, ) - recursive_serde_register( slice, serialize=lambda x: serialize_iterable((x.start, x.stop, x.step)), deserialize=lambda x: slice(*deserialize_iterable(tuple, x)), + canonical_name="slice", + version=1, ) recursive_serde_register( - slice, - serialize=lambda x: serialize_iterable((x.start, x.stop, x.step)), - deserialize=lambda x: slice(*deserialize_iterable(tuple, x)), + type, + serialize=serialize_type, + deserialize=deserialize_type, + canonical_name="type", + version=1, ) -recursive_serde_register(type, serialize=serialize_type, deserialize=deserialize_type) recursive_serde_register( MappingProxyType, serialize=serialize_kv, deserialize=functools.partial(deserialize_kv, MappingProxyType), + canonical_name="MappingProxyType", + version=1, ) +recursive_serde_register( + PurePath, + serialize=serialize_path, + deserialize=functools.partial(deserialize_path, PurePath), + canonical_name="PurePath", + version=1, +) for __path_type in ( - PurePath, pathlib.PurePosixPath, pathlib.PureWindowsPath, pathlib.Path, @@ -310,6 +389,8 @@ def deserialize_path(path_type: type[TPath], buf: bytes) -> TPath: __path_type, serialize=serialize_path, deserialize=functools.partial(deserialize_path, __path_type), + canonical_name=f"pathlib_{__path_type.__name__}", + version=1, ) @@ -359,7 +440,14 @@ def deserialize_generic_alias(type_blob: bytes) -> type: # 🟡 TODO 5: add tests and all typing options for signatures -def recursive_serde_register_type(t: type, serialize_attrs: list | None = None) -> None: +def recursive_serde_register_type( + t: type, + serialize_attrs: list | None = None, + canonical_name: str | None = None, + version: int | None = None, +) -> None: + # former case is for instance for _GerericAlias itself or UnionGenericAlias + # Latter case is true for for instance List[str], which is currently not used if (isinstance(t, type) and issubclass(t, _GenericAlias)) or issubclass( type(t), _GenericAlias ): @@ -368,6 +456,8 @@ def recursive_serde_register_type(t: type, serialize_attrs: list | None = None) serialize=serialize_generic_alias, deserialize=deserialize_generic_alias, serialize_attrs=serialize_attrs, + canonical_name=canonical_name, + version=version, ) else: recursive_serde_register( @@ -375,6 +465,8 @@ def recursive_serde_register_type(t: type, serialize_attrs: list | None = None) 
serialize=serialize_type, deserialize=deserialize_type, serialize_attrs=serialize_attrs, + canonical_name=canonical_name, + version=version, ) @@ -393,16 +485,62 @@ def deserialize_union_type(type_blob: bytes) -> type: return functools.reduce(lambda x, y: x | y, args) +def serialize_union(serialized_type: UnionType) -> bytes: + return b"" + + +def deserialize_union(type_blob: bytes) -> type: # type: ignore + return Union # type: ignore + + +def serialize_typevar(serialized_type: TypeVar) -> bytes: + return f"{serialized_type.__name__}".encode() + + +def deserialize_typevar(type_blob: bytes) -> type: + name = type_blob.decode() + return TypeVar(name=name) # type: ignore + + +def serialize_any(serialized_type: TypeVar) -> bytes: + return b"" + + +def deserialize_any(type_blob: bytes) -> type: # type: ignore + return Any # type: ignore + + recursive_serde_register( UnionType, serialize=serialize_union_type, deserialize=deserialize_union_type, + canonical_name="UnionType", + version=1, ) -recursive_serde_register_type(_SpecialForm) -recursive_serde_register_type(_GenericAlias) -recursive_serde_register_type(Union) -recursive_serde_register_type(TypeVar) +recursive_serde_register_type(_SpecialForm, canonical_name="_SpecialForm", version=1) +recursive_serde_register_type(_GenericAlias, canonical_name="_GenericAlias", version=1) +recursive_serde_register( + Union, + canonical_name="Union", + serialize=serialize_union, + deserialize=deserialize_union, + version=1, +) +recursive_serde_register( + TypeVar, + canonical_name="TypeVar", + serialize=serialize_typevar, + deserialize=deserialize_typevar, + version=1, +) +recursive_serde_register( + Any, + canonical_name="Any", + serialize=serialize_any, + deserialize=deserialize_any, + version=1, +) recursive_serde_register_type( _UnionGenericAlias, @@ -415,9 +553,17 @@ def deserialize_union_type(type_blob: bytes) -> type: "__module__", "__origin__", ], + canonical_name="_UnionGenericAlias", + version=1, +) +recursive_serde_register_type( + _SpecialGenericAlias, canonical_name="_SpecialGenericAlias", version=1 ) -recursive_serde_register_type(_SpecialGenericAlias) -recursive_serde_register_type(GenericAlias) +recursive_serde_register_type(GenericAlias, canonical_name="GenericAlias", version=1) + +# recursive_serde_register_type(Any, canonical_name="Any", version=1) +recursive_serde_register_type(EnumMeta, canonical_name="EnumMeta", version=1) + +recursive_serde_register_type(ABCMeta, canonical_name="ABCMeta", version=1) -recursive_serde_register_type(Any) -recursive_serde_register_type(EnumMeta) +recursive_serde_register_type(inspect._empty, canonical_name="inspect_empty", version=1) diff --git a/packages/syft/src/syft/serde/serializable.py b/packages/syft/src/syft/serde/serializable.py index 4dda2ee3af9..9a683dbcf57 100644 --- a/packages/syft/src/syft/serde/serializable.py +++ b/packages/syft/src/syft/serde/serializable.py @@ -19,6 +19,8 @@ def serializable( without: list[str] | None = None, inherit: bool | None = True, inheritable: bool | None = True, + canonical_name: str | None = None, + version: int | None = None, ) -> Callable[[T], T]: """ Recursively serialize attributes of the class. 
@@ -52,6 +54,8 @@ def rs_decorator(cls: T) -> T: exclude_attrs=without, inherit_attrs=inherit, inheritable_attrs=inheritable, + canonical_name=canonical_name, + version=version, ) return cls diff --git a/packages/syft/src/syft/serde/serialize.py b/packages/syft/src/syft/serde/serialize.py index 0692ffb77bd..210e9e0ca9e 100644 --- a/packages/syft/src/syft/serde/serialize.py +++ b/packages/syft/src/syft/serde/serialize.py @@ -1,6 +1,10 @@ # stdlib +import tempfile from typing import Any +# relative +from .util import compatible_with_large_file_writes_capnp + def _serialize( obj: object, @@ -12,9 +16,19 @@ def _serialize( from .recursive import rs_object2proto proto = rs_object2proto(obj, for_hashing=for_hashing) - if to_bytes: - return proto.to_bytes() + if compatible_with_large_file_writes_capnp(proto): + with tempfile.TemporaryFile() as tmp_file: + # Write data to a file to save RAM + proto.write(tmp_file) + # proto in memory, and bytes in file + del proto + # bytes in file + tmp_file.seek(0) + return tmp_file.read() + else: + res = proto.to_bytes() + return res if to_proto: return proto diff --git a/packages/syft/src/syft/serde/signature.py b/packages/syft/src/syft/serde/signature.py index 865a4f142e3..0887d148367 100644 --- a/packages/syft/src/syft/serde/signature.py +++ b/packages/syft/src/syft/serde/signature.py @@ -12,11 +12,18 @@ from .recursive import recursive_serde_register from .serialize import _serialize -recursive_serde_register(_ParameterKind) +recursive_serde_register( + _ParameterKind, + canonical_name="inspect_ParameterKind", + version=1, +) recursive_serde_register( - Parameter, serialize_attrs=["_annotation", "_name", "_kind", "_default"] + Parameter, + serialize_attrs=["_annotation", "_name", "_kind", "_default"], + canonical_name="inspect_Parameter", + version=1, ) @@ -42,9 +49,6 @@ # return Parameter(**obj_dict) -# recursive_serde_register(Parameter, serialize_parameter, deserialize_parameter) - - def serialize_signature(obj: Signature) -> bytes: parameters = list(dict(obj.parameters).values()) return_annotation = obj.return_annotation @@ -57,7 +61,13 @@ def deserialize_signature(blob: bytes) -> Signature: return Signature(**obj_dict) -recursive_serde_register(Signature, serialize_signature, deserialize_signature) +recursive_serde_register( + Signature, + serialize_signature, + deserialize_signature, + canonical_name="inspect_Signature", + version=1, +) def signature_remove_self(signature: Signature) -> Signature: @@ -76,6 +86,15 @@ def signature_remove_context(signature: Signature) -> Signature: ) +def signature_remove(signature: Signature, args: list[str]) -> Signature: + params = dict(signature.parameters) + for arg in args: + params.pop(arg, None) + return Signature( + list(params.values()), return_annotation=signature.return_annotation + ) + + def get_str_signature_from_docstring(doc: str, callable_name: str) -> str | None: if not doc or callable_name not in doc: return None diff --git a/packages/syft/src/syft/serde/third_party.py b/packages/syft/src/syft/serde/third_party.py index fddbb5ae755..6fadf6261f9 100644 --- a/packages/syft/src/syft/serde/third_party.py +++ b/packages/syft/src/syft/serde/third_party.py @@ -3,16 +3,13 @@ from datetime import datetime from datetime import time import functools +from importlib.util import find_spec from io import BytesIO # third party from dateutil import parser -from jax import numpy as jnp -from jaxlib.xla_extension import ArrayImpl from nacl.signing import SigningKey from nacl.signing import VerifyKey -import networkx as 
nx -from networkx import DiGraph import numpy as np from pandas import DataFrame from pandas import Series @@ -21,17 +18,14 @@ import pyarrow.parquet as pq import pydantic from pydantic._internal._model_construction import ModelMetaclass -from pymongo.collection import Collection -from result import Err -from result import Ok -from result import Result -import zmq.green as zmq # relative from ..types.dicttuple import DictTuple from ..types.dicttuple import _Meta as _DictTupleMetaClass from ..types.syft_metaclass import EmptyType from ..types.syft_metaclass import PartialModelMetaclass +from .array import numpy_deserialize +from .array import numpy_serialize from .deserialize import _deserialize as deserialize from .recursive_primitives import _serialize_kv_pairs from .recursive_primitives import deserialize_kv @@ -45,25 +39,23 @@ SigningKey, serialize=lambda x: bytes(x), deserialize=lambda x: SigningKey(x), + canonical_name="nacl_signing_key", + version=1, ) recursive_serde_register( VerifyKey, serialize=lambda x: bytes(x), deserialize=lambda x: VerifyKey(x), + canonical_name="nacl_verify_key", + version=1, ) # result Ok and Err -recursive_serde_register(Ok, serialize_attrs=["_value"]) -recursive_serde_register(Err, serialize_attrs=["_value"]) -recursive_serde_register(Result) # exceptions -recursive_serde_register(cls=TypeError) - -# mongo collection -recursive_serde_register_type(Collection) +recursive_serde_register(cls=TypeError, canonical_name="TypeError", version=1) def serialize_dataframe(df: DataFrame) -> bytes: @@ -93,6 +85,8 @@ def deserialize_dataframe(buf: bytes) -> DataFrame: DataFrame, serialize=serialize_dataframe, deserialize=deserialize_dataframe, + canonical_name="pandas_dataframe", + version=1, ) @@ -105,31 +99,40 @@ def deserialize_series(blob: bytes) -> Series: Series, serialize=lambda x: serialize(DataFrame(x).to_dict(), to_bytes=True), deserialize=deserialize_series, + canonical_name="pandas_series", + version=1, ) - recursive_serde_register( datetime, serialize=lambda x: serialize(x.isoformat(), to_bytes=True), deserialize=lambda x: parser.isoparse(deserialize(x, from_bytes=True)), + canonical_name="datetime_datetime", + version=1, ) recursive_serde_register( time, serialize=lambda x: serialize(x.isoformat(), to_bytes=True), deserialize=lambda x: parser.parse(deserialize(x, from_bytes=True)).time(), + canonical_name="datetime_time", + version=1, ) recursive_serde_register( date, serialize=lambda x: serialize(x.isoformat(), to_bytes=True), deserialize=lambda x: parser.parse(deserialize(x, from_bytes=True)).date(), + canonical_name="datetime_date", + version=1, ) recursive_serde_register( Timestamp, serialize=lambda x: serialize(x.value, to_bytes=True), deserialize=lambda x: Timestamp(deserialize(x, from_bytes=True)), + canonical_name="pandas_timestamp", + version=1, ) @@ -141,11 +144,15 @@ def _serialize_dicttuple(x: DictTuple) -> bytes: _DictTupleMetaClass, serialize=serialize_type, deserialize=deserialize_type, + canonical_name="dicttuple_meta", + version=1, ) recursive_serde_register( DictTuple, serialize=_serialize_dicttuple, deserialize=functools.partial(deserialize_kv, DictTuple), + canonical_name="dicttuple", + version=1, ) @@ -153,11 +160,17 @@ def _serialize_dicttuple(x: DictTuple) -> bytes: EmptyType, serialize=serialize_type, deserialize=deserialize_type, + canonical_name="empty_type", + version=1, ) -recursive_serde_register_type(ModelMetaclass) -recursive_serde_register_type(PartialModelMetaclass) +recursive_serde_register_type( + ModelMetaclass, 
canonical_name="pydantic_model_metaclass", version=1 +) +recursive_serde_register_type( + PartialModelMetaclass, canonical_name="partial_model_metaclass", version=1 +) def serialize_bytes_io(io: BytesIO) -> bytes: @@ -169,61 +182,101 @@ def serialize_bytes_io(io: BytesIO) -> bytes: BytesIO, serialize=serialize_bytes_io, deserialize=lambda x: BytesIO(deserialize(x, from_bytes=True)), + canonical_name="bytes_io", + version=1, ) try: # third party from IPython.display import Image - recursive_serde_register(Image) + recursive_serde_register(Image, canonical_name="IPython_display_Image", version=1) except Exception: # nosec pass -# jax -recursive_serde_register( - ArrayImpl, - serialize=lambda x: serialize(np.array(x), to_bytes=True), - deserialize=lambda x: jnp.array(deserialize(x, from_bytes=True)), -) +try: + # third party + import torch + from torch._C import _TensorMeta + + recursive_serde_register_type( + _TensorMeta, canonical_name="torch_tensor_meta", version=1 + ) + recursive_serde_register_type( + torch.Tensor, canonical_name="torch_tensor", version=1 + ) + + def torch_serialize(tensor: torch.Tensor) -> bytes: + return numpy_serialize(tensor.numpy()) + + def torch_deserialize(buffer: bytes) -> torch.tensor: + np_array = numpy_deserialize(buffer) + return torch.from_numpy(np_array) + + recursive_serde_register( + torch.Tensor, + serialize=torch_serialize, + deserialize=lambda data: torch_deserialize(data), + canonical_name="torch_tensor", + version=1, + ) + +except ImportError: # nosec + pass # unsure why we have to register the object not the type but this works -recursive_serde_register(np.core._ufunc_config._unspecified()) +recursive_serde_register( + np.core._ufunc_config._unspecified(), + canonical_name="numpy_ufunc_unspecified", + version=1, +) recursive_serde_register( pydantic.EmailStr, serialize=lambda x: x.encode(), deserialize=lambda x: pydantic.EmailStr(x.decode()), + canonical_name="pydantic_emailstr", + version=1, ) -recursive_serde_register( - zmq._Socket, - serialize_attrs=[ - "_shadow", - "_monitor_socket", - "_type_name", - ], -) -recursive_serde_register(zmq._Context) # how else do you import a relative file to execute it? 
NOTHING = None - -# TODO: debug serializing after updating a node -def serialize_networkx_graph(graph: DiGraph) -> bytes: - graph_dict: dict = nx.node_link_data(graph) - return serialize(graph_dict, to_bytes=True) - - -def deserialize_networkx_graph(buf: bytes) -> DiGraph: - graph_dict: dict = deserialize(buf, from_bytes=True) - return nx.node_link_graph(graph_dict) - - -recursive_serde_register( - DiGraph, - serialize=serialize_networkx_graph, - deserialize=deserialize_networkx_graph, -) +try: + # Just register these serializers if the google.cloud.bigquery & db_dtypes module are available + # third party + from google.cloud.bigquery.job.query import QueryJob + from google.cloud.bigquery.table import RowIterator + + # Checking db_dtypes availability this way to avoid unused ruff issues, but this package is used internally + if not find_spec("db_dtypes"): + raise ImportError("db_dtypes module not found") + + def convert_to_dataframe(obj: RowIterator) -> bytes: + dataframe = obj.to_dataframe() + return serialize_dataframe(dataframe) + + def convert_from_dataframe(blob: bytes) -> DataFrame: + dataframe = deserialize_dataframe(blob) + return dataframe + + recursive_serde_register( + RowIterator, + serialize=convert_to_dataframe, + deserialize=convert_from_dataframe, + canonical_name="bigquery_rowiterator", + version=1, + ) + + recursive_serde_register( + QueryJob, + serialize=lambda obj: convert_to_dataframe(obj.result()), + deserialize=convert_from_dataframe, + canonical_name="bigquery_queryjob", + version=1, + ) +except ImportError: + pass diff --git a/packages/syft/src/syft/serde/util.py b/packages/syft/src/syft/serde/util.py new file mode 100644 index 00000000000..af0eb101924 --- /dev/null +++ b/packages/syft/src/syft/serde/util.py @@ -0,0 +1,18 @@ +# stdlib +from sys import platform + +# third party +from capnp.lib.capnp import _DynamicStructBuilder + + +def get_size(thing: _DynamicStructBuilder | int) -> int: + if isinstance(thing, int): + return thing + return thing.total_size.word_count + + +def compatible_with_large_file_writes_capnp(thing: _DynamicStructBuilder | int) -> bool: + if platform in ["darwin", "win32"]: + return False + else: + return get_size(thing) > 50000000 # roughly 0.5GB diff --git a/packages/syft/src/syft/server/__init__.py b/packages/syft/src/syft/server/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft/src/syft/node/credentials.py b/packages/syft/src/syft/server/credentials.py similarity index 84% rename from packages/syft/src/syft/node/credentials.py rename to packages/syft/src/syft/server/credentials.py index dc75bc20811..f4615f083e8 100644 --- a/packages/syft/src/syft/node/credentials.py +++ b/packages/syft/src/syft/server/credentials.py @@ -2,6 +2,7 @@ from __future__ import annotations # stdlib +import hashlib from typing import Any # third party @@ -14,10 +15,10 @@ from ..serde.serializable import serializable from ..types.base import SyftBaseModel -SIGNING_KEY_FOR = "SigningKey for" +SIGNING_KEY_FOR = "Corresponding Public Key" -@serializable() +@serializable(canonical_name="SyftVerifyKey", version=1) class SyftVerifyKey(SyftBaseModel): verify_key: VerifyKey @@ -49,7 +50,7 @@ def __hash__(self) -> int: return hash(self.verify_key) -@serializable() +@serializable(canonical_name="SyftSigningKey", version=1) class SyftSigningKey(SyftBaseModel): signing_key: SigningKey @@ -58,6 +59,9 @@ class SyftSigningKey(SyftBaseModel): def make_signing_key(cls, v: Any) -> Any: return SigningKey(bytes.fromhex(v)) if isinstance(v, 
str) else v + def deterministic_hash(self) -> str: + return hashlib.sha256(self.signing_key._seed).hexdigest() + @property def verify_key(self) -> SyftVerifyKey: return SyftVerifyKey(verify_key=self.signing_key.verify_key) @@ -76,6 +80,11 @@ def from_string(key_str: str) -> SyftSigningKey: def __repr__(self) -> str: return f"<{SIGNING_KEY_FOR}: {self.verify}>" + def _coll_repr_(self) -> dict[str, str]: + return { + SIGNING_KEY_FOR: self.verify, + } + @property def verify(self) -> str: return str(self.verify_key) @@ -92,7 +101,7 @@ def __eq__(self, other: Any) -> bool: SyftCredentials = SyftVerifyKey | SyftSigningKey -@serializable() +@serializable(canonical_name="UserLoginCredentials", version=1) class UserLoginCredentials(SyftBaseModel): email: str password: str diff --git a/packages/syft/src/syft/server/datasite.py b/packages/syft/src/syft/server/datasite.py new file mode 100644 index 00000000000..97fe976721a --- /dev/null +++ b/packages/syft/src/syft/server/datasite.py @@ -0,0 +1,8 @@ +# relative +from ..serde.serializable import serializable +from .server import Server + + +@serializable(without=["queue_manager"], canonical_name="Datasite", version=1) +class Datasite(Server): + pass diff --git a/packages/syft/src/syft/server/enclave.py b/packages/syft/src/syft/server/enclave.py new file mode 100644 index 00000000000..919916c4581 --- /dev/null +++ b/packages/syft/src/syft/server/enclave.py @@ -0,0 +1,11 @@ +# relative +from ..abstract_server import ServerType +from ..serde.serializable import serializable +from .server import Server + + +@serializable(canonical_name="Enclave", version=1) +class Enclave(Server): + def post_init(self) -> None: + self.server_type = ServerType.ENCLAVE + super().post_init() diff --git a/packages/syft/src/syft/server/env.py b/packages/syft/src/syft/server/env.py new file mode 100644 index 00000000000..c101f05bad1 --- /dev/null +++ b/packages/syft/src/syft/server/env.py @@ -0,0 +1,120 @@ +# stdlib +import json +import subprocess # nosec +import sys + +# relative +from ..service.worker.utils import DEFAULT_WORKER_POOL_NAME +from ..types.uid import UID +from ..util.util import get_env +from ..util.util import str_to_bool + +SERVER_PRIVATE_KEY = "SERVER_PRIVATE_KEY" +SERVER_UID = "SERVER_UID" +SERVER_TYPE = "SERVER_TYPE" +SERVER_NAME = "SERVER_NAME" +SERVER_SIDE_TYPE = "SERVER_SIDE_TYPE" + +DEFAULT_ROOT_EMAIL = "DEFAULT_ROOT_EMAIL" +DEFAULT_ROOT_USERNAME = "DEFAULT_ROOT_USERNAME" +DEFAULT_ROOT_PASSWORD = "DEFAULT_ROOT_PASSWORD" # nosec + + +def get_private_key_env() -> str | None: + return get_env(SERVER_PRIVATE_KEY) + + +def get_server_type() -> str | None: + return get_env(SERVER_TYPE, "datasite") + + +def get_server_name() -> str | None: + return get_env(SERVER_NAME, None) + + +def get_server_side_type() -> str | None: + return get_env(SERVER_SIDE_TYPE, "high") + + +def get_server_uid_env() -> str | None: + return get_env(SERVER_UID) + + +def get_default_root_email() -> str | None: + return get_env(DEFAULT_ROOT_EMAIL, "info@openmined.org") + + +def get_default_root_username() -> str | None: + return get_env(DEFAULT_ROOT_USERNAME, "Jane Doe") + + +def get_default_root_password() -> str | None: + return get_env(DEFAULT_ROOT_PASSWORD, "changethis") # nosec + + +def get_enable_warnings() -> bool: + return str_to_bool(get_env("ENABLE_WARNINGS", "False")) + + +def get_container_host() -> str | None: + return get_env("CONTAINER_HOST") + + +def get_default_worker_image() -> str | None: + return get_env("DEFAULT_WORKER_POOL_IMAGE") + + +def 
get_default_worker_pool_name() -> str | None: + return get_env("DEFAULT_WORKER_POOL_NAME", DEFAULT_WORKER_POOL_NAME) + + +def get_default_bucket_name() -> str: + env = get_env("DEFAULT_BUCKET_NAME") + server_id = get_server_uid_env() or "syft-bucket" + return env or server_id or "syft-bucket" + + +def get_default_worker_pool_pod_annotations() -> dict[str, str] | None: + annotations = get_env("DEFAULT_WORKER_POOL_POD_ANNOTATIONS", "null") + return json.loads(annotations) + + +def get_default_worker_pool_pod_labels() -> dict[str, str] | None: + labels = get_env("DEFAULT_WORKER_POOL_POD_LABELS", "null") + return json.loads(labels) + + +def in_kubernetes() -> bool: + return get_container_host() == "k8s" + + +def get_venv_packages() -> str: + try: + # subprocess call is safe because it uses a fully qualified path and fixed arguments + result = subprocess.run( + [sys.executable, "-m", "pip", "list", "--format=freeze"], # nosec + capture_output=True, + check=True, + text=True, + ) + return result.stdout + except subprocess.CalledProcessError as e: + return f"An error occurred: {e.stderr}" + + +def get_syft_worker() -> bool: + return str_to_bool(get_env("SYFT_WORKER", "false")) + + +def get_k8s_pod_name() -> str | None: + return get_env("K8S_POD_NAME") + + +def get_syft_worker_uid() -> str | None: + is_worker = get_syft_worker() + pod_name = get_k8s_pod_name() + uid = get_env("SYFT_WORKER_UID") + # if uid is empty is a K8S worker, generate a uid from the pod name + if (not uid) and is_worker and pod_name: + uid = str(UID.with_seed(pod_name)) + return uid diff --git a/packages/syft/src/syft/server/gateway.py b/packages/syft/src/syft/server/gateway.py new file mode 100644 index 00000000000..47e7b1c2475 --- /dev/null +++ b/packages/syft/src/syft/server/gateway.py @@ -0,0 +1,11 @@ +# relative +from ..abstract_server import ServerType +from ..serde.serializable import serializable +from .server import Server + + +@serializable(canonical_name="Gateway", version=1) +class Gateway(Server): + def post_init(self) -> None: + self.server_type = ServerType.GATEWAY + super().post_init() diff --git a/packages/syft/src/syft/server/routes.py b/packages/syft/src/syft/server/routes.py new file mode 100644 index 00000000000..a492e999f8e --- /dev/null +++ b/packages/syft/src/syft/server/routes.py @@ -0,0 +1,275 @@ +# stdlib +import base64 +import binascii +from collections.abc import AsyncGenerator +import logging +from typing import Annotated + +# third party +from fastapi import APIRouter +from fastapi import Body +from fastapi import Depends +from fastapi import HTTPException +from fastapi import Request +from fastapi import Response +from fastapi.responses import JSONResponse +from fastapi.responses import StreamingResponse +from pydantic import ValidationError +import requests + +# relative +from ..abstract_server import AbstractServer +from ..client.connection import ServerConnection +from ..protocol.data_protocol import PROTOCOL_TYPE +from ..serde.deserialize import _deserialize as deserialize +from ..serde.serialize import _serialize as serialize +from ..service.context import ServerServiceContext +from ..service.context import UnauthedServiceContext +from ..service.metadata.server_metadata import ServerMetadataJSON +from ..service.response import SyftError +from ..service.user.user import UserCreate +from ..service.user.user import UserPrivateKey +from ..service.user.user_service import UserService +from ..types.errors import SyftException +from ..types.uid import UID +from .credentials import SyftVerifyKey 
+from .credentials import UserLoginCredentials +from .worker import Worker + +logger = logging.getLogger(__name__) + + +def make_routes(worker: Worker) -> APIRouter: + router = APIRouter() + + async def get_body(request: Request) -> bytes: + return await request.body() + + def _get_server_connection(peer_uid: UID) -> ServerConnection: + # relative + from ..service.network.server_peer import route_to_connection + + peer = worker.network.stash.get_by_uid(worker.verify_key, peer_uid).unwrap() + peer_server_route = peer.pick_highest_priority_route() + connection = route_to_connection(route=peer_server_route) + return connection + + @router.get("/stream/{peer_uid}/{url_path}/", name="stream") + async def stream_download(peer_uid: str, url_path: str) -> StreamingResponse: + try: + url_path_parsed = base64.urlsafe_b64decode(url_path.encode()).decode() + except binascii.Error: + raise HTTPException(404, "Invalid `url_path`.") + + peer_uid_parsed = UID.from_string(peer_uid) + + try: + peer_connection = _get_server_connection(peer_uid_parsed) + url = peer_connection.to_blob_route(url_path_parsed) + stream_response = peer_connection._make_get(url.path, stream=True) + except requests.RequestException: + raise HTTPException(404, "Failed to retrieve data from datasite.") + + return StreamingResponse(stream_response, media_type="text/event-stream") + + async def read_request_body_in_chunks( + request: Request, + ) -> AsyncGenerator[bytes, None]: + async for chunk in request.stream(): + yield chunk + + @router.put("/stream/{peer_uid}/{url_path}/", name="stream") + async def stream_upload(peer_uid: str, url_path: str, request: Request) -> Response: + try: + url_path_parsed = base64.urlsafe_b64decode(url_path.encode()).decode() + except binascii.Error: + raise HTTPException(404, "Invalid `url_path`.") + + data = await request.body() + + peer_uid_parsed = UID.from_string(peer_uid) + + try: + peer_connection = _get_server_connection(peer_uid_parsed) + url = peer_connection.to_blob_route(url_path_parsed) + + print("Url on stream", url.path) + response = peer_connection._make_put(url.path, data=data, stream=True) + except requests.RequestException: + raise HTTPException(404, "Failed to upload data to datasite") + + return Response( + content=response.content, + headers=response.headers, + media_type="application/octet-stream", + ) + + @router.get( + "/", + name="healthcheck", + status_code=200, + response_class=JSONResponse, + ) + def root() -> dict[str, str]: + """ + Currently, all service backends must satisfy either of the following requirements to + pass the HTTP health checks sent to it from the GCE loadbalancer: 1. Respond with a + 200 on '/'. The content does not matter. 2. Expose an arbitrary url as a readiness + probe on the pods backing the Service. 
+ """ + return {"status": "ok"} + + # provide information about the server in JSON + @router.get("/metadata", response_class=JSONResponse) + def syft_metadata() -> JSONResponse: + return worker.metadata.to(ServerMetadataJSON) + + @router.get("/metadata_capnp") + def syft_metadata_capnp() -> Response: + result = worker.metadata + return Response( + serialize(result, to_bytes=True), + media_type="application/octet-stream", + ) + + def handle_syft_new_api( + user_verify_key: SyftVerifyKey, communication_protocol: PROTOCOL_TYPE + ) -> Response: + return Response( + serialize( + worker.get_api(user_verify_key, communication_protocol), to_bytes=True + ), + media_type="application/octet-stream", + ) + + # get the SyftAPI object + @router.get("/api") + def syft_new_api( + request: Request, verify_key: str, communication_protocol: PROTOCOL_TYPE + ) -> Response: + user_verify_key: SyftVerifyKey = SyftVerifyKey.from_string(verify_key) + return handle_syft_new_api(user_verify_key, communication_protocol) + + def handle_new_api_call(data: bytes) -> Response: + obj_msg = deserialize(blob=data, from_bytes=True) + result = worker.handle_api_call(api_call=obj_msg) + return Response( + serialize(result, to_bytes=True), + media_type="application/octet-stream", + ) + + # make a request to the SyftAPI + @router.post("/api_call") + def syft_new_api_call( + request: Request, data: Annotated[bytes, Depends(get_body)] + ) -> Response: + return handle_new_api_call(data) + + def handle_forgot_password(email: str, server: AbstractServer) -> Response: + try: + context = UnauthedServiceContext(server=server) + result = server.services.user.forgot_password(context=context, email=email) + except SyftException as e: + result = SyftError.from_public_exception(e) + + if isinstance(result, SyftError): + logger.debug(f"Forgot Password Error: {result.message}. user={email}") + + return Response( + serialize(result, to_bytes=True), + media_type="application/octet-stream", + ) + + def handle_reset_password( + token: str, new_password: str, server: AbstractServer + ) -> Response: + try: + context = UnauthedServiceContext(server=server) + result = server.services.user.reset_password( + context=context, token=token, new_password=new_password + ) + except SyftException as e: + result = SyftError.from_public_exception(e) + + if isinstance(result, SyftError): + logger.debug(f"Reset Password Error: {result.message}. token={token}") + + return Response( + serialize(result, to_bytes=True), + media_type="application/octet-stream", + ) + + def handle_login(email: str, password: str, server: AbstractServer) -> Response: + try: + login_credentials = UserLoginCredentials(email=email, password=password) + except ValidationError as e: + return {"Error": e.json()} + + context = UnauthedServiceContext( + server=server, login_credentials=login_credentials + ) + try: + result = server.services.user.exchange_credentials(context=context).value + if not isinstance(result, UserPrivateKey): + response = SyftError(message=f"Incorrect return type: {type(result)}") + else: + response = result + except SyftException as e: + logger.error(f"Login Error: {e}. 
user={email}") + response = SyftError(message=f"{e.public_message}") + + return Response( + serialize(response, to_bytes=True), + media_type="application/octet-stream", + ) + + def handle_register(data: bytes, server: AbstractServer) -> Response: + user_create = deserialize(data, from_bytes=True) + + if not isinstance(user_create, UserCreate): + raise Exception(f"Incorrect type received: {user_create}") + + context = ServerServiceContext(server=server) + method = server.get_method_with_context(UserService.register, context) + + try: + response = method(new_user=user_create) + except SyftException as e: + logger.error(f"Register Error: {e}. user={user_create.model_dump()}") + response = SyftError(message=f"{e.public_message}") + + return Response( + serialize(response, to_bytes=True), + media_type="application/octet-stream", + ) + + # exchange email and password for a SyftSigningKey + @router.post("/login", name="login", status_code=200) + def login( + request: Request, + email: Annotated[str, Body(example="info@openmined.org")], + password: Annotated[str, Body(example="changethis")], + ) -> Response: + return handle_login(email, password, worker) + + @router.post("/reset_password", name="reset_password", status_code=200) + def reset_password( + request: Request, + token: Annotated[str, Body(...)], + new_password: Annotated[str, Body(...)], + ) -> Response: + return handle_reset_password(token, new_password, worker) + + @router.post("/forgot_password", name="forgot_password", status_code=200) + def forgot_password( + request: Request, email: str = Body(..., embed=True) + ) -> Response: + return handle_forgot_password(email, worker) + + @router.post("/register", name="register", status_code=200) + def register( + request: Request, data: Annotated[bytes, Depends(get_body)] + ) -> Response: + return handle_register(data, worker) + + return router diff --git a/packages/syft/src/syft/server/run.py b/packages/syft/src/syft/server/run.py new file mode 100644 index 00000000000..3be0fdec341 --- /dev/null +++ b/packages/syft/src/syft/server/run.py @@ -0,0 +1,90 @@ +# stdlib +import argparse + +# relative +from ..orchestra import Orchestra +from ..orchestra import ServerHandle + + +def str_to_bool(bool_str: str | None) -> bool: + result = False + bool_str = str(bool_str).lower() + if bool_str == "true" or bool_str == "1": + result = True + return result + + +def run() -> ServerHandle | None: + parser = argparse.ArgumentParser() + parser.add_argument("command", help="command: launch", type=str, default="none") + parser.add_argument( + "--name", help="server name", type=str, default="syft-server", dest="name" + ) + parser.add_argument( + "--server-type", + help="server type", + type=str, + default="datasite", + dest="server_type", + ) + parser.add_argument( + "--host", + help="host for binding", + type=str, + default="0.0.0.0", # nosec + dest="host", + ) + + parser.add_argument( + "--port", help="port for binding", type=int, default=8080, dest="port" + ) + parser.add_argument( + "--dev-mode", + help="developer mode", + type=str, + default="True", + dest="dev_mode", + ) + parser.add_argument( + "--reset", + help="reset", + type=str, + default="True", + dest="reset", + ) + parser.add_argument( + "--processes", + help="processing mode", + type=int, + default=0, + dest="processes", + ) + parser.add_argument( + "--tail", + help="tail mode", + type=str, + default="True", + dest="tail", + ) + + args = parser.parse_args() + if args.command != "launch": + print("syft launch is the only command currently 
supported") + + args.dev_mode = str_to_bool(args.dev_mode) + args.reset = str_to_bool(args.reset) + args.tail = str_to_bool(args.tail) + + server = Orchestra.launch( + name=args.name, + server_type=args.server_type, + host=args.host, + port=args.port, + dev_mode=args.dev_mode, + reset=args.reset, + processes=args.processes, + tail=args.tail, + ) + if not args.tail: + return server + return None diff --git a/packages/syft/src/syft/server/server.py b/packages/syft/src/syft/server/server.py new file mode 100644 index 00000000000..5d3a9deb075 --- /dev/null +++ b/packages/syft/src/syft/server/server.py @@ -0,0 +1,1759 @@ +# futureserver.py +# future +from __future__ import annotations + +# stdlib +from collections import OrderedDict +from collections.abc import Callable +from datetime import MINYEAR +from datetime import datetime +from datetime import timezone +from functools import partial +import hashlib +import logging +import os +from pathlib import Path +import threading +from time import sleep +import traceback +from typing import Any +from typing import TypeVar +from typing import cast + +# third party +from nacl.signing import SigningKey + +# relative +from .. import __version__ +from ..abstract_server import AbstractServer +from ..abstract_server import ServerSideType +from ..abstract_server import ServerType +from ..client.api import SignedSyftAPICall +from ..client.api import SyftAPI +from ..client.api import SyftAPICall +from ..client.api import SyftAPIData +from ..client.api import debox_signed_syftapicall_response +from ..client.client import SyftClient +from ..deployment_type import DeploymentType +from ..protocol.data_protocol import PROTOCOL_TYPE +from ..protocol.data_protocol import get_data_protocol +from ..service.action.action_object import Action +from ..service.action.action_object import ActionObject +from ..service.code.user_code_stash import UserCodeStash +from ..service.context import AuthedServiceContext +from ..service.context import ServerServiceContext +from ..service.context import UnauthedServiceContext +from ..service.context import UserLoginCredentials +from ..service.job.job_stash import Job +from ..service.job.job_stash import JobStash +from ..service.job.job_stash import JobStatus +from ..service.job.job_stash import JobType +from ..service.metadata.server_metadata import ServerMetadata +from ..service.network.utils import PeerHealthCheckTask +from ..service.notifier.notifier_service import NotifierService +from ..service.output.output_service import OutputStash +from ..service.queue.base_queue import AbstractMessageHandler +from ..service.queue.base_queue import QueueConsumer +from ..service.queue.base_queue import QueueProducer +from ..service.queue.queue import APICallMessageHandler +from ..service.queue.queue import ConsumerType +from ..service.queue.queue import QueueManager +from ..service.queue.queue_stash import APIEndpointQueueItem +from ..service.queue.queue_stash import ActionQueueItem +from ..service.queue.queue_stash import QueueItem +from ..service.queue.queue_stash import QueueStash +from ..service.queue.zmq_client import QueueConfig +from ..service.queue.zmq_client import ZMQClientConfig +from ..service.queue.zmq_client import ZMQQueueConfig +from ..service.response import SyftError +from ..service.response import SyftSuccess +from ..service.service import AbstractService +from ..service.service import ServiceConfigRegistry +from ..service.service import UserServiceConfigRegistry +from ..service.settings.settings import ServerSettings +from 
..service.settings.settings import ServerSettingsUpdate +from ..service.user.user import UserView +from ..service.user.user_roles import ServiceRole +from ..service.user.utils import create_root_admin_if_not_exists +from ..service.worker.utils import DEFAULT_WORKER_IMAGE_TAG +from ..service.worker.utils import DEFAULT_WORKER_POOL_NAME +from ..service.worker.utils import create_default_image +from ..service.worker.worker_pool import WorkerPool +from ..service.worker.worker_pool_service import SyftWorkerPoolService +from ..service.worker.worker_pool_stash import SyftWorkerPoolStash +from ..service.worker.worker_stash import WorkerStash +from ..store.blob_storage import BlobStorageConfig +from ..store.blob_storage.on_disk import OnDiskBlobStorageClientConfig +from ..store.blob_storage.on_disk import OnDiskBlobStorageConfig +from ..store.blob_storage.seaweedfs import SeaweedFSBlobDeposit +from ..store.db.db import DBConfig +from ..store.db.db import DBManager +from ..store.db.postgres import PostgresDBConfig +from ..store.db.postgres import PostgresDBManager +from ..store.db.sqlite import SQLiteDBConfig +from ..store.db.sqlite import SQLiteDBManager +from ..store.db.stash import ObjectStash +from ..store.document_store_errors import NotFoundException +from ..store.document_store_errors import StashException +from ..store.linked_obj import LinkedObject +from ..types.datetime import DATETIME_FORMAT +from ..types.errors import SyftException +from ..types.result import Result +from ..types.result import as_result +from ..types.syft_metaclass import Empty +from ..types.syft_object import Context +from ..types.syft_object import PartialSyftObject +from ..types.syft_object import SYFT_OBJECT_VERSION_1 +from ..types.syft_object import SyftObject +from ..types.uid import UID +from ..util.experimental_flags import flags +from ..util.telemetry import instrument +from ..util.util import get_dev_mode +from ..util.util import get_env +from ..util.util import get_queue_address +from ..util.util import random_name +from ..util.util import thread_ident +from .credentials import SyftSigningKey +from .credentials import SyftVerifyKey +from .env import get_default_root_email +from .env import get_default_root_password +from .env import get_default_root_username +from .env import get_default_worker_image +from .env import get_default_worker_pool_name +from .env import get_default_worker_pool_pod_annotations +from .env import get_default_worker_pool_pod_labels +from .env import get_private_key_env +from .env import get_server_uid_env +from .env import get_syft_worker_uid +from .env import in_kubernetes +from .service_registry import ServiceRegistry +from .utils import get_named_server_uid +from .utils import get_temp_dir_for_server +from .utils import remove_temp_dir_for_server +from .worker_settings import WorkerSettings + +logger = logging.getLogger(__name__) + +SyftT = TypeVar("SyftT", bound=SyftObject) + +# if user code needs to be serded and its not available we can call this to refresh +# the code for a specific server UID and thread +CODE_RELOADER: dict[int, Callable] = {} + + +def get_default_worker_pool_count(server: Server) -> int: + return int( + get_env( + "DEFAULT_WORKER_POOL_COUNT", server.queue_config.client_config.n_consumers + ) + ) + + +signing_key_env = get_private_key_env() +server_uid_env = get_server_uid_env() + +default_root_email = get_default_root_email() +default_root_username = get_default_root_username() +default_root_password = get_default_root_password() + + +class 
AuthServerContextRegistry: + __server_context_registry__: dict[str, ServerServiceContext] = OrderedDict() + + @classmethod + def set_server_context( + cls, + server_uid: UID | str, + context: ServerServiceContext, + user_verify_key: SyftVerifyKey | str, + ) -> None: + if isinstance(server_uid, str): + server_uid = UID.from_string(server_uid) + + if isinstance(user_verify_key, str): + user_verify_key = SyftVerifyKey.from_string(user_verify_key) + + key = cls._get_key(server_uid=server_uid, user_verify_key=user_verify_key) + + cls.__server_context_registry__[key] = context + + @staticmethod + def _get_key(server_uid: UID, user_verify_key: SyftVerifyKey) -> str: + return "-".join(str(x) for x in (server_uid, user_verify_key)) + + @classmethod + def auth_context_for_user( + cls, + server_uid: UID, + user_verify_key: SyftVerifyKey, + ) -> AuthedServiceContext | None: + key = cls._get_key(server_uid=server_uid, user_verify_key=user_verify_key) + return cls.__server_context_registry__.get(key) + + +class Server(AbstractServer): + signing_key: SyftSigningKey | None + required_signed_calls: bool = True + packages: str + + def __init__( + self, + *, # Trasterisk + name: str | None = None, + id: UID | None = None, + signing_key: SyftSigningKey | SigningKey | None = None, + db_config: DBConfig | None = None, + root_email: str | None = default_root_email, + root_username: str | None = default_root_username, + root_password: str | None = default_root_password, + processes: int = 0, + is_subprocess: bool = False, + server_type: str | ServerType = ServerType.DATASITE, + deployment_type: str | DeploymentType = "remote", + reset: bool = False, + blob_storage_config: BlobStorageConfig | None = None, + queue_config: QueueConfig | None = None, + queue_port: int | None = None, + n_consumers: int = 0, + create_producer: bool = False, + thread_workers: bool = False, + server_side_type: str | ServerSideType = ServerSideType.HIGH_SIDE, + enable_warnings: bool = False, + dev_mode: bool = False, + migrate: bool = False, + in_memory_workers: bool = True, + log_level: int | None = None, + smtp_username: str | None = None, + smtp_password: str | None = None, + email_sender: str | None = None, + smtp_port: int | None = None, + smtp_host: str | None = None, + association_request_auto_approval: bool = False, + background_tasks: bool = False, + consumer_type: ConsumerType | None = None, + db_url: str | None = None, + ): + # 🟡 TODO 22: change our ENV variable format and default init args to make this + # less horrible or add some convenience functions + self.dev_mode = dev_mode or get_dev_mode() + self.id = UID.from_string(server_uid_env) if server_uid_env else (id or UID()) + self.packages = "" + self.processes = processes + self.is_subprocess = is_subprocess + self.name = name or random_name() + self.enable_warnings = enable_warnings + self.in_memory_workers = in_memory_workers + self.server_type = ServerType(server_type) + self.server_side_type = ServerSideType(server_side_type) + self.client_cache: dict = {} + self.peer_client_cache: dict = {} + self._settings = None + + if isinstance(server_type, str): + server_type = ServerType(server_type) + + self.server_type = server_type + + if isinstance(deployment_type, str): + deployment_type = DeploymentType(deployment_type) + self.deployment_type = deployment_type + + # do this after we set the deployment type + self.set_log_level(log_level) + + if isinstance(server_side_type, str): + server_side_type = ServerSideType(server_side_type) + self.server_side_type = 
server_side_type + + skey = None + if signing_key_env: + skey = SyftSigningKey.from_string(signing_key_env) + elif isinstance(signing_key, SigningKey): + skey = SyftSigningKey(signing_key=signing_key) + else: + skey = signing_key + self.signing_key = skey or SyftSigningKey.generate() + + logger.critical( + f"Hash of the signing key '{self.signing_key.deterministic_hash()[:5]}...'" + ) + + self.association_request_auto_approval = association_request_auto_approval + + consumer_type = ( + consumer_type or ConsumerType.Thread + if thread_workers + else ConsumerType.Process + ) + self.queue_config = self.create_queue_config( + n_consumers=n_consumers, + create_producer=create_producer, + consumer_type=consumer_type, + queue_port=queue_port, + queue_config=queue_config, + ) + + # must call before initializing stores + if reset: + self.remove_temp_dir() + + db_config = DBConfig.from_connection_string(db_url) if db_url else db_config + + if db_config is None: + db_config = SQLiteDBConfig( + filename=f"{self.id}_json.db", + path=self.get_temp_dir("db"), + ) + + self.db_config = db_config + + self.db = self.init_stores(db_config=self.db_config) + + # construct services only after init stores + self.services: ServiceRegistry = ServiceRegistry.for_server(self) + self.db.init_tables(reset=reset) + self.action_store = self.services.action.stash + + create_root_admin_if_not_exists( + name=root_username, + email=root_email, + password=root_password, # nosec + server=self, + ) + + NotifierService.init_notifier( + server=self, + email_password=smtp_password, + email_username=smtp_username, + email_sender=email_sender, + smtp_port=smtp_port, + smtp_host=smtp_host, + ).unwrap() + + self.post_init() + + if migrate: + self.find_and_migrate_data() + else: + self.find_and_migrate_data([ServerSettings]) + + self.create_initial_settings(admin_email=root_email).unwrap() + + self.init_blob_storage(config=blob_storage_config) + + # Migrate data before any operation on db + + # first migrate, for backwards compatibility + self.init_queue_manager(queue_config=self.queue_config) + + context = AuthedServiceContext( + server=self, + credentials=self.verify_key, + role=ServiceRole.ADMIN, + ) + + self.peer_health_manager: PeerHealthCheckTask | None = None + if background_tasks: + self.run_peer_health_checks(context=context) + + ServerRegistry.set_server_for(self.id, self) + if background_tasks: + email_dispatcher = threading.Thread( + target=self.email_notification_dispatcher, daemon=True + ) + email_dispatcher.start() + + def email_notification_dispatcher(self) -> None: + lock = threading.Lock() + while True: + # Use admin context to have access to the notifier obj + context = AuthedServiceContext( + server=self, + credentials=self.verify_key, + role=ServiceRole.ADMIN, + ) + # Get notitifer settings + notifier_settings = self.services.notifier.settings( + context=context + ).unwrap() + lock.acquire() + # Iterate over email_types and its queues + # Ex: {'EmailRequest': {VerifyKey: [], VerifyKey: [], ...}} + for email_template, email_queue in notifier_settings.email_queue.items(): + # Get the email frequency of that specific email type + email_frequency = notifier_settings.email_frequency[email_template] + for verify_key, queue in email_queue.items(): + if self.services.notifier.is_time_to_dispatch( + email_frequency, datetime.now(timezone.utc) + ): + notifier_settings.send_batched_notification( + context=context, notification_queue=queue + ).unwrap() + notifier_settings.email_queue[email_template][verify_key] = [] + 
self.services.notifier.stash.update( + credentials=self.verify_key, obj=notifier_settings + ).unwrap() + lock.release() + sleep(15) + + def set_log_level(self, log_level: int | str | None) -> None: + def determine_log_level( + log_level: str | int | None, default: int + ) -> int | None: + if log_level is None: + return default + if isinstance(log_level, str): + level = logging.getLevelName(log_level.upper()) + if isinstance(level, str) and level.startswith("Level "): + level = logging.INFO # defaults to info otherwise + return level # type: ignore + return log_level + + default = logging.CRITICAL + if self.deployment_type == DeploymentType.PYTHON: + default = logging.CRITICAL + elif self.dev_mode: # if real deployment and dev mode + default = logging.INFO + + self.log_level = determine_log_level(log_level, default) + + logging.getLogger().setLevel(self.log_level) + + if log_level == logging.DEBUG: + # only do this if specifically set, very noisy + logging.getLogger("uvicorn").setLevel(logging.DEBUG) + logging.getLogger("uvicorn.access").setLevel(logging.DEBUG) + else: + logging.getLogger("uvicorn").setLevel(logging.CRITICAL) + logging.getLogger("uvicorn.access").setLevel(logging.CRITICAL) + + @property + def runs_in_docker(self) -> bool: + path = "/proc/self/cgroup" + return ( + os.path.exists("/.dockerenv") + or os.path.isfile(path) + and any("docker" in line for line in open(path)) + ) + + def init_blob_storage(self, config: BlobStorageConfig | None = None) -> None: + if config is None: + client_config = OnDiskBlobStorageClientConfig( + base_directory=self.get_temp_dir("blob") + ) + config_ = OnDiskBlobStorageConfig( + client_config=client_config, + min_blob_size=os.getenv("MIN_SIZE_BLOB_STORAGE_MB", 1), + ) + else: + config_ = config + self.blob_store_config = config_ + self.blob_storage_client = config_.client_type(config=config_.client_config) + + # relative + from ..store.blob_storage.seaweedfs import SeaweedFSConfig + + if isinstance(config, SeaweedFSConfig) and self.signing_key: + remote_profiles = self.services.blob_storage.remote_profile_stash.get_all( + credentials=self.signing_key.verify_key, has_permission=True + ).unwrap() + for remote_profile in remote_profiles: + self.blob_store_config.client_config.remote_profiles[ + remote_profile.profile_name + ] = remote_profile + + if self.dev_mode: + if isinstance(self.blob_store_config, OnDiskBlobStorageConfig): + logger.debug( + f"Using on-disk blob storage with path: " + f"{self.blob_store_config.client_config.base_directory}", + ) + logger.debug( + f"Minimum object size to be saved to the blob storage: " + f"{self.blob_store_config.min_blob_size} (MB)." 
+ ) + + def run_peer_health_checks(self, context: AuthedServiceContext) -> None: + self.peer_health_manager = PeerHealthCheckTask() + self.peer_health_manager.run(context=context) + + def stop(self) -> None: + if self.peer_health_manager is not None: + self.peer_health_manager.stop() + + for consumer_list in self.queue_manager.consumers.values(): + for c in consumer_list: + c.close() + for p in self.queue_manager.producers.values(): + p.close() + + self.queue_manager.producers.clear() + self.queue_manager.consumers.clear() + + ServerRegistry.remove_server(self.id) + + def close(self) -> None: + self.stop() + + def cleanup(self) -> None: + self.stop() + self.remove_temp_dir() + + def create_queue_config( + self, + n_consumers: int, + create_producer: bool, + consumer_type: ConsumerType, + queue_port: int | None, + queue_config: QueueConfig | None, + ) -> QueueConfig: + if queue_config: + queue_config_ = queue_config + elif queue_port is not None or n_consumers > 0 or create_producer: + if not create_producer and queue_port is None: + logger.warn("No queue port defined to bind consumers.") + + queue_config_ = ZMQQueueConfig( + client_config=ZMQClientConfig( + create_producer=create_producer, + queue_port=queue_port, + n_consumers=n_consumers, + ), + consumer_type=consumer_type, + ) + else: + queue_config_ = ZMQQueueConfig() + + return queue_config_ + + def init_queue_manager(self, queue_config: QueueConfig) -> None: + MessageHandlers = [APICallMessageHandler] + if self.is_subprocess: + return None + + self.queue_manager = QueueManager(config=queue_config) + for message_handler in MessageHandlers: + queue_name = message_handler.queue_name + # client config + if getattr(queue_config.client_config, "create_producer", True): + context = AuthedServiceContext( + server=self, + credentials=self.verify_key, + role=ServiceRole.ADMIN, + ) + producer: QueueProducer = self.queue_manager.create_producer( + queue_name=queue_name, + queue_stash=self.queue_stash, + context=context, + worker_stash=self.worker_stash, + ) + producer.run() + + address = producer.address + else: + port = queue_config.client_config.queue_port + if port is not None: + address = get_queue_address(port) + else: + address = None + + if address is None and queue_config.client_config.n_consumers > 0: + raise ValueError("address unknown for consumers") + + service_name = queue_config.client_config.consumer_service + + if not service_name: + # Create consumers for default worker pool + create_default_worker_pool(self) + else: + # Create consumer for given worker pool + syft_worker_uid = get_syft_worker_uid() + logger.info( + f"Running as consumer with uid={syft_worker_uid} service={service_name}" + ) + + if syft_worker_uid: + self.add_consumer_for_service( + service_name=service_name, + syft_worker_id=UID(syft_worker_uid), + address=address, + message_handler=message_handler, + ) + + if self.in_memory_workers: + self.start_in_memory_workers( + address=address, message_handler=message_handler + ) + + def start_in_memory_workers( + self, address: str, message_handler: type[AbstractMessageHandler] + ) -> None: + """Starts in-memory workers for the server.""" + + worker_pools = self.pool_stash.get_all(credentials=self.verify_key).unwrap() + for worker_pool in worker_pools: # type: ignore + # Skip the default worker pool + if worker_pool.name == DEFAULT_WORKER_POOL_NAME: + continue + + # Create consumers for each worker pool + for linked_worker in worker_pool.worker_list: + self.add_consumer_for_service( + service_name=worker_pool.name, + 
syft_worker_id=linked_worker.object_uid, + address=address, + message_handler=message_handler, + ) + + def add_consumer_for_service( + self, + service_name: str, + syft_worker_id: UID, + address: str, + message_handler: type[AbstractMessageHandler] = APICallMessageHandler, + ) -> None: + consumer: QueueConsumer = self.queue_manager.create_consumer( + message_handler, + address=address, + service_name=service_name, + worker_stash=self.worker_stash, # type: ignore + syft_worker_id=syft_worker_id, + ) + consumer.run() + + def remove_consumer_with_id(self, syft_worker_id: UID) -> None: + for consumers in self.queue_manager.consumers.values(): + # Grab the list of consumers for the given queue + consumer_to_pop = None + for consumer_idx, consumer in enumerate(consumers): + if consumer.syft_worker_id == syft_worker_id: + consumer.close() + consumer_to_pop = consumer_idx + break + if consumer_to_pop is not None: + consumers.pop(consumer_to_pop) + + @classmethod + def named( + cls: type[Server], + *, # Trasterisk + name: str, + processes: int = 0, + reset: bool = False, + server_type: str | ServerType = ServerType.DATASITE, + server_side_type: str | ServerSideType = ServerSideType.HIGH_SIDE, + deployment_type: str | DeploymentType = "remote", + enable_warnings: bool = False, + n_consumers: int = 0, + thread_workers: bool = False, + create_producer: bool = False, + queue_port: int | None = None, + dev_mode: bool = False, + migrate: bool = False, + in_memory_workers: bool = True, + association_request_auto_approval: bool = False, + background_tasks: bool = False, + consumer_type: ConsumerType | None = None, + db_url: str | None = None, + db_config: DBConfig | None = None, + log_level: int | None = None, + ) -> Server: + uid = get_named_server_uid(name) + name_hash = hashlib.sha256(name.encode("utf8")).digest() + key = SyftSigningKey(signing_key=SigningKey(name_hash)) + blob_storage_config = None + + server_type = ServerType(server_type) + server_side_type = ServerSideType(server_side_type) + + return cls( + name=name, + id=uid, + signing_key=key, + processes=processes, + server_type=server_type, + server_side_type=server_side_type, + deployment_type=deployment_type, + enable_warnings=enable_warnings, + blob_storage_config=blob_storage_config, + queue_port=queue_port, + n_consumers=n_consumers, + thread_workers=thread_workers, + create_producer=create_producer, + dev_mode=dev_mode, + migrate=migrate, + in_memory_workers=in_memory_workers, + reset=reset, + association_request_auto_approval=association_request_auto_approval, + background_tasks=background_tasks, + consumer_type=consumer_type, + db_url=db_url, + db_config=db_config, + log_level=log_level, + ) + + def is_root(self, credentials: SyftVerifyKey) -> bool: + return credentials == self.verify_key + + @property + def root_client(self) -> SyftClient: + # relative + from ..client.client import PythonConnection + + connection = PythonConnection(server=self) + client_type = connection.get_client_type().unwrap() + root_client = client_type(connection=connection, credentials=self.signing_key) + + if root_client.api.refresh_api_callback is not None: + root_client.api.refresh_api_callback() + + return root_client + + def _find_klasses_pending_for_migration( + self, object_types: list[SyftObject] + ) -> list[SyftObject]: + context = AuthedServiceContext( + server=self, + credentials=self.verify_key, + role=ServiceRole.ADMIN, + ) + migration_state_service = self.services.migration + + klasses_to_be_migrated = [] + + for object_type in object_types: + 
canonical_name = object_type.__canonical_name__ + object_version = object_type.__version__ + + try: + migration_state = migration_state_service.get_state( + context, canonical_name + ).unwrap() + if migration_state.current_version != migration_state.latest_version: + klasses_to_be_migrated.append(object_type) + except NotFoundException: + migration_state_service.register_migration_state( + context, + current_version=object_version, + canonical_name=canonical_name, + ) + + return klasses_to_be_migrated + + def find_and_migrate_data( + self, document_store_object_types: list[type[SyftObject]] | None = None + ) -> None: + context = AuthedServiceContext( + server=self, + credentials=self.verify_key, + role=ServiceRole.ADMIN, + ) + return self.services.migration.migrate_data( + context, document_store_object_types + ) + + @property + def guest_client(self) -> SyftClient: + return self.get_guest_client() + + @property + def current_protocol(self) -> str | int: + data_protocol = get_data_protocol() + return data_protocol.latest_version + + def get_guest_client(self, verbose: bool = True) -> SyftClient: + # relative + from ..client.client import PythonConnection + + connection = PythonConnection(server=self) + if verbose and self.server_side_type: + message: str = ( + f"Logged into <{self.name}: {self.server_side_type.value.capitalize()} " + ) + if self.server_type: + message += f"side {self.server_type.value.capitalize()} > as GUEST" + logger.debug(message) + + client_type = connection.get_client_type().unwrap() + + guest_client = client_type( + connection=connection, credentials=SyftSigningKey.generate() + ) + if guest_client.api.refresh_api_callback is not None: + guest_client.api.refresh_api_callback() + return guest_client + + def __repr__(self) -> str: + service_string = "" + if not self.is_subprocess: + services = [ + service.__class__.__name__ for service in self.initialized_services + ] + service_string = ", ".join(sorted(services)) + service_string = f"\n\nServices:\n{service_string}" + return f"{type(self).__name__}: {self.name} - {self.id} - {self.server_type}{service_string}" + + def post_init(self) -> None: + context = AuthedServiceContext( + server=self, credentials=self.verify_key, role=ServiceRole.ADMIN + ) + AuthServerContextRegistry.set_server_context( + server_uid=self.id, user_verify_key=self.verify_key, context=context + ) + + if "usercodeservice" in self.service_path_map: + self.services.user_code.load_user_code(context=context) + + def reload_user_code() -> None: + self.services.user_code.load_user_code(context=context) + + ti = thread_ident() + if ti is not None: + CODE_RELOADER[ti] = reload_user_code + + def init_stores(self, db_config: DBConfig) -> DBManager: + if isinstance(db_config, SQLiteDBConfig): + db = SQLiteDBManager( + config=db_config, + server_uid=self.id, + root_verify_key=self.verify_key, + ) + elif isinstance(db_config, PostgresDBConfig): + db = PostgresDBManager( # type: ignore + config=db_config, + server_uid=self.id, + root_verify_key=self.verify_key, + ) + else: + raise SyftException(public_message=f"Unsupported DB config: {db_config}") + + self.queue_stash = QueueStash(store=db) + + print(f"Using {db_config.__class__.__name__} and {db_config.connection_string}") + + return db + + @property + def job_stash(self) -> JobStash: + return self.services.job.stash + + @property + def output_stash(self) -> OutputStash: + return self.services.output.stash + + @property + def worker_stash(self) -> WorkerStash: + return self.services.worker.stash + + @property + 
def service_path_map(self) -> dict[str, AbstractService]: + return self.services.service_path_map + + @property + def initialized_services(self) -> list[AbstractService]: + return self.services.services + + def get_service_method(self, path_or_func: str | Callable) -> Callable: + if callable(path_or_func): + path_or_func = path_or_func.__qualname__ + return self._get_service_method_from_path(path_or_func) + + def get_service(self, path_or_func: str | Callable) -> AbstractService: + return self.services.get_service(path_or_func) + + @as_result(ValueError) + def get_stash(self, object_type: SyftT) -> ObjectStash[SyftT]: + if object_type not in self.services.stashes: + raise ValueError(f"Stash for {object_type} not found.") + return self.services.stashes[object_type] + + def _get_service_method_from_path(self, path: str) -> Callable: + path_list = path.split(".") + method_name = path_list.pop() + service_obj = self.services._get_service_from_path(path=path) + + return getattr(service_obj, method_name) + + def get_temp_dir(self, dir_name: str = "") -> Path: + """ + Get a temporary directory unique to the server. + Provide all dbs, blob dirs, and locks using this directory. + """ + return get_temp_dir_for_server(self.id, dir_name) + + def remove_temp_dir(self) -> None: + """ + Remove the temporary directory for this server. + """ + remove_temp_dir_for_server(self.id) + + def update_self(self, settings: ServerSettings) -> None: + updateable_attrs = ( + ServerSettingsUpdate.model_fields.keys() + - PartialSyftObject.model_fields.keys() + ) + for attr_name in updateable_attrs: + attr = getattr(settings, attr_name) + if attr is not Empty: + setattr(self, attr_name, attr) + + # NOTE: Some workflows currently expect the settings to be available, + # even though they might not be defined yet. Because of this, we need to check + # if the settings table is already defined. This function is basically a copy + # of the settings property but ignoring stash error in case settings doesn't exist yet. + # it should be removed once the settings are refactored and the inconsistencies between + # settings and services are resolved. 
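+    # Rough usage sketch (illustrative only, assuming a constructed Server
+    # instance `server` whose settings table may not exist yet):
+    #
+    #     maybe_settings = server.get_settings()   # ServerSettings | None, never raises
+    #     if maybe_settings is not None:
+    #         default_pool = maybe_settings.default_worker_pool
+    #     server.settings                           # property: raises SyftException if settings are missing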
+ def get_settings(self) -> ServerSettings | None: + if self._settings: + return self._settings # type: ignore + if self.signing_key is None: + raise ValueError(f"{self} has no signing key") + + settings_stash = self.services.settings.stash + + try: + settings = settings_stash.get_all(self.signing_key.verify_key).unwrap() + + if len(settings) > 0: + setting = settings[0] + self.update_self(setting) + self._settings = setting + return setting + else: + return None + + except SyftException: + return None + + @property + def settings(self) -> ServerSettings: + if self.signing_key is None: + raise ValueError(f"{self} has no signing key") + + settings_stash = self.services.settings.stash + error_msg = f"Cannot get server settings for '{self.name}'" + + all_settings = settings_stash.get_all(self.signing_key.verify_key).unwrap( + public_message=error_msg + ) + + if len(all_settings) == 0: + raise SyftException(public_message=error_msg) + + settings = all_settings[0] + self.update_self(settings) + return settings + + @property + def metadata(self) -> ServerMetadata: + show_warnings = self.enable_warnings + settings_data = self.settings + name = settings_data.name + organization = settings_data.organization + description = settings_data.description + show_warnings = settings_data.show_warnings + server_type = ( + settings_data.server_type.value if settings_data.server_type else "" + ) + server_side_type = ( + settings_data.server_side_type.value + if settings_data.server_side_type + else "" + ) + eager_execution_enabled = settings_data.eager_execution_enabled + + return ServerMetadata( + name=name, + id=self.id, + verify_key=self.verify_key, + highest_version=SYFT_OBJECT_VERSION_1, + lowest_version=SYFT_OBJECT_VERSION_1, + syft_version=__version__, + description=description, + organization=organization, + server_type=server_type, + server_side_type=server_side_type, + show_warnings=show_warnings, + eager_execution_enabled=eager_execution_enabled, + min_size_blob_storage_mb=self.blob_store_config.min_blob_size, + ) + + @property + def icon(self) -> str: + return "🦾" + + @property + def verify_key(self) -> SyftVerifyKey: + if self.signing_key is None: + raise ValueError(f"{self} has no signing key") + return self.signing_key.verify_key + + def __hash__(self) -> int: + return hash(self.id) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, type(self)): + return False + + if self.id != other.id: + return False + + return True + + def await_future(self, credentials: SyftVerifyKey, uid: UID) -> QueueItem: + # stdlib + + # relative + from ..service.queue.queue import Status + + while True: + result = self.queue_stash.pop_on_complete(credentials, uid).unwrap() + if result.status == Status.COMPLETED: + return result + sleep(0.1) + + @instrument + def resolve_future(self, credentials: SyftVerifyKey, uid: UID) -> QueueItem: + queue_obj = self.queue_stash.pop_on_complete(credentials, uid).unwrap() + queue_obj._set_obj_location_( + server_uid=self.id, + credentials=credentials, + ) + return queue_obj + + @instrument + def forward_message( + self, api_call: SyftAPICall | SignedSyftAPICall + ) -> Result | QueueItem | SyftObject | Any: + server_uid = api_call.message.server_uid + if "networkservice" not in self.service_path_map: + raise SyftException( + public_message=( + "Server has no network service so we can't " + f"forward this message to {server_uid}" + ) + ) + + client = None + peer = self.services.network.stash.get_by_uid( + self.verify_key, server_uid + ).unwrap() + + # Since we have 
several routes to a peer + # we need to cache the client for a given server_uid along with the route + peer_cache_key = hash(server_uid) + hash(peer.pick_highest_priority_route()) + if peer_cache_key in self.peer_client_cache: + client = self.peer_client_cache[peer_cache_key] + else: + context = AuthedServiceContext( + server=self, credentials=api_call.credentials + ) + + client = peer.client_with_context(context=context).unwrap( + public_message=f"Failed to create remote client for peer: {peer.id}" + ) + self.peer_client_cache[peer_cache_key] = client + + if client: + message: SyftAPICall = api_call.message + if message.path == "metadata": + result = client.metadata + elif message.path == "login": + result = client.connection.login(**message.kwargs) + elif message.path == "register": + result = client.connection.register(**message.kwargs) + elif message.path == "api": + result = client.connection.get_api(**message.kwargs) + else: + signed_result = client.connection.make_call(api_call) + result = debox_signed_syftapicall_response( + signed_result=signed_result + ).unwrap() + + # relative + from ..store.blob_storage import BlobRetrievalByURL + + if isinstance(result, BlobRetrievalByURL | SeaweedFSBlobDeposit): + result.proxy_server_uid = peer.id + + return result + + raise SyftException(public_message=(f"Server has no route to {server_uid}")) + + def get_role_for_credentials(self, credentials: SyftVerifyKey) -> ServiceRole: + return self.services.user.get_role_for_credentials( + credentials=credentials + ).unwrap() + + @instrument + def handle_api_call( + self, + api_call: SyftAPICall | SignedSyftAPICall, + job_id: UID | None = None, + check_call_location: bool = True, + ) -> SignedSyftAPICall: + # Get the result + result = self.handle_api_call_with_unsigned_result( + api_call, job_id=job_id, check_call_location=check_call_location + ) + # Sign the result + signed_result = SyftAPIData(data=result).sign(self.signing_key) + + return signed_result + + def handle_api_call_with_unsigned_result( + self, + api_call: SyftAPICall | SignedSyftAPICall, + job_id: UID | None = None, + check_call_location: bool = True, + ) -> Result | QueueItem | SyftObject | SyftError: + if self.required_signed_calls and isinstance(api_call, SyftAPICall): + raise SyftException( + public_message=f"You sent a {type(api_call)}. This server requires SignedSyftAPICall." + ) + else: + if not api_call.is_valid: + raise SyftException(public_message="Your message signature is invalid") + + if api_call.message.server_uid != self.id and check_call_location: + return self.forward_message(api_call=api_call) + + if api_call.message.path == "queue": + return self.resolve_future( + credentials=api_call.credentials, uid=api_call.message.kwargs["uid"] + ) + + if api_call.message.path == "metadata": + return self.metadata + + result = None + is_blocking = api_call.message.blocking + + credentials: SyftVerifyKey = api_call.credentials + role = self.get_role_for_credentials(credentials=credentials) + context = AuthedServiceContext( + server=self, + credentials=credentials, + role=role, + job_id=job_id, + is_blocking_api_call=is_blocking, + ) + + if is_blocking or self.is_subprocess: + api_call = api_call.message + + role = self.get_role_for_credentials(credentials=credentials) + settings = self.get_settings() + # TODO: This instance check should be removed once we can ensure that + # self.settings will always return a ServerSettings object. 
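+            # Illustration of the guard below (assumed values): when
+            # settings.allow_guest_sessions is False and the caller resolved to
+            # ServiceRole.GUEST, the blocking call is rejected here, before any
+            # service method is looked up or executed.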
+ if ( + settings is not None + and isinstance(settings, ServerSettings) + and not settings.allow_guest_sessions + and role == ServiceRole.GUEST + ): + raise SyftException( + public_message="Server doesn't allow guest sessions." + ) + context = AuthedServiceContext( + server=self, + credentials=credentials, + role=role, + job_id=job_id, + is_blocking_api_call=is_blocking, + ) + + AuthServerContextRegistry.set_server_context(self.id, context, credentials) + + user_config_registry = UserServiceConfigRegistry.from_role(role) + + if api_call.path not in user_config_registry: + if ServiceConfigRegistry.path_exists(api_call.path): + raise SyftException( + public_message=f"As a `{role}`, " + f"you have no access to: {api_call.path}" + ) + else: + raise SyftException( + public_message=f"API call not in registered services: {api_call.path}" + ) + + _private_api_path = user_config_registry.private_path_for(api_call.path) + method = self.get_service_method(_private_api_path) + try: + logger.info(f"API Call: {api_call}") + + result = method(context, *api_call.args, **api_call.kwargs) + + if isinstance(result, SyftError): + raise TypeError( + "Don't return a SyftError, raise SyftException instead" + ) + if not isinstance(result, SyftSuccess): + result = SyftSuccess(message="", value=result) + result.add_warnings_from_context(context) + tb = None + except Exception as e: + include_traceback = ( + self.dev_mode or role.value >= ServiceRole.DATA_OWNER.value + ) + result = SyftError.from_exception( + context=context, exc=e, include_traceback=include_traceback + ) + if not include_traceback: + # then at least log it server side + if isinstance(e, SyftException): + tb = e.get_tb(context, overwrite_permission=True) + else: + tb = traceback.format_exc() + logger.debug( + f"Exception (hidden from DS) happened on the server side:\n{tb}" + ) + else: + try: + return self.add_api_call_to_queue(api_call) + except SyftException as e: + return SyftError.from_exception(context=context, exc=e) + except Exception: + result = SyftError( + message=f"Exception calling {api_call.path}. 
{traceback.format_exc()}" + ) + tb = traceback.format_exc() + if ( + isinstance(result, SyftError) + and role.value < ServiceRole.DATA_OWNER.value + ): + print(f"Exception (hidden from DS) happened on the server side:\n{tb}") + return result + + def add_api_endpoint_execution_to_queue( + self, + credentials: SyftVerifyKey, + method: str, + path: str, + log_id: UID, + *args: Any, + worker_pool_name: str | None = None, + **kwargs: Any, + ) -> Job: + job_id = UID() + task_uid = UID() + worker_settings = WorkerSettings.from_server(server=self) + + if worker_pool_name is None: + worker_pool_name = self.get_default_worker_pool().unwrap() + else: + worker_pool_name = self.get_worker_pool_by_name(worker_pool_name).unwrap() + + # Create a Worker pool reference object + worker_pool_ref = LinkedObject.from_obj( + worker_pool_name, + service_type=SyftWorkerPoolService, + server_uid=self.id, + ) + queue_item = APIEndpointQueueItem( + id=task_uid, + method=method, + server_uid=self.id, + syft_client_verify_key=credentials, + syft_server_location=self.id, + job_id=job_id, + worker_settings=worker_settings, + args=args, + kwargs={"path": path, "log_id": log_id, **kwargs}, + has_execute_permissions=True, + worker_pool=worker_pool_ref, # set worker pool reference as part of queue item + ) + + action = Action.from_api_endpoint_execution() + return self.add_queueitem_to_queue( + queue_item=queue_item, + credentials=credentials, + action=action, + job_type=JobType.TWINAPIJOB, + ).unwrap() + + def get_worker_pool_ref_by_name( + self, credentials: SyftVerifyKey, worker_pool_name: str | None = None + ) -> LinkedObject: + # If worker pool id is not set, then use default worker pool + # Else, get the worker pool for given uid + if worker_pool_name is None: + worker_pool = self.get_default_worker_pool().unwrap() + else: + worker_pool = self.pool_stash.get_by_name( + credentials, worker_pool_name + ).unwrap() + + # Create a Worker pool reference object + worker_pool_ref = LinkedObject.from_obj( + worker_pool, + service_type=SyftWorkerPoolService, + server_uid=self.id, + ) + return worker_pool_ref + + @instrument + @as_result(SyftException) + def add_action_to_queue( + self, + action: Action, + credentials: SyftVerifyKey, + parent_job_id: UID | None = None, + has_execute_permissions: bool = False, + worker_pool_name: str | None = None, + ) -> Job: + job_id = UID() + task_uid = UID() + worker_settings = WorkerSettings.from_server(server=self) + + # Extract worker pool id from user code + if action.user_code_id is not None: + user_code = self.user_code_stash.get_by_uid( + credentials=credentials, uid=action.user_code_id + ).unwrap() + if user_code is not None: + worker_pool_name = user_code.worker_pool_name + + worker_pool_ref = self.get_worker_pool_ref_by_name( + credentials, worker_pool_name + ) + queue_item = ActionQueueItem( + id=task_uid, + server_uid=self.id, + syft_client_verify_key=credentials, + syft_server_location=self.id, + job_id=job_id, + worker_settings=worker_settings, + args=[], + kwargs={"action": action}, + has_execute_permissions=has_execute_permissions, + worker_pool=worker_pool_ref, # set worker pool reference as part of queue item + ) + user_id = self.services.user.get_user_id_for_credentials(credentials).unwrap() + + return self.add_queueitem_to_queue( + queue_item=queue_item, + credentials=credentials, + action=action, + parent_job_id=parent_job_id, + user_id=user_id, + ).unwrap() + + @instrument + @as_result(SyftException) + def add_queueitem_to_queue( + self, + *, + queue_item: QueueItem, + 
credentials: SyftVerifyKey, + action: Action | None = None, + parent_job_id: UID | None = None, + user_id: UID | None = None, + log_id: UID | None = None, + job_type: JobType = JobType.JOB, + ) -> Job: + if log_id is None: + log_id = UID() + role = self.get_role_for_credentials(credentials=credentials) + context = AuthedServiceContext(server=self, credentials=credentials, role=role) + + result_obj = ActionObject.empty() + if action is not None: + result_obj = ActionObject.obj_not_ready( + id=action.result_id, + syft_server_location=self.id, + syft_client_verify_key=credentials, + ) + result_obj.id = action.result_id + result_obj.syft_resolved = False + result_obj.syft_server_location = self.id + result_obj.syft_client_verify_key = credentials + + if not self.services.action.stash.exists( + credentials=credentials, uid=action.result_id + ): + self.services.action.set_result_to_store( + result_action_object=result_obj, + context=context, + ).unwrap() + + job = Job( + id=queue_item.job_id, + result=result_obj, + server_uid=self.id, + syft_client_verify_key=credentials, + syft_server_location=self.id, + log_id=log_id, + parent_job_id=parent_job_id, + action=action, + requested_by=user_id, + job_type=job_type, + endpoint=queue_item.kwargs.get("path", None), + ) + + # 🟡 TODO 36: Needs distributed lock + self.job_stash.set(credentials, job).unwrap() + self.queue_stash.set_placeholder(credentials, queue_item).unwrap() + + self.services.log.add(context, log_id, queue_item.job_id) + + return job + + def _sort_jobs(self, jobs: list[Job]) -> list[Job]: + job_datetimes = {} + for job in jobs: + try: + d = datetime.strptime(job.creation_time, DATETIME_FORMAT) + except Exception: + d = datetime(MINYEAR, 1, 1) + job_datetimes[job.id] = d + + jobs.sort( + key=lambda job: (job.status != JobStatus.COMPLETED, job_datetimes[job.id]), + reverse=True, + ) + + return jobs + + @as_result(SyftException) + def _get_existing_user_code_jobs( + self, context: AuthedServiceContext, user_code_id: UID + ) -> list[Job]: + jobs = self.services.job.get_by_user_code_id( + context=context, user_code_id=user_code_id + ) + return self._sort_jobs(jobs) + + def _is_usercode_call_on_owned_kwargs( + self, + context: AuthedServiceContext, + api_call: SyftAPICall, + user_code_id: UID, + ) -> bool: + if api_call.path != "code.call": + return False + return self.services.user_code.is_execution_on_owned_args( + context, user_code_id, api_call.kwargs + ) + + @instrument + def add_api_call_to_queue( + self, api_call: SyftAPICall, parent_job_id: UID | None = None + ) -> SyftSuccess: + unsigned_call = api_call + if isinstance(api_call, SignedSyftAPICall): + unsigned_call = api_call.message + + credentials = api_call.credentials + context = AuthedServiceContext( + server=self, + credentials=credentials, + role=self.get_role_for_credentials(credentials=credentials), + ) + + is_user_code = unsigned_call.path == "code.call" + + service_str, method_str = unsigned_call.path.split(".") + + action = None + if is_user_code: + action = Action.from_api_call(unsigned_call) + user_code_id = action.user_code_id + + user = self.services.user.get_current_user(context) + user = cast(UserView, user) + + is_execution_on_owned_kwargs_allowed = ( + user.mock_execution_permission or context.role == ServiceRole.ADMIN + ) + is_usercode_call_on_owned_kwargs = self._is_usercode_call_on_owned_kwargs( + context, unsigned_call, user_code_id + ) + # Low side does not execute jobs, unless this is a mock execution + if ( + not is_usercode_call_on_owned_kwargs + and 
self.server_side_type == ServerSideType.LOW_SIDE + ): + try: + existing_jobs = self._get_existing_user_code_jobs( + context, user_code_id + ).unwrap() + + if len(existing_jobs) > 0: + # relative + from ..util.util import prompt_warning_message + + prompt_warning_message( + "There are existing jobs for this user code, returning the latest one" + ) + return SyftSuccess( + message="Found existing jobs for this user code, returning the latest one", + value=existing_jobs[-1], + ) + else: + raise SyftException( + public_message="Please wait for the admin to allow the execution of this code" + ) + except Exception as e: + raise SyftException.from_exception(e) + elif ( + is_usercode_call_on_owned_kwargs + and not is_execution_on_owned_kwargs_allowed + ): + raise SyftException( + public_message="You do not have permission for mock execution, please contact the admin" + ) + + job = self.add_action_to_queue( + action, api_call.credentials, parent_job_id=parent_job_id + ).unwrap() + + return SyftSuccess(message="Successfully queued job", value=job) + + else: + worker_settings = WorkerSettings.from_server(server=self) + worker_pool_ref = self.get_worker_pool_ref_by_name(credentials=credentials) + queue_item = QueueItem( + id=UID(), + server_uid=self.id, + syft_client_verify_key=api_call.credentials, + syft_server_location=self.id, + job_id=UID(), + worker_settings=worker_settings, + service=service_str, + method=method_str, + args=unsigned_call.args, + kwargs=unsigned_call.kwargs, + worker_pool=worker_pool_ref, + ) + return self.add_queueitem_to_queue( + queue_item=queue_item, + credentials=api_call.credentials, + action=None, + parent_job_id=parent_job_id, + ).unwrap() + + @property + def pool_stash(self) -> SyftWorkerPoolStash: + return self.services.syft_worker_pool.stash + + @property + def user_code_stash(self) -> UserCodeStash: + return self.services.user_code.stash + + @as_result(NotFoundException) + def get_default_worker_pool(self) -> WorkerPool | None: + return self.pool_stash.get_by_name( + credentials=self.verify_key, + pool_name=self.settings.default_worker_pool, + ).unwrap() + + @as_result(NotFoundException) + def get_worker_pool_by_name(self, name: str) -> WorkerPool: + return self.pool_stash.get_by_name( + credentials=self.verify_key, pool_name=name + ).unwrap() + + @instrument + def get_api( + self, + for_user: SyftVerifyKey | None = None, + communication_protocol: PROTOCOL_TYPE | None = None, + ) -> SyftAPI: + return SyftAPI.for_user( + server=self, + user_verify_key=for_user, + communication_protocol=communication_protocol, + ) + + def get_method_with_context( + self, function: Callable, context: ServerServiceContext + ) -> Callable: + method = self.get_service_method(function) + return partial(method, context=context) + + def get_unauthed_context( + self, login_credentials: UserLoginCredentials + ) -> ServerServiceContext: + return UnauthedServiceContext(server=self, login_credentials=login_credentials) + + @as_result(SyftException, StashException) + def create_initial_settings(self, admin_email: str) -> ServerSettings: + settings_stash = self.services.settings.stash + + if self.signing_key is None: + logger.debug("create_initial_settings failed as there is no signing key") + raise SyftException( + public_message="create_initial_settings failed as there is no signing key" + ) + + settings_exists = settings_stash.get_all(self.signing_key.verify_key).unwrap() + + if settings_exists: + server_settings = settings_exists[0] + if server_settings.__version__ != ServerSettings.__version__: + context = Context()
+ server_settings = server_settings.migrate_to( + ServerSettings.__version__, context + ) + settings_stash.delete_by_uid( + self.signing_key.verify_key, server_settings.id + ).unwrap() + settings_stash.set( + self.signing_key.verify_key, server_settings + ).unwrap() + self.name = server_settings.name + self.association_request_auto_approval = ( + server_settings.association_request_auto_approval + ) + return server_settings + else: + # Currently we allow automatic user registration on enclaves, + # as enclaves do not have superusers + if self.server_type == ServerType.ENCLAVE: + flags.CAN_REGISTER = True + + new_settings = ServerSettings( + id=self.id, + name=self.name, + verify_key=self.verify_key, + server_type=self.server_type, + deployed_on=datetime.now().date().strftime("%m/%d/%Y"), + signup_enabled=flags.CAN_REGISTER, + admin_email=admin_email, + server_side_type=self.server_side_type.value, # type: ignore + show_warnings=self.enable_warnings, + association_request_auto_approval=self.association_request_auto_approval, + default_worker_pool=get_default_worker_pool_name(), + notifications_enabled=False, + ) + + return settings_stash.set( + credentials=self.signing_key.verify_key, obj=new_settings + ).unwrap() + + +class ServerRegistry: + __server_registry__: dict[UID, Server] = {} + + @classmethod + def set_server_for( + cls, + server_uid: UID | str, + server: Server, + ) -> None: + if isinstance(server_uid, str): + server_uid = UID.from_string(server_uid) + + cls.__server_registry__[server_uid] = server + + @classmethod + def server_for(cls, server_uid: UID) -> Server: + return cls.__server_registry__.get(server_uid, None) + + @classmethod + def get_all_servers(cls) -> list[Server]: + return list(cls.__server_registry__.values()) + + @classmethod + def remove_server(cls, server_uid: UID) -> None: + if server_uid in cls.__server_registry__: + del cls.__server_registry__[server_uid] + + +def get_default_worker_tag_by_env(dev_mode: bool = False) -> str | None: + if in_kubernetes(): + return get_default_worker_image() + elif dev_mode: + return "local-dev" + else: + return __version__ + + +def create_default_worker_pool(server: Server) -> None: + credentials = server.verify_key + pull_image = not server.dev_mode + image_stash = server.services.syft_worker_image.stash + default_pool_name = server.settings.default_worker_pool + + try: + default_worker_pool = server.get_default_worker_pool().unwrap( + public_message="Failed to get default worker pool" + ) + except SyftException: + default_worker_pool = None + + default_worker_tag = get_default_worker_tag_by_env(server.dev_mode) + default_worker_pool_pod_annotations = get_default_worker_pool_pod_annotations() + default_worker_pool_pod_labels = get_default_worker_pool_pod_labels() + worker_count = get_default_worker_pool_count(server) + context = AuthedServiceContext( + server=server, + credentials=credentials, + role=ServiceRole.ADMIN, + ) + + logger.info(f"Creating default worker image with tag='{default_worker_tag}'. ") + # Get/Create a default worker SyftWorkerImage + # TODO: MERGE: Unwrap without public message? + default_image = create_default_image( + credentials=credentials, + image_stash=image_stash, + tag=default_worker_tag, + in_kubernetes=in_kubernetes(), + ).unwrap(public_message="Failed to create default worker image") + + if not default_image.is_built: + logger.info(f"Building default worker image with tag={default_worker_tag}. 
") + # Build the Image for given tag + result = server.services.worker_image.build( + context, + image_uid=default_image.id, + tag=DEFAULT_WORKER_IMAGE_TAG, + pull_image=pull_image, + ) + + # Create worker pool if it doesn't exists + logger.info( + "Setting up worker pool" + f"name={default_pool_name} " + f"workers={worker_count} " + f"image_uid={default_image.id} " + f"in_memory={server.in_memory_workers}. " + ) + if default_worker_pool is None: + worker_to_add_ = worker_count + result = server.services.syft_worker_pool.launch( + context, + pool_name=default_pool_name, + image_uid=default_image.id, + num_workers=worker_count, + pod_annotations=default_worker_pool_pod_annotations, + pod_labels=default_worker_pool_pod_labels, + ) + else: + # Else add a worker to existing worker pool + worker_to_add_ = max(default_worker_pool.max_count, worker_count) - len( + default_worker_pool.worker_list + ) + if worker_to_add_ > 0: + result = server.services.syft_worker_pool.add_workers( + context=context, + number=worker_to_add_, + pool_name=default_pool_name, + ) + else: + return None + + for n in range(worker_to_add_): + container_status = result[n] + if container_status.error: + logger.error( + f"Failed to create container: Worker: {container_status.worker}," + f"Error: {container_status.error}" + ) + return None + + logger.info("Created default worker pool.") + return None diff --git a/packages/syft/src/syft/server/service_registry.py b/packages/syft/src/syft/server/service_registry.py new file mode 100644 index 00000000000..dfb7f331972 --- /dev/null +++ b/packages/syft/src/syft/server/service_registry.py @@ -0,0 +1,144 @@ +# stdlib +from collections.abc import Callable +from dataclasses import dataclass +from dataclasses import field +import typing +from typing import TYPE_CHECKING +from typing import TypeVar + +# relative +from ..serde.serializable import serializable +from ..service.action.action_service import ActionService +from ..service.api.api_service import APIService +from ..service.attestation.attestation_service import AttestationService +from ..service.blob_storage.remote_profile import RemoteProfileService +from ..service.blob_storage.service import BlobStorageService +from ..service.code.status_service import UserCodeStatusService +from ..service.code.user_code_service import UserCodeService +from ..service.code_history.code_history_service import CodeHistoryService +from ..service.data_subject.data_subject_member_service import DataSubjectMemberService +from ..service.data_subject.data_subject_service import DataSubjectService +from ..service.dataset.dataset_service import DatasetService +from ..service.enclave.enclave_service import EnclaveService +from ..service.job.job_service import JobService +from ..service.log.log_service import LogService +from ..service.metadata.metadata_service import MetadataService +from ..service.migration.migration_service import MigrationService +from ..service.network.network_service import NetworkService +from ..service.notification.notification_service import NotificationService +from ..service.notifier.notifier_service import NotifierService +from ..service.output.output_service import OutputService +from ..service.policy.policy_service import PolicyService +from ..service.project.project_service import ProjectService +from ..service.queue.queue_service import QueueService +from ..service.request.request_service import RequestService +from ..service.service import AbstractService +from ..service.settings.settings_service import SettingsService 
+from ..service.sync.sync_service import SyncService +from ..service.user.user_service import UserService +from ..service.worker.image_registry_service import SyftImageRegistryService +from ..service.worker.worker_image_service import SyftWorkerImageService +from ..service.worker.worker_pool_service import SyftWorkerPoolService +from ..service.worker.worker_service import WorkerService +from ..store.db.stash import ObjectStash +from ..types.syft_object import SyftObject + +if TYPE_CHECKING: + # relative + from .server import Server + + +StashT = TypeVar("StashT", bound=SyftObject) + + +@serializable(canonical_name="ServiceRegistry", version=1) +@dataclass +class ServiceRegistry: + action: ActionService + user: UserService + attestation: AttestationService + worker: WorkerService + settings: SettingsService + dataset: DatasetService + user_code: UserCodeService + log: LogService + request: RequestService + queue: QueueService + job: JobService + api: APIService + data_subject: DataSubjectService + network: NetworkService + policy: PolicyService + notifier: NotifierService + notification: NotificationService + data_subject_member: DataSubjectMemberService + project: ProjectService + enclave: EnclaveService + code_history: CodeHistoryService + metadata: MetadataService + blob_storage: BlobStorageService + migration: MigrationService + syft_worker_image: SyftWorkerImageService + syft_worker_pool: SyftWorkerPoolService + syft_image_registry: SyftImageRegistryService + sync: SyncService + output: OutputService + user_code_status: UserCodeStatusService + remote_profile: RemoteProfileService + + services: list[AbstractService] = field(default_factory=list, init=False) + service_path_map: dict[str, AbstractService] = field( + default_factory=dict, init=False + ) + stashes: dict[StashT, ObjectStash[StashT]] = field(default_factory=dict, init=False) + + @classmethod + def for_server(cls, server: "Server") -> "ServiceRegistry": + return cls(**cls._construct_services(server)) + + def __post_init__(self) -> None: + for name, service_cls in self.get_service_classes().items(): + service = getattr(self, name) + self.services.append(service) + self.service_path_map[service_cls.__name__.lower()] = service + + # TODO ActionService now has same stash, but interface is still different. Fix this. 
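`ServiceRegistry` wires itself up from its own dataclass annotations: `get_service_classes` (continued below) filters `typing.get_type_hints` down to `AbstractService` subclasses, `_construct_services` instantiates one of each against the server's store, and `__post_init__` indexes them by lower-cased class name. A stripped-down sketch of that introspection pattern, using made-up `UserService`/`JobService` classes and no store argument:

```python
import typing
from dataclasses import dataclass, field


class AbstractService:  # minimal stand-in for the real base class
    pass


class UserService(AbstractService):
    pass


class JobService(AbstractService):
    pass


@dataclass
class Registry:
    user: UserService
    job: JobService
    path_map: dict[str, AbstractService] = field(default_factory=dict, init=False)

    def __post_init__(self) -> None:
        # index each declared service by its lower-cased class name
        for name, cls in self.service_classes().items():
            self.path_map[cls.__name__.lower()] = getattr(self, name)

    @classmethod
    def service_classes(cls) -> dict[str, type[AbstractService]]:
        return {
            name: hint
            for name, hint in typing.get_type_hints(cls).items()
            if isinstance(hint, type) and issubclass(hint, AbstractService)
        }

    @classmethod
    def build(cls) -> "Registry":
        # construct one instance of every annotated service, then let __post_init__ index them
        return cls(**{name: hint() for name, hint in cls.service_classes().items()})


registry = Registry.build()
print(registry.path_map["userservice"])  # -> the UserService instance
```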
+ if hasattr(service, "stash") and not issubclass(service_cls, ActionService): + stash: ObjectStash = service.stash + self.stashes[stash.object_type] = stash + + @classmethod + def get_service_classes( + cls, + ) -> dict[str, type[AbstractService]]: + return { + name: cls + for name, cls in typing.get_type_hints(cls).items() + if issubclass(cls, AbstractService) + } + + @classmethod + def _construct_services(cls, server: "Server") -> dict[str, AbstractService]: + service_dict = {} + for field_name, service_cls in cls.get_service_classes().items(): + service = service_cls(store=server.db) # type: ignore + service_dict[field_name] = service + return service_dict + + def get_service(self, path_or_func: str | Callable) -> AbstractService: + if callable(path_or_func): + path_or_func = path_or_func.__qualname__ + return self._get_service_from_path(path_or_func) + + def _get_service_from_path(self, path: str) -> AbstractService: + try: + path_list = path.split(".") + if len(path_list) > 1: + _ = path_list.pop() + service_name = path_list.pop() + return self.service_path_map[service_name.lower()] + except KeyError: + raise ValueError(f"Service {path} not found.") + + def __iter__(self) -> typing.Iterator[AbstractService]: + return iter(self.services) diff --git a/packages/syft/src/syft/server/utils.py b/packages/syft/src/syft/server/utils.py new file mode 100644 index 00000000000..ac7425b143b --- /dev/null +++ b/packages/syft/src/syft/server/utils.py @@ -0,0 +1,38 @@ +# future +from __future__ import annotations + +# stdlib +import os +from pathlib import Path +import shutil +import tempfile + +# relative +from ..types.uid import UID + + +def get_named_server_uid(name: str) -> UID: + """ + Get a unique identifier for a named server. + """ + return UID.with_seed(name) + + +def get_temp_dir_for_server(server_uid: UID, dir_name: str = "") -> Path: + """ + Get a temporary directory unique to the server. + Provide all dbs, blob dirs, and locks using this directory. + """ + root = os.getenv("SYFT_TEMP_ROOT", "syft") + p = Path(tempfile.gettempdir(), root, str(server_uid), dir_name) + p.mkdir(parents=True, exist_ok=True) + return p + + +def remove_temp_dir_for_server(server_uid: UID) -> None: + """ + Remove the temporary directory for this server. 
+ """ + rootdir = get_temp_dir_for_server(server_uid) + if rootdir.exists(): + shutil.rmtree(rootdir, ignore_errors=True) diff --git a/packages/syft/src/syft/server/uvicorn.py b/packages/syft/src/syft/server/uvicorn.py new file mode 100644 index 00000000000..635ef61c9a1 --- /dev/null +++ b/packages/syft/src/syft/server/uvicorn.py @@ -0,0 +1,372 @@ +# stdlib +from collections.abc import Callable +from contextlib import asynccontextmanager +import json +import logging +import multiprocessing +import multiprocessing.synchronize +import os +from pathlib import Path +import platform +import signal +import subprocess # nosec +import sys +import time +from typing import Any + +# third party +from fastapi import APIRouter +from fastapi import FastAPI +from pydantic_settings import BaseSettings +from pydantic_settings import SettingsConfigDict +import requests +from starlette.middleware.cors import CORSMiddleware +import uvicorn + +# relative +from ..abstract_server import ServerSideType +from ..client.client import API_PATH +from ..deployment_type import DeploymentType +from ..store.db.db import DBConfig +from ..util.autoreload import enable_autoreload +from ..util.constants import DEFAULT_TIMEOUT +from ..util.telemetry import instrument_fastapi +from ..util.util import os_name +from .datasite import Datasite +from .enclave import Enclave +from .gateway import Gateway +from .routes import make_routes +from .server import Server +from .server import ServerType +from .utils import get_named_server_uid +from .utils import remove_temp_dir_for_server + +if os_name() == "macOS": + # needed on MacOS to prevent [__NSCFConstantString initialize] may have been in + # progress in another thread when fork() was called. + multiprocessing.set_start_method("spawn", True) + +WAIT_TIME_SECONDS = 20 + + +logger = logging.getLogger("uvicorn") + + +class AppSettings(BaseSettings): + name: str + server_type: ServerType = ServerType.DATASITE + server_side_type: ServerSideType = ServerSideType.HIGH_SIDE + deployment_type: DeploymentType = DeploymentType.REMOTE + processes: int = 1 + reset: bool = False + dev_mode: bool = False + enable_warnings: bool = False + in_memory_workers: bool = True + queue_port: int | None = None + create_producer: bool = False + n_consumers: int = 0 + association_request_auto_approval: bool = False + background_tasks: bool = False + db_config: DBConfig | None = None + db_url: str | None = None + + model_config = SettingsConfigDict(env_prefix="SYFT_", env_parse_none_str="None") + + +def get_lifetime(worker: Server) -> Callable: + @asynccontextmanager + async def lifespan(app: FastAPI) -> Any: + try: + yield + finally: + worker.stop() + + return lifespan + + +def app_factory() -> FastAPI: + settings = AppSettings() + + worker_classes = { + ServerType.DATASITE: Datasite, + ServerType.GATEWAY: Gateway, + ServerType.ENCLAVE: Enclave, + } + if settings.server_type not in worker_classes: + raise NotImplementedError( + f"server_type: {settings.server_type} is not supported" + ) + worker_class = worker_classes[settings.server_type] + + kwargs = settings.model_dump() + + logger.info( + f"Starting server with settings: {kwargs} and worker class: {worker_class}" + ) + if settings.dev_mode: + print( + f"WARN: private key is based on server name: {settings.name} in dev_mode. " + "Don't run this in production." 
+ ) + worker = worker_class.named(**kwargs) + else: + worker = worker_class(**kwargs) + + worker_lifespan = get_lifetime(worker=worker) + + app = FastAPI(title=settings.name, lifespan=worker_lifespan) + router = make_routes(worker=worker) + api_router = APIRouter() + api_router.include_router(router) + app.include_router(api_router, prefix="/api/v2") + app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + instrument_fastapi(app) + return app + + +def attach_debugger() -> None: + # third party + import debugpy + + os.environ["PYDEVD_DISABLE_FILE_VALIDATION"] = "1" + _, debug_port = debugpy.listen(0) + print( + "\nStarting the server with the Python Debugger enabled (`debug=True`).\n" + 'To attach the debugger, open the command palette in VSCode and select "Debug: Start Debugging (F5)".\n' + f"Then, enter `{debug_port}` in the port field and press Enter.\n", + flush=True, + ) + print(f"Waiting for debugger to attach on port `{debug_port}`...", flush=True) + debugpy.wait_for_client() # blocks execution until a remote debugger is attached + print("Debugger attached", flush=True) + + +def run_uvicorn( + host: str, + port: int, + starting_uvicorn_event: multiprocessing.synchronize.Event, + **kwargs: Any, +) -> None: + log_level = kwargs.get("log_level") + dev_mode = kwargs.get("dev_mode") + should_reset = dev_mode and kwargs.get("reset") + + if should_reset: + print("Found `reset=True` in the launch configuration. Resetting the server...") + named_server_uid = get_named_server_uid(kwargs.get("name")) + remove_temp_dir_for_server(named_server_uid) + # Explicitly set `reset` to False to prevent multiple resets during hot-reload + kwargs["reset"] = False + # Kill all old python processes + try: + python_pids = find_python_processes_on_port(port) + for pid in python_pids: + print(f"Stopping process on port: {port}") + kill_process(pid) + time.sleep(1) + except Exception: # nosec + print(f"Failed to kill python process on port: {port}") + + if kwargs.get("debug"): + attach_debugger() + + # Set up all kwargs as environment variables so that they can be accessed in the app_factory function. + env_prefix = AppSettings.model_config.get("env_prefix", "") + for key, value in kwargs.items(): + key_with_prefix = f"{env_prefix}{key.upper()}" + if isinstance(value, dict): + value = json.dumps(value) + os.environ[key_with_prefix] = str(value) + + # The `serve_server` function calls `run_uvicorn` in a separate process using `multiprocessing.Process`. + # When the child process is created, it inherits the file descriptors from the parent process. + # If the parent process has a file descriptor open for sys.stdin, the child process will also have a file descriptor + # open for sys.stdin. This can cause an OSError in uvicorn when it tries to access sys.stdin in the child process. + # To prevent this, we set sys.stdin to None in the child process. This is safe because we don't actually need + # sys.stdin while running uvicorn programmatically. + sys.stdin = None # type: ignore + + # Signal the parent process that we are starting the uvicorn server. + starting_uvicorn_event.set() + + # Finally, run the uvicorn server. 
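`run_uvicorn` mirrors every launch kwarg into a `SYFT_`-prefixed environment variable so that `app_factory`, which uvicorn imports in a separate process (the `uvicorn.run` call follows directly below), can rebuild the same configuration through pydantic's `BaseSettings`. A minimal sketch of that env-var handshake, assuming `pydantic-settings` is installed and using a trimmed-down, hypothetical settings class:

```python
import json
import os

from pydantic_settings import BaseSettings, SettingsConfigDict


class DemoAppSettings(BaseSettings):
    # reads SYFT_NAME, SYFT_DEV_MODE, ... from the environment
    name: str = "unnamed"
    dev_mode: bool = False

    model_config = SettingsConfigDict(env_prefix="SYFT_")


def export_kwargs(**kwargs) -> None:
    """Mirror each launch kwarg into a SYFT_-prefixed environment variable."""
    for key, value in kwargs.items():
        if isinstance(value, dict):
            value = json.dumps(value)
        os.environ[f"SYFT_{key.upper()}"] = str(value)


export_kwargs(name="test-datasite", dev_mode=True)
print(DemoAppSettings())  # name='test-datasite' dev_mode=True, rebuilt purely from the environment
```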
+ uvicorn.run( + "syft.server.uvicorn:app_factory", + host=host, + port=port, + factory=True, + reload=dev_mode, + reload_dirs=[Path(__file__).parent.parent] if dev_mode else None, + log_level=log_level, + ) + + +def serve_server( + name: str, + server_type: ServerType = ServerType.DATASITE, + server_side_type: ServerSideType = ServerSideType.HIGH_SIDE, + deployment_type: DeploymentType = DeploymentType.REMOTE, + host: str = "0.0.0.0", # nosec + port: int = 8080, + processes: int = 1, + reset: bool = False, + dev_mode: bool = False, + tail: bool = False, + enable_warnings: bool = False, + in_memory_workers: bool = True, + log_level: str | int | None = None, + queue_port: int | None = None, + create_producer: bool = False, + n_consumers: int = 0, + association_request_auto_approval: bool = False, + background_tasks: bool = False, + debug: bool = False, + db_url: str | None = None, +) -> tuple[Callable, Callable]: + starting_uvicorn_event = multiprocessing.Event() + + # Enable IPython autoreload if dev_mode is enabled. + if dev_mode: + enable_autoreload() + + server_process = multiprocessing.Process( + target=run_uvicorn, + kwargs={ + "name": name, + "server_type": server_type, + "host": host, + "port": port, + "processes": processes, + "reset": reset, + "dev_mode": dev_mode, + "server_side_type": server_side_type, + "enable_warnings": enable_warnings, + "in_memory_workers": in_memory_workers, + "log_level": log_level, + "queue_port": queue_port, + "create_producer": create_producer, + "n_consumers": n_consumers, + "association_request_auto_approval": association_request_auto_approval, + "background_tasks": background_tasks, + "debug": debug, + "starting_uvicorn_event": starting_uvicorn_event, + "deployment_type": deployment_type, + "db_url": db_url, + }, + ) + + def stop() -> None: + print(f"Stopping {name}") + server_process.terminate() + server_process.join(3) + if server_process.is_alive(): + # this is needed because often the process is still alive + server_process.kill() + print("killed") + + def start() -> None: + print(f"Starting {name} server on {host}:{port}") + server_process.start() + + # Wait for the child process to start uvicorn server before starting the readiness checks. 
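`serve_server` returns `start`/`stop` closures over a `multiprocessing.Process` and uses an `Event` so the parent only begins its readiness polling (the `wait()` call directly below) after the child has reached the uvicorn launch. A condensed, standalone sketch of that handshake, with a dummy child target standing in for uvicorn:

```python
import multiprocessing
import time


def child(started) -> None:
    started.set()   # signal the parent before entering the long-running server loop
    time.sleep(60)  # stand-in for uvicorn.run(...)


def serve() -> tuple:
    started = multiprocessing.Event()
    proc = multiprocessing.Process(target=child, args=(started,), daemon=True)

    def start() -> None:
        proc.start()
        started.wait()  # resume only once the child is up, then begin readiness checks
        print("child started, polling for readiness...")

    def stop() -> None:
        proc.terminate()
        proc.join(3)
        if proc.is_alive():
            proc.kill()  # hard kill as a last resort

    return start, stop


if __name__ == "__main__":
    start, stop = serve()
    start()
    stop()
```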
+ starting_uvicorn_event.wait() + + if tail: + try: + while True: + time.sleep(1) + except KeyboardInterrupt: + try: + stop() + except SystemExit: + os._exit(130) + else: + for i in range(WAIT_TIME_SECONDS): + try: + req = requests.get( + f"http://{host}:{port}{API_PATH}/metadata", + timeout=DEFAULT_TIMEOUT, + ) + if req.status_code == 200: + print(" Done.") + break + except Exception: + time.sleep(1) + if i == 0: + print("Waiting for server to start", end="") + else: + print(".", end="") + + return start, stop + + +def find_python_processes_on_port(port: int) -> list[int]: + system = platform.system() + + if system == "Windows": + command = f"netstat -ano | findstr :{port}" + process = subprocess.Popen( # nosec + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + output, _ = process.communicate() + pids = [ + int(line.strip().split()[-1]) for line in output.split("\n") if line.strip() + ] + + else: # Linux and MacOS + command = f"lsof -i :{port} -sTCP:LISTEN -t" + process = subprocess.Popen( # nosec + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + output, _ = process.communicate() + pids = [int(pid.strip()) for pid in output.split("\n") if pid.strip()] + + python_pids = [] + for pid in pids: + if system == "Windows": + command = ( + f"wmic process where (ProcessId='{pid}') get ProcessId,CommandLine" + ) + else: + command = f"ps -p {pid} -o pid,command" + + try: + process = subprocess.Popen( # nosec + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + output, _ = process.communicate() + except Exception as e: + print(f"Error checking process {pid}: {e}") + continue + + lines = output.strip().split("\n") + if len(lines) > 1 and "python" in lines[1].lower(): + python_pids.append(pid) + + return python_pids + + +def kill_process(pid: int) -> None: + try: + os.kill(pid, signal.SIGTERM) + print(f"Process {pid} terminated.") + except Exception as e: + print(f"Error killing process {pid}: {e}") diff --git a/packages/syft/src/syft/server/worker.py b/packages/syft/src/syft/server/worker.py new file mode 100644 index 00000000000..2658492d6fc --- /dev/null +++ b/packages/syft/src/syft/server/worker.py @@ -0,0 +1,8 @@ +# relative +from ..serde.serializable import serializable +from .server import Server + + +@serializable(canonical_name="Worker", version=1) +class Worker(Server): + pass diff --git a/packages/syft/src/syft/server/worker_settings.py b/packages/syft/src/syft/server/worker_settings.py new file mode 100644 index 00000000000..3e10cc7d5fa --- /dev/null +++ b/packages/syft/src/syft/server/worker_settings.py @@ -0,0 +1,95 @@ +# future +from __future__ import annotations + +# stdlib +from collections.abc import Callable + +# third party +from typing_extensions import Self + +# relative +from ..abstract_server import AbstractServer +from ..abstract_server import ServerSideType +from ..abstract_server import ServerType +from ..deployment_type import DeploymentType +from ..serde.serializable import serializable +from ..server.credentials import SyftSigningKey +from ..service.queue.base_queue import QueueConfig +from ..store.blob_storage import BlobStorageConfig +from ..store.db.db import DBConfig +from ..store.document_store import StoreConfig +from ..types.syft_migration import migrate +from ..types.syft_object import SYFT_OBJECT_VERSION_1 +from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..types.syft_object import SyftObject +from ..types.transforms 
import TransformContext +from ..types.transforms import drop +from ..types.uid import UID + + +@serializable() +class WorkerSettings(SyftObject): + __canonical_name__ = "WorkerSettings" + __version__ = SYFT_OBJECT_VERSION_2 + + id: UID + name: str + server_type: ServerType + server_side_type: ServerSideType + deployment_type: DeploymentType = DeploymentType.REMOTE + signing_key: SyftSigningKey + db_config: DBConfig + blob_store_config: BlobStorageConfig | None = None + queue_config: QueueConfig | None = None + log_level: int | None = None + + @classmethod + def from_server(cls, server: AbstractServer) -> Self: + server_side_type = server.server_side_type or ServerSideType.HIGH_SIDE + return cls( + id=server.id, + name=server.name, + server_type=server.server_type, + signing_key=server.signing_key, + db_config=server.db_config, + server_side_type=server_side_type, + blob_store_config=server.blob_store_config, + queue_config=server.queue_config, + log_level=server.log_level, + deployment_type=server.deployment_type, + ) + + +@serializable() +class WorkerSettingsV1(SyftObject): + __canonical_name__ = "WorkerSettings" + __version__ = SYFT_OBJECT_VERSION_1 + + id: UID + name: str + server_type: ServerType + server_side_type: ServerSideType + deployment_type: DeploymentType = DeploymentType.REMOTE + signing_key: SyftSigningKey + document_store_config: StoreConfig + action_store_config: StoreConfig + blob_store_config: BlobStorageConfig | None = None + queue_config: QueueConfig | None = None + log_level: int | None = None + + +def set_db_config(context: TransformContext) -> TransformContext: + if context.output: + context.output["db_config"] = ( + context.server.db_config if context.server is not None else DBConfig() + ) + return context + + +@migrate(WorkerSettingsV1, WorkerSettings) +def migrate_workersettings_v1_to_v2() -> list[Callable]: + return [ + drop("document_store_config"), + drop("action_store_config"), + set_db_config, + ] diff --git a/packages/syft/src/syft/service/action/action_data_empty.py b/packages/syft/src/syft/service/action/action_data_empty.py index 96343566844..260c6f6d06b 100644 --- a/packages/syft/src/syft/service/action/action_data_empty.py +++ b/packages/syft/src/syft/service/action/action_data_empty.py @@ -6,7 +6,7 @@ # relative from ...serde.serializable import serializable -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject from ...types.uid import UID @@ -14,7 +14,7 @@ @serializable() class ActionDataEmpty(SyftObject): __canonical_name__ = "ActionDataEmpty" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 syft_internal_type: type | None = NoneType # type: ignore @@ -28,7 +28,7 @@ def __str__(self) -> str: @serializable() class ObjectNotReady(SyftObject): __canonical_name__ = "ObjectNotReady" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 obj_id: UID @@ -36,6 +36,6 @@ class ObjectNotReady(SyftObject): @serializable() class ActionDataLink(SyftObject): __canonical_name__ = "ActionDataLink" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 action_object_id: UID diff --git a/packages/syft/src/syft/service/action/action_endpoint.py b/packages/syft/src/syft/service/action/action_endpoint.py new file mode 100644 index 00000000000..2237b3963a6 --- /dev/null +++ b/packages/syft/src/syft/service/action/action_endpoint.py @@ -0,0 +1,110 @@ +# future +from __future__ import annotations + +# 
stdlib +from collections.abc import Callable +from enum import Enum +from enum import auto +from typing import Any + +# relative +from ...serde.serializable import serializable +from ...types.syft_migration import migrate +from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SyftObject +from ...types.transforms import drop +from ...types.transforms import make_set_default +from ...types.uid import UID +from ..context import AuthedServiceContext + + +class EXECUTION_MODE(Enum): + CALL = auto() + MOCK = auto() + PRIVATE = auto() + + +@serializable() +class CustomEndpointActionObjectV1(SyftObject): + __canonical_name__ = "CustomEndpointActionObject" + __version__ = SYFT_OBJECT_VERSION_1 + + endpoint_id: UID + context: AuthedServiceContext | None = None + + +@serializable() +class CustomEndpointActionObject(SyftObject): + __canonical_name__ = "CustomEndpointActionObject" + __version__ = SYFT_OBJECT_VERSION_2 + + endpoint_id: UID + context: AuthedServiceContext | None = None + log_id: UID | None = None + + def add_context( + self, context: AuthedServiceContext, log_id: UID | None = None + ) -> CustomEndpointActionObject: + self.context = context + self.log_id = log_id + return self + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + return self.__call_function( # type: ignore[misc] + *args, + **kwargs, + call_mode=EXECUTION_MODE.CALL, + ) + + def mock(self, *args: Any, **kwargs: Any) -> Any: + return self.__call_function( # type: ignore[misc] + *args, + **kwargs, + call_mode=EXECUTION_MODE.MOCK, + ) + + def private(self, *args: Any, **kwargs: Any) -> Any: + return self.__call_function( # type: ignore[misc] + *args, + **kwargs, + call_mode=EXECUTION_MODE.PRIVATE, + ) + + def __call_function( + self, call_mode: EXECUTION_MODE, *args: Any, **kwargs: Any + ) -> Any: + self.context = self.__check_context() + if call_mode == EXECUTION_MODE.MOCK: + __endpoint_mode = ( + self.context.server.services.api.execute_server_side_endpoint_mock_by_id + ) + elif call_mode == EXECUTION_MODE.PRIVATE: + __endpoint_mode = self.context.server.services.api.execute_service_side_endpoint_private_by_id + else: + __endpoint_mode = ( + self.context.server.services.api.execute_server_side_endpoint_by_id + ) + + return __endpoint_mode( # type: ignore[misc] + *args, + context=self.context, + endpoint_uid=self.endpoint_id, + log_id=self.log_id, + **kwargs, + ).unwrap() + + def __check_context(self) -> AuthedServiceContext: + if self.context is None: + raise Exception("No context provided to CustomEndpointActionObject") + return self.context + + +@migrate(CustomEndpointActionObjectV1, CustomEndpointActionObject) +def migrate_custom_endpoint_v1_to_v2() -> list[Callable]: + return [make_set_default("log_id", None)] + + +@migrate(CustomEndpointActionObject, CustomEndpointActionObjectV1) +def migrate_custom_endpoint_v2_to_v1() -> list[Callable]: + return [drop(["log_id"])] diff --git a/packages/syft/src/syft/service/action/action_graph.py b/packages/syft/src/syft/service/action/action_graph.py deleted file mode 100644 index 3a928da9f0c..00000000000 --- a/packages/syft/src/syft/service/action/action_graph.py +++ /dev/null @@ -1,536 +0,0 @@ -# stdlib -from collections.abc import Callable -from collections.abc import Iterable -from enum import Enum -from functools import partial -import os -from pathlib import Path -import tempfile -from typing import Any - -# third party -import matplotlib.pyplot as plt -import networkx as nx -from 
pydantic import Field -from pydantic import field_validator -from result import Err -from result import Ok -from result import Result -from typing_extensions import Self - -# relative -from ...node.credentials import SyftVerifyKey -from ...serde.deserialize import _deserialize -from ...serde.serializable import serializable -from ...serde.serialize import _serialize -from ...store.document_store import QueryKey -from ...store.document_store import QueryKeys -from ...store.document_store import StoreClientConfig -from ...store.document_store import StoreConfig -from ...store.locks import LockingConfig -from ...store.locks import SyftLock -from ...store.locks import ThreadingLockingConfig -from ...types.datetime import DateTime -from ...types.syft_object import PartialSyftObject -from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SyftObject -from ...types.uid import UID -from .action_object import Action -from .action_object import ActionObject - - -@serializable() -class ExecutionStatus(Enum): - PROCESSING = 0 - DONE = 1 - FAILED = 2 - - -@serializable() -class NodeType(Enum): - ACTION = Action - ACTION_OBJECT = ActionObject - - -@serializable() -class NodeActionData(SyftObject): - __canonical_name__ = "NodeActionData" - __version__ = SYFT_OBJECT_VERSION_2 - - id: UID | None = None # type: ignore[assignment] - type: NodeType - status: ExecutionStatus = ExecutionStatus.PROCESSING - retry: int = 0 - created_at: DateTime = Field(default_factory=DateTime.now) - updated_at: DateTime = Field(default_factory=DateTime.now) - user_verify_key: SyftVerifyKey - is_mutated: bool = False # denotes that this node has been mutated - is_mutagen: bool = False # denotes that this node is causing a mutation - next_mutagen_node: UID | None = None # next neighboring mutagen node - last_nm_mutagen_node: UID | None = None # last non mutated mutagen node - - @classmethod - def from_action(cls, action: Action, credentials: SyftVerifyKey) -> Self: - is_mutagen = action.remote_self is not None and ( - action.remote_self == action.result_id - ) - return cls( - id=action.id, - type=NodeType.ACTION, - user_verify_key=credentials, - is_mutagen=is_mutagen, - ) - - @classmethod - def from_action_obj( - cls, action_obj: ActionObject, credentials: SyftVerifyKey - ) -> Self: - return cls( - id=action_obj.id, - type=NodeType.ACTION_OBJECT, - user_verify_key=credentials, - ) - - def __hash__(self) -> int: - return hash(self.id) - - def __eq__(self, other: Any) -> bool: - if not isinstance(other, NodeActionData): - raise NotImplementedError( - "Comparisions can be made with NodeActionData type objects only." 
- ) - return hash(self) == hash(other) - - def __repr__(self) -> str: - return self._repr_debug_() - - -@serializable() -class NodeActionDataUpdate(PartialSyftObject): - __canonical_name__ = "NodeActionDataUpdate" - __version__ = SYFT_OBJECT_VERSION_2 - - id: UID - type: NodeType - status: ExecutionStatus - retry: int - created_at: DateTime - updated_at: DateTime = Field(default_factory=DateTime.now) - credentials: SyftVerifyKey - is_mutated: bool - is_mutagen: bool - next_mutagen_node: UID # next neighboring mutagen node - last_nm_mutagen_node: UID # last non mutated mutagen node - - -@serializable() -class BaseGraphStore: - graph_type: Any - client_config: StoreClientConfig | None - - def set(self, uid: Any, data: Any) -> None: - raise NotImplementedError - - def get(self, uid: Any) -> Any: - raise NotImplementedError - - def delete(self, uid: Any) -> None: - raise NotImplementedError - - def find_neighbors(self, uid: Any) -> list | None: - raise NotImplementedError - - def update(self, uid: Any, data: Any) -> None: - raise NotImplementedError - - def add_edge(self, parent: Any, child: Any) -> None: - raise NotImplementedError - - def remove_edge(self, parent: Any, child: Any) -> None: - raise NotImplementedError - - def nodes(self) -> Any: - raise NotImplementedError - - def edges(self) -> Any: - raise NotImplementedError - - def visualize(self, seed: int, figsize: tuple) -> None: - raise NotImplementedError - - def save(self) -> None: - raise NotImplementedError - - def get_predecessors(self, uid: UID) -> list: - raise NotImplementedError - - def get_successors(self, uid: UID) -> list: - raise NotImplementedError - - def exists(self, uid: Any) -> bool: - raise NotImplementedError - - def subgraph(self, qks: QueryKeys) -> Any: - raise NotImplementedError - - def topological_sort(self, subgraph: Any) -> Any: - raise NotImplementedError - - -@serializable() -class InMemoryStoreClientConfig(StoreClientConfig): - filename: str = "action_graph.bytes" - path: str | Path = Field(default_factory=tempfile.gettempdir) - - # We need this in addition to Field(default_factory=...) - # so users can still do InMemoryStoreClientConfig(path=None) - @field_validator("path", mode="before") - @classmethod - def __default_path(cls, path: str | Path | None) -> str | Path: - if path is None: - return tempfile.gettempdir() - return path - - @property - def file_path(self) -> Path: - return Path(self.path) / self.filename - - -@serializable(without=["_lock"]) -class NetworkXBackingStore(BaseGraphStore): - def __init__(self, store_config: StoreConfig, reset: bool = False) -> None: - if store_config.client_config: - self.path_str = store_config.client_config.file_path.as_posix() - else: - self.path_str = "" - if not reset and os.path.exists(self.path_str): - self._db = self._load_from_path(self.path_str) - else: - self._db = nx.DiGraph() - - self.locking_config = store_config.locking_config - self._lock: SyftLock | None = None - - @property - def lock(self) -> SyftLock: - if not hasattr(self, "_lock") or self._lock is None: - self._lock = SyftLock(self.locking_config) - return self._lock - - @property - def db(self) -> nx.Graph: - return self._db - - def _thread_safe_cbk( - self, cbk: Callable, *args: Any, **kwargs: Any - ) -> Result[Any, str]: - # TODO copied method from document_store, have it in one place and reuse? 
- locked = self.lock.acquire(blocking=True) - if not locked: - return Err( - f"Failed to acquire lock for the operation {self.lock.lock_name} ({self.lock._lock})" - ) - try: - result = cbk(*args, **kwargs) - except BaseException as e: - result = Err(str(e)) - self.lock.release() - - return result - - def set(self, uid: UID, data: Any) -> None: - self._thread_safe_cbk(self._set, uid=uid, data=data) - - def _set(self, uid: UID, data: Any) -> None: - if self.exists(uid=uid): - self.update(uid=uid, data=data) - else: - self.db.add_node(uid, data=data) - self.save() - - def get(self, uid: UID) -> Any: - node_data = self.db.nodes.get(uid) - return node_data.get("data") - - def exists(self, uid: Any) -> bool: - return uid in self.nodes() - - def delete(self, uid: UID) -> None: - self._thread_safe_cbk(self._delete, uid=uid) - - def _delete(self, uid: UID) -> None: - if self.exists(uid=uid): - self.db.remove_node(uid) - self.save() - - def find_neighbors(self, uid: UID) -> list | None: - if self.exists(uid=uid): - neighbors = self.db.neighbors(uid) - return neighbors - return None - - def update(self, uid: UID, data: Any) -> None: - self._thread_safe_cbk(self._update, uid=uid, data=data) - - def _update(self, uid: UID, data: Any) -> None: - if self.exists(uid=uid): - self.db.nodes[uid]["data"] = data - self.save() - - def add_edge(self, parent: Any, child: Any) -> None: - self._thread_safe_cbk(self._add_edge, parent=parent, child=child) - - def _add_edge(self, parent: Any, child: Any) -> None: - self.db.add_edge(parent, child) - self.save() - - def remove_edge(self, parent: Any, child: Any) -> None: - self._thread_safe_cbk(self._remove_edge, parent=parent, child=child) - - def _remove_edge(self, parent: Any, child: Any) -> None: - self.db.remove_edge(parent, child) - self.save() - - def visualize(self, seed: int = 3113794652, figsize: tuple = (20, 10)) -> None: - plt.figure(figsize=figsize) - pos = nx.spring_layout(self.db, seed=seed) - return nx.draw_networkx(self.db, pos=pos, with_labels=True) - - def nodes(self) -> Iterable: - return self.db.nodes(data=True) - - def edges(self) -> Iterable: - return self.db.edges() - - def get_predecessors(self, uid: UID) -> list: - return self.db.predecessors(uid) - - def get_successors(self, uid: UID) -> list: - return self.db.successors(uid) - - def is_parent(self, parent: Any, child: Any) -> bool: - parents = self.db.predecessors(child) - return parent in parents - - def save(self) -> None: - bytes = _serialize(self.db, to_bytes=True) - with open(self.path_str, "wb") as f: - f.write(bytes) - - def _filter_nodes_by(self, uid: UID, qks: QueryKeys) -> bool: - node_data = self.db.nodes[uid]["data"] - matches = [] - for qk in qks.all: - matches.append(getattr(node_data, qk.key) == qk.value) - # AND matches - return all(matches) - - def subgraph(self, qks: QueryKeys) -> Any: - filter_func = partial(self._filter_nodes_by, qks=qks) - return nx.subgraph_view(self.db, filter_node=filter_func) - - def topological_sort(self, subgraph: Any) -> Any: - return list(nx.topological_sort(subgraph)) - - @staticmethod - def _load_from_path(file_path: str) -> None: - with open(file_path, "rb") as f: - bytes = f.read() - return _deserialize(blob=bytes, from_bytes=True) - - -@serializable() -class InMemoryGraphConfig(StoreConfig): - __canonical_name__ = "InMemoryGraphConfig" - - store_type: type[BaseGraphStore] = NetworkXBackingStore - client_config: StoreClientConfig = Field(default_factory=InMemoryStoreClientConfig) - locking_config: LockingConfig = 
Field(default_factory=ThreadingLockingConfig) - - -@serializable() -class ActionGraphStore: - pass - - -@serializable() -class InMemoryActionGraphStore(ActionGraphStore): - __canonical_name__ = "InMemoryActionGraphStore" - - def __init__(self, store_config: StoreConfig, reset: bool = False): - self.store_config: StoreConfig = store_config - self.graph: BaseGraphStore = self.store_config.store_type( - self.store_config, reset - ) - - def set( - self, - node: NodeActionData, - credentials: SyftVerifyKey, - parent_uids: list[UID] | None = None, - ) -> Result[NodeActionData, str]: - if self.graph.exists(uid=node.id): - return Err(f"Node already exists in the graph: {node}") - - self.graph.set(uid=node.id, data=node) - - if parent_uids is None: - parent_uids = [] - - for parent_uid in parent_uids: - result = self.add_edge( - parent=parent_uid, - child=node.id, - credentials=credentials, - ) - if result.is_err(): - return result - - return Ok(node) - - def get( - self, - uid: UID, - credentials: SyftVerifyKey, - ) -> Result[NodeActionData, str]: - # 🟡 TODO: Add permission check - if self.graph.exists(uid=uid): - node_data = self.graph.get(uid=uid) - return Ok(node_data) - return Err(f"Node does not exists with id: {uid}") - - def delete( - self, - uid: UID, - credentials: SyftVerifyKey, - ) -> Result[bool, str]: - # 🟡 TODO: Add permission checks - if self.graph.exists(uid=uid): - self.graph.delete(uid=uid) - return Ok(True) - return Err(f"Node does not exists with id: {uid}") - - def update( - self, - uid: UID, - data: NodeActionDataUpdate, - credentials: SyftVerifyKey, - ) -> Result[NodeActionData, str]: - # 🟡 TODO: Add permission checks - node_data = self.graph.get(uid=uid) - if node_data is not None: - for key, val in data.to_dict(exclude_empty=True).items(): - setattr(node_data, key, val) - self.graph.update(uid=uid, data=node_data) - return Ok(node_data) - return Err(f"Node does not exists for uid: {uid}") - - def update_non_mutated_successor( - self, - node_id: UID, - nm_successor_id: UID, - credentials: SyftVerifyKey, - ) -> Result[NodeActionData, str]: - """ - Used when a node is a mutagen and to update non-mutated - successor for all nodes between node_id and nm_successor_id - """ - node_data = self.graph.get(uid=node_id) - - data = NodeActionDataUpdate( - next_mutagen_node=nm_successor_id, - last_nm_mutagen_node=nm_successor_id, - is_mutated=True, - ) - - if not node_data.is_mutated: - # If current node is not mutated, then mark it as mutated - return self.update(uid=node_id, data=data, credentials=credentials) - else: - # loop through successive mutagen nodes and - # update their last_nm_mutagen_node id - while node_id != nm_successor_id: - node_data = self.graph.get(uid=node_id) - - # If node is the last added mutagen node, - # then in that case its `next_mutagen_node` will be None - # Therefore update its values to nm_successor_id - next_mutagen_node = ( - nm_successor_id - if node_data.next_mutagen_node is None - else node_data.next_mutagen_node - ) - - data = NodeActionDataUpdate( - last_nm_mutagen_node=nm_successor_id, - is_mutated=True, - next_mutagen_node=next_mutagen_node, - ) - - # Update each successive mutagen node - result = self.update( - uid=node_id, - data=data, - credentials=credentials, - ) - node_id = node_data.next_mutagen_node - - return result - - def _get_last_non_mutated_mutagen( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[UID, str]: - node_data = self.graph.get(uid=uid) - if node_data.is_mutated: - return Ok(node_data.last_nm_mutagen_node) - - 
return Ok(uid) - - def add_edge( - self, - parent: UID, - child: UID, - credentials: SyftVerifyKey, - ) -> Result[bool, str]: - if not self.graph.exists(parent): - return Err(f"Node does not exists for uid (parent): {parent}") - - if not self.graph.exists(child): - return Err(f"Node does not exists for uid (child): {child}") - - result = self._get_last_non_mutated_mutagen( - uid=parent, - credentials=credentials, - ) - - if result.is_err(): - return result - - new_parent = result.ok() - - self.graph.add_edge(parent=new_parent, child=child) - - return Ok(True) - - def is_parent(self, parent: UID, child: UID) -> Result[bool, str]: - if self.graph.exists(child): - parents = self.graph.get_predecessors(child) - result = parent in parents - return Ok(result) - return Err(f"Node doesn't exists for id: {child}") - - def query( - self, - qks: QueryKey | QueryKeys, - credentials: SyftVerifyKey, - ) -> Result[list[NodeActionData], str]: - if isinstance(qks, QueryKey): - qks = QueryKeys(qks=[qks]) - subgraph = self.graph.subgraph(qks=qks) - return Ok(self.graph.topological_sort(subgraph=subgraph)) - - def nodes(self, credentials: SyftVerifyKey) -> Result[list, str]: - return Ok(self.graph.nodes()) - - def edges(self, credentials: SyftVerifyKey) -> Result[list, str]: - return Ok(self.graph.edges()) diff --git a/packages/syft/src/syft/service/action/action_graph_service.py b/packages/syft/src/syft/service/action/action_graph_service.py deleted file mode 100644 index 8ea4cca2240..00000000000 --- a/packages/syft/src/syft/service/action/action_graph_service.py +++ /dev/null @@ -1,208 +0,0 @@ -# stdlib - -# third party -from pydantic import ValidationError - -# relative -from ...node.credentials import SyftVerifyKey -from ...serde.serializable import serializable -from ...store.document_store import PartitionKey -from ...store.document_store import QueryKeys -from ...types.uid import UID -from ..code.user_code import UserVerifyKeyPartitionKey -from ..context import AuthedServiceContext -from ..response import SyftError -from ..response import SyftSuccess -from ..service import AbstractService -from ..service import service_method -from .action_graph import ActionGraphStore -from .action_graph import ExecutionStatus -from .action_graph import NodeActionData -from .action_graph import NodeActionDataUpdate -from .action_graph import NodeType -from .action_object import Action -from .action_object import ActionObject - -ExecutionStatusPartitionKey = PartitionKey(key="status", type_=ExecutionStatus) - - -@serializable() -class ActionGraphService(AbstractService): - store: ActionGraphStore - - def __init__(self, store: ActionGraphStore): - self.store = store - - @service_method(path="graph.add_action", name="add_action") - def add_action( - self, context: AuthedServiceContext, action: Action - ) -> tuple[NodeActionData, NodeActionData] | SyftError: - # Create a node for the action - input_uids, output_uid = self._extract_input_and_output_from_action( - action=action - ) - node = NodeActionData.from_action( - action=action, credentials=context.credentials - ) - - result = self.store.set( - credentials=context.credentials, node=node, parent_uids=input_uids - ) - - if result.is_err(): - return SyftError(message=result.err()) - - action_node = result.ok() - - if action_node.is_mutagen: - # updated non-mutated successor for all nodes between - # node_id and nm_successor_id - if action.remote_self is None: - return SyftError(message=f"action {action}'s remote_self is None") - result = 
self.store.update_non_mutated_successor( - node_id=action.remote_self.id, - nm_successor_id=action_node.id, - credentials=context.credentials, - ) - else: - # Create a node for the result object - node = NodeActionData( - id=output_uid, - user_verify_key=context.credentials, - type=NodeType.ACTION_OBJECT, - ) - - result = self.store.set( - credentials=context.credentials, - node=node, - parent_uids=[action.id], - ) - - if result.is_err(): - return SyftError(message=result.err()) - - result_node = result.ok() - - return action_node, result_node - - @service_method(path="graph.add_action_obj", name="add_action_obj") - def add_action_obj( - self, context: AuthedServiceContext, action_obj: ActionObject - ) -> NodeActionData | SyftError: - node = NodeActionData.from_action_obj( - action_obj=action_obj, credentials=context.credentials - ) - result = self.store.set( - credentials=context.credentials, - node=node, - ) - if result.is_err(): - return SyftError(message=result.err()) - - return result.ok() - - def _extract_input_and_output_from_action( - self, action: Action - ) -> tuple[set[UID], UID | None]: - input_uids = set() - - if action.remote_self is not None: - input_uids.add(action.remote_self.id) - - for arg in action.args: - input_uids.add(arg.id) - - for _, kwarg in action.kwargs.items(): - input_uids.add(kwarg.id) - - output_uid = action.result_id.id if action.result_id is not None else None - - return input_uids, output_uid - - def get( - self, uid: UID, context: AuthedServiceContext - ) -> NodeActionData | SyftError: - result = self.store.get(uid=uid, credentials=context.credentials) - if result.is_err(): - return SyftError(message=result.err()) - return result.ok() - - def remove_node( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - result = self.store.delete( - uid=uid, - credentials=context.credentials, - ) - if result.is_ok(): - return SyftSuccess( - message=f"Successfully deleted node with uid: {uid} from the graph." 
- ) - - return SyftError(message=result.err()) - - def get_all_nodes(self, context: AuthedServiceContext) -> list | SyftError: - result = self.store.nodes(context.credentials) - if result.is_ok(): - return result.ok() - - return SyftError(message="Failed to fetch nodes from the graph") - - def get_all_edges(self, context: AuthedServiceContext) -> list | SyftError: - result = self.store.edges(context.credentials) - if result.is_ok(): - return result.ok() - return SyftError(message="Failed to fetch nodes from the graph") - - def update( - self, - context: AuthedServiceContext, - uid: UID, - node_data: NodeActionDataUpdate, - ) -> NodeActionData | SyftError: - result = self.store.update( - uid=uid, data=node_data, credentials=context.credentials - ) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) - - def update_action_status( - self, - context: AuthedServiceContext, - action_id: UID, - status: ExecutionStatus, - ) -> SyftSuccess | SyftError: - try: - node_data = NodeActionDataUpdate(status=status) - except ValidationError as e: - return SyftError(message=f"ValidationError: {e}") - result = self.store.update( - uid=action_id, data=node_data, credentials=context.credentials - ) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) - - def get_by_action_status( - self, context: AuthedServiceContext, status: ExecutionStatus - ) -> list[NodeActionData] | SyftError: - qks = QueryKeys(qks=[ExecutionStatusPartitionKey.with_obj(status)]) - - result = self.store.query(qks=qks, credentials=context.credentials) - if result.is_ok(): - return result.ok() - - return SyftError(message=result.err()) - - def get_by_verify_key( - self, context: AuthedServiceContext, verify_key: SyftVerifyKey - ) -> list[NodeActionData] | SyftError: - # TODO: Add a Query for Credentials as well, - qks = QueryKeys(qks=[UserVerifyKeyPartitionKey.with_obj(verify_key)]) - - result = self.store.query(qks=qks, credentials=context.credentials) - if result.is_ok(): - return result.ok() - - return SyftError(message=result.err()) diff --git a/packages/syft/src/syft/service/action/action_object.py b/packages/syft/src/syft/service/action/action_object.py index 42330c8d7b0..dc072b7a8d8 100644 --- a/packages/syft/src/syft/service/action/action_object.py +++ b/packages/syft/src/syft/service/action/action_object.py @@ -3,13 +3,15 @@ # stdlib from collections.abc import Callable +from collections.abc import Iterable from enum import Enum import inspect from io import BytesIO +import logging from pathlib import Path +import sys import threading import time -import traceback import types from typing import Any from typing import ClassVar @@ -20,32 +22,33 @@ from pydantic import Field from pydantic import field_validator from pydantic import model_validator -from result import Err -from result import Ok -from result import Result from typing_extensions import Self # relative -from ...client.api import APIRegistry from ...client.api import SyftAPI from ...client.api import SyftAPICall from ...client.client import SyftClient -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable from ...serde.serialize import _serialize as serialize -from ...service.response import SyftError +from ...server.credentials import SyftVerifyKey +from ...service.blob_storage.util import can_upload_to_blob_storage +from ...service.response import SyftSuccess +from ...service.response import SyftWarning from ...store.linked_obj import LinkedObject from ...types.base import 
SyftBaseModel from ...types.datetime import DateTime -from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SYFT_OBJECT_VERSION_3 +from ...types.errors import SyftException +from ...types.result import Err +from ...types.result import as_result +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftBaseObject from ...types.syft_object import SyftObject +from ...types.syft_object_registry import SyftObjectRegistry from ...types.syncable_object import SyncableSyftObject from ...types.uid import LineageID from ...types.uid import UID -from ...util.logger import debug -from ..response import SyftException +from ...util.util import prompt_warning_message +from ..context import AuthedServiceContext from ..service import from_api_or_context from .action_data_empty import ActionDataEmpty from .action_data_empty import ActionDataLink @@ -55,6 +58,8 @@ from .action_types import action_type_for_type from .action_types import action_types +logger = logging.getLogger(__name__) + if TYPE_CHECKING: # relative from ..sync.diff_state import AttrDiff @@ -62,14 +67,14 @@ NoneType = type(None) -@serializable() +@serializable(canonical_name="TwinMode", version=1) class TwinMode(Enum): NONE = 0 PRIVATE = 1 MOCK = 2 -@serializable() +@serializable(canonical_name="ActionType", version=1) class ActionType(Enum): GETATTRIBUTE = 1 METHOD = 2 @@ -77,6 +82,7 @@ class ActionType(Enum): FUNCTION = 8 CREATEOBJECT = 16 SYFTFUNCTION = 32 + TWINAPI = 64 def repr_cls(c: Any) -> str: @@ -103,7 +109,7 @@ class Action(SyftObject): """ __canonical_name__ = "Action" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 __attr_searchable__: ClassVar[list[str]] = [] @@ -129,11 +135,8 @@ def full_path(self) -> str: @property def job_display_name(self) -> str: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if self.user_code_id is not None and api is not None: + if self.user_code_id is not None: + api = self.get_api() user_code = api.services.code.get_by_id(self.user_code_id) return user_code.service_func_name else: @@ -195,6 +198,16 @@ def from_api_call(cls, api_call: SyftAPICall) -> Action: ) return action + @classmethod + def from_api_endpoint_execution(cls: type[Self]) -> Action: + return cls( + args=[], + kwargs={}, + result_id=LineageID(), + action_type=ActionType.TWINAPI, + user_code_id=None, + ) + def __repr__(self) -> str: def repr_uid(_id: LineageID) -> str: return f"{str(_id)[:3]}..{str(_id)[-1]}" @@ -211,6 +224,7 @@ def repr_uid(_id: LineageID) -> str: ) +@serializable(canonical_name="ActionObjectPointer", version=1) class ActionObjectPointer: pass @@ -220,6 +234,8 @@ class ActionObjectPointer: HOOK_ON_POINTERS = "ON_POINTERS" passthrough_attrs = [ + "id", + "refresh_object", "__dict__", # python "__class__", # python "__repr_name__", # python @@ -231,11 +247,16 @@ class ActionObjectPointer: "__fields_set__", # pydantic "__repr_str__", # pydantic "__repr_args__", # pydantic + "__pydantic_fields__", # pydantic "__post_init__", # syft + "_get_api", # syft "__validate_private_attrs__", # syft "id", # syft - "to_mongo", # syft 🟡 TODO 23: Add composeable / inheritable object passthrough attrs + "created_date", # syft + "updated_date", # syft + "deleted_date", # syft "__attr_searchable__", # syft + "__attr_unique__", # syft "__canonical_name__", # syft "__version__", # syft "__args__", # pydantic @@ -250,11 +271,12 @@ class ActionObjectPointer: 
"_save_to_blob_storage_", # syft "syft_action_data", # syft "syft_resolved", # syft - "syft_action_data_node_id", - "node_uid", + "syft_action_data_server_id", + "server_uid", "migrate_to", # syft "to_dict", # syft "dict", # syft + "has_storage_permission", # syft "_iter", # pydantic "__exclude_fields__", # pydantic "__include_fields__", # pydantic @@ -298,10 +320,21 @@ class ActionObjectPointer: "__private_sync_attr_mocks__", # syft "__exclude_sync_diff_attrs__", # syft "__repr_attrs__", # syft + "get_sync_dependencies", + "_data_repr", + "syft_eq", # syft + "__table_coll_widths__", + "_clear_cache", + "_set_reprs", + "get_api", + "get_api_wrapped", ] dont_wrap_output_attrs = [ + "id", + "refresh_object", "__repr__", "__str__", + "__repr_attrs__", "_repr_html_", "_repr_markdown_", "_repr_latex_", @@ -311,14 +344,24 @@ class ActionObjectPointer: "__bool__", "__len__", "syft_resolved", # syft - "node_uid", - "syft_action_data_node_id", + "server_uid", + "syft_action_data_server_id", "__sha256__", "__hash_exclude_attrs__", "__exclude_sync_diff_attrs__", # syft - "__repr_attrs__", + "__repr_attrs__", # syft + "get_sync_dependencies", # syft + "syft_eq", # syft + "__table_coll_widths__", + "_clear_cache", + "_set_reprs", + "get_api", + "get_api_wrapped", ] dont_make_side_effects = [ + "refresh_object", + "id", + "__repr_attrs__", "_repr_html_", "_repr_markdown_", "_repr_latex_", @@ -328,22 +371,44 @@ class ActionObjectPointer: "__len__", "shape", "syft_resolved", # syft - "node_uid", - "syft_action_data_node_id", + "server_uid", + "syft_action_data_server_id", "__sha256__", "__hash_exclude_attrs__", "__exclude_sync_diff_attrs__", # syft "__repr_attrs__", + "get_sync_dependencies", + "syft_eq", # syft + "__table_coll_widths__", + "_clear_cache", + "_set_reprs", + "get_api", + "get_api_wrapped", ] action_data_empty_must_run = [ "__repr__", "__str__", ] +methods_to_check_in_cache = [ + "_ipython_display_", + "_repr_mimebundle_", + "_repr_latex_", + "_repr_javascript_", + "_repr_html_", + "_repr_jpeg_", + "_repr_png_", + "_repr_svg_", + "_repr_pretty_", + "_repr_pdf_", + "_repr_json_", + "_repr_markdown_", +] + class PreHookContext(SyftBaseObject): __canonical_name__ = "PreHookContext" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 """Hook context @@ -352,8 +417,8 @@ class PreHookContext(SyftBaseObject): The ActionObject to use for the action op_name: str The method name to use for the action - node_uid: Optional[UID] - Optional Syft node UID + server_uid: Optional[UID] + Optional Syft server UID result_id: Optional[Union[UID, LineageID]] Optional result Syft UID action: Optional[Action] @@ -362,16 +427,17 @@ class PreHookContext(SyftBaseObject): obj: Any = None op_name: str - node_uid: UID | None = None + server_uid: UID | None = None result_id: UID | LineageID | None = None result_twin_type: TwinMode | None = None action: Action | None = None action_type: ActionType | None = None +@as_result(SyftException) def make_action_side_effect( context: PreHookContext, *args: Any, **kwargs: Any -) -> Result[Ok[tuple[PreHookContext, tuple[Any, ...], dict[str, Any]]], Err[str]]: +) -> tuple[PreHookContext, tuple[Any, ...], dict[str, Any]]: """Create a new action from context_op_name, and add it to the PreHookContext Parameters: @@ -385,8 +451,6 @@ def make_action_side_effect( - Ok[[Tuple[PreHookContext, Tuple[Any, ...], Dict[str, Any]]] on success - Err[str] on failure """ - # relative - try: action = context.obj.syft_make_action_with_self( op=context.op_name, @@ -395,11 +459,10 
@@ def make_action_side_effect( action_type=context.action_type, ) context.action = action - except Exception: - print(f"make_action_side_effect failed with {traceback.format_exc()}") - return Err(f"make_action_side_effect failed with {traceback.format_exc()}") + except Exception as e: + raise SyftException(public_message=f"make_action_side_effect failed {e}") from e - return Ok((context, args, kwargs)) + return context, args, kwargs class TraceResultRegistry: @@ -438,98 +501,77 @@ class TraceResult(SyftBaseModel): is_tracing: bool = False +@as_result(SyftException) def trace_action_side_effect( context: PreHookContext, *args: Any, **kwargs: Any -) -> Result[Ok[tuple[PreHookContext, tuple[Any, ...], dict[str, Any]]], Err[str]]: +) -> tuple[PreHookContext, tuple[Any, ...], dict[str, Any]]: action = context.action if action is not None and TraceResultRegistry.current_thread_is_tracing(): trace_result = TraceResultRegistry.get_trace_result_for_thread() trace_result.result += [action] # type: ignore - return Ok((context, args, kwargs)) + return context, args, kwargs def convert_to_pointers( api: SyftAPI, - node_uid: UID | None = None, + server_uid: UID | None = None, args: list | None = None, kwargs: dict | None = None, ) -> tuple[list, dict]: # relative from ..dataset.dataset import Asset - arg_list = [] - kwarg_dict = {} - if args is not None: - for arg in args: - if ( - not isinstance(arg, ActionObject | Asset | UID) - and api.signing_key is not None # type: ignore[unreachable] - ): - arg = ActionObject.from_obj( # type: ignore[unreachable] - syft_action_data=arg, - syft_client_verify_key=api.signing_key.verify_key, - syft_node_location=api.node_uid, - ) - arg.syft_node_uid = node_uid - r = arg._save_to_blob_storage() - if isinstance(r, SyftError): - print(r.message) - arg = api.services.action.set(arg) - arg_list.append(arg) - - if kwargs is not None: - for k, arg in kwargs.items(): - if ( - not isinstance(arg, ActionObject | Asset | UID) - and api.signing_key is not None # type: ignore[unreachable] - ): - arg = ActionObject.from_obj( # type: ignore[unreachable] - syft_action_data=arg, - syft_client_verify_key=api.signing_key.verify_key, - syft_node_location=api.node_uid, - ) - arg.syft_node_uid = node_uid - r = arg._save_to_blob_storage() - if isinstance(r, SyftError): - print(r.message) - arg = api.services.action.set(arg) + def process_arg(arg: ActionObject | Asset | UID | Any) -> Any: + if ( + not isinstance(arg, ActionObject | Asset | UID) + and api.signing_key is not None # type: ignore[unreachable] + ): + arg = ActionObject.from_obj( # type: ignore[unreachable] + syft_action_data=arg, + syft_client_verify_key=api.signing_key.verify_key, + syft_server_location=api.server_uid, + ) + arg.syft_server_uid = server_uid + r = arg._save_to_blob_storage().unwrap() + if isinstance(r, SyftWarning): + logger.debug(r.message) + arg = api.services.action.set(arg) + return arg - kwarg_dict[k] = arg + arg_list = [process_arg(arg) for arg in args] if args else [] + kwarg_dict = {k: process_arg(v) for k, v in kwargs.items()} if kwargs else {} return arg_list, kwarg_dict +@as_result(SyftException) def send_action_side_effect( context: PreHookContext, *args: Any, **kwargs: Any -) -> Result[Ok[tuple[PreHookContext, tuple[Any, ...], dict[str, Any]]], Err[str]]: - """Create a new action from the context.op_name, and execute it on the remote node.""" +) -> tuple[PreHookContext, tuple[Any, ...], dict[str, Any]]: + """Create a new action from the context.op_name, and execute it on the remote server.""" try: 
if context.action is None: - result = make_action_side_effect(context, *args, **kwargs) - if result.is_err(): - raise RuntimeError(result.err()) - - context, _, _ = result.ok() + context, _, _ = make_action_side_effect(context, *args, **kwargs).unwrap() action_result = context.obj.syft_execute_action(context.action, sync=True) if not isinstance(action_result, ActionObject): - raise RuntimeError(f"Got back unexpected response : {action_result}") + raise SyftException( + public_message=f"Got back unexpected response : {action_result}" + ) else: - context.node_uid = action_result.syft_node_uid + context.server_uid = action_result.syft_server_uid context.result_id = action_result.id context.result_twin_type = action_result.syft_twin_type except Exception as e: - return Err( - f"send_action_side_effect failed with {e}\n {traceback.format_exc()}" - ) - return Ok((context, args, kwargs)) + # print(e) + raise SyftException(public_message=f"send_action_side_effect failed {e}") from e + return context, args, kwargs -def propagate_node_uid( - context: PreHookContext, op: str, result: Any -) -> Result[Ok[Any], Err[str]]: - """Patch the result to include the syft_node_uid +@as_result(SyftException) +def propagate_server_uid(context: PreHookContext, op: str, result: Any) -> Any: + """Patch the result to include the syft_server_uid Parameters: context: PreHookContext @@ -543,24 +585,28 @@ def propagate_node_uid( - Err[str] on failure """ if context.op_name in dont_make_side_effects or not hasattr( - context.obj, "syft_node_uid" + context.obj, "syft_server_uid" ): - return Ok(result) + return result try: - syft_node_uid = getattr(context.obj, "syft_node_uid", None) - if syft_node_uid is None: - raise RuntimeError("Can't proagate node_uid because parent doesnt have one") + syft_server_uid = getattr(context.obj, "syft_server_uid", None) + if syft_server_uid is None: + raise SyftException( + public_message="Can't proagate server_uid because parent doesnt have one" + ) if op not in context.obj._syft_dont_wrap_attrs(): - if hasattr(result, "syft_node_uid"): - result.syft_node_uid = syft_node_uid + if hasattr(result, "syft_server_uid"): + result.syft_server_uid = syft_server_uid else: - raise RuntimeError("dont propogate node_uid because output isnt wrapped") + raise SyftException( + public_message="dont propogate server_uid because output isnt wrapped" + ) except Exception: - return Err(f"propagate_node_uid failed with {traceback.format_exc()}") + raise SyftException(public_message="propagate_server_uid failed") - return Ok(result) + return result def debox_args_and_kwargs(args: Any, kwargs: Any) -> tuple[Any, Any]: @@ -582,6 +628,7 @@ def debox_args_and_kwargs(args: Any, kwargs: Any) -> tuple[Any, Any]: BASE_PASSTHROUGH_ATTRS: list[str] = [ + "id", "is_mock", "is_real", "is_twin", @@ -605,8 +652,8 @@ def debox_args_and_kwargs(args: Any, kwargs: Any) -> tuple[Any, Any]: "reload_cache", "syft_resolved", "refresh_object", - "syft_action_data_node_id", - "node_uid", + "syft_action_data_server_id", + "server_uid", "__sha256__", "__hash_exclude_attrs__", "__hash__", @@ -614,15 +661,31 @@ def debox_args_and_kwargs(args: Any, kwargs: Any) -> tuple[Any, Any]: "_has_private_sync_attrs", "__exclude_sync_diff_attrs__", "__repr_attrs__", + "get_sync_dependencies", + "_data_repr", + "syft_eq", + "__table_coll_widths__", + "_clear_cache", + "_set_reprs", + "get_api", + "get_api_wrapped", ] +def truncate_str(string: str, length: int = 100) -> str: + stringlen = len(string) + if stringlen > length: + n_hidden = stringlen - 
length + string = f"{string[:length]}... ({n_hidden} characters hidden)" + return string + + @serializable(without=["syft_pre_hooks__", "syft_post_hooks__"]) class ActionObject(SyncableSyftObject): """Action object for remote execution.""" __canonical_name__ = "ActionObject" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 __private_sync_attr_mocks__: ClassVar[dict[str, Any]] = { "syft_action_data_cache": None, "syft_blob_storage_entry_id": None, @@ -640,7 +703,7 @@ class ActionObject(SyncableSyftObject): syft_parent_kwargs: Any | None = None syft_history_hash: int | None = None syft_internal_type: ClassVar[type[Any]] - syft_node_uid: UID | None = None + syft_server_uid: UID | None = None syft_pre_hooks__: dict[str, list] = {} syft_post_hooks__: dict[str, list] = {} syft_twin_type: TwinMode = TwinMode.NONE @@ -652,7 +715,8 @@ class ActionObject(SyncableSyftObject): syft_resolve_data: bool | None = None syft_created_at: DateTime | None = None syft_resolved: bool = True - syft_action_data_node_id: UID | None = None + syft_action_data_server_id: UID | None = None + syft_action_saved_to_blob_store: bool = True # syft_dont_wrap_attrs = ["shape"] def syft_get_diffs(self, ext_obj: Any) -> list[AttrDiff]: @@ -667,36 +731,39 @@ def syft_get_diffs(self, ext_obj: Any) -> list[AttrDiff]: low_data = ext_obj.syft_action_data high_data = self.syft_action_data - if low_data != high_data: + + try: + cmp = low_data != high_data + if isinstance(cmp, Iterable): + cmp = all(cmp) + except Exception: + cmp = False + + if cmp: diff_attr = AttrDiff( attr_name="syft_action_data", low_attr=low_data, high_attr=high_data ) diff_attrs.append(diff_attr) return diff_attrs - def _set_obj_location_(self, node_uid: UID, credentials: SyftVerifyKey) -> None: - self.syft_node_location = node_uid + def _set_obj_location_(self, server_uid: UID, credentials: SyftVerifyKey) -> None: + self.syft_server_location = server_uid self.syft_client_verify_key = credentials - if self.syft_action_data_node_id is None: - self.syft_action_data_node_id = node_uid + if self.syft_action_data_server_id is None: + self.syft_action_data_server_id = server_uid @property def syft_action_data(self) -> Any: - if ( - self.syft_blob_storage_entry_id - and self.syft_created_at - and not TraceResultRegistry.current_thread_is_tracing() - ): + if self.syft_blob_storage_entry_id and self.syft_created_at: self.reload_cache() - return self.syft_action_data_cache - def reload_cache(self) -> SyftError | None: + def reload_cache(self) -> None: # If ActionDataEmpty then try to fetch it from store. 
if isinstance(self.syft_action_data_cache, ActionDataEmpty): blob_storage_read_method = from_api_or_context( func_or_path="blob_storage.read", - syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, syft_client_verify_key=self.syft_client_verify_key, ) @@ -704,133 +771,153 @@ def reload_cache(self) -> SyftError | None: blob_retrieval_object = blob_storage_read_method( uid=self.syft_blob_storage_entry_id ) - if isinstance(blob_retrieval_object, SyftError): - print( - "Could not fetch actionobject data\n", - type(blob_retrieval_object), - ) - return blob_retrieval_object + # relative from ...store.blob_storage import BlobRetrieval - if isinstance(blob_retrieval_object, SyftError): - return blob_retrieval_object - elif isinstance(blob_retrieval_object, BlobRetrieval): + if isinstance(blob_retrieval_object, BlobRetrieval): # TODO: This change is temporary to for gateway to be compatible with the new blob storage self.syft_action_data_cache = blob_retrieval_object.read() - self.syft_action_data_type = type(self.syft_action_data) + self.syft_action_data_type = type(self.syft_action_data_cache) return None else: # In the case of gateway, we directly receive the actual object - # TODO: The ideal solution would be to stream the object from the domain through the gateway + # TODO: The ideal solution would be to stream the object from the datasite through the gateway # Currently , we are just passing the object as it is, which would be fixed later. self.syft_action_data_cache = blob_retrieval_object - self.syft_action_data_type = type(self.syft_action_data) + self.syft_action_data_type = type(self.syft_action_data_cache) return None else: - print("cannot reload cache") - return None + raise SyftException( + public_meesage="Could not reload cache, could not get read method" + ) return None - def _save_to_blob_storage_(self, data: Any) -> SyftError | None: + def _save_to_blob_storage_(self, data: Any) -> SyftWarning | None: # relative from ...types.blob_storage import BlobFile from ...types.blob_storage import CreateBlobStorageEntry if not isinstance(data, ActionDataEmpty): - if isinstance(data, BlobFile) and not data.uploaded: - api = APIRegistry.api_for( - self.syft_node_location, self.syft_client_verify_key - ) - data.upload_to_blobstorage_from_api(api) + if isinstance(data, BlobFile): + if not data.uploaded: + api = self.get_api() + data._upload_to_blobstorage_from_api(api) else: - storage_entry = CreateBlobStorageEntry.from_obj(data) + get_metadata = from_api_or_context( + func_or_path="metadata.get_metadata", + syft_server_location=self.syft_server_location, + syft_client_verify_key=self.syft_client_verify_key, + ) + if ( + get_metadata is not None + and not can_upload_to_blob_storage(data, get_metadata()).unwrap() + ): + self.syft_action_saved_to_blob_store = False + return SyftWarning( + message=( + f"The action object {self.id} was not saved to" + f" the blob store but to memory cache since it is small." 
+ ) + ) + serialized = serialize(data, to_bytes=True) + size = sys.getsizeof(serialized) + storage_entry = CreateBlobStorageEntry.from_obj(data, file_size=size) + + if not TraceResultRegistry.current_thread_is_tracing(): + self.syft_action_data_cache = self.as_empty_data() if self.syft_blob_storage_entry_id is not None: # TODO: check if it already exists storage_entry.id = self.syft_blob_storage_entry_id allocate_method = from_api_or_context( func_or_path="blob_storage.allocate", - syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, syft_client_verify_key=self.syft_client_verify_key, ) if allocate_method is not None: blob_deposit_object = allocate_method(storage_entry) - - if isinstance(blob_deposit_object, SyftError): - return blob_deposit_object - - result = blob_deposit_object.write( - BytesIO(serialize(data, to_bytes=True)) - ) - if isinstance(result, SyftError): - return result + blob_deposit_object.write(BytesIO(serialized)).unwrap() self.syft_blob_storage_entry_id = ( blob_deposit_object.blob_storage_entry_id ) else: - print("cannot save to blob storage") + logger.warn("cannot save to blob storage. allocate_method=None") self.syft_action_data_type = type(data) - - if inspect.isclass(data): - self.syft_action_data_repr_ = repr_cls(data) - else: - self.syft_action_data_repr_ = ( - data._repr_markdown_() - if hasattr(data, "_repr_markdown_") - else data.__repr__() - ) - self.syft_action_data_str_ = str(data) + self._set_reprs(data) self.syft_has_bool_attr = hasattr(data, "__bool__") else: - debug("skipping writing action object to store, passed data was empty.") + logger.debug( + "skipping writing action object to store, passed data was empty." + ) self.syft_action_data_cache = data return None - def _save_to_blob_storage(self) -> SyftError | None: + @as_result(SyftException) + def _save_to_blob_storage( + self, allow_empty: bool = False + ) -> SyftSuccess | SyftWarning: data = self.syft_action_data - if isinstance(data, SyftError): - return data + if isinstance(data, ActionDataEmpty): - return SyftError(message=f"cannot store empty object {self.id}") + raise SyftException( + public_message=f"cannot store empty object {self.id} to the blob storage" + ) + result = self._save_to_blob_storage_(data) - if isinstance(result, SyftError): + if isinstance(result, SyftWarning): return result if not TraceResultRegistry.current_thread_is_tracing(): - self.syft_action_data_cache = self.as_empty_data() - return None + self._clear_cache() + return SyftSuccess(message=f"Saved action object {self.id} to the blob store") + + def _clear_cache(self) -> None: + self.syft_action_data_cache = self.as_empty_data() + + def _set_reprs(self, data: any) -> None: + if inspect.isclass(data): + self.syft_action_data_repr_ = truncate_str(repr_cls(data)) + else: + self.syft_action_data_repr_ = truncate_str( + data._repr_markdown_() + if hasattr(data, "_repr_markdown_") + else data.__repr__() + ) + self.syft_action_data_str_ = truncate_str(str(data)) @property def is_pointer(self) -> bool: - return self.syft_node_uid is not None + return self.syft_server_uid is not None @property def syft_lineage_id(self) -> LineageID: - """Compute the LineageID of the ActionObject, using the `id` and the `syft_history_hash` memebers""" + """Compute the LineageID of the ActionObject, using the `id` and the `syft_history_hash` members""" return LineageID(self.id, self.syft_history_hash) model_config = ConfigDict(validate_assignment=True) @model_validator(mode="before") @classmethod - def 
__check_action_data(cls, values: dict) -> dict: - v = values.get("syft_action_data_cache") - if values.get("syft_action_data_type", None) is None: - values["syft_action_data_type"] = type(v) - if not isinstance(v, ActionDataEmpty): - if inspect.isclass(v): - values["syft_action_data_repr_"] = repr_cls(v) - else: - values["syft_action_data_repr_"] = ( - v._repr_markdown_() - if v is not None and hasattr(v, "_repr_markdown_") - else v.__repr__() - ) - values["syft_action_data_str_"] = str(v) - values["syft_has_bool_attr"] = hasattr(v, "__bool__") + def __check_action_data(cls, values: Any) -> dict: + if isinstance(values, dict): + v = values.get("syft_action_data_cache") + if values.get("syft_action_data_type", None) is None: + values["syft_action_data_type"] = type(v) + if not isinstance(v, ActionDataEmpty): + if inspect.isclass(v): + values["syft_action_data_repr_"] = truncate_str(repr_cls(v)) + else: + values["syft_action_data_repr_"] = truncate_str( + v._repr_markdown_() + if v is not None and hasattr(v, "_repr_markdown_") + else v.__repr__() + ) + values["syft_action_data_str_"] = truncate_str(str(v)) + values["syft_has_bool_attr"] = hasattr(v, "__bool__") + return values @property @@ -845,9 +932,9 @@ def is_real(self) -> bool: def is_twin(self) -> bool: return self.syft_twin_type != TwinMode.NONE - def syft_point_to(self, node_uid: UID) -> ActionObject: - """Set the syft_node_uid, used in the post hooks""" - self.syft_node_uid = node_uid + def syft_point_to(self, server_uid: UID) -> ActionObject: + """Set the syft_server_uid, used in the post hooks""" + self.syft_server_uid = server_uid return self def syft_get_property(self, obj: Any, method: str) -> Any: @@ -862,6 +949,11 @@ def syft_is_property(self, obj: Any, method: str) -> bool: klass_method ) + def syft_eq(self, ext_obj: Self | None) -> bool: + if ext_obj is None: + return False + return self.id.id == ext_obj.id.id + def syft_execute_action( self, action: Action, sync: bool = True ) -> ActionObjectPointer: @@ -876,36 +968,42 @@ def syft_execute_action( Returns: ActionObjectPointer """ - if self.syft_node_uid is None: - raise SyftException("Pointers can't execute without a node_uid.") + if self.syft_server_uid is None: + raise SyftException( + public_message="Pointers can't execute without a server_uid." + ) # relative - from ...client.api import APIRegistry from ...client.api import SyftAPICall - api = APIRegistry.api_for( - node_uid=self.syft_node_uid, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - raise ValueError(f"api is None. You must login to {self.syft_node_uid}") + api = self.get_api() + # if api is None: + # raise ValueError(f"api is None. 
You must login to {self.syft_server_uid}") kwargs = {"action": action} api_call = SyftAPICall( - node_uid=self.syft_node_uid, path="action.execute", args=[], kwargs=kwargs + server_uid=self.syft_server_uid, + path="action.execute", + args=[], + kwargs=kwargs, ) - return api.make_call(api_call) + res = api.make_call(api_call) + if isinstance(res, SyftSuccess): + return res.value # type: ignore[return-value] + return res # type: ignore[return-value] - def request(self, client: SyftClient) -> Any | SyftError: + def request(self, client: SyftClient) -> Any: # relative from ..request.request import ActionStoreChange from ..request.request import SubmitRequest - action_object_link = LinkedObject.from_obj(self, node_uid=self.syft_node_uid) + action_object_link = LinkedObject.from_obj( + self, server_uid=self.syft_server_uid + ) permission_change = ActionStoreChange( linked_obj=action_object_link, apply_permission_type=ActionPermission.READ ) if client.credentials is None: - return SyftError(f"{client} has no signing key") + raise SyftException(public_message=f"{client} has no signing key") submit_request = SubmitRequest( changes=[permission_change], requesting_user_verify_key=client.credentials.verify_key, @@ -913,9 +1011,9 @@ def request(self, client: SyftClient) -> Any | SyftError: return client.api.services.request.submit(submit_request) def _syft_try_to_save_to_store(self, obj: SyftObject) -> None: - if self.syft_node_uid is None or self.syft_client_verify_key is None: + if self.syft_server_uid is None or self.syft_client_verify_key is None: return - elif obj.syft_node_uid is not None: + elif obj.syft_server_uid is not None: return if obj.syft_blob_storage_entry_id is not None: @@ -929,10 +1027,9 @@ def _syft_try_to_save_to_store(self, obj: SyftObject) -> None: # 3) it shouldnt send in the first place as it already exists # relative - from ...client.api import APIRegistry - if obj.syft_node_location is None: - obj.syft_node_location = obj.syft_node_uid + if obj.syft_server_location is None: + obj.syft_server_location = obj.syft_server_uid action = Action( path="", @@ -949,21 +1046,19 @@ def _syft_try_to_save_to_store(self, obj: SyftObject) -> None: trace_result = TraceResultRegistry.get_trace_result_for_thread() trace_result.result += [action] # type: ignore - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - print( - f"failed saving {obj} to blob storage, api is None. You must login to {self.syft_node_location}" + api = self.get_api_wrapped().unwrap( + public_message=( + f"Failed saving {obj} to blob storage, api is None." + f" You must login to {self.syft_server_location}." 
) - return - else: - obj._set_obj_location_(api.node_uid, api.signing_key.verify_key) # type: ignore[union-attr] + ) + + obj._set_obj_location_(api.server_uid, api.signing_key.verify_key) # type: ignore[union-attr] - res = api.services.action.execute(action) - if isinstance(res, SyftError): - print(f"Failed to to store (arg) {obj} to store, {res}") + try: + api.services.action.execute(action) + except Exception as e: + print(f"Failed to to store (arg) {obj} to store, {e}") def _syft_prepare_obj_uid(self, obj: Any) -> LineageID: # We got the UID @@ -983,7 +1078,7 @@ def _syft_prepare_obj_uid(self, obj: Any) -> LineageID: act_obj = ActionObject.from_obj( obj, syft_client_verify_key=self.syft_client_verify_key, - syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, ) self._syft_try_to_save_to_store(act_obj) @@ -1028,14 +1123,9 @@ def syft_make_action( if kwargs is None: kwargs = {} - arg_ids = [] - kwarg_ids = {} - - for obj in args: - arg_ids.append(self._syft_prepare_obj_uid(obj)) + arg_ids = [self._syft_prepare_obj_uid(obj) for obj in args] - for k, obj in kwargs.items(): - kwarg_ids[k] = self._syft_prepare_obj_uid(obj) + kwarg_ids = {k: self._syft_prepare_obj_uid(obj) for k, obj in kwargs.items()} action = Action( path=path, @@ -1080,6 +1170,15 @@ def syft_make_action_with_self( action_type=action_type, ) + def get_sync_dependencies( + self, context: AuthedServiceContext, **kwargs: dict + ) -> list[UID]: + try: + job = context.server.services.job.get_by_result_id(context, self.id.id) + return [job.id] + except SyftException: + return [] + def syft_get_path(self) -> str: """Get the type path of the underlying object""" if ( @@ -1114,14 +1213,28 @@ def wrapper( return wrapper def send(self, client: SyftClient) -> Any: - return self._send(client, add_storage_permission=True) - - def _send(self, client: SyftClient, add_storage_permission: bool = True) -> Self: - """Send the object to a Syft Client""" - self._set_obj_location_(client.id, client.verify_key) - self._save_to_blob_storage() - res = client.api.services.action.set( - self, add_storage_permission=add_storage_permission + return self._send( + server_uid=client.id, + verify_key=client.verify_key, + add_storage_permission=True, + ) + + def _send( + self, + server_uid: UID, + verify_key: SyftVerifyKey, + add_storage_permission: bool = True, + ) -> Self: + self._set_obj_location_(server_uid, verify_key) + + blob_storage_res = self._save_to_blob_storage().unwrap() + api = self._get_api() + + if isinstance(blob_storage_res, SyftWarning): + logger.debug(blob_storage_res.message) + res = api.services.action.set( + self, + add_storage_permission=add_storage_permission, ) if isinstance(res, ActionObject): self.syft_created_at = res.syft_created_at @@ -1130,26 +1243,20 @@ def _send(self, client: SyftClient, add_storage_permission: bool = True) -> Self def get_from(self, client: SyftClient) -> Any: """Get the object from a Syft Client""" res = client.api.services.action.get(self.id) - if not isinstance(res, ActionObject): - return SyftError(message=f"{res}") - else: - return res.syft_action_data + return res.syft_action_data - def refresh_object(self) -> ActionObject: + def refresh_object(self, resolve_nested: bool = True) -> ActionObject: # relative - from ...client.api import APIRegistry - - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, + return self.get_api().services.action.get( + self.id, resolve_nested=resolve_nested ) - if api is None: 
- return SyftError( - message=f"api is None. You must login to {self.syft_node_location}" - ) - res = api.services.action.get(self.id) - return res + def has_storage_permission(self) -> bool: + try: + api = self.get_api() + return api.services.action.has_storage_permission(self.id.id) + except Exception: + return False def get(self, block: bool = False) -> Any: """Get the object from a Syft Client""" @@ -1159,13 +1266,18 @@ def get(self, block: bool = False) -> Any: self.wait() res = self.refresh_object() - if not isinstance(res, ActionObject): - return SyftError(message=f"{res}") # type: ignore + raise SyftException(public_message=f"{res}") # type: ignore + elif issubclass(res.syft_action_data_type, Err): + raise SyftException(public_message=f"{res.syft_action_data.err()}") else: + if not self.has_storage_permission(): + prompt_warning_message( + message="This is a placeholder object, the real data lives on a different server and is not synced." + ) nested_res = res.syft_action_data if isinstance(nested_res, ActionObject): - nested_res.syft_node_location = res.syft_node_location + nested_res.syft_server_location = res.syft_server_location nested_res.syft_client_verify_key = res.syft_client_verify_key return nested_res @@ -1201,7 +1313,7 @@ def from_path( id: UID | None = None, syft_lineage_id: LineageID | None = None, syft_client_verify_key: SyftVerifyKey | None = None, - syft_node_location: UID | None = None, + syft_server_location: UID | None = None, ) -> ActionObject: """Create an Action Object from a file.""" # relative @@ -1223,8 +1335,8 @@ def from_path( if syft_client_verify_key is not None: action_object.syft_client_verify_key = syft_client_verify_key - if syft_node_location is not None: - action_object.syft_node_location = syft_node_location + if syft_server_location is not None: + action_object.syft_server_location = syft_server_location if syft_lineage_id is not None: action_object.id = syft_lineage_id.id @@ -1240,9 +1352,9 @@ def from_obj( id: UID | None = None, syft_lineage_id: LineageID | None = None, syft_client_verify_key: SyftVerifyKey | None = None, - syft_node_location: UID | None = None, + syft_server_location: UID | None = None, syft_resolved: bool | None = True, - data_node_id: UID | None = None, + data_server_id: UID | None = None, syft_blob_storage_entry_id: UID | None = None, ) -> ActionObject: """Create an ActionObject from an existing object. 
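# --- Editor's aside: an end-to-end usage sketch for the pointer methods
# touched above (`from_obj`, `send`, `get`). It assumes a running Syft server
# and valid admin credentials; the login URL, email and password below are
# placeholders, not values taken from this diff.
import numpy as np
import syft as sy

client = sy.login(
    url="http://localhost:8080",  # placeholder server address
    email="info@openmined.org",   # placeholder credentials
    password="changethis",
)

obj = sy.ActionObject.from_obj(np.array([1, 2, 3]))  # wrap local data
ptr = obj.send(client)   # save to blob storage / action store, get a pointer back
fetched = ptr.get()      # pull the concrete value back through the pointer
assert (fetched == np.array([1, 2, 3])).all()
# --- end editor's aside ---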
@@ -1258,10 +1370,22 @@ def from_obj( if id is not None and syft_lineage_id is not None and id != syft_lineage_id.id: raise ValueError("UID and LineageID should match") + # check if the object's type is supported + try: + SyftObjectRegistry.get_canonical_name_version(syft_action_data) + except Exception: + obj_type = type(syft_action_data) + raise SyftException( + public_message=( + f"Error when creating action object for {syft_action_data}.\n" + f"Unsupported data type: '{obj_type.__module__}.{obj_type.__name__}'" + ) + ) + action_type = action_type_for_object(syft_action_data) action_object = action_type(syft_action_data_cache=syft_action_data) action_object.syft_blob_storage_entry_id = syft_blob_storage_entry_id - action_object.syft_action_data_node_id = data_node_id + action_object.syft_action_data_server_id = data_server_id action_object.syft_resolved = syft_resolved if id is not None: @@ -1270,8 +1394,8 @@ def from_obj( if syft_client_verify_key is not None: action_object.syft_client_verify_key = syft_client_verify_key - if syft_node_location is not None: - action_object.syft_node_location = syft_node_location + if syft_server_location is not None: + action_object.syft_server_location = syft_server_location if syft_lineage_id is not None: action_object.id = syft_lineage_id.id @@ -1297,25 +1421,26 @@ def as_empty_data(self) -> ActionDataEmpty: def wait(self, timeout: int | None = None) -> ActionObject: # relative - from ...client.api import APIRegistry - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) + api = self.get_api() if isinstance(self.id, LineageID): obj_id = self.id.id else: obj_id = self.id counter = 0 - while api and not api.services.action.is_resolved(obj_id): - time.sleep(1) - if timeout is not None: - counter += 1 - if counter > timeout: - return SyftError(message="Reached Timeout!") - + while api: + obj_resolved: bool | str = api.services.action.is_resolved(obj_id) + if isinstance(obj_resolved, str): + raise SyftException(public_message=obj_resolved) + if obj_resolved: + break + if not obj_resolved: + time.sleep(1) + if timeout is not None: + counter += 1 + if counter > timeout: + raise SyftException(public_message="Reached Timeout!") return self @staticmethod @@ -1332,13 +1457,16 @@ def link( @staticmethod def obj_not_ready( - id: UID, + id: UID, syft_server_location: UID, syft_client_verify_key: SyftVerifyKey ) -> ActionObject: inner_obj = ObjectNotReady(obj_id=id) res = ActionObject.from_obj( id=id, syft_action_data=inner_obj, + syft_resolved=False, + syft_server_location=syft_server_location, + syft_client_verify_key=syft_client_verify_key, ) return res @@ -1350,7 +1478,7 @@ def empty( id: UID | None = None, syft_lineage_id: LineageID | None = None, syft_resolved: bool | None = True, - data_node_id: UID | None = None, + data_server_id: UID | None = None, syft_blob_storage_entry_id: UID | None = None, ) -> Self: """Create an ActionObject from a type, using a ActionDataEmpty object @@ -1373,7 +1501,7 @@ def empty( syft_lineage_id=syft_lineage_id, syft_action_data=empty, syft_resolved=syft_resolved, - data_node_id=data_node_id, + data_server_id=data_server_id, syft_blob_storage_entry_id=syft_blob_storage_entry_id, ) res.__dict__["syft_internal_type"] = syft_internal_type @@ -1384,36 +1512,67 @@ def __post_init__(self) -> None: if HOOK_ALWAYS not in self.syft_pre_hooks__: self.syft_pre_hooks__[HOOK_ALWAYS] = [] - if HOOK_ON_POINTERS not in self.syft_post_hooks__: + if HOOK_ON_POINTERS not in 
self.syft_pre_hooks__: self.syft_pre_hooks__[HOOK_ON_POINTERS] = [] - # this should be a list as orders matters - for side_effect in [make_action_side_effect]: - if side_effect not in self.syft_pre_hooks__[HOOK_ALWAYS]: - self.syft_pre_hooks__[HOOK_ALWAYS].append(side_effect) - - for side_effect in [send_action_side_effect]: - if side_effect not in self.syft_pre_hooks__[HOOK_ON_POINTERS]: - self.syft_pre_hooks__[HOOK_ON_POINTERS].append(side_effect) - - if trace_action_side_effect not in self.syft_pre_hooks__[HOOK_ALWAYS]: - self.syft_pre_hooks__[HOOK_ALWAYS].append(trace_action_side_effect) - if HOOK_ALWAYS not in self.syft_post_hooks__: self.syft_post_hooks__[HOOK_ALWAYS] = [] if HOOK_ON_POINTERS not in self.syft_post_hooks__: self.syft_post_hooks__[HOOK_ON_POINTERS] = [] - for side_effect in [propagate_node_uid]: - if side_effect not in self.syft_post_hooks__[HOOK_ALWAYS]: - self.syft_post_hooks__[HOOK_ALWAYS].append(side_effect) + api = self.get_api_wrapped() + + eager_execution_enabled = ( + api.is_ok() + and api.unwrap().metadata is not None + and api.unwrap().metadata.eager_execution_enabled # type: ignore + ) + + self._syft_add_pre_hooks__(eager_execution_enabled) + self._syft_add_post_hooks__(eager_execution_enabled) if isinstance(self.syft_action_data_type, ActionObject): raise Exception("Nested ActionObjects", self.syft_action_data_repr_) self.syft_history_hash = hash(self.id) + def _syft_add_pre_hooks__(self, eager_execution: bool) -> None: + """ + Add pre-hooks + + Args: + eager_execution: bool: If eager execution is enabled, hooks for + tracing and executing the action on remote are added. + """ + + # this should be a list as orders matters + for side_effect in [make_action_side_effect]: + if side_effect not in self.syft_pre_hooks__[HOOK_ALWAYS]: + self.syft_pre_hooks__[HOOK_ALWAYS].append(side_effect) + + if eager_execution: + for side_effect in [send_action_side_effect]: + if side_effect not in self.syft_pre_hooks__[HOOK_ON_POINTERS]: + self.syft_pre_hooks__[HOOK_ON_POINTERS].append(side_effect) + + if trace_action_side_effect not in self.syft_pre_hooks__[HOOK_ALWAYS]: + self.syft_pre_hooks__[HOOK_ALWAYS].append(trace_action_side_effect) + + def _syft_add_post_hooks__(self, eager_execution: bool) -> None: + """ + Add post-hooks + + Args: + eager_execution: bool: If eager execution is enabled, hooks for + tracing and executing the action on remote are added. 
+ """ + if eager_execution: + # this should be a list as orders matters + for side_effect in [propagate_server_uid]: + if side_effect not in self.syft_post_hooks__[HOOK_ALWAYS]: + self.syft_post_hooks__[HOOK_ALWAYS].append(side_effect) + def _syft_run_pre_hooks__( self, context: PreHookContext, name: str, args: Any, kwargs: Any ) -> tuple[PreHookContext, tuple[Any, ...], dict[str, Any]]: @@ -1425,7 +1584,7 @@ def _syft_run_pre_hooks__( if result.is_ok(): context, result_args, result_kwargs = result.ok() else: - debug(f"Pre-hook failed with {result.err()}") + logger.debug(f"Pre-hook failed with {result.err()}") if name not in self._syft_dont_wrap_attrs(): if HOOK_ALWAYS in self.syft_pre_hooks__: for hook in self.syft_pre_hooks__[HOOK_ALWAYS]: @@ -1433,8 +1592,9 @@ def _syft_run_pre_hooks__( if result.is_ok(): context, result_args, result_kwargs = result.ok() else: - msg = result.err().replace("\\n", "\n") - debug(f"Pre-hook failed with {msg}") + msg = str(result.err()) + msg = msg.replace("\\n", "\n") + logger.debug(f"Pre-hook failed with {msg}") if self.is_pointer: if name not in self._syft_dont_wrap_attrs(): @@ -1444,8 +1604,9 @@ def _syft_run_pre_hooks__( if result.is_ok(): context, result_args, result_kwargs = result.ok() else: - msg = result.err().replace("\\n", "\n") - debug(f"Pre-hook failed with {msg}") + msg = str(result.err()) + msg = msg.replace("\\n", "\n") + logger.debug(f"Pre-hook failed with {msg}") return context, result_args, result_kwargs @@ -1460,7 +1621,7 @@ def _syft_run_post_hooks__( if result.is_ok(): new_result = result.ok() else: - debug(f"Post hook failed with {result.err()}") + logger.debug(f"Post hook failed with {result.err()}") if name not in self._syft_dont_wrap_attrs(): if HOOK_ALWAYS in self.syft_post_hooks__: @@ -1469,7 +1630,7 @@ def _syft_run_post_hooks__( if result.is_ok(): new_result = result.ok() else: - debug(f"Post hook failed with {result.err()}") + logger.debug(f"Post hook failed with {result.err()}") if self.is_pointer: if name not in self._syft_dont_wrap_attrs(): @@ -1479,7 +1640,7 @@ def _syft_run_post_hooks__( if result.is_ok(): new_result = result.ok() else: - debug(f"Post hook failed with {result.err()}") + logger.debug(f"Post hook failed with {result.err()}") return new_result @@ -1497,7 +1658,7 @@ def _syft_output_action_object( result = constructor( syft_twin_type=syft_twin_type, syft_action_data_cache=result, - syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, syft_client_verify_key=self.syft_client_verify_key, ) return result @@ -1514,8 +1675,6 @@ def _syft_get_attr_context(self, name: str) -> Any: """Find which instance - Syft ActionObject or the original object - has the requested attribute.""" defined_on_self = name in self.__dict__ or name in self.__private_attributes__ - debug(">> ", name, ", defined_on_self = ", defined_on_self) - # use the custom defined version context_self = self if not defined_on_self: @@ -1526,7 +1685,7 @@ def _syft_get_attr_context(self, name: str) -> Any: def _syft_attr_propagate_ids( self, context: PreHookContext, name: str, result: Any ) -> Any: - """Patch the results with the syft_history_hash, node_uid, and result_id.""" + """Patch the results with the syft_history_hash, server_uid, and result_id.""" if name in self._syft_dont_wrap_attrs(): return result @@ -1537,14 +1696,14 @@ def _syft_attr_propagate_ids( if context.action is not None: result.syft_history_hash = context.action.syft_history_hash - # Propagate Syft Node UID - result.syft_node_uid = 
context.node_uid + # Propagate Syft Server UID + result.syft_server_uid = context.server_uid - # Propogate Syft Node Location and Client Verify Key - result.syft_node_location = context.syft_node_location + # Propogate Syft Server Location and Client Verify Key + result.syft_server_location = context.syft_server_location result.syft_client_verify_key = context.syft_client_verify_key - # Propogate Syft blob storage entry id + # Propagate Syft blob storage entry id object_attrs = [ "syft_blob_storage_entry_id", "syft_action_data_repr_", @@ -1573,11 +1732,11 @@ def _syft_wrap_attribute_for_bool_on_nonbools(self, name: str) -> Any: "[_wrap_attribute_for_bool_on_nonbools] self.syft_action_data already implements the bool operator" ) - debug("[__getattribute__] Handling bool on nonbools") + logger.debug("[__getattribute__] Handling bool on nonbools") context = PreHookContext( obj=self, op_name=name, - syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, syft_client_verify_key=self.syft_client_verify_key, ) context, _, _ = self._syft_run_pre_hooks__(context, name, (), {}) @@ -1606,13 +1765,13 @@ def _syft_wrap_attribute_for_properties(self, name: str) -> Any: raise RuntimeError( "[_wrap_attribute_for_properties] Use this only on properties" ) - debug(f"[__getattribute__] Handling property {name} ") + logger.debug(f"[__getattribute__] Handling property {name}") context = PreHookContext( obj=self, op_name=name, action_type=ActionType.GETATTRIBUTE, - syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, syft_client_verify_key=self.syft_client_verify_key, ) context, _, _ = self._syft_run_pre_hooks__(context, name, (), {}) @@ -1630,7 +1789,7 @@ def _syft_wrap_attribute_for_methods(self, name: str) -> Any: def fake_func(*args: Any, **kwargs: Any) -> Any: return ActionDataEmpty(syft_internal_type=self.syft_internal_type) - debug(f"[__getattribute__] Handling method {name} ") + logger.debug(f"[__getattribute__] Handling method {name}") if ( issubclass(self.syft_action_data_type, ActionDataEmpty) and name not in action_data_empty_must_run @@ -1646,7 +1805,7 @@ def _base_wrapper(*args: Any, **kwargs: Any) -> Any: obj=self, op_name=name, action_type=ActionType.METHOD, - syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, syft_client_verify_key=self.syft_client_verify_key, ) context, pre_hook_args, pre_hook_kwargs = self._syft_run_pre_hooks__( @@ -1667,20 +1826,20 @@ def _base_wrapper(*args: Any, **kwargs: Any) -> Any: return post_result if inspect.ismethod(original_func) or inspect.ismethoddescriptor(original_func): - debug("Running method: ", name) + logger.debug(f"Running method: {name}") def wrapper(_self: Any, *args: Any, **kwargs: Any) -> Any: return _base_wrapper(*args, **kwargs) wrapper = types.MethodType(wrapper, type(self)) else: - debug("Running non-method: ", name) + logger.debug(f"Running non-method: {name}") wrapper = _base_wrapper try: wrapper.__doc__ = original_func.__doc__ - debug( + logger.debug( "Found original signature for ", name, inspect.signature(original_func), @@ -1689,7 +1848,7 @@ def wrapper(_self: Any, *args: Any, **kwargs: Any) -> Any: original_func ) except Exception: - debug("name", name, "has no signature") + logger.debug(f"name={name} has no signature") # third party return wrapper @@ -1713,7 +1872,7 @@ def fake_func(*args: Any, **kwargs: Any) -> Any: obj=self, op_name=op_name, action_type=ActionType.SETATTRIBUTE, - 
syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, syft_client_verify_key=self.syft_client_verify_key, ) context, pre_hook_args, pre_hook_kwargs = self._syft_run_pre_hooks__( @@ -1744,6 +1903,10 @@ def __getattribute__(self, name: str) -> Any: name: str The name of the attribute to access. """ + # bypass ipython canary verification + if name == "_ipython_canary_method_should_not_exist_": + return None + # bypass certain attrs to prevent recursion issues if name.startswith("_syft") or name.startswith("syft"): return object.__getattribute__(self, name) @@ -1754,13 +1917,17 @@ def __getattribute__(self, name: str) -> Any: # third party if name in self._syft_passthrough_attrs(): return object.__getattribute__(self, name) - context_self = self._syft_get_attr_context(name) # Handle bool operator on nonbools if name == "__bool__" and not self.syft_has_bool_attr: return self._syft_wrap_attribute_for_bool_on_nonbools(name) + # check cache first + if name in methods_to_check_in_cache: + return getattr(self.syft_action_data_cache, name, None) + # Handle Properties + context_self = self._syft_get_attr_context(name) if self.syft_is_property(context_self, name): return self._syft_wrap_attribute_for_properties(name) @@ -1770,12 +1937,12 @@ def __getattribute__(self, name: str) -> Any: @property def is_link(self) -> bool: - return isinstance(self.syft_action_data, ActionDataLink) + return self.syft_action_data_type is ActionDataLink def __setattr__(self, name: str, value: Any) -> Any: defined_on_self = name in self.__dict__ or name in self.__private_attributes__ - debug(">> ", name, ", defined_on_self = ", defined_on_self) + logger.debug(f">> {name} defined_on_self={defined_on_self}") # use the custom defined version if defined_on_self: @@ -1817,7 +1984,17 @@ def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: else self.syft_action_data_cache.__repr__() ) - return f"```python\n{res}\n```\n{data_repr_}" + return f"\n**{res}**\n\n{data_repr_}\n" + + def _data_repr(self) -> str | None: + if isinstance(self.syft_action_data_cache, ActionDataEmpty): + data_repr = self.syft_action_data_repr_ + elif inspect.isclass(self.syft_action_data_cache): + data_repr = repr_cls(self.syft_action_data_cache) + else: + data_repr = self.syft_action_data_cache.__repr__() + + return data_repr def __repr__(self) -> str: if self.is_mock: @@ -1826,14 +2003,8 @@ def __repr__(self) -> str: res = "TwinPointer(Real)" if not self.is_twin: res = "Pointer" - if isinstance(self.syft_action_data_cache, ActionDataEmpty): - data_repr_ = self.syft_action_data_repr_ - else: - if inspect.isclass(self.syft_action_data_cache): - data_repr_ = repr_cls(self.syft_action_data_cache) - else: - data_repr_ = self.syft_action_data_cache.__repr__() - return f"{res}:\n{data_repr_}" + data_repr = self._data_repr() + return f"{res}:\n{data_repr}" def __call__(self, *args: Any, **kwds: Any) -> Any: return self.__call__(*args, **kwds) @@ -1996,8 +2167,13 @@ def __rrshift__(self, other: Any) -> Any: @serializable() class AnyActionObject(ActionObject): + """ + This is a catch-all class for all objects that are not + defined in the `action_types` dictionary. 
+ """ + __canonical_name__ = "AnyActionObject" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 syft_internal_type: ClassVar[type[Any]] = NoneType # type: ignore # syft_passthrough_attrs: List[str] = [] @@ -2015,13 +2191,13 @@ def __int__(self) -> float: def debug_original_func(name: str, func: Callable) -> None: - debug(f"{name} func is:") - debug("inspect.isdatadescriptor", inspect.isdatadescriptor(func)) - debug("inspect.isgetsetdescriptor", inspect.isgetsetdescriptor(func)) - debug("inspect.isfunction", inspect.isfunction(func)) - debug("inspect.isbuiltin", inspect.isbuiltin(func)) - debug("inspect.ismethod", inspect.ismethod(func)) - debug("inspect.ismethoddescriptor", inspect.ismethoddescriptor(func)) + logger.debug(f"{name} func is:") + logger.debug(f"inspect.isdatadescriptor = {inspect.isdatadescriptor(func)}") + logger.debug(f"inspect.isgetsetdescriptor = {inspect.isgetsetdescriptor(func)}") + logger.debug(f"inspect.isfunction = {inspect.isfunction(func)}") + logger.debug(f"inspect.isbuiltin = {inspect.isbuiltin(func)}") + logger.debug(f"inspect.ismethod = {inspect.ismethod(func)}") + logger.debug(f"inspect.ismethoddescriptor = {inspect.ismethoddescriptor(func)}") def is_action_data_empty(obj: Any) -> bool: @@ -2035,7 +2211,7 @@ def has_action_data_empty(args: Any, kwargs: Any) -> bool: if is_action_data_empty(a): return True - for _, a in kwargs.items(): + for a in kwargs.values(): if is_action_data_empty(a): return True return False diff --git a/packages/syft/src/syft/service/action/action_permissions.py b/packages/syft/src/syft/service/action/action_permissions.py index 2d1c268a1f6..ab6f9b7ce9a 100644 --- a/packages/syft/src/syft/service/action/action_permissions.py +++ b/packages/syft/src/syft/service/action/action_permissions.py @@ -3,12 +3,12 @@ from typing import Any # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey from ...types.uid import UID -@serializable() +@serializable(canonical_name="ActionPermission", version=1) class ActionPermission(Enum): OWNER = 1 READ = 2 @@ -17,16 +17,33 @@ class ActionPermission(Enum): ALL_WRITE = 32 EXECUTE = 64 ALL_EXECUTE = 128 + ALL_OWNER = 256 + + @property + def as_compound(self) -> "ActionPermission": + if self in COMPOUND_ACTION_PERMISSION: + return self + elif self == ActionPermission.READ: + return ActionPermission.ALL_READ + elif self == ActionPermission.WRITE: + return ActionPermission.ALL_WRITE + elif self == ActionPermission.EXECUTE: + return ActionPermission.ALL_EXECUTE + elif self == ActionPermission.OWNER: + return ActionPermission.ALL_OWNER + else: + raise Exception(f"Invalid compound permission {self}") COMPOUND_ACTION_PERMISSION = { ActionPermission.ALL_READ, ActionPermission.ALL_WRITE, ActionPermission.ALL_EXECUTE, + ActionPermission.ALL_OWNER, } -@serializable() +@serializable(canonical_name="ActionObjectPermission", version=1) class ActionObjectPermission: def __init__( self, @@ -41,6 +58,20 @@ def __init__( self.credentials = credentials self.permission = permission + @classmethod + def from_permission_string( + cls, uid: UID, permission_string: str + ) -> "ActionObjectPermission": + if permission_string.startswith("ALL_"): + permission = ActionPermission[permission_string] + verify_key = None + else: + verify_key_str, perm_str = permission_string.split("_", 1) + permission = ActionPermission[perm_str] + verify_key = SyftVerifyKey.from_string(verify_key_str) + + return cls(uid=uid, 
permission=permission, credentials=verify_key) + @property def permission_string(self) -> str: if self.permission in COMPOUND_ACTION_PERMISSION: @@ -50,6 +81,10 @@ def permission_string(self) -> str: return f"{self.credentials.verify}_{self.permission.name}" return f"{self.permission.name}" + @property + def compound_permission_string(self) -> str: + return self.permission.as_compound.name + def _coll_repr_(self) -> dict[str, Any]: return { "uid": str(self.uid), @@ -94,16 +129,21 @@ def __init__(self, uid: UID, credentials: SyftVerifyKey): self.permission = ActionPermission.EXECUTE +@serializable(canonical_name="StoragePermission", version=1) class StoragePermission: - def __init__(self, uid: UID, node_uid: UID): + def __init__(self, uid: UID, server_uid: UID): self.uid = uid - self.node_uid = node_uid + self.server_uid = server_uid def __repr__(self) -> str: - return f"StoragePermission: {self.uid} on {self.node_uid}" + return f"StoragePermission: {self.uid} on {self.server_uid}" def _coll_repr_(self) -> dict[str, Any]: return { "uid": str(self.uid), - "node_uid": str(self.node_uid), + "server_uid": str(self.server_uid), } + + @property + def permission_string(self) -> str: + return str(self.server_uid) diff --git a/packages/syft/src/syft/service/action/action_service.py b/packages/syft/src/syft/service/action/action_service.py index 513ca48ff94..fa7174d992e 100644 --- a/packages/syft/src/syft/service/action/action_service.py +++ b/packages/syft/src/syft/service/action/action_service.py @@ -1,30 +1,31 @@ # stdlib import importlib +import logging from typing import Any -from typing import cast # third party import numpy as np -from result import Err -from result import Ok -from result import Result # relative -from ...abstract_node import AbstractNode -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey +from ...store.db.db import DBManager +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException from ...types.datetime import DateTime +from ...types.errors import SyftException +from ...types.result import as_result from ...types.syft_object import SyftObject from ...types.twin_object import TwinObject from ...types.uid import UID -from ..blob_storage.service import BlobStorageService from ..code.user_code import UserCode from ..code.user_code import execute_byte_code from ..context import AuthedServiceContext from ..policy.policy import OutputPolicy from ..policy.policy import retrieve_from_db -from ..response import SyftError +from ..response import SyftResponseMessage from ..response import SyftSuccess +from ..response import SyftWarning from ..service import AbstractService from ..service import SERVICE_TO_TYPES from ..service import TYPE_TO_SERVICE @@ -33,6 +34,7 @@ from ..user.user_roles import ADMIN_ROLE_LEVEL from ..user.user_roles import GUEST_ROLE_LEVEL from ..user.user_roles import ServiceRole +from .action_endpoint import CustomEndpointActionObject from .action_object import Action from .action_object import ActionObject from .action_object import ActionObjectPointer @@ -42,34 +44,40 @@ from .action_permissions import ActionObjectPermission from .action_permissions import ActionObjectREAD from .action_permissions import ActionPermission -from .action_store import ActionStore +from .action_permissions import StoragePermission +from .action_store import ActionObjectStash from .action_types import action_type_for_type from .numpy 
import NumpyArrayObject from .pandas import PandasDataFrameObject # noqa: F401 from .pandas import PandasSeriesObject # noqa: F401 +logger = logging.getLogger(__name__) -@serializable() + +@serializable(canonical_name="ActionService", version=1) class ActionService(AbstractService): - def __init__(self, store: ActionStore) -> None: - self.store = store + stash: ActionObjectStash + + def __init__(self, store: DBManager) -> None: + self.stash = ActionObjectStash(store) @service_method(path="action.np_array", name="np_array") def np_array(self, context: AuthedServiceContext, data: Any) -> Any: + # TODO: REMOVE! if not isinstance(data, np.ndarray): data = np.array(data) - # cast here since we are sure that AuthedServiceContext has a node - context.node = cast(AbstractNode, context.node) + # cast here since we are sure that AuthedServiceContext has a server + np_obj = NumpyArrayObject( dtype=data.dtype, shape=data.shape, syft_action_data_cache=data, - syft_node_location=context.node.id, + syft_server_location=context.server.id, syft_client_verify_key=context.credentials, ) - blob_store_result = np_obj._save_to_blob_storage() - if isinstance(blob_store_result, SyftError): - return blob_store_result + blob_store_result = np_obj._save_to_blob_storage().unwrap() + if isinstance(blob_store_result, SyftWarning): + logger.debug(blob_store_result.message) np_pointer = self._set(context, np_obj) return np_pointer @@ -84,53 +92,118 @@ def set( context: AuthedServiceContext, action_object: ActionObject | TwinObject, add_storage_permission: bool = True, - ) -> Result[ActionObject, str]: + ignore_detached_objs: bool = False, + ) -> ActionObject: return self._set( context, action_object, has_result_read_permission=True, add_storage_permission=add_storage_permission, - ) + ignore_detached_objs=ignore_detached_objs, + ).unwrap() + def is_detached_obj( + self, + action_object: ActionObject | TwinObject, + ignore_detached_obj: bool = False, + ) -> bool: + """ + A detached object is an object that is not yet saved to the blob storage. 
+ """ + if ( + isinstance(action_object, TwinObject) + and ( + ( + action_object.mock_obj.syft_action_saved_to_blob_store + and action_object.mock_obj.syft_blob_storage_entry_id is None + ) + or ( + action_object.private_obj.syft_action_saved_to_blob_store + and action_object.private_obj.syft_blob_storage_entry_id is None + ) + ) + and not ignore_detached_obj + ): + return True + if isinstance(action_object, ActionObject) and ( + action_object.syft_action_saved_to_blob_store + and action_object.syft_blob_storage_entry_id is None + and not ignore_detached_obj + ): + return True + return False + + @as_result(StashException, SyftException) def _set( self, context: AuthedServiceContext, action_object: ActionObject | TwinObject, has_result_read_permission: bool = False, add_storage_permission: bool = True, - ) -> Result[ActionObject, str]: + ignore_detached_objs: bool = False, + ) -> ActionObject: + if self.is_detached_obj(action_object, ignore_detached_objs): + raise SyftException( + public_message="You uploaded an ActionObject that is not yet in the blob storage" + ) + """Save an object to the action store""" # 🟡 TODO 9: Create some kind of type checking / protocol for SyftSerializable if isinstance(action_object, ActionObject): action_object.syft_created_at = DateTime.now() - else: + ( + action_object._clear_cache() + if action_object.syft_action_saved_to_blob_store + else None + ) + else: # TwinObject action_object.private_obj.syft_created_at = DateTime.now() # type: ignore[unreachable] action_object.mock_obj.syft_created_at = DateTime.now() + # Clear cache if data is saved to blob storage + ( + action_object.private_obj._clear_cache() + if action_object.private_obj.syft_action_saved_to_blob_store + else None + ) + ( + action_object.mock_obj._clear_cache() + if action_object.mock_obj.syft_action_saved_to_blob_store + else None + ) + # If either context or argument is True, has_result_read_permission is True has_result_read_permission = ( context.extra_kwargs.get("has_result_read_permission", False) or has_result_read_permission ) - result = self.store.set( + self.stash.set_or_update( uid=action_object.id, credentials=context.credentials, syft_object=action_object, has_result_read_permission=has_result_read_permission, add_storage_permission=add_storage_permission, - ) - if result.is_ok(): - if isinstance(action_object, TwinObject): - if has_result_read_permission: - action_object = action_object.private - else: - action_object = action_object.mock - context.node = cast(AbstractNode, context.node) - action_object.syft_point_to(context.node.id) - return Ok(action_object) - return result.err() + ).unwrap() + + if isinstance(action_object, TwinObject): + # give read permission to the mock + # if mock is saved to blob store, then add READ permission + if action_object.mock_obj.syft_action_saved_to_blob_store: + blob_id = action_object.mock_obj.syft_blob_storage_entry_id + permission = ActionObjectPermission(blob_id, ActionPermission.ALL_READ) + # add_permission is not resultified. 
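# --- Editor's aside: a framework-free sketch of the "detached object" rule in
# `is_detached_obj` above: an object that claims it is saved to the blob store
# (`syft_action_saved_to_blob_store=True`) but has no blob storage entry id yet
# is rejected by `_set` unless `ignore_detached_objs` is passed. The dataclass
# below is only a stand-in for ActionObject, used to illustrate the predicate.
from dataclasses import dataclass
from uuid import UUID, uuid4


@dataclass
class _FakeActionObject:
    syft_action_saved_to_blob_store: bool = True
    syft_blob_storage_entry_id: UUID | None = None


def _is_detached(obj: _FakeActionObject, ignore_detached_obj: bool = False) -> bool:
    return (
        obj.syft_action_saved_to_blob_store
        and obj.syft_blob_storage_entry_id is None
        and not ignore_detached_obj
    )


assert _is_detached(_FakeActionObject())  # flagged for blob store, not uploaded yet
assert not _is_detached(_FakeActionObject(syft_blob_storage_entry_id=uuid4()))
assert not _is_detached(_FakeActionObject(), ignore_detached_obj=True)
# --- end editor's aside ---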
+ context.server.services.blob_storage.stash.add_permission(permission) + + if has_result_read_permission: + action_object = action_object.private + else: + action_object = action_object.mock + + action_object.syft_point_to(context.server.id) + + return action_object @service_method( path="action.is_resolved", name="is_resolved", roles=GUEST_ROLE_LEVEL @@ -139,63 +212,41 @@ def is_resolved( self, context: AuthedServiceContext, uid: UID, - ) -> Result[Ok[bool], Err[str]]: + ) -> bool: """Get an object from the action store""" - # relative - from .action_data_empty import ActionDataLink - - result = self._get(context, uid) - if result.is_ok(): - obj = result.ok() - if isinstance(obj.syft_action_data, ActionDataLink): - result = self.resolve_links( - context, obj.syft_action_data.action_object_id.id - ) - - # Checking in case any error occurred - if result.is_err(): - return result + obj = self._get(context, uid).unwrap() - return Ok(result.syft_resolved) + if obj.is_link: + result = self.resolve_links( + context, obj.syft_action_data.action_object_id.id + ).unwrap() + return result.syft_resolved - # If it's a leaf but not resolved yet, return false - elif not obj.syft_resolved: - return Ok(False) + # If it's a leaf but not resolved yet, return false + if not obj.syft_resolved: + return False - # If it's not an action data link or non resolved (empty). It's resolved - return Ok(True) + # If it's not an action data link or non resolved (empty). It's resolved + return True - # If it's not in the store or permission error, return the error - return result - - @service_method( - path="action.resolve_links", name="resolve_links", roles=GUEST_ROLE_LEVEL - ) + @as_result(StashException, NotFoundException) def resolve_links( self, context: AuthedServiceContext, uid: UID, twin_mode: TwinMode = TwinMode.PRIVATE, - ) -> Result[Ok[ActionObject], Err[str]]: + ) -> ActionObject: """Get an object from the action store""" - # relative - from .action_data_empty import ActionDataLink - - result = self.store.get(uid=uid, credentials=context.credentials) # If user has permission to get the object / object exists - if result.is_ok(): - obj = result.ok() - - # If it's not a leaf - if isinstance(obj.syft_action_data, ActionDataLink): - nested_result = self.resolve_links( - context, obj.syft_action_data.action_object_id.id, twin_mode - ) - return nested_result + result = self.stash.get(uid=uid, credentials=context.credentials).unwrap() - # If it's a leaf - return result + # If it's not a leaf + if result.is_link: + return self.resolve_links( + context, result.syft_action_data.action_object_id.id, twin_mode + ).unwrap() + # If it's a leaf return result @service_method(path="action.get", name="get", roles=GUEST_ROLE_LEVEL) @@ -205,10 +256,13 @@ def get( uid: UID, twin_mode: TwinMode = TwinMode.PRIVATE, resolve_nested: bool = True, - ) -> Result[Ok[ActionObject], Err[str]]: + ) -> ActionObject | TwinObject: """Get an object from the action store""" - return self._get(context, uid, twin_mode, resolve_nested=resolve_nested) + return self._get( + context, uid, twin_mode, resolve_nested=resolve_nested + ).unwrap() + @as_result(StashException, NotFoundException, SyftException) def _get( self, context: AuthedServiceContext, @@ -216,120 +270,132 @@ def _get( twin_mode: TwinMode = TwinMode.PRIVATE, has_permission: bool = False, resolve_nested: bool = True, - ) -> Result[ActionObject, str]: + ) -> ActionObject | TwinObject: """Get an object from the action store""" - # stdlib - - # relative - from .action_data_empty import 
ActionDataLink - - result = self.store.get( + obj = self.stash.get( uid=uid, credentials=context.credentials, has_permission=has_permission + ).unwrap() + + # TODO: Is this necessary? + if context.server is None: + raise SyftException(public_message=f"Server not found. Context: {context}") + obj._set_obj_location_( + context.server.id, + context.credentials, ) - if result.is_ok() and context.node is not None: - obj: TwinObject | ActionObject = result.ok() - obj._set_obj_location_( - context.node.id, - context.credentials, - ) - # Resolve graph links - if ( - not isinstance(obj, TwinObject) # type: ignore[unreachable] - and resolve_nested - and isinstance(obj.syft_action_data, ActionDataLink) - ): - if not self.is_resolved( # type: ignore[unreachable] - context, obj.syft_action_data.action_object_id.id - ).ok(): - return SyftError(message="This object is not resolved yet.") - result = self.resolve_links( - context, obj.syft_action_data.action_object_id.id, twin_mode - ) - return result - if isinstance(obj, TwinObject): - if twin_mode == TwinMode.PRIVATE: - obj = obj.private - obj.syft_point_to(context.node.id) - elif twin_mode == TwinMode.MOCK: - obj = obj.mock - obj.syft_point_to(context.node.id) - else: - obj.mock.syft_point_to(context.node.id) - obj.private.syft_point_to(context.node.id) - return Ok(obj) - else: - return result + + # Resolve graph links + if not isinstance(obj, TwinObject) and resolve_nested and obj.is_link: # type: ignore [unreachable] + # if not self.is_resolved( # type: ignore [unreachable] + # context, obj.syft_action_data.action_object_id.id + # ): + # raise SyftException(public_message="This object is not resolved yet.") + + return self.resolve_links( # type: ignore + context, obj.syft_action_data.action_object_id.id, twin_mode + ).unwrap() + + if isinstance(obj, TwinObject): + if twin_mode == TwinMode.PRIVATE: + obj = obj.private + obj.syft_point_to(context.server.id) + elif twin_mode == TwinMode.MOCK: + obj = obj.mock + obj.syft_point_to(context.server.id) + else: + obj.mock.syft_point_to(context.server.id) + obj.private.syft_point_to(context.server.id) + + return obj @service_method( path="action.get_pointer", name="get_pointer", roles=GUEST_ROLE_LEVEL ) def get_pointer( self, context: AuthedServiceContext, uid: UID - ) -> Result[ActionObjectPointer, str]: + ) -> ActionObjectPointer: """Get a pointer from the action store""" - context.node = cast(AbstractNode, context.node) - result = self.store.get_pointer( - uid=uid, credentials=context.credentials, node_uid=context.node.id + obj = self.stash.get_pointer( + uid=uid, credentials=context.credentials, server_uid=context.server.id + ).unwrap() + + obj._set_obj_location_( + context.server.id, + context.credentials, ) - if result.is_ok(): - obj = result.ok() - obj._set_obj_location_( - context.node.id, - context.credentials, - ) - return Ok(obj) - return Err(result.err()) + + return obj @service_method(path="action.get_mock", name="get_mock", roles=GUEST_ROLE_LEVEL) - def get_mock( - self, context: AuthedServiceContext, uid: UID - ) -> Result[SyftError, SyftObject]: + def get_mock(self, context: AuthedServiceContext, uid: UID) -> SyftObject: """Get a pointer from the action store""" - result = self.store.get_mock(uid=uid) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + return self.stash.get_mock(credentials=context.credentials, uid=uid).unwrap() + + @service_method( + path="action.has_storage_permission", + name="has_storage_permission", + roles=GUEST_ROLE_LEVEL, + ) + def 
has_storage_permission(self, context: AuthedServiceContext, uid: UID) -> bool: + return self.stash.has_storage_permission( + StoragePermission(uid=uid, server_uid=context.server.id) + ) + + def has_read_permission(self, context: AuthedServiceContext, uid: UID) -> bool: + return self.stash.has_permissions( + [ActionObjectREAD(uid=uid, credentials=context.credentials)] + ) # not a public service endpoint + @as_result(SyftException) def _user_code_execute( self, context: AuthedServiceContext, code_item: UserCode, kwargs: dict[str, Any], result_id: UID | None = None, - ) -> Result[ActionObjectPointer, Err]: + ) -> ActionObjectPointer: override_execution_permission = ( context.has_execute_permissions or context.role == ServiceRole.ADMIN ) - input_policy = code_item.get_input_policy(context) + output_policy = code_item.get_output_policy(context) + + # Unwrap nested ActionObjects + for _, arg in kwargs.items(): + self.flatten_action_arg(context, arg) if isinstance(arg, UID) else None if not override_execution_permission: if input_policy is None: - if not code_item.output_policy_approved: - return Err("Execution denied: Your code is waiting for approval") - return Err(f"No input policy defined for user code: {code_item.id}") + if not code_item.is_output_policy_approved(context).unwrap(): + raise SyftException( + public_message="Execution denied: Your code is waiting for approval" + ) + raise SyftException( + public_message=f"No input policy defined for user code: {code_item.id}" + ) - # Filter input kwargs based on policy - filtered_kwargs = input_policy.filter_kwargs( - kwargs=kwargs, context=context, code_item_id=code_item.id + # validate input policy, raises if not valid + input_policy.is_valid( + context=context, + usr_input_kwargs=kwargs, ) - if filtered_kwargs.is_err(): - return filtered_kwargs - filtered_kwargs = filtered_kwargs.ok() - # validate input policy - is_approved = input_policy._is_valid( + # Filter input kwargs based on policy + filtered_kwargs = input_policy.filter_kwargs( + kwargs=kwargs, context=context, - usr_input_kwargs=kwargs, - code_item_id=code_item.id, ) - if is_approved.is_err(): - return is_approved else: - filtered_kwargs = retrieve_from_db(code_item.id, kwargs, context).ok() - # update input policy to track any input state + filtered_kwargs = retrieve_from_db(kwargs, context).unwrap() + if hasattr(input_policy, "transform_kwargs"): + filtered_kwargs = input_policy.transform_kwargs( # type: ignore + context, + filtered_kwargs, + ).unwrap() + + # update input policy to track any input state has_twin_inputs = False real_kwargs = {} @@ -343,10 +409,22 @@ def _user_code_execute( try: if not has_twin_inputs: # no twins + # allow python types from inputpolicy filtered_kwargs = filter_twin_kwargs( - real_kwargs, twin_mode=TwinMode.NONE - ) + real_kwargs, twin_mode=TwinMode.NONE, allow_python_types=True + ).unwrap() exec_result = execute_byte_code(code_item, filtered_kwargs, context) + if exec_result.errored: + raise SyftException(public_message=exec_result.safe_error_message) + + if output_policy: + exec_result.result = output_policy.apply_to_output( + context, + exec_result.result, + update_policy=not override_execution_permission, + ) + code_item.output_policy = output_policy # type: ignore + context.server.services.user_code.update_code_state(context, code_item) if isinstance(exec_result.result, ActionObject): result_action_object = ActionObject.link( result_id=result_id, pointer_id=exec_result.result.id @@ -356,16 +434,31 @@ def _user_code_execute( else: # twins 
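# The _user_code_execute hunk below runs approved user code twice whenever any input is a
# TwinObject: once over the private values and once over the mock values, then packs both
# results into a new TwinObject. A minimal, self-contained sketch of that pattern follows,
# for orientation only; the names (Twin, run_on_twins) are illustrative and are not part of
# the Syft API shown in this diff.
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any


@dataclass
class Twin:
    private: Any  # real value, visible only to permitted users
    mock: Any     # fake/synthetic value with the same shape


def run_on_twins(fn: Callable[..., Any], kwargs: dict[str, Any]) -> Any:
    """Run fn once if no twins are present, twice (private + mock) otherwise."""
    has_twins = any(isinstance(v, Twin) for v in kwargs.values())
    if not has_twins:
        return fn(**kwargs)
    private_kwargs = {k: (v.private if isinstance(v, Twin) else v) for k, v in kwargs.items()}
    mock_kwargs = {k: (v.mock if isinstance(v, Twin) else v) for k, v in kwargs.items()}
    return Twin(private=fn(**private_kwargs), mock=fn(**mock_kwargs))


# Example: the mock result can be shown without leaking the private value, mirroring how
# the service returns the TwinMode.MOCK side by default to unprivileged readers.
result = run_on_twins(lambda x, y: x + y, {"x": Twin(private=10, mock=0), "y": 5})
assert result.private == 15 and result.mock == 5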
private_kwargs = filter_twin_kwargs( - real_kwargs, twin_mode=TwinMode.PRIVATE - ) + real_kwargs, twin_mode=TwinMode.PRIVATE, allow_python_types=True + ).unwrap() private_exec_result = execute_byte_code( code_item, private_kwargs, context ) + if private_exec_result.errored: + raise SyftException( + public_message=private_exec_result.safe_error_message + ) + + if output_policy: + private_exec_result.result = output_policy.apply_to_output( + context, + private_exec_result.result, + update_policy=not override_execution_permission, + ) + code_item.output_policy = output_policy # type: ignore + context.server.services.user_code.update_code_state(context, code_item) result_action_object_private = wrap_result( result_id, private_exec_result.result ) - mock_kwargs = filter_twin_kwargs(real_kwargs, twin_mode=TwinMode.MOCK) + mock_kwargs = filter_twin_kwargs( + real_kwargs, twin_mode=TwinMode.MOCK, allow_python_types=True + ).unwrap() # relative from .action_data_empty import ActionDataEmpty @@ -375,6 +468,16 @@ def _user_code_execute( mock_exec_result = execute_byte_code( code_item, mock_kwargs, context ) + + if mock_exec_result.errored: + raise SyftException( + public_message=mock_exec_result.safe_error_message + ) + + if output_policy: + mock_exec_result.result = output_policy.apply_to_output( + context, mock_exec_result.result, update_policy=False + ) mock_exec_result_obj = mock_exec_result.result result_action_object_mock = wrap_result(result_id, mock_exec_result_obj) @@ -385,17 +488,23 @@ def _user_code_execute( mock_obj=result_action_object_mock, ) except Exception as e: - # import traceback - # return Err(f"_user_code_execute failed. {e} {traceback.format_exc()}") - return Err(f"_user_code_execute failed. {e}") - return Ok(result_action_object) + raise SyftException.from_exception(e) + + return result_action_object + + # def raise_for_failed_execution(self, output: ExecutionOutput): + # if output.errored: + # raise SyftException(public_message="Execution of usercode failed, ask admin", + # private_message=output.stdout + "\n" + output.stderr) + @as_result(SyftException) def set_result_to_store( self, result_action_object: ActionObject | TwinObject, context: AuthedServiceContext, output_policy: OutputPolicy | None = None, - ) -> Result[ActionObject, str] | SyftError: + has_result_read_permission: bool = False, + ) -> ActionObject: result_id = result_action_object.id # result_blob_id = result_action_object.syft_blob_storage_entry_id @@ -408,15 +517,21 @@ def set_result_to_store( else: output_readers = [] + # If flag is True, user has read permissions to the results in BlobStore + if has_result_read_permission: + output_readers.append(context.credentials) + read_permission = ActionPermission.READ - context.node = cast(AbstractNode, context.node) + result_action_object._set_obj_location_( - context.node.id, + context.server.id, context.credentials, ) - blob_store_result = result_action_object._save_to_blob_storage() - if isinstance(blob_store_result, SyftError): - return blob_store_result + blob_store_result: SyftResponseMessage = ( + result_action_object._save_to_blob_storage().unwrap() + ) + if isinstance(blob_store_result, SyftWarning): + logger.debug(blob_store_result.message) # IMPORTANT: DO THIS ONLY AFTER ._save_to_blob_storage if isinstance(result_action_object, TwinObject): @@ -425,16 +540,14 @@ def set_result_to_store( result_blob_id = result_action_object.syft_blob_storage_entry_id # type: ignore[unreachable] # pass permission information to the action store as extra kwargs - 
context.extra_kwargs = {"has_result_read_permission": True} - - set_result = self._set(context, result_action_object) + # context.extra_kwargs = {"has_result_read_permission": True} - if set_result.is_err(): - return set_result - - blob_storage_service: AbstractService = context.node.get_service( - BlobStorageService - ) + # Since this just meta data about the result, they always have access to it. + set_result = self._set( + context, + result_action_object, + has_result_read_permission=True, + ).unwrap() def store_permission( x: SyftVerifyKey | None = None, @@ -448,19 +561,23 @@ def blob_permission( if len(output_readers) > 0: store_permissions = [store_permission(x) for x in output_readers] - self.store.add_permissions(store_permissions) + self.stash.add_permissions(store_permissions) - blob_permissions = [blob_permission(x) for x in output_readers] - blob_storage_service.stash.add_permissions(blob_permissions) + if result_blob_id is not None: + blob_permissions = [blob_permission(x) for x in output_readers] + context.server.services.blob_storage.stash.add_permissions( + blob_permissions + ) return set_result + @as_result(SyftException) def execute_plan( self, plan: Any, context: AuthedServiceContext, plan_kwargs: dict[str, ActionObject], - ) -> Result[ActionObject, str] | SyftError: + ) -> ActionObject: id2inpkey = {v.id: k for k, v in plan.inputs.items()} for plan_action in plan.actions: @@ -480,43 +597,44 @@ def execute_plan( plan_action.kwargs[k] = plan_kwargs[id2inpkey[arg]] for plan_action in plan.actions: - action_res = self.execute(context, plan_action) - if isinstance(action_res, SyftError): - return action_res + self.execute(context, plan_action) result_id = plan.outputs[0].id - return self._get(context, result_id, TwinMode.MOCK, has_permission=True) + return self._get( + context, result_id, TwinMode.NONE, has_permission=True + ).unwrap() + @as_result(SyftException) def call_function( self, context: AuthedServiceContext, action: Action - ) -> Result[ActionObject, str] | Err: + ) -> ActionObject: # run function/class init _user_lib_config_registry = UserLibConfigRegistry.from_user(context.credentials) absolute_path = f"{action.path}.{action.op}" if absolute_path in _user_lib_config_registry: # TODO: implement properly # Now we are assuming its a function/class - return execute_callable(self, context, action) + return execute_callable(self, context, action).unwrap() else: - return Err( - f"Failed executing {action}. You have no permission for {absolute_path}" + raise SyftException( + public_message=f"Failed executing {action}. 
You have no permission for {absolute_path}" ) + @as_result(SyftException) def set_attribute( self, context: AuthedServiceContext, action: Action, resolved_self: ActionObject | TwinObject, - ) -> Result[TwinObject | ActionObject, str]: - args, _ = resolve_action_args(action, context, self) - if args.is_err(): - return Err( - f"Failed executing action {action}, could not resolve args: {args.err()}" - ) - else: - args = args.ok() + ) -> TwinObject: + args, _ = resolve_action_args(action, context, self).unwrap( + public_message=f"Failed executing action {action} (could not resolve args)" + ) if not isinstance(args[0], ActionObject): - return Err( - f"Failed executing action {action} setattribute requires a non-twin string as first argument" + raise SyftException( + public_message=( + f"Failed executing action {action} setattribute requires" + " a non-twin string as first argument" + ) ) name = args[0].syft_action_data # dont do the whole filtering dance with the name @@ -524,63 +642,60 @@ def set_attribute( if isinstance(resolved_self, TwinObject): # todo, create copy? - private_args = filter_twin_args(args, twin_mode=TwinMode.PRIVATE) + private_args = filter_twin_args(args, twin_mode=TwinMode.PRIVATE).unwrap() private_val = private_args[0] setattr(resolved_self.private.syft_action_data, name, private_val) # todo: what do we use as data for the mock here? # depending on permisisons? - public_args = filter_twin_args(args, twin_mode=TwinMode.MOCK) + public_args = filter_twin_args(args, twin_mode=TwinMode.MOCK).unwrap() public_val = public_args[0] setattr(resolved_self.mock.syft_action_data, name, public_val) - return Ok( - TwinObject( - id=action.result_id, - private_obj=ActionObject.from_obj( - resolved_self.private.syft_action_data - ), - private_obj_id=action.result_id, - mock_obj=ActionObject.from_obj(resolved_self.mock.syft_action_data), - mock_obj_id=action.result_id, - ) + return TwinObject( + id=action.result_id, + private_obj=ActionObject.from_obj( + resolved_self.private.syft_action_data + ), + private_obj_id=action.result_id, + mock_obj=ActionObject.from_obj(resolved_self.mock.syft_action_data), + mock_obj_id=action.result_id, ) + else: # TODO: Implement for twinobject args - args = filter_twin_args(args, twin_mode=TwinMode.NONE) # type: ignore[unreachable] + args = filter_twin_args(args, twin_mode=TwinMode.NONE).unwrap() # type: ignore[unreachable] val = args[0] setattr(resolved_self.syft_action_data, name, val) - return Ok( - ActionObject.from_obj(resolved_self.syft_action_data), - ) + return (ActionObject.from_obj(resolved_self.syft_action_data),) # todo: permissions # setattr(resolved_self.syft_action_data, name, val) # val = resolved_self.syft_action_data # result_action_object = Ok(wrap_result(action.result_id, val)) + @as_result(SyftException) def get_attribute( self, action: Action, resolved_self: ActionObject | TwinObject - ) -> Ok[TwinObject | ActionObject]: + ) -> TwinObject | ActionObject: if isinstance(resolved_self, TwinObject): private_result = getattr(resolved_self.private.syft_action_data, action.op) mock_result = getattr(resolved_self.mock.syft_action_data, action.op) - return Ok( - TwinObject( - id=action.result_id, - private_obj=ActionObject.from_obj(private_result), - private_obj_id=action.result_id, - mock_obj=ActionObject.from_obj(mock_result), - mock_obj_id=action.result_id, - ) + return TwinObject( + id=action.result_id, + private_obj=ActionObject.from_obj(private_result), + private_obj_id=action.result_id, + mock_obj=ActionObject.from_obj(mock_result), + 
mock_obj_id=action.result_id, ) else: val = getattr(resolved_self.syft_action_data, action.op) # type: ignore[unreachable] - return Ok(wrap_result(action.result_id, val)) + return wrap_result(action.result_id, val) + @as_result(SyftException) def call_method( self, context: AuthedServiceContext, action: Action, resolved_self: ActionObject | TwinObject, - ) -> Result[TwinObject | Any, str]: + ) -> TwinObject | Any: if isinstance(resolved_self, TwinObject): # method private_result = execute_object( @@ -589,119 +704,174 @@ def call_method( resolved_self.private, action, twin_mode=TwinMode.PRIVATE, - ) - if private_result.is_err(): - return Err( - f"Failed executing action {action}, result is an error: {private_result.err()}" - ) + ).unwrap(public_message=f"Failed executing action {action}") mock_result = execute_object( self, context, resolved_self.mock, action, twin_mode=TwinMode.MOCK + ).unwrap(public_message=f"Failed executing action {action}") + + return TwinObject( + id=action.result_id, + private_obj=private_result, + private_obj_id=action.result_id, + mock_obj=mock_result, + mock_obj_id=action.result_id, ) - if mock_result.is_err(): - return Err( - f"Failed executing action {action}, result is an error: {mock_result.err()}" - ) + else: + return execute_object(self, context, resolved_self, action).unwrap() # type:ignore[unreachable] + + as_result(SyftException) - private_result = private_result.ok() - mock_result = mock_result.ok() + def unwrap_nested_actionobjects( + self, context: AuthedServiceContext, data: Any + ) -> Any: + """recursively unwraps nested action objects""" - return Ok( - TwinObject( - id=action.result_id, - private_obj=private_result, - private_obj_id=action.result_id, - mock_obj=mock_result, - mock_obj_id=action.result_id, + if isinstance(data, list): + return [self.unwrap_nested_actionobjects(context, obj) for obj in data] + + if isinstance(data, dict): + return { + key: self.unwrap_nested_actionobjects(context, obj) + for key, obj in data.items() + } + + if isinstance(data, ActionObject): + res = self.get(context=context, uid=data.id) + + nested_res = res.syft_action_data + + if isinstance(nested_res, ActionObject): + raise SyftException( + public_message="More than double nesting of ActionObjects is currently not supported" ) - ) - else: - return execute_object(self, context, resolved_self, action) # type:ignore[unreachable] + + return nested_res + + return data + + def contains_nested_actionobjects(self, data: Any) -> bool: + """ + returns if this is a list/set/dict that contains ActionObjects + """ + + def unwrap_collection(col: set | dict | list) -> [Any]: # type: ignore + return_values = [] + if isinstance(col, dict): + values = list(col.values()) + list(col.keys()) + else: + values = list(col) + for v in values: + if isinstance(v, list | dict | set): + return_values += unwrap_collection(v) + else: + return_values.append(v) + return return_values + + if isinstance(data, list | dict | set): + values = unwrap_collection(data) + has_action_object = any(isinstance(x, ActionObject) for x in values) + return has_action_object + elif isinstance(data, ActionObject): + return True + return False + + def flatten_action_arg(self, context: AuthedServiceContext, arg: UID) -> None: + """ "If the argument is a collection (of collections) of ActionObjects, + We want to flatten the collection and upload a new ActionObject that contins + its values. E.g. 
[[ActionObject1, ActionObject2],[ActionObject3, ActionObject4]] + -> [[value1, value2],[value3, value4]] + """ + root_context = context.as_root_context() + + action_object = self.get(context=root_context, uid=arg) + data = action_object.syft_action_data + + if self.contains_nested_actionobjects(data): + new_data = self.unwrap_nested_actionobjects(context, data) + # Update existing action object with the new flattened data + action_object.syft_action_data_cache = new_data + + # we should create this with the permissions as the old object + # currently its using the client verify key on the object + action_object._save_to_blob_storage().unwrap() + # we should create this with the permissions of the old object + self._set( + context=root_context, + action_object=action_object, + ).unwrap() + + return None @service_method(path="action.execute", name="execute", roles=GUEST_ROLE_LEVEL) - def execute( - self, context: AuthedServiceContext, action: Action - ) -> Result[ActionObject, Err]: + def execute(self, context: AuthedServiceContext, action: Action) -> ActionObject: """Execute an operation on objects in the action store""" # relative from .plan import Plan - context.node = cast(AbstractNode, context.node) if action.action_type == ActionType.CREATEOBJECT: - result_action_object = Ok(action.create_object) - # print(action.create_object, "already in blob storage") + result_action_object = action.create_object elif action.action_type == ActionType.SYFTFUNCTION: - usercode_service = context.node.get_service("usercodeservice") kwarg_ids = {} for k, v in action.kwargs.items(): # transform lineage ids into ids kwarg_ids[k] = v.id - result_action_object = usercode_service._call( + return context.server.services.user_code._call( # type: ignore[union-attr] context, action.user_code_id, action.result_id, **kwarg_ids - ) - return result_action_object + ).unwrap() elif action.action_type == ActionType.FUNCTION: - result_action_object = self.call_function(context, action) + result_action_object = self.call_function(context, action).unwrap() else: resolved_self = self._get( context=context, uid=action.remote_self, twin_mode=TwinMode.NONE, has_permission=True, + ).unwrap( + public_message=f"Failed executing action {action}, could not resolve self: {action.remote_self}" ) - if resolved_self.is_err(): - return Err( - f"Failed executing action {action}, could not resolve self: {resolved_self.err()}" - ) - resolved_self = resolved_self.ok() if action.op == "__call__" and resolved_self.syft_action_data_type == Plan: result_action_object = self.execute_plan( plan=resolved_self.syft_action_data, context=context, plan_kwargs=action.kwargs, - ) - return result_action_object + ).unwrap() elif action.action_type == ActionType.SETATTRIBUTE: result_action_object = self.set_attribute( context, action, resolved_self - ) + ).unwrap() elif action.action_type == ActionType.GETATTRIBUTE: - result_action_object = self.get_attribute(action, resolved_self) + result_action_object = self.get_attribute( + action, resolved_self + ).unwrap() elif action.action_type == ActionType.METHOD: - result_action_object = self.call_method(context, action, resolved_self) + result_action_object = self.call_method( + context, action, resolved_self + ).unwrap() else: - return Err("Unknown action") - - if result_action_object.is_err(): - return Err( - f"Failed executing action {action}, result is an error: {result_action_object.err()}" - ) - else: - result_action_object = result_action_object.ok() + raise SyftException(public_message="unknown action") 
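# flatten_action_arg / unwrap_nested_actionobjects (in the hunk above) replace ActionObjects
# nested inside lists and dicts with their underlying values before user code runs, so the
# code receives [[value1, value2], [value3, value4]] rather than pointers. A rough sketch of
# that recursion, using a stand-in Ref class instead of Syft's ActionObject (hypothetical
# names, not the actual implementation):
from typing import Any


class Ref:
    """Stand-in for an ActionObject: a pointer wrapping a stored value."""

    def __init__(self, value: Any) -> None:
        self._value = value

    def resolve(self) -> Any:
        return self._value


def unwrap_nested(data: Any) -> Any:
    """Recursively replace Ref instances inside lists/dicts with their resolved values."""
    if isinstance(data, list):
        return [unwrap_nested(item) for item in data]
    if isinstance(data, dict):
        return {key: unwrap_nested(value) for key, value in data.items()}
    if isinstance(data, Ref):
        inner = data.resolve()
        if isinstance(inner, Ref):
            # Mirrors the service's restriction: more than double nesting is rejected.
            raise ValueError("More than double nesting is not supported")
        return inner
    return data


nested = [[Ref(1), Ref(2)], [Ref(3), Ref(4)]]
assert unwrap_nested(nested) == [[1, 2], [3, 4]]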
# check if we have read permissions on the result has_result_read_permission = self.has_read_permission_for_action_result( context, action ) - - result_action_object._set_obj_location_( - context.node.id, + result_action_object._set_obj_location_( # type: ignore[union-attr] + context.server.id, context.credentials, ) - - blob_store_result = result_action_object._save_to_blob_storage() - if isinstance(blob_store_result, SyftError): - return blob_store_result - + blob_store_result = result_action_object._save_to_blob_storage().unwrap() # type: ignore[union-attr] # pass permission information to the action store as extra kwargs context.extra_kwargs = { "has_result_read_permission": has_result_read_permission } - - set_result = self._set(context, result_action_object) - if set_result.is_err(): - return Err( - f"Failed executing action {action}, set result is an error: {set_result.err()}" - ) + if isinstance(blob_store_result, SyftWarning): + logger.debug(blob_store_result.message) + set_result = self._set( + context, + result_action_object, + ) + set_result = set_result.unwrap( + public_message=f"Failed executing action {action}" + ) return set_result @@ -715,82 +885,149 @@ def has_read_permission_for_action_result( ActionObjectREAD(uid=_id, credentials=context.credentials) for _id in action_obj_ids ] - return self.store.has_permissions(permissions) + return self.stash.has_permissions(permissions) @service_method(path="action.exists", name="exists", roles=GUEST_ROLE_LEVEL) - def exists( - self, context: AuthedServiceContext, obj_id: UID - ) -> Result[SyftSuccess, SyftError]: + def exists(self, context: AuthedServiceContext, obj_id: UID) -> bool: """Checks if the given object id exists in the Action Store""" - if self.store.exists(obj_id): - return SyftSuccess(message=f"Object: {obj_id} exists") - else: - return SyftError(message=f"Object: {obj_id} does not exist") + return self.stash.exists(context.credentials, obj_id) - @service_method(path="action.delete", name="delete", roles=ADMIN_ROLE_LEVEL) + @service_method( + path="action.delete", + name="delete", + roles=ADMIN_ROLE_LEVEL, + unwrap_on_success=False, + ) def delete( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - res = self.store.delete(context.credentials, uid) - if res.is_err(): - return SyftError(message=res.err()) - return SyftSuccess(message="Great Success!") + self, context: AuthedServiceContext, uid: UID, soft_delete: bool = False + ) -> SyftSuccess: + obj = self.stash.get(uid=uid, credentials=context.credentials).unwrap() + + return_msg = [] + + # delete any associated blob storage entry object to the action object + blob_del_res = self._delete_blob_storage_entry( + context=context, obj=obj + ).unwrap() + return_msg.append(blob_del_res.message) + + # delete the action object from the action store + store_del_res = self._delete_from_action_store( + context=context, uid=obj.id, soft_delete=soft_delete + ).unwrap() + return_msg.append(store_del_res.message) + return SyftSuccess(message="\n".join(return_msg)) + + @as_result(SyftException) + def _delete_blob_storage_entry( + self, + context: AuthedServiceContext, + obj: TwinObject | ActionObject, + ) -> SyftSuccess: + deleted_blob_ids = [] + + if isinstance(obj, ActionObject) and obj.syft_blob_storage_entry_id: + context.server.services.blob_storage.delete( + context=context, uid=obj.syft_blob_storage_entry_id + ) + deleted_blob_ids.append(obj.syft_blob_storage_entry_id) + + if isinstance(obj, TwinObject): + if obj.private.syft_blob_storage_entry_id: 
+ context.server.services.blob_storage.delete( + context=context, uid=obj.private.syft_blob_storage_entry_id + ) + deleted_blob_ids.append(obj.private.syft_blob_storage_entry_id) + + if obj.mock.syft_blob_storage_entry_id: + context.server.services.blob_storage.delete( + context=context, uid=obj.mock.syft_blob_storage_entry_id + ) + deleted_blob_ids.append(obj.mock.syft_blob_storage_entry_id) + message = f"Deleted blob storage entries: {', '.join(str(blob_id) for blob_id in deleted_blob_ids)}" + return SyftSuccess(message=message) + + @as_result(SyftException) + def _delete_from_action_store( + self, + context: AuthedServiceContext, + uid: UID, + soft_delete: bool = False, + ) -> SyftSuccess: + if soft_delete: + obj = self.stash.get(uid=uid, credentials=context.credentials).unwrap() + + if isinstance(obj, TwinObject): + self._soft_delete_action_obj( + context=context, action_obj=obj.private + ).unwrap() + self._soft_delete_action_obj( + context=context, action_obj=obj.mock + ).unwrap() + if isinstance(obj, ActionObject): + self._soft_delete_action_obj(context=context, action_obj=obj).unwrap() + else: + self.stash.delete_by_uid(credentials=context.credentials, uid=uid).unwrap() + + return SyftSuccess(message=f"Action object with uid '{uid}' deleted.") + + @as_result(SyftException) + def _soft_delete_action_obj( + self, context: AuthedServiceContext, action_obj: ActionObject + ) -> ActionObject: + action_obj.syft_action_data_cache = None + action_obj._save_to_blob_storage().unwrap() + return self._set( + context=context, + action_object=action_obj, + ).unwrap() + + +@as_result(SyftException) def resolve_action_args( action: Action, context: AuthedServiceContext, service: ActionService -) -> tuple[Ok[dict], bool]: +) -> tuple[list, bool]: has_twin_inputs = False args = [] for arg_id in action.args: arg_value = service._get( context=context, uid=arg_id, twin_mode=TwinMode.NONE, has_permission=True - ) - if arg_value.is_err(): - return arg_value, False - if isinstance(arg_value.ok(), TwinObject): + ).unwrap() + if isinstance(arg_value, TwinObject): has_twin_inputs = True - args.append(arg_value.ok()) - return Ok(args), has_twin_inputs + args.append(arg_value) + return args, has_twin_inputs +@as_result(SyftException) def resolve_action_kwargs( action: Action, context: AuthedServiceContext, service: ActionService -) -> tuple[Ok[dict], bool]: +) -> tuple[dict, bool]: has_twin_inputs = False kwargs = {} for key, arg_id in action.kwargs.items(): kwarg_value = service._get( context=context, uid=arg_id, twin_mode=TwinMode.NONE, has_permission=True - ) - if kwarg_value.is_err(): - return kwarg_value, False - if isinstance(kwarg_value.ok(), TwinObject): + ).unwrap() + if isinstance(kwarg_value, TwinObject): has_twin_inputs = True - kwargs[key] = kwarg_value.ok() - return Ok(kwargs), has_twin_inputs + kwargs[key] = kwarg_value + return kwargs, has_twin_inputs +@as_result(SyftException) def execute_callable( service: ActionService, context: AuthedServiceContext, action: Action, -) -> Result[ActionObject, str]: - args, has_arg_twins = resolve_action_args(action, context, service) - kwargs, has_kwargs_twins = resolve_action_kwargs(action, context, service) +) -> ActionObject: + args, has_arg_twins = resolve_action_args(action, context, service).unwrap() + kwargs, has_kwargs_twins = resolve_action_kwargs(action, context, service).unwrap() has_twin_inputs = has_arg_twins or has_kwargs_twins - if args.is_err(): - return args - else: - args = args.ok() - if kwargs.is_err(): - return kwargs - else: - kwargs = 
kwargs.ok() - # 🔵 TODO 10: Get proper code From old RunClassMethodAction to ensure the function # is not bound to the original object or mutated - # stdlib # TODO: get from CMPTree is probably safer def _get_target_callable(path: str, op: str) -> Any: @@ -804,119 +1041,102 @@ def _get_target_callable(path: str, op: str) -> Any: target_callable = _get_target_callable(action.path, action.op) result = None - try: - if target_callable: - if not has_twin_inputs: - # if twin_mode == TwinMode.NONE and not has_twin_inputs: - twin_mode = TwinMode.NONE - # no twins - filtered_args = filter_twin_args(args, twin_mode=twin_mode) - filtered_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode) - result = target_callable(*filtered_args, **filtered_kwargs) - result_action_object = wrap_result(action.result_id, result) - else: - twin_mode = TwinMode.PRIVATE - private_args = filter_twin_args(args, twin_mode=twin_mode) - private_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode) - private_result = target_callable(*private_args, **private_kwargs) - result_action_object_private = wrap_result( - action.result_id, private_result - ) - - twin_mode = TwinMode.MOCK - mock_args = filter_twin_args(args, twin_mode=twin_mode) - mock_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode) - mock_result = target_callable(*mock_args, **mock_kwargs) - result_action_object_mock = wrap_result(action.result_id, mock_result) - - result_action_object = TwinObject( - id=action.result_id, - private_obj=result_action_object_private, - mock_obj=result_action_object_mock, - ) + if not target_callable: + raise SyftException(public_message="No target callable found") + + if not has_twin_inputs: + # if twin_mode == TwinMode.NONE and not has_twin_inputs: + twin_mode = TwinMode.NONE + # no twins + filtered_args = filter_twin_args(args, twin_mode=twin_mode).unwrap() + filtered_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode).unwrap() + result = target_callable(*filtered_args, **filtered_kwargs) + result_action_object = wrap_result(action.result_id, result) + else: + twin_mode = TwinMode.PRIVATE + private_args = filter_twin_args(args, twin_mode=twin_mode).unwrap() + private_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode).unwrap() + private_result = target_callable(*private_args, **private_kwargs) + result_action_object_private = wrap_result(action.result_id, private_result) + + twin_mode = TwinMode.MOCK + mock_args = filter_twin_args(args, twin_mode=twin_mode).unwrap() + mock_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode).unwrap() + mock_result = target_callable(*mock_args, **mock_kwargs) + result_action_object_mock = wrap_result(action.result_id, mock_result) + + result_action_object = TwinObject( + id=action.result_id, + private_obj=result_action_object_private, + mock_obj=result_action_object_mock, + ) - except Exception as e: - print("what is this exception", e) - return Err(e) - return Ok(result_action_object) + return result_action_object +@as_result(SyftException) def execute_object( service: ActionService, context: AuthedServiceContext, resolved_self: ActionObject, action: Action, twin_mode: TwinMode = TwinMode.NONE, -) -> Result[Ok[TwinObject | ActionObject], Err[str]]: +) -> TwinObject | ActionObject: unboxed_resolved_self = resolved_self.syft_action_data - _args, has_arg_twins = resolve_action_args(action, context, service) + args, has_arg_twins = resolve_action_args(action, context, service).unwrap() - kwargs, has_kwargs_twins = resolve_action_kwargs(action, context, service) - if 
_args.is_err(): - return _args - else: - args = _args.ok() - if kwargs.is_err(): - return kwargs - else: - kwargs = kwargs.ok() + kwargs, has_kwargs_twins = resolve_action_kwargs(action, context, service).unwrap() has_twin_inputs = has_arg_twins or has_kwargs_twins # 🔵 TODO 10: Get proper code From old RunClassMethodAction to ensure the function # is not bound to the original object or mutated target_method = getattr(unboxed_resolved_self, action.op, None) result = None - try: - if target_method: - if twin_mode == TwinMode.NONE and not has_twin_inputs: - # no twins - filtered_args = filter_twin_args(args, twin_mode=twin_mode) - filtered_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode) - result = target_method(*filtered_args, **filtered_kwargs) - result_action_object = wrap_result(action.result_id, result) - elif twin_mode == TwinMode.NONE and has_twin_inputs: - # self isn't a twin but one of the inputs is - private_args = filter_twin_args(args, twin_mode=TwinMode.PRIVATE) - private_kwargs = filter_twin_kwargs(kwargs, twin_mode=TwinMode.PRIVATE) - private_result = target_method(*private_args, **private_kwargs) - result_action_object_private = wrap_result( - action.result_id, private_result - ) - - mock_args = filter_twin_args(args, twin_mode=TwinMode.MOCK) - mock_kwargs = filter_twin_kwargs(kwargs, twin_mode=TwinMode.MOCK) - mock_result = target_method(*mock_args, **mock_kwargs) - result_action_object_mock = wrap_result(action.result_id, mock_result) - - result_action_object = TwinObject( - id=action.result_id, - private_obj=result_action_object_private, - mock_obj=result_action_object_mock, - ) - elif twin_mode == twin_mode.PRIVATE: # type:ignore - # twin private path - private_args = filter_twin_args(args, twin_mode=twin_mode) # type:ignore[unreachable] - private_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode) - result = target_method(*private_args, **private_kwargs) - result_action_object = wrap_result(action.result_id, result) - elif twin_mode == twin_mode.MOCK: # type:ignore - # twin mock path - mock_args = filter_twin_args(args, twin_mode=twin_mode) # type:ignore[unreachable] - mock_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode) - target_method = getattr(unboxed_resolved_self, action.op, None) - result = target_method(*mock_args, **mock_kwargs) - result_action_object = wrap_result(action.result_id, result) - else: - raise Exception( - f"Bad combination of: twin_mode: {twin_mode} and has_twin_inputs: {has_twin_inputs}" - ) - else: - return Err("Missing target method") - except Exception as e: - return Err(e) + if not target_method: + raise SyftException(public_message="could not find target method") + if twin_mode == TwinMode.NONE and not has_twin_inputs: + # no twins + filtered_args = filter_twin_args(args, twin_mode=twin_mode).unwrap() + filtered_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode).unwrap() + result = target_method(*filtered_args, **filtered_kwargs) + result_action_object = wrap_result(action.result_id, result) + elif twin_mode == TwinMode.NONE and has_twin_inputs: + # self isn't a twin but one of the inputs is + private_args = filter_twin_args(args, twin_mode=TwinMode.PRIVATE).unwrap() + private_kwargs = filter_twin_kwargs(kwargs, twin_mode=TwinMode.PRIVATE).unwrap() + private_result = target_method(*private_args, **private_kwargs) + result_action_object_private = wrap_result(action.result_id, private_result) + + mock_args = filter_twin_args(args, twin_mode=TwinMode.MOCK).unwrap() + mock_kwargs = filter_twin_kwargs(kwargs, 
twin_mode=TwinMode.MOCK).unwrap() + mock_result = target_method(*mock_args, **mock_kwargs) + result_action_object_mock = wrap_result(action.result_id, mock_result) + + result_action_object = TwinObject( + id=action.result_id, + private_obj=result_action_object_private, + mock_obj=result_action_object_mock, + ) + elif twin_mode == twin_mode.PRIVATE: # type:ignore + # twin private path + private_args = filter_twin_args(args, twin_mode=twin_mode).unwrap() # type:ignore[unreachable] + private_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode).unwrap() + result = target_method(*private_args, **private_kwargs) + result_action_object = wrap_result(action.result_id, result) + elif twin_mode == twin_mode.MOCK: # type:ignore + # twin mock path + mock_args = filter_twin_args(args, twin_mode=twin_mode).unwrap() # type:ignore[unreachable] + mock_kwargs = filter_twin_kwargs(kwargs, twin_mode=twin_mode).unwrap() + target_method = getattr(unboxed_resolved_self, action.op, None) + result = target_method(*mock_args, **mock_kwargs) + result_action_object = wrap_result(action.result_id, result) + else: + raise SyftException( + public_message=f"Bad combination of: twin_mode: {twin_mode} and has_twin_inputs: {has_twin_inputs}" + ) - return Ok(result_action_object) + return result_action_object def wrap_result(result_id: UID, result: Any) -> ActionObject: @@ -926,6 +1146,7 @@ def wrap_result(result_id: UID, result: Any) -> ActionObject: return result_action_object +@as_result(SyftException) def filter_twin_args(args: list[Any], twin_mode: TwinMode) -> Any: filtered = [] for arg in args: @@ -935,15 +1156,18 @@ def filter_twin_args(args: list[Any], twin_mode: TwinMode) -> Any: elif twin_mode == TwinMode.MOCK: filtered.append(arg.mock.syft_action_data) else: - raise Exception( - f"Filter can only use {TwinMode.PRIVATE} or {TwinMode.MOCK}" + raise SyftException( + public_message=f"Filter can only use {TwinMode.PRIVATE} or {TwinMode.MOCK}" ) else: filtered.append(arg.syft_action_data) return filtered -def filter_twin_kwargs(kwargs: dict, twin_mode: TwinMode) -> Any: +@as_result(SyftException) +def filter_twin_kwargs( + kwargs: dict, twin_mode: TwinMode, allow_python_types: bool = False +) -> Any: filtered = {} for k, v in kwargs.items(): if isinstance(v, TwinObject): @@ -952,11 +1176,21 @@ def filter_twin_kwargs(kwargs: dict, twin_mode: TwinMode) -> Any: elif twin_mode == TwinMode.MOCK: filtered[k] = v.mock.syft_action_data else: - raise Exception( - f"Filter can only use {TwinMode.PRIVATE} or {TwinMode.MOCK}" + raise SyftException( + public_message=f"Filter can only use {TwinMode.PRIVATE} or {TwinMode.MOCK}" ) else: - filtered[k] = v.syft_action_data + if isinstance(v, ActionObject): + filtered[k] = v.syft_action_data + elif ( + isinstance(v, str | int | float | dict | CustomEndpointActionObject) + and allow_python_types + ): + filtered[k] = v + else: + raise SyftException( + public_message=f"unexepected value {v} passed to filtered twin kwargs" + ) return filtered diff --git a/packages/syft/src/syft/service/action/action_store.py b/packages/syft/src/syft/service/action/action_store.py index 4b71dc7ea74..e6597d19d25 100644 --- a/packages/syft/src/syft/service/action/action_store.py +++ b/packages/syft/src/syft/service/action/action_store.py @@ -1,370 +1,134 @@ # future from __future__ import annotations -# stdlib -import threading - -# third party -from result import Err -from result import Ok -from result import Result - # relative -from ...node.credentials import SyftSigningKey -from ...node.credentials 
import SyftVerifyKey from ...serde.serializable import serializable -from ...store.dict_document_store import DictStoreConfig -from ...store.document_store import BasePartitionSettings -from ...store.document_store import StoreConfig +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.errors import SyftException +from ...types.result import as_result from ...types.syft_object import SyftObject from ...types.twin_object import TwinObject -from ...types.uid import LineageID from ...types.uid import UID -from ..response import SyftSuccess +from .action_object import ActionObject from .action_object import is_action_data_empty from .action_permissions import ActionObjectEXECUTE -from .action_permissions import ActionObjectOWNER from .action_permissions import ActionObjectPermission from .action_permissions import ActionObjectREAD from .action_permissions import ActionObjectWRITE -from .action_permissions import ActionPermission from .action_permissions import StoragePermission -lock = threading.RLock() - - -class ActionStore: - pass - - -@serializable() -class KeyValueActionStore(ActionStore): - """Generic Key-Value Action store. - - Parameters: - store_config: StoreConfig - Backend specific configuration, including connection configuration, database name, or client class type. - root_verify_key: Optional[SyftVerifyKey] - Signature verification key, used for checking access permissions. - """ - - def __init__( - self, - node_uid: UID, - store_config: StoreConfig, - root_verify_key: SyftVerifyKey | None = None, - ) -> None: - self.node_uid = node_uid - self.store_config = store_config - self.settings = BasePartitionSettings(name="Action") - self.data = self.store_config.backing_store( - "data", self.settings, self.store_config - ) - self.permissions = self.store_config.backing_store( - "permissions", self.settings, self.store_config, ddtype=set - ) - self.storage_permissions = self.store_config.backing_store( - "storage_permissions", self.settings, self.store_config, ddtype=set - ) - if root_verify_key is None: - root_verify_key = SyftSigningKey.generate().verify_key - self.root_verify_key = root_verify_key +@serializable(canonical_name="ActionObjectSQLStore", version=1) +class ActionObjectStash(ObjectStash[ActionObject]): + # We are storing ActionObject, Action, TwinObject + allow_any_type = True + @as_result(NotFoundException, SyftException) def get( self, uid: UID, credentials: SyftVerifyKey, has_permission: bool = False - ) -> Result[SyftObject, str]: + ) -> ActionObject: uid = uid.id # We only need the UID from LineageID or UID - - # if you get something you need READ permission - read_permission = ActionObjectREAD(uid=uid, credentials=credentials) - if has_permission or self.has_permission(read_permission): - try: - if isinstance(uid, LineageID): - syft_object = self.data[uid.id] - elif isinstance(uid, UID): - syft_object = self.data[uid] - else: - raise Exception(f"Unrecognized UID type: {type(uid)}") - return Ok(syft_object) - except Exception as e: - return Err(f"Could not find item with uid {uid}, {e}") - return Err(f"Permission: {read_permission} denied") - - def get_mock(self, uid: UID) -> Result[SyftObject, str]: + # TODO remove and use get_by_uid instead + return self.get_by_uid( + credentials=credentials, + uid=uid, + has_permission=has_permission, + ).unwrap() + + @as_result(NotFoundException, 
SyftException) + def get_mock(self, credentials: SyftVerifyKey, uid: UID) -> SyftObject: uid = uid.id # We only need the UID from LineageID or UID - try: - syft_object = self.data[uid] - if isinstance(syft_object, TwinObject) and not is_action_data_empty( - syft_object.mock - ): - return Ok(syft_object.mock) - return Err("No mock") - except Exception as e: - return Err(f"Could not find item with uid {uid}, {e}") + obj = self.get_by_uid( + credentials=credentials, uid=uid, has_permission=True + ).unwrap() + if isinstance(obj, TwinObject) and not is_action_data_empty(obj.mock): + return obj.mock + raise NotFoundException(public_message=f"No mock found for object {uid}") + @as_result(NotFoundException, SyftException) def get_pointer( self, uid: UID, credentials: SyftVerifyKey, - node_uid: UID, - ) -> Result[SyftObject, str]: + server_uid: UID, + ) -> SyftObject: uid = uid.id # We only need the UID from LineageID or UID - try: - if uid in self.data: - obj = self.data[uid] - read_permission = ActionObjectREAD(uid=uid, credentials=credentials) - - # if you have permission you can have private data - if self.has_permission(read_permission): - if isinstance(obj, TwinObject): - return Ok(obj.private.syft_point_to(node_uid)) - return Ok(obj.syft_point_to(node_uid)) - - # if its a twin with a mock anyone can have this - if isinstance(obj, TwinObject): - return Ok(obj.mock.syft_point_to(node_uid)) - - # finally worst case you get ActionDataEmpty so you can still trace - return Ok(obj.as_empty().syft_point_to(node_uid)) + obj = self.get_by_uid( + credentials=credentials, uid=uid, has_permission=True + ).unwrap() + has_permissions = self.has_permission( + ActionObjectREAD(uid=uid, credentials=credentials) + ) - return Err("Permission denied") - except Exception as e: - return Err(str(e)) + if has_permissions: + if isinstance(obj, TwinObject): + return obj.private.syft_point_to(server_uid) + return obj.syft_point_to(server_uid) # type: ignore - def exists(self, uid: UID) -> bool: - uid = uid.id # We only need the UID from LineageID or UID + # if its a twin with a mock anyone can have this + if isinstance(obj, TwinObject): + return obj.mock.syft_point_to(server_uid) - return uid in self.data + # finally worst case you get ActionDataEmpty so you can still trace + return obj.as_empty().syft_point_to(server_uid) # type: ignore - def set( + @as_result(SyftException, StashException) + def set_or_update( # type: ignore self, uid: UID, credentials: SyftVerifyKey, syft_object: SyftObject, has_result_read_permission: bool = False, add_storage_permission: bool = True, - ) -> Result[SyftSuccess, Err]: + ) -> UID: uid = uid.id # We only need the UID from LineageID or UID - # if you set something you need WRITE permission - write_permission = ActionObjectWRITE(uid=uid, credentials=credentials) - can_write = self.has_permission(write_permission) - - if not self.exists(uid=uid): - # attempt to claim it for writing - if has_result_read_permission: - ownership_result = self.take_ownership(uid=uid, credentials=credentials) - can_write = True if ownership_result.is_ok() else False - else: - # root takes owneship, but you can still write - ownership_result = self.take_ownership( - uid=uid, credentials=self.root_verify_key - ) - can_write = True if ownership_result.is_ok() else False - - if can_write: - self.data[uid] = syft_object - if uid not in self.permissions: - # create default permissions - self.permissions[uid] = set() + if self.exists(credentials=credentials, uid=uid): + permissions: list[ActionObjectPermission] = [] 
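# The set_or_update hunk around this point decides which permissions the submitting user
# gets on a stored result: READ (and ownership) only when has_result_read_permission is
# true, otherwise just WRITE/EXECUTE with the root key taking ownership. A small,
# illustrative model of that decision; Grant and decide_grants are made-up names and not
# Syft's API.
from dataclasses import dataclass


@dataclass(frozen=True)
class Grant:
    uid: str
    credentials: str
    permission: str  # "READ" | "WRITE" | "EXECUTE"


def decide_grants(
    uid: str, user_key: str, root_key: str, can_read_result: bool
) -> tuple[str, list[Grant]]:
    """Return (owner_key, grants) for a newly stored action result."""
    if can_read_result:
        # The user owns the object and may read the real value.
        return user_key, [
            Grant(uid, user_key, "READ"),
            Grant(uid, user_key, "WRITE"),
            Grant(uid, user_key, "EXECUTE"),
        ]
    # Root owns the object; the user may still write to and execute it,
    # but cannot read the private result.
    return root_key, [
        Grant(uid, user_key, "WRITE"),
        Grant(uid, user_key, "EXECUTE"),
    ]


owner, grants = decide_grants("obj-1", user_key="user", root_key="root", can_read_result=False)
assert owner == "root" and all(g.permission in {"WRITE", "EXECUTE"} for g in grants)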
if has_result_read_permission: - self.add_permission(ActionObjectREAD(uid=uid, credentials=credentials)) + permissions.append(ActionObjectREAD(uid=uid, credentials=credentials)) else: - self.add_permissions( + permissions.extend( [ ActionObjectWRITE(uid=uid, credentials=credentials), ActionObjectEXECUTE(uid=uid, credentials=credentials), ] ) - - if uid not in self.storage_permissions: - # create default storage permissions - self.storage_permissions[uid] = set() + storage_permission = [] if add_storage_permission: - self.add_storage_permission( - StoragePermission(uid=uid, node_uid=self.node_uid) - ) - - return Ok(SyftSuccess(message=f"Set for ID: {uid}")) - return Err(f"Permission: {write_permission} denied") - - def take_ownership( - self, uid: UID, credentials: SyftVerifyKey - ) -> Result[SyftSuccess, str]: - uid = uid.id # We only need the UID from LineageID or UID - - # first person using this UID can claim ownership - if uid not in self.permissions and uid not in self.data: - self.add_permissions( - [ - ActionObjectOWNER(uid=uid, credentials=credentials), - ActionObjectWRITE(uid=uid, credentials=credentials), - ActionObjectREAD(uid=uid, credentials=credentials), - ActionObjectEXECUTE(uid=uid, credentials=credentials), - ] - ) - return Ok(SyftSuccess(message=f"Ownership of ID: {uid} taken.")) - return Err(f"UID: {uid} already owned.") - - def delete(self, uid: UID, credentials: SyftVerifyKey) -> Result[SyftSuccess, str]: - uid = uid.id # We only need the UID from LineageID or UID - - # if you delete something you need OWNER permission - # is it bad to evict a key and have someone else reuse it? - # perhaps we should keep permissions but no data? - owner_permission = ActionObjectOWNER(uid=uid, credentials=credentials) - if self.has_permission(owner_permission): - if uid in self.data: - del self.data[uid] - if uid in self.permissions: - del self.permissions[uid] - return Ok(SyftSuccess(message=f"ID: {uid} deleted")) - return Err(f"Permission: {owner_permission} denied") - - def has_permission(self, permission: ActionObjectPermission) -> bool: - if not isinstance(permission.permission, ActionPermission): - raise Exception(f"ObjectPermission type: {permission.permission} not valid") - - if ( - permission.credentials is not None - and self.root_verify_key.verify == permission.credentials.verify - ): - return True - - if ( - permission.uid in self.permissions - and permission.permission_string in self.permissions[permission.uid] - ): - return True - - # 🟡 TODO 14: add ALL_READ, ALL_EXECUTE etc - if permission.permission == ActionPermission.OWNER: - pass - elif permission.permission == ActionPermission.READ: - pass - elif permission.permission == ActionPermission.WRITE: - pass - elif permission.permission == ActionPermission.EXECUTE: - pass - - return False - - def has_permissions(self, permissions: list[ActionObjectPermission]) -> bool: - return all(self.has_permission(p) for p in permissions) - - def add_permission(self, permission: ActionObjectPermission) -> None: - permissions = self.permissions[permission.uid] - permissions.add(permission.permission_string) - self.permissions[permission.uid] = permissions - - def remove_permission(self, permission: ActionObjectPermission) -> None: - permissions = self.permissions[permission.uid] - permissions.remove(permission.permission_string) - self.permissions[permission.uid] = permissions - - def add_permissions(self, permissions: list[ActionObjectPermission]) -> None: - for permission in permissions: - self.add_permission(permission) - - def 
add_storage_permission(self, permission: StoragePermission) -> None: - permissions = self.storage_permissions[permission.uid] - permissions.add(permission.node_uid) - self.storage_permissions[permission.uid] = permissions - - def add_storage_permissions(self, permissions: list[StoragePermission]) -> None: - for permission in permissions: - self.add_storage_permission(permission) - - def remove_storage_permission(self, permission: StoragePermission) -> None: - permissions = self.storage_permissions[permission.uid] - permissions.remove(permission.node_uid) - self.storage_permissions[permission.uid] = permissions - - def has_storage_permission(self, permission: StoragePermission) -> bool: - if permission.uid in self.storage_permissions: - return permission.node_uid in self.storage_permissions[permission.uid] - return False - - def migrate_data( - self, to_klass: SyftObject, credentials: SyftVerifyKey - ) -> Result[bool, str]: - has_root_permission = credentials == self.root_verify_key - - if has_root_permission: - for key, value in self.data.items(): - try: - if value.__canonical_name__ != to_klass.__canonical_name__: - continue - migrated_value = value.migrate_to(to_klass.__version__) - except Exception as e: - return Err( - f"Failed to migrate data to {to_klass} for qk: {key}. Exception: {e}" - ) - result = self.set( - uid=key, - credentials=credentials, - syft_object=migrated_value, + storage_permission.append( + StoragePermission(uid=uid, server_uid=self.server_uid) ) - if result.is_err(): - return result.err() - - return Ok(True) - - return Err("You don't have permissions to migrate data.") - + self.update( + credentials=credentials, + obj=syft_object, + ).unwrap() + self.add_permissions(permissions).unwrap() + self.add_storage_permissions(storage_permission).unwrap() + return uid -@serializable() -class DictActionStore(KeyValueActionStore): - """Dictionary-Based Key-Value Action store. - - Parameters: - store_config: StoreConfig - Backend specific configuration, including client class type. - root_verify_key: Optional[SyftVerifyKey] - Signature verification key, used for checking access permissions. - """ - - def __init__( - self, - node_uid: UID, - store_config: StoreConfig | None = None, - root_verify_key: SyftVerifyKey | None = None, - ) -> None: - store_config = store_config if store_config is not None else DictStoreConfig() - super().__init__( - node_uid=node_uid, - store_config=store_config, - root_verify_key=root_verify_key, + owner_credentials = ( + credentials if has_result_read_permission else self.root_verify_key ) - - -@serializable() -class SQLiteActionStore(KeyValueActionStore): - """SQLite-Based Key-Value Action store. - - Parameters: - store_config: StoreConfig - SQLite specific configuration, including connection settings or client class type. - root_verify_key: Optional[SyftVerifyKey] - Signature verification key, used for checking access permissions. - """ - - pass - - -@serializable() -class MongoActionStore(KeyValueActionStore): - """Mongo-Based Action store. - - Parameters: - store_config: StoreConfig - Mongo specific configuration. - root_verify_key: Optional[SyftVerifyKey] - Signature verification key, used for checking access permissions. 
- """ - - pass + # if not has_result_read_permission + # root takes owneship, but you can still write and execute + super().set( + credentials=owner_credentials, + obj=syft_object, + add_permissions=[ + ActionObjectWRITE(uid=uid, credentials=credentials), + ActionObjectEXECUTE(uid=uid, credentials=credentials), + ], + add_storage_permission=add_storage_permission, + ).unwrap() + + return uid + + def set(self, *args, **kwargs): # type: ignore + raise Exception("Use `ActionObjectStash.set_or_update` instead.") diff --git a/packages/syft/src/syft/service/action/action_types.py b/packages/syft/src/syft/service/action/action_types.py index a1db49f8a59..c7bd730d557 100644 --- a/packages/syft/src/syft/service/action/action_types.py +++ b/packages/syft/src/syft/service/action/action_types.py @@ -1,10 +1,12 @@ # stdlib +import logging from typing import Any # relative -from ...util.logger import debug from .action_data_empty import ActionDataEmpty +logger = logging.getLogger(__name__) + action_types: dict = {} @@ -15,17 +17,17 @@ def action_type_for_type(obj_or_type: Any) -> type: obj_or_type: Union[object, type] Can be an object or a class """ + if isinstance(obj_or_type, ActionDataEmpty): + obj_or_type = obj_or_type.syft_internal_type if type(obj_or_type) != type: - if isinstance(obj_or_type, ActionDataEmpty): - obj_or_type = obj_or_type.syft_internal_type - else: - obj_or_type = type(obj_or_type) + obj_or_type = type(obj_or_type) if obj_or_type not in action_types: - debug(f"WARNING: No Type for {obj_or_type}, returning {action_types[Any]}") - return action_types[Any] + logger.debug( + f"WARNING: No Type for {obj_or_type}, returning {action_types[Any]}" + ) - return action_types[obj_or_type] + return action_types.get(obj_or_type, action_types[Any]) def action_type_for_object(obj: Any) -> type: @@ -38,7 +40,7 @@ def action_type_for_object(obj: Any) -> type: _type = type(obj) if _type not in action_types: - debug(f"WARNING: No Type for {_type}, returning {action_types[Any]}") + logger.debug(f"WARNING: No Type for {_type}, returning {action_types[Any]}") return action_types[Any] return action_types[_type] diff --git a/packages/syft/src/syft/service/action/numpy.py b/packages/syft/src/syft/service/action/numpy.py index da8c8aecc05..1949eeb0575 100644 --- a/packages/syft/src/syft/service/action/numpy.py +++ b/packages/syft/src/syft/service/action/numpy.py @@ -8,20 +8,20 @@ # relative from ...serde.serializable import serializable -from ...types.syft_object import SYFT_OBJECT_VERSION_3 +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from .action_object import ActionObject from .action_object import ActionObjectPointer from .action_object import BASE_PASSTHROUGH_ATTRS from .action_types import action_types -# @serializable(attrs=["id", "node_uid", "parent_id"]) +# @serializable(attrs=["id", "server_uid", "parent_id"]) # class NumpyArrayObjectPointer(ActionObjectPointer): # _inflix_operations = ["__add__", "__sub__", "__eq__", "__mul__"] # __canonical_name__ = "NumpyArrayObjectPointer" -# __version__ = SYFT_OBJECT_VERSION_2 +# __version__ = SYFT_OBJECT_VERSION_1 -# def get_from(self, domain_client) -> Any: -# return domain_client.api.services.action.get(self.id).syft_action_data +# def get_from(self, datasite_client) -> Any: +# return datasite_client.api.services.action.get(self.id).syft_action_data class NumpyArrayObjectPointer(ActionObjectPointer): @@ -40,10 +40,12 @@ def numpy_like_eq(left: Any, right: Any) -> bool: # 🔵 TODO 7: Map TPActionObjects and their 3rd Party types like numpy type to 
these # classes for bi-directional lookup. + + @serializable() class NumpyArrayObject(ActionObject, np.lib.mixins.NDArrayOperatorsMixin): __canonical_name__ = "NumpyArrayObject" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 syft_internal_type: ClassVar[type[Any]] = np.ndarray syft_pointer_type: ClassVar[type[ActionObjectPointer]] = NumpyArrayObjectPointer @@ -86,7 +88,7 @@ def __array_ufunc__( @serializable() class NumpyScalarObject(ActionObject, np.lib.mixins.NDArrayOperatorsMixin): __canonical_name__ = "NumpyScalarObject" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 syft_internal_type: ClassVar[type] = np.number syft_passthrough_attrs: list[str] = BASE_PASSTHROUGH_ATTRS @@ -99,7 +101,7 @@ def __float__(self) -> float: @serializable() class NumpyBoolObject(ActionObject, np.lib.mixins.NDArrayOperatorsMixin): __canonical_name__ = "NumpyBoolObject" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 syft_internal_type: ClassVar[type] = np.bool_ syft_passthrough_attrs: list[str] = BASE_PASSTHROUGH_ATTRS diff --git a/packages/syft/src/syft/service/action/pandas.py b/packages/syft/src/syft/service/action/pandas.py index 4d47261ef3e..9de480ddd0f 100644 --- a/packages/syft/src/syft/service/action/pandas.py +++ b/packages/syft/src/syft/service/action/pandas.py @@ -8,7 +8,7 @@ # relative from ...serde.serializable import serializable -from ...types.syft_object import SYFT_OBJECT_VERSION_3 +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from .action_object import ActionObject from .action_object import BASE_PASSTHROUGH_ATTRS from .action_types import action_types @@ -17,7 +17,7 @@ @serializable() class PandasDataFrameObject(ActionObject): __canonical_name__ = "PandasDataframeObject" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 syft_internal_type: ClassVar[type] = DataFrame syft_passthrough_attrs: list[str] = BASE_PASSTHROUGH_ATTRS @@ -39,11 +39,16 @@ def syft_is_property(self, obj: Any, method: str) -> bool: return True return super().syft_is_property(obj, method) + def __bool__(self) -> bool: + if self.syft_action_data_cache is None: + return False + return bool(self.syft_action_data_cache.empty) + @serializable() class PandasSeriesObject(ActionObject): __canonical_name__ = "PandasSeriesObject" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 syft_internal_type = Series syft_passthrough_attrs: list[str] = BASE_PASSTHROUGH_ATTRS diff --git a/packages/syft/src/syft/service/action/plan.py b/packages/syft/src/syft/service/action/plan.py index 0bab10c0958..cca7437f869 100644 --- a/packages/syft/src/syft/service/action/plan.py +++ b/packages/syft/src/syft/service/action/plan.py @@ -8,7 +8,7 @@ from ... 
import Worker from ...client.client import SyftClient from ...serde.recursive import recursive_serde_register -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject from .action_object import Action from .action_object import TraceResultRegistry @@ -16,7 +16,7 @@ class Plan(SyftObject): __canonical_name__ = "Plan" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 syft_passthrough_attrs: list[str] = [ "inputs", @@ -68,6 +68,8 @@ def planify(func: Callable) -> ActionObject: client = worker.root_client if client is None: raise ValueError("Not able to get client for plan building") + # if client.settings is not None: + # client.settings.enable_eager_execution(enable=True) # NOTE: Disabled until we bring back eager execution TraceResultRegistry.set_trace_result_for_current_thread(client=client) try: # TraceResult._client = client diff --git a/packages/syft/src/syft/service/action/verification.py b/packages/syft/src/syft/service/action/verification.py index 063634e993c..e155ff3c4a8 100644 --- a/packages/syft/src/syft/service/action/verification.py +++ b/packages/syft/src/syft/service/action/verification.py @@ -7,7 +7,7 @@ import pandas as pd # relative -from ..response import SyftError +from ...types.errors import SyftException from ..response import SyftResponseMessage from ..response import SyftSuccess from .action_object import ActionObject @@ -57,7 +57,7 @@ def compare_hashes( target_hashes: list[int] | int, traced_hashes: list[int] | int, traced_results: Any, -) -> SyftSuccess | SyftError: +) -> SyftSuccess: if target_hashes == traced_hashes: msg = "Code Verification passed with matching hashes! Congratulations, and thank you for supporting PySyft!" return SyftSuccess(message=msg) @@ -66,7 +66,7 @@ def compare_hashes( f"Hashes do not match! Target hashes were: {target_hashes} but Traced hashes were: {traced_results}. " f"Please try checking the logs." ) - return SyftError(message=msg) + raise SyftException(public_message=msg) def code_verification(func: Callable) -> Callable: @@ -81,12 +81,12 @@ def code_verification(func: Callable) -> Callable: - boolean:: if history hashes match """ - def wrapper(*args: Any, **kwargs: Any) -> SyftSuccess | SyftError: + def wrapper(*args: Any, **kwargs: Any) -> SyftSuccess: trace_assets = [] for asset in args: if not isinstance(asset, ActionObject): - raise Exception( - f"ActionObject expected, instead received: {type(asset)}" + raise SyftException( + public_message=f"ActionObject expected, instead received: {type(asset)}" ) # Manual type casting for now, to automate later if isinstance(asset.syft_action_data, np.ndarray): @@ -123,6 +123,6 @@ def wrapper(*args: Any, **kwargs: Any) -> SyftSuccess | SyftError: f"Hashes do not match! Target hashes were: {results} but Traced hashes were: {traced_results}. " f"Please try checking the logs." 
) - return SyftError(message=msg) + raise SyftException(public_message=msg) return wrapper diff --git a/packages/syft/src/syft/service/api/api.py b/packages/syft/src/syft/service/api/api.py new file mode 100644 index 00000000000..f9f2731d3a3 --- /dev/null +++ b/packages/syft/src/syft/service/api/api.py @@ -0,0 +1,948 @@ +# stdlib +import ast +from collections.abc import Callable +import inspect +from inspect import Signature +import keyword +import linecache +import re +import textwrap +from textwrap import dedent +from typing import Any +from typing import cast + +# third party +from pydantic import ValidationError +from pydantic import field_validator +from pydantic import model_validator + +# relative +from ...abstract_server import AbstractServer +from ...client.client import SyftClient +from ...serde.serializable import serializable +from ...serde.signature import signature_remove_context +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_migration import migrate +from ...types.syft_object import PartialSyftObject +from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SyftObject +from ...types.syncable_object import SyncableSyftObject +from ...types.transforms import TransformContext +from ...types.transforms import drop +from ...types.transforms import generate_action_object_id +from ...types.transforms import generate_id +from ...types.transforms import keep +from ...types.transforms import make_set_default +from ...types.transforms import transform +from ...types.uid import UID +from ...util.misc_objs import MarkdownDescription +from ..context import AuthedServiceContext +from ..response import SyftError +from ..user.user import UserView +from .utils import print as log_print + +NOT_ACCESSIBLE_STRING = "N / A" + + +class HelperFunctionSet: + def __init__(self, helper_functions: dict[str, Callable]) -> None: + self.helper_functions = helper_functions + for name, func in helper_functions.items(): + setattr(self, name, func) + + +class TwinAPIAuthedContext(AuthedServiceContext): + __canonical_name__ = "TwinAPIAuthedContext" + __version__ = SYFT_OBJECT_VERSION_1 + + user: UserView | None = None + settings: dict[str, Any] | None = None + code: HelperFunctionSet | None = None + state: dict[Any, Any] | None = None + admin_client: SyftClient | None = None + user_client: SyftClient | None = None + + +@serializable() +class TwinAPIContextView(SyftObject): + __canonical_name__ = "TwinAPIContextView" + __version__ = SYFT_OBJECT_VERSION_1 + + __repr_attrs__ = ["settings", "state", "user"] + user: UserView + settings: dict[str, Any] + state: dict[Any, Any] + + +def get_signature(func: Callable) -> Signature: + sig = inspect.signature(func) + sig = signature_remove_context(sig) + return sig + + +def register_fn_in_linecache(fname: str, src: str) -> None: + """adds a function to linecache, such that inspect.getsource works for functions nested in this function. 
+ This only works if the same function is compiled under the same filename""" + lines = [ + line + "\n" for line in src.splitlines() + ] # use same splitting method same as linecache 112 (py3.12) + linecache.cache[fname] = (137, None, lines, fname) + + +@serializable() +class TwinAPIEndpointViewV1(SyftObject): + # version + __canonical_name__ = "CustomAPIView" + __version__ = SYFT_OBJECT_VERSION_1 + + path: str + action_object_id: UID + signature: Signature + access: str = "Public" + mock_function: str | None = None + private_function: str | None = None + description: MarkdownDescription | None = None + mock_helper_functions: list[str] | None = None + private_helper_functions: list[str] | None = None + worker_pool: str | None = None + endpoint_timeout: int = 60 + + +@serializable() +class TwinAPIEndpointView(SyftObject): + # version + __canonical_name__ = "CustomAPIView" + __version__ = SYFT_OBJECT_VERSION_2 + + path: str + action_object_id: UID + signature: Signature + access: str = "Public" + mock_function: str | None = None + private_function: str | None = None + description: MarkdownDescription | None = None + mock_helper_functions: list[str] | None = None + private_helper_functions: list[str] | None = None + worker_pool_name: str | None = None + endpoint_timeout: int = 60 + + __repr_attrs__ = [ + "path", + "signature", + "worker_pool_name", + "endpoint_timeout", + ] + + def _coll_repr_(self) -> dict[str, Any]: + if self.mock_function: + mock_parsed_code = ast.parse(self.mock_function) + mock_function_name = [ + server.name + for server in ast.walk(mock_parsed_code) + if isinstance(server, ast.FunctionDef) + ][0] + else: + mock_function_name = NOT_ACCESSIBLE_STRING + + if self.private_function: + private_parsed_code = ast.parse(self.private_function) + private_function_name = [ + server.name + for server in ast.walk(private_parsed_code) + if isinstance(server, ast.FunctionDef) + ][0] + else: + private_function_name = NOT_ACCESSIBLE_STRING + + worker_pool_name = "UNSET (DEFAULT)" + if self.worker_pool_name is not None: + worker_pool_name = self.worker_pool_name + return { + "API path": self.path, + "Signature": self.path + str(self.signature), + "Access": self.access, + "Mock Function": mock_function_name, + "Private Function": private_function_name, + "Worker Pool": worker_pool_name, + } + + +@serializable() +class Endpoint(SyftObject): + """Base class to perform basic Endpoint validation for both public/private endpoints.""" + + # version + __canonical_name__ = "CustomApiEndpoint" + __version__ = SYFT_OBJECT_VERSION_1 + + api_code: str + func_name: str + settings: dict[str, Any] | None = None + view_access: bool = True + helper_functions: dict[str, str] | None = None + state: dict[Any, Any] | None = None + signature: Signature + + __exclude_sync_diff_attrs__ = ["state"] + + def __repr__(self) -> str: + type_name = type(self).__name__ + repr_str = f"""<{type_name}: {self.func_name}> + + {self.api_code} + """ + return textwrap.dedent(repr_str) + + @field_validator("api_code", check_fields=False) + @classmethod + def validate_api_code(cls, api_code: str) -> str: + valid_code = True + api_code = dedent(api_code) + try: + ast.parse(api_code) + except SyntaxError: + # If the code isn't valid Python syntax + valid_code = False + + if not valid_code: + raise ValueError("Code must be a valid Python function.") + + return api_code + + @field_validator("func_name", check_fields=False) + @classmethod + def validate_func_name(cls, func_name: str) -> str: + if not str.isidentifier(func_name) or 
keyword.iskeyword(func_name): + raise ValueError("Invalid function name.") + return func_name + + @field_validator("settings", check_fields=False) + @classmethod + def validate_settings( + cls, settings: dict[str, Any] | None + ) -> dict[str, Any] | None: + return settings + + def update_state(self, state: dict[Any, Any]) -> None: + self.state = state + + def build_internal_context( + self, + context: AuthedServiceContext, + admin_client: SyftClient | None = None, + user_client: SyftClient | None = None, + ) -> TwinAPIAuthedContext: + helper_function_dict: dict[str, Callable] = {} + self.helper_functions = self.helper_functions or {} + for helper_name, helper_code in self.helper_functions.items(): + # Create a dictionary to serve as local scope + local_scope: dict[str, Callable] = {} + + # Execute the function string within the local scope + exec(helper_code, local_scope) # nosec + helper_function_dict[helper_name] = local_scope[helper_name] + + helper_function_set = HelperFunctionSet(helper_function_dict) + + user = context.server.services.user.get_current_user(context) + + return TwinAPIAuthedContext( + credentials=context.credentials, + role=context.role, + job_id=context.job_id, + extra_kwargs=context.extra_kwargs, + has_execute_permissions=context.has_execute_permissions, + server=context.server, + id=context.id, + settings=self.settings or {}, + code=helper_function_set, + state=self.state or {}, + user=user, + admin_client=admin_client, + user_client=user_client, + ) + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + # relative + from ... import SyftSigningKey + from ..context import AuthedServiceContext + + mock_context = AuthedServiceContext( + server=AbstractServer(), credentials=SyftSigningKey.generate().verify_key + ) + return self.call_locally(mock_context, *args, **kwargs) + + def call_locally( + self, context: AuthedServiceContext, *args: Any, **kwargs: Any + ) -> Any: + inner_function = ast.parse(self.api_code).body[0] + inner_function.decorator_list = [] + # compile the function + raw_byte_code = compile(ast.unparse(inner_function), "", "exec") + + # load it + exec(raw_byte_code) # nosec + + internal_context = self.build_internal_context(context=context) + + # execute it + evil_string = f"{self.func_name}(*args, **kwargs,context=internal_context)" + result = eval(evil_string, None, locals()) # nosec + + # Update code context state + self.update_state(internal_context.state) + + # return the results + return result + + +@serializable() +class PrivateAPIEndpoint(Endpoint): + # version + __canonical_name__ = "PrivateAPIEndpoint" + __version__ = SYFT_OBJECT_VERSION_1 + + view_access: bool = False + + +@serializable() +class PublicAPIEndpoint(Endpoint): + # version + __canonical_name__ = "PublicAPIEndpoint" + __version__ = SYFT_OBJECT_VERSION_1 + + +class BaseTwinAPIEndpoint(SyftObject): + __canonical_name__ = "BaseTwinAPIEndpoint" + __version__ = SYFT_OBJECT_VERSION_1 + + @model_validator(mode="before") + @classmethod + def validate_signature(cls, data: dict[str, Any]) -> dict[str, Any]: + mock_function = data["mock_function"] # mock_function can't be None + private_function = data.get("private_function") + + # Add none check + if private_function and private_function.signature != mock_function.signature: + raise ValueError( + "Mock and Private API Endpoints must have the same signature." 
+ ) + + return data + + @field_validator("path", check_fields=False) + @classmethod + def validate_path(cls, path: str) -> str: + # TODO: Check path doesn't collide with system endpoints + + if path.startswith(".") or path.endswith("."): + raise ValueError("Path cannot start or end with a '.'") + if not path.islower(): + raise ValueError("Path must be lowercase") + parts = path.split(".") + for part in parts: + if not str.isidentifier(part) or keyword.iskeyword(part): + raise ValueError(f"Invalid path: {part} is not a valid identifier") + + return path + + @field_validator("private_function", check_fields=False) + @classmethod + def validate_private_function( + cls, private_function: PrivateAPIEndpoint | None + ) -> PrivateAPIEndpoint | None: + # TODO: what kind of validation should we do here? + + return private_function + + @field_validator("mock_function", check_fields=False) + @classmethod + def validate_mock_function( + cls, mock_function: PublicAPIEndpoint + ) -> PublicAPIEndpoint: + # TODO: what kind of validation should we do here? + return mock_function + + +@serializable() +class UpdateTwinAPIEndpoint(PartialSyftObject, BaseTwinAPIEndpoint): + # version + __canonical_name__ = "UpdateTwinAPIEndpoint" + __version__ = SYFT_OBJECT_VERSION_1 + + path: str + private_function: PrivateAPIEndpoint | None = None + mock_function: PublicAPIEndpoint + description: MarkdownDescription | None = None + endpoint_timeout: int = 60 + + +@serializable() +class CreateTwinAPIEndpointV1(BaseTwinAPIEndpoint): + # version + __canonical_name__ = "CreateTwinAPIEndpoint" + __version__ = SYFT_OBJECT_VERSION_1 + + path: str + private_function: PrivateAPIEndpoint | None = None + mock_function: PublicAPIEndpoint + signature: Signature + description: MarkdownDescription | None = None + worker_pool: str | None = None + endpoint_timeout: int = 60 + + +@serializable() +class CreateTwinAPIEndpoint(BaseTwinAPIEndpoint): + # version + __canonical_name__ = "CreateTwinAPIEndpoint" + __version__ = SYFT_OBJECT_VERSION_2 + + path: str + private_function: PrivateAPIEndpoint | None = None + mock_function: PublicAPIEndpoint + signature: Signature + description: MarkdownDescription | None = None + worker_pool_name: str | None = None + endpoint_timeout: int = 60 + + def __init__( + self, description: str | MarkdownDescription | None = "", **kwargs: Any + ) -> None: + if isinstance(description, str): + description = MarkdownDescription(text=description) + + super().__init__(**kwargs, description=description) + + +@serializable() +class TwinAPIEndpointV1(SyncableSyftObject): + # version + __canonical_name__: str = "TwinAPIEndpoint" + __version__ = SYFT_OBJECT_VERSION_1 + __exclude_sync_diff_attrs__ = ["private_function"] + __private_sync_attr_mocks__ = { + "private_function": None, + } + + def __init__(self, **kwargs: Any) -> None: + super().__init__(**kwargs) + + path: str + private_function: PrivateAPIEndpoint | None = None + mock_function: PublicAPIEndpoint + signature: Signature + description: MarkdownDescription | None = None + action_object_id: UID + worker_pool: str | None = None + endpoint_timeout: int = 60 + + __attr_searchable__ = ["path"] + __attr_unique__ = ["path"] + __repr_attrs__ = [ + "path", + "description", + "private_function", + "mock_function", + "endpoint_timeout", + ] + + +@serializable() +class TwinAPIEndpoint(SyncableSyftObject): + # version + __canonical_name__: str = "TwinAPIEndpoint" + __version__ = SYFT_OBJECT_VERSION_2 + __exclude_sync_diff_attrs__ = ["private_function"] + __private_sync_attr_mocks__ 
= { + "private_function": None, + } + + def __init__(self, **kwargs: Any) -> None: + super().__init__(**kwargs) + + path: str + private_function: PrivateAPIEndpoint | None = None + mock_function: PublicAPIEndpoint + signature: Signature + description: MarkdownDescription | None = None + action_object_id: UID + worker_pool_name: str | None = None + endpoint_timeout: int = 60 + + __attr_searchable__ = ["path"] + __attr_unique__ = ["path"] + __repr_attrs__ = [ + "path", + "description", + "private_function", + "mock_function", + "endpoint_timeout", + ] + + def has_mock(self) -> bool: + return self.api_mock_code is not None + + def has_permission(self, context: AuthedServiceContext) -> bool: + """Check if the user has permission to access the endpoint. + + Args: + context: The context of the user requesting the code. + Returns: + bool: True if the user has permission to access the endpoint, False otherwise. + """ + if context.role.value == 128: + return True + return False + + def select_code( + self, context: AuthedServiceContext + ) -> PrivateAPIEndpoint | PublicAPIEndpoint | None: + """Select the code to execute based on the user's permissions and public code availability. + + Args: + context: The context of the user requesting the code. + Returns: + Result[Ok, Err]: The selected code to execute. + """ + if self.has_permission(context) and self.private_function: + return self.private_function + return self.mock_function + + def exec( + self, + context: AuthedServiceContext, + *args: Any, + log_id: UID | None = None, + **kwargs: Any, + ) -> Any: + """Execute the code based on the user's permissions and public code availability. + + Args: + context: The context of the user requesting the code. + *args: Any + **kwargs: Any + Returns: + Any: The result of the executed code. + """ + selected_code = self.select_code(context) + return self.exec_code(selected_code, context, *args, log_id=log_id, **kwargs) + + def exec_mock_function( + self, + context: AuthedServiceContext, + *args: Any, + log_id: UID | None = None, + **kwargs: Any, + ) -> Any: + """Execute the public code if it exists.""" + if self.mock_function: + return self.exec_code( + self.mock_function, context, *args, log_id=log_id, **kwargs + ) + + raise SyftException(public_message="No public code available") + + def exec_private_function( + self, + context: AuthedServiceContext, + *args: Any, + log_id: UID | None = None, + **kwargs: Any, + ) -> Any: + """Execute the private code if user is has the proper permissions. + + Args: + context: The context of the user requesting the code. + *args: Any + **kwargs: Any + Returns: + Any: The result of the executed code. 
+ """ + if self.private_function is None: + raise SyftException(public_message="No private code available") + + if self.has_permission(context): + return self.exec_code( + self.private_function, context, *args, log_id=log_id, **kwargs + ) + + raise SyftException(public_message="You're not allowed to run this code.") + + def get_user_client_from_server(self, context: AuthedServiceContext) -> SyftClient: + # get a user client + guest_client = context.server.get_guest_client() + user_client = guest_client + private_key = context.server.services.user.signing_key_for_verify_key( + context.credentials + ) + signing_key = private_key.signing_key + user_client.credentials = signing_key + return user_client + + def get_admin_client_from_server(self, context: AuthedServiceContext) -> SyftClient: + admin_client = context.server.get_guest_client() + admin_client.credentials = context.server.signing_key + return admin_client + + @as_result(SyftException) + def exec_code( + self, + code: PrivateAPIEndpoint | PublicAPIEndpoint, + context: AuthedServiceContext, + *args: Any, + log_id: UID | None = None, + **kwargs: Any, + ) -> Any: + # stdlib + import builtins as __builtin__ + import functools + + original_print = __builtin__.print + + try: + if log_id is not None: + print = functools.partial(log_print, context, log_id) + else: + print = original_print # type: ignore + + inner_function = ast.parse(code.api_code).body[0] + inner_function.decorator_list = [] + # compile the function + src = ast.unparse(inner_function) + raw_byte_code = compile(src, code.func_name, "exec") + register_fn_in_linecache(code.func_name, src) + user_client = self.get_user_client_from_server(context) + admin_client = self.get_admin_client_from_server(context) + + internal_context = code.build_internal_context( + context=context, admin_client=admin_client, user_client=user_client + ) + evil_string = f"{code.func_name}(*args, **kwargs,context=internal_context)" + + _globals = {"print": print} + # load it + exec(raw_byte_code, _globals, locals()) # nosec + + # execute it + evil_string = f"{code.func_name}(*args, **kwargs,context=internal_context)" + result = None + try: + # users can raise SyftException in their code + result = eval(evil_string, _globals, locals()) # nosec + except SyftException as e: + # capture it as the result variable + result = e + + # run all this code to clean up the state + code.update_state(internal_context.state) + + if isinstance(code, PublicAPIEndpoint): + self.mock_function = code + else: + self.private_function = code # type: ignore + + api_service = context.server.get_service("apiservice") + api_service.stash.upsert( + context.server.services.user.root_verify_key, self + ).unwrap() + + print = original_print # type: ignore + # if we caught a SyftException above we will raise and auto wrap to Result + if isinstance(result, SyftException): + raise result + + # here we got a non Exception result which will also be wrapped in Result + # return the results + return result + except Exception as e: + # If it's admin, return the error message. + # TODO: cleanup typeerrors + if context.role.value == 128 or isinstance(e, TypeError): + raise SyftException( + public_message=f"An error was raised during the execution of the API endpoint call: \n {str(e)}" + ) + else: + raise SyftException( + public_message=( + "Oops something went wrong during this endpoint execution, " + "please contact your admin." 
+ ) + ) + + +def set_access_type(context: TransformContext) -> TransformContext: + if context.output is not None and context.obj is not None: + if context.obj.private_function is not None: + context.output["access"] = "Private / Mock" + else: + context.output["access"] = "Public" + return context + + +def check_and_cleanup_signature(context: TransformContext) -> TransformContext: + if context.output is not None and context.obj is not None: + params = dict(context.obj.signature.parameters) + if "context" not in params: + raise ValueError( + "Function Signature must include 'context' [AuthedContext] parameters." + ) + params.pop("context", None) + context.output["signature"] = Signature( + list(params.values()), + return_annotation=context.obj.signature.return_annotation, + ) + return context + + +def decorator_cleanup(code: str) -> str: + # Regular expression to remove decorator + # It matches from "@" to "def" (non-greedy) across multiple lines + decorator_regex = r"@.*?def " + + # Substituting the matched pattern with "def" + return re.sub(decorator_regex, "def ", code, count=1, flags=re.DOTALL) + + +def extract_code_string(code_field: str) -> Callable: + def code_string(context: TransformContext) -> TransformContext: + if context.obj is not None and context.output is not None: + endpoint_type = ( + context.obj.private_function + if code_field == "private_function" + else context.obj.mock_function + ) + helper_function_field = ( + "mock_helper_functions" + if code_field == "mock_function" + else "private_helper_functions" + ) + + context.server = cast(AbstractServer, context.server) + admin_key = context.server.services.user.root_verify_key + + # If endpoint exists **AND** (has visible access **OR** the user is admin) + if endpoint_type is not None and ( + endpoint_type.view_access or context.credentials == admin_key + ): + context.output[code_field] = decorator_cleanup(endpoint_type.api_code) + context.output[helper_function_field] = ( + endpoint_type.helper_functions.values() or [] + ) + else: + context.output[code_field] = None + context.output[helper_function_field] = [] + return context + + return code_string + + +@transform(TwinAPIAuthedContext, TwinAPIContextView) +def twin_api_context_to_twin_api_context_view() -> list[Callable]: + return [keep(["state", "settings", "user"])] + + +@transform(CreateTwinAPIEndpoint, TwinAPIEndpoint) +def endpoint_create_to_twin_endpoint() -> list[Callable]: + return [generate_id, generate_action_object_id, check_and_cleanup_signature] + + +@transform(TwinAPIEndpoint, TwinAPIEndpointView) +def twin_endpoint_to_view() -> list[Callable]: + return [ + set_access_type, + extract_code_string("private_function"), + extract_code_string("mock_function"), + ] + + +@transform(Endpoint, PrivateAPIEndpoint) +def endpoint_to_private_endpoint() -> list[Callable]: + return [ + keep( + [ + "api_code", + "func_name", + "settings", + "view_access", + "helper_functions", + "state", + "signature", + ] + ) + ] + + +@transform(Endpoint, PublicAPIEndpoint) +def endpoint_to_public_endpoint() -> list[Callable]: + return [ + keep( + [ + "api_code", + "func_name", + "settings", + "view_access", + "helper_functions", + "state", + "signature", + ] + ) + ] + + +@migrate(TwinAPIEndpointV1, TwinAPIEndpoint) +def migrate_twin_api_endpoint_v1_to_current() -> list[Callable]: + return [ + drop(["worker_pool"]), + make_set_default("worker_pool_name", None), + ] + + +@migrate(CreateTwinAPIEndpointV1, CreateTwinAPIEndpoint) +def migrate_create_twin_api_endpoint_v1_to_current() -> 
list[Callable]: + return [ + drop(["worker_pool"]), + make_set_default("worker_pool_name", None), + ] + + +@migrate(TwinAPIEndpointViewV1, TwinAPIEndpointView) +def migrate_twin_api_endpoint_view_v1_to_current() -> list[Callable]: + return [ + drop(["worker_pool"]), + make_set_default("worker_pool_name", None), + ] + + +@migrate(TwinAPIEndpointView, TwinAPIEndpointViewV1) +def migrate_twin_api_endpoint_view_current_to_v1() -> list[Callable]: + return [ + drop(["worker_pool_name"]), + make_set_default("worker_pool", None), + ] + + +@migrate(CreateTwinAPIEndpoint, CreateTwinAPIEndpointV1) +def migrate_create_twin_api_endpoint_current_to_v1() -> list[Callable]: + return [ + drop(["worker_pool_name"]), + make_set_default("worker_pool", None), + ] + + +@migrate(TwinAPIEndpoint, TwinAPIEndpointV1) +def migrate_twin_api_endpoint_current_to_v1() -> list[Callable]: + return [ + drop(["worker_pool_name"]), + make_set_default("worker_pool", None), + ] + + +def api_endpoint( + path: str, + settings: dict[str, str] | None = None, + helper_functions: list[Callable] | None = None, + description: MarkdownDescription | None = None, + worker_pool_name: str | None = None, + endpoint_timeout: int = 60, +) -> Callable[..., TwinAPIEndpoint | SyftError]: + def decorator(f: Callable) -> TwinAPIEndpoint | SyftError: + try: + helper_functions_dict = { + f.__name__: dedent(inspect.getsource(f)) + for f in (helper_functions or []) + } + res = CreateTwinAPIEndpoint( + path=path, + mock_function=PublicAPIEndpoint( + api_code=inspect.getsource(f), + func_name=f.__name__, + settings=settings, + signature=inspect.signature(f), + helper_functions=helper_functions_dict, + ), + signature=inspect.signature(f), + description=description, + worker_pool_name=worker_pool_name, + endpoint_timeout=endpoint_timeout, + ) + except ValidationError as e: + for error in e.errors(): + error_msg = error["msg"] + res = SyftError(message=error_msg) + return res + + return decorator + + +def api_endpoint_method( + settings: dict[str, str] | None = None, + helper_functions: list[Callable] | None = None, +) -> Callable[..., Endpoint | SyftError]: + def decorator(f: Callable) -> Endpoint | SyftError: + try: + helper_functions_dict = { + f.__name__: dedent(inspect.getsource(f)) + for f in (helper_functions or []) + } + return Endpoint( + api_code=inspect.getsource(f), + func_name=f.__name__, + settings=settings, + signature=inspect.signature(f), + helper_functions=helper_functions_dict, + ) + except ValidationError as e: + for error in e.errors(): + error_msg = error["msg"] + res = SyftError(message=error_msg) + return res + + return decorator + + +def create_new_api_endpoint( + path: str, + mock_function: Endpoint, + private_function: Endpoint | None = None, + description: MarkdownDescription | None = None, + worker_pool_name: str | None = None, + endpoint_timeout: int = 60, + hide_mock_definition: bool = False, + hide_private_definition: bool = True, +) -> CreateTwinAPIEndpoint | SyftError: + try: + # Parse the string to extract the function name + + endpoint_signature = mock_function.signature + if private_function is not None: + if private_function.signature != mock_function.signature: + return SyftError(message="Signatures don't match") + endpoint_signature = mock_function.signature + private_function.view_access = not hide_private_definition + mock_function.view_access = not hide_mock_definition + + return CreateTwinAPIEndpoint( + path=path, + private_function=private_function.to(PrivateAPIEndpoint), + 
mock_function=mock_function.to(PublicAPIEndpoint), + signature=endpoint_signature, + description=description, + worker_pool_name=worker_pool_name, + endpoint_timeout=endpoint_timeout, + ) + + return CreateTwinAPIEndpoint( + path=path, + prublic_code=mock_function.to(PublicAPIEndpoint), + signature=endpoint_signature, + worker_pool_name=worker_pool_name, + endpoint_timeout=endpoint_timeout, + ) + except ValidationError as e: + for error in e.errors(): + error_msg = error["msg"] + + return SyftError(message=error_msg) diff --git a/packages/syft/src/syft/service/api/api_service.py b/packages/syft/src/syft/service/api/api_service.py new file mode 100644 index 00000000000..5993431efd2 --- /dev/null +++ b/packages/syft/src/syft/service/api/api_service.py @@ -0,0 +1,606 @@ +# stdlib +import time +from typing import Any +from typing import cast + +# third party +from pydantic import ValidationError + +# relative +from ...serde.serializable import serializable +from ...service.action.action_endpoint import CustomEndpointActionObject +from ...service.action.action_object import ActionObject +from ...store.db.db import DBManager +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.uid import UID +from ..context import AuthedServiceContext +from ..response import SyftSuccess +from ..service import AbstractService +from ..service import TYPE_TO_SERVICE +from ..service import service_method +from ..user.user_roles import ADMIN_ROLE_LEVEL +from ..user.user_roles import DATA_SCIENTIST_ROLE_LEVEL +from ..user.user_roles import GUEST_ROLE_LEVEL +from .api import CreateTwinAPIEndpoint +from .api import Endpoint +from .api import PrivateAPIEndpoint +from .api import PublicAPIEndpoint +from .api import TwinAPIContextView +from .api import TwinAPIEndpoint +from .api import TwinAPIEndpointView +from .api import UpdateTwinAPIEndpoint +from .api_stash import TwinAPIEndpointStash + + +@serializable(canonical_name="APIService", version=1) +class APIService(AbstractService): + stash: TwinAPIEndpointStash + + def __init__(self, store: DBManager) -> None: + self.stash = TwinAPIEndpointStash(store=store) + + @service_method( + path="api.add", name="add", roles=ADMIN_ROLE_LEVEL, unwrap_on_success=False + ) + def set( + self, + context: AuthedServiceContext, + endpoint: CreateTwinAPIEndpoint | TwinAPIEndpoint, + ) -> SyftSuccess: + """Register an CustomAPIEndpoint.""" + try: + new_endpoint = None + if isinstance(endpoint, CreateTwinAPIEndpoint): # type: ignore + new_endpoint = endpoint.to(TwinAPIEndpoint) + elif isinstance(endpoint, TwinAPIEndpoint): # type: ignore + new_endpoint = endpoint + + if new_endpoint is None: + raise SyftException(public_message="Invalid endpoint type.") + except ValueError as e: + raise SyftException(public_message=str(e)) + + if isinstance(endpoint, CreateTwinAPIEndpoint): + endpoint_exists = self.stash.path_exists( + context.credentials, new_endpoint.path + ).unwrap() + if endpoint_exists: + raise SyftException( + public_message="An API endpoint already exists at the given path." 
+ ) + + result = self.stash.upsert(context.credentials, obj=new_endpoint).unwrap() + action_obj = ActionObject.from_obj( + id=new_endpoint.action_object_id, + syft_action_data=CustomEndpointActionObject(endpoint_id=result.id), + syft_server_location=context.server.id, + syft_client_verify_key=context.credentials, + ) + context.server.services.action.set_result_to_store( + context=context, + result_action_object=action_obj, + has_result_read_permission=True, + ).unwrap() + + return SyftSuccess(message="Endpoint successfully created.") + + @service_method( + path="api.update", + name="update", + roles=ADMIN_ROLE_LEVEL, + unwrap_on_success=False, + ) + def update( + self, + context: AuthedServiceContext, + endpoint_path: str, + mock_function: Endpoint | None = None, + private_function: Endpoint | None = None, + hide_mock_definition: bool | None = None, + endpoint_timeout: int | None = None, + ) -> SyftSuccess: + """Updates an specific API endpoint.""" + + # if any of these are supplied e.g. truthy then keep going otherwise return + # an error + # TODO: change to an Update object with autosplat + if not ( + mock_function + or private_function + or (hide_mock_definition is not None) + or endpoint_timeout + ): + raise SyftException( + public_message='At least one of "mock_function", "private_function", ' + '"hide_mock_definition" or "endpoint_timeout" is required.' + ) + + endpoint = self.stash.get_by_path(context.credentials, endpoint_path).unwrap() + + endpoint_timeout = ( + endpoint_timeout + if endpoint_timeout is not None + else endpoint.endpoint_timeout + ) + + updated_mock = ( + mock_function.to(PublicAPIEndpoint) + if mock_function is not None + else endpoint.mock_function + ) + updated_private = ( + private_function.to(PrivateAPIEndpoint) + if private_function is not None + else endpoint.private_function + ) + + try: + endpoint_update = UpdateTwinAPIEndpoint( + path=endpoint_path, + mock_function=updated_mock, + private_function=updated_private, + endpoint_timeout=endpoint_timeout, + ) + except ValidationError as e: + raise SyftException(public_message=str(e)) + + endpoint.mock_function = endpoint_update.mock_function + endpoint.private_function = endpoint_update.private_function + endpoint.signature = updated_mock.signature + endpoint.endpoint_timeout = endpoint_update.endpoint_timeout + + if hide_mock_definition is not None: + view_access = not hide_mock_definition + endpoint.mock_function.view_access = view_access + + # save changes + self.stash.upsert(context.credentials, obj=endpoint).unwrap() + return SyftSuccess(message="Endpoint successfully updated.") + + @service_method( + path="api.delete", + name="delete", + roles=ADMIN_ROLE_LEVEL, + unwrap_on_success=False, + ) + def delete(self, context: AuthedServiceContext, endpoint_path: str) -> SyftSuccess: + """Deletes an specific API endpoint.""" + endpoint = self.stash.get_by_path(context.credentials, endpoint_path).unwrap() + self.stash.delete_by_uid(context.credentials, endpoint.id).unwrap() + return SyftSuccess(message="Endpoint successfully deleted.") + + @service_method( + path="api.view", + name="view", + roles=DATA_SCIENTIST_ROLE_LEVEL, + ) + def view(self, context: AuthedServiceContext, path: str) -> TwinAPIEndpointView: + """Retrieves an specific API endpoint.""" + api_endpoint = self.stash.get_by_path(context.server.verify_key, path).unwrap() + return api_endpoint.to(TwinAPIEndpointView, context=context) + + @service_method( + path="api.get", + name="get", + roles=ADMIN_ROLE_LEVEL, + ) + def get(self, context: 
AuthedServiceContext, api_path: str) -> TwinAPIEndpoint: + """Retrieves an specific API endpoint.""" + return self.stash.get_by_path(context.server.verify_key, api_path).unwrap() + + @service_method( + path="api.set_state", + name="set_state", + roles=ADMIN_ROLE_LEVEL, + ) + def set_state( + self, + context: AuthedServiceContext, + api_path: str, + state: dict, + private: bool = False, + mock: bool = False, + both: bool = False, + ) -> TwinAPIEndpoint: + """Sets the state of a specific API endpoint.""" + if both: + private = True + mock = True + api_endpoint = self.stash.get_by_path( + context.server.verify_key, api_path + ).unwrap() + + if private and api_endpoint.private_function: + api_endpoint.private_function.state = state + if mock and api_endpoint.mock_function: + api_endpoint.mock_function.state = state + + self.stash.upsert(context.credentials, obj=api_endpoint).unwrap() + return SyftSuccess(message=f"APIEndpoint {api_path} state updated.") + + @service_method( + path="api.set_settings", + name="set_settings", + roles=ADMIN_ROLE_LEVEL, + ) + def set_settings( + self, + context: AuthedServiceContext, + api_path: str, + settings: dict, + private: bool = False, + mock: bool = False, + both: bool = False, + ) -> TwinAPIEndpoint: + """Sets the settings of a specific API endpoint.""" + if both: + private = True + mock = True + api_endpoint = self.stash.get_by_path( + context.server.verify_key, api_path + ).unwrap() + + if private and api_endpoint.private_function: + api_endpoint.private_function.settings = settings + if mock and api_endpoint.mock_function: + api_endpoint.mock_function.settings = settings + + self.stash.upsert(context.credentials, obj=api_endpoint).unwrap() + return SyftSuccess(message=f"APIEndpoint {api_path} settings updated.") + + @service_method( + path="api.api_endpoints", + name="api_endpoints", + roles=DATA_SCIENTIST_ROLE_LEVEL, + ) + def api_endpoints( + self, + context: AuthedServiceContext, + ) -> list[TwinAPIEndpointView]: + """Retrieves a list of available API endpoints view available to the user.""" + admin_key = context.server.services.user.root_verify_key + all_api_endpoints = self.stash.get_all(admin_key).unwrap() + + api_endpoint_view = [ + api_endpoint.to(TwinAPIEndpointView, context=context) + for api_endpoint in all_api_endpoints + ] + + return api_endpoint_view + + @service_method( + path="api.call_in_jobs", name="call_in_jobs", roles=GUEST_ROLE_LEVEL + ) + def call_in_jobs( + self, + context: AuthedServiceContext, + path: str, + *args: Any, + **kwargs: Any, + ) -> Any: + """Call a Custom API Method in a Job""" + return self._call_in_jobs(context, "call", path, *args, **kwargs).unwrap() + + @service_method( + path="api.call_private_in_jobs", + name="call_private_in_jobs", + roles=GUEST_ROLE_LEVEL, + ) + def call_private_in_jobs( + self, + context: AuthedServiceContext, + path: str, + *args: Any, + **kwargs: Any, + ) -> Any: + """Call a Custom API Method in a Job""" + return self._call_in_jobs( + context, "call_private", path, *args, **kwargs + ).unwrap() + + @service_method( + path="api.call_public_in_jobs", + name="call_public_in_jobs", + roles=GUEST_ROLE_LEVEL, + ) + def call_public_in_jobs( + self, + context: AuthedServiceContext, + path: str, + *args: Any, + **kwargs: Any, + ) -> Any: + """Call a Custom API Method in a Job""" + return self._call_in_jobs( + context, "call_public", path, *args, **kwargs + ).unwrap() + + @as_result(SyftException) + def _call_in_jobs( + self, + context: AuthedServiceContext, + method: str, + path: str, + *args: Any, 
+ **kwargs: Any, + ) -> Any: + custom_endpoint = self.get_code( + context=context, + endpoint_path=path, + ).unwrap() + log_id = UID() + job = context.server.add_api_endpoint_execution_to_queue( + context.credentials, + method, + path, + *args, + worker_pool_name=custom_endpoint.worker_pool_name, + log_id=log_id, + **kwargs, + ) + + # relative + from ..job.job_stash import JobStatus + + # So result is a Job object + job_id = job.id + # Question: For a small moment, when job status is updated, it doesn't return the job during the .get() as if + # it's not in the stash. Then afterwards if appears again. Is this a bug? + + start = time.time() + + # TODO: what can we do here????? + while ( + job is None + or job.status == JobStatus.PROCESSING + or job.status == JobStatus.CREATED + ): + job = context.server.services.job.get(context, job_id) + time.sleep(0.1) + if (time.time() - custom_endpoint.endpoint_timeout) > start: + raise SyftException( + public_message=( + f"Function timed out in {custom_endpoint.endpoint_timeout} seconds. " + + f"Get the Job with id: {job_id} to check results." + ) + ) + + if job.status == JobStatus.COMPLETED: + return job.result + elif job.status == JobStatus.ERRORED: + raise SyftException( + public_message=f"Function failed to complete: {job.result.message}" + ) + else: + raise SyftException(public_message="Function failed to complete.") + + @service_method( + path="api.get_public_context", name="get_public_context", roles=ADMIN_ROLE_LEVEL + ) + def get_public_context( + self, context: AuthedServiceContext, path: str + ) -> dict[str, Any]: + """Get specific public api context.""" + custom_endpoint = self.get_code( + context=context, + endpoint_path=path, + ).unwrap() + + return custom_endpoint.mock_function.build_internal_context(context=context).to( + TwinAPIContextView + ) + + @service_method( + path="api.get_private_context", + name="get_private_context", + roles=ADMIN_ROLE_LEVEL, + ) + def get_private_context( + self, context: AuthedServiceContext, path: str + ) -> dict[str, Any]: + """Get specific private api context.""" + custom_endpoint = self.get_code( + context=context, + endpoint_path=path, + ).unwrap() + + custom_endpoint.private_function = cast( + PrivateAPIEndpoint, custom_endpoint.private_function + ) + + return custom_endpoint.private_function.build_internal_context( + context=context + ).to(TwinAPIContextView) + + @service_method(path="api.get_all", name="get_all", roles=ADMIN_ROLE_LEVEL) + def get_all( + self, + context: AuthedServiceContext, + ) -> list[TwinAPIEndpoint]: + """Get all API endpoints.""" + return self.stash.get_all(context.credentials).unwrap() + + @service_method(path="api.call", name="call", roles=GUEST_ROLE_LEVEL) + def call( + self, + context: AuthedServiceContext, + path: str, + *args: Any, + log_id: UID | None = None, + **kwargs: Any, + ) -> SyftSuccess: + """Call a Custom API Method""" + custom_endpoint = self.get_code( + context=context, + endpoint_path=path, + ).unwrap() + + exec_result = custom_endpoint.exec( + context, *args, log_id=log_id, **kwargs + ).unwrap() + action_obj = ActionObject.from_obj(exec_result) + try: + return context.server.services.action.set_result_to_store( + context=context, + result_action_object=action_obj, + has_result_read_permission=True, + ).unwrap() + except Exception as e: + # stdlib + import traceback + + raise SyftException( + public_message=f"Failed to run. 
{e}, {traceback.format_exc()}" + ) + + @service_method(path="api.call_public", name="call_public", roles=GUEST_ROLE_LEVEL) + def call_public( + self, + context: AuthedServiceContext, + path: str, + *args: Any, + log_id: UID | None = None, + **kwargs: Any, + ) -> ActionObject: + """Call a Custom API Method in public mode""" + custom_endpoint = self.get_code( + context=context, + endpoint_path=path, + ).unwrap() + exec_result = custom_endpoint.exec_mock_function( + context, *args, log_id=log_id, **kwargs + ).unwrap() + + action_obj = ActionObject.from_obj(exec_result) + try: + return context.server.services.action.set_result_to_store( + context=context, + result_action_object=action_obj, + has_result_read_permission=True, + ).unwrap() + except Exception as e: + # stdlib + import traceback + + raise SyftException( + public_message=f"Failed to run. {e}, {traceback.format_exc()}" + ) + + @service_method( + path="api.call_private", name="call_private", roles=GUEST_ROLE_LEVEL + ) + def call_private( + self, + context: AuthedServiceContext, + path: str, + *args: Any, + log_id: UID | None = None, + **kwargs: Any, + ) -> ActionObject: + """Call a Custom API Method in private mode""" + custom_endpoint = self.get_code( + context=context, + endpoint_path=path, + ).unwrap() + + exec_result = custom_endpoint.exec_private_function( + context, *args, log_id=log_id, **kwargs + ).unwrap() + + action_obj = ActionObject.from_obj(exec_result) + try: + return context.server.services.action.set_result_to_store( + context=context, result_action_object=action_obj + ).unwrap() + except Exception as e: + # stdlib + import traceback + + raise SyftException( + public_message=f"Failed to run. {e}, {traceback.format_exc()}" + ) + + @service_method( + path="api.exists", + name="exists", + ) + def exists(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: + """Check if an endpoint exists""" + self.get_endpoint_by_uid(context, uid).unwrap() + return SyftSuccess(message="Endpoint exists") + + # ==== The methods below aren't meant to be called directly by the user, but + # rather by the server context. 
=== + # Therefore, they are not decorated with @service_method + @as_result(SyftException) + def execute_server_side_endpoint_by_id( + self, + context: AuthedServiceContext, + endpoint_uid: UID, + *args: Any, + log_id: UID | None = None, + **kwargs: Any, + ) -> Any: + endpoint = self.get_endpoint_by_uid(context, endpoint_uid).unwrap() + selected_code = endpoint.private_function + if not selected_code: + selected_code = endpoint.mock_function + + return endpoint.exec_code( + selected_code, context, *args, log_id=log_id, **kwargs + ).unwrap() + + @as_result(StashException, NotFoundException, SyftException) + def execute_service_side_endpoint_private_by_id( + self, + context: AuthedServiceContext, + endpoint_uid: UID, + *args: Any, + log_id: UID | None = None, + **kwargs: Any, + ) -> Any: + endpoint = self.get_endpoint_by_uid(context, endpoint_uid).unwrap() + return endpoint.exec_code( + endpoint.private_function, context, *args, log_id=log_id, **kwargs + ).unwrap() + + @as_result(StashException, NotFoundException, SyftException) + def execute_server_side_endpoint_mock_by_id( + self, + context: AuthedServiceContext, + endpoint_uid: UID, + *args: Any, + log_id: UID | None = None, + **kwargs: Any, + ) -> Any: + endpoint = self.get_endpoint_by_uid(context, endpoint_uid).unwrap() + return endpoint.exec_code( + endpoint.mock_function, context, *args, log_id=log_id, **kwargs + ).unwrap() + + @as_result(StashException, NotFoundException) + def get_endpoint_by_uid( + self, context: AuthedServiceContext, uid: UID + ) -> TwinAPIEndpoint: + admin_key = context.server.services.user.root_verify_key + return self.stash.get_by_uid(admin_key, uid).unwrap() + + @as_result(StashException) + def get_endpoints(self, context: AuthedServiceContext) -> list[TwinAPIEndpoint]: + # TODO: Add ability to specify which roles see which endpoints + # for now skip auth + return self.stash.get_all(context.server.verify_key).unwrap() + + @as_result(StashException, NotFoundException) + def get_code( + self, context: AuthedServiceContext, endpoint_path: str + ) -> TwinAPIEndpoint: + return self.stash.get_by_path( + context.server.verify_key, path=endpoint_path + ).unwrap() + + +TYPE_TO_SERVICE[TwinAPIEndpoint] = APIService diff --git a/packages/syft/src/syft/service/api/api_stash.py b/packages/syft/src/syft/service/api/api_stash.py new file mode 100644 index 00000000000..e892d48da61 --- /dev/null +++ b/packages/syft/src/syft/service/api/api_stash.py @@ -0,0 +1,35 @@ +# relative +from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.result import as_result +from .api import TwinAPIEndpoint + +MISSING_PATH_STRING = "Endpoint path: {path} does not exist." + + +@serializable(canonical_name="TwinAPIEndpointSQLStash", version=1) +class TwinAPIEndpointStash(ObjectStash[TwinAPIEndpoint]): + @as_result(StashException, NotFoundException) + def get_by_path(self, credentials: SyftVerifyKey, path: str) -> TwinAPIEndpoint: + # TODO standardize by returning None if endpoint doesn't exist.
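
Illustrative aside (not part of the patch): the stash code above and below leans on the @as_result / .unwrap() error-handling pattern imported from syft.types.result and syft.store.document_store_errors. What follows is a minimal, self-contained Python sketch of that pattern under simplifying assumptions; Result, as_result, and NotFoundException are re-implemented here as stand-ins, not the real Syft classes. It shows why path_exists can reduce a missing path to a plain boolean by catching the exception that unwrap() re-raises.

class NotFoundException(Exception):
    """Stand-in for the real NotFoundException (illustrative only)."""

    def __init__(self, public_message: str = "") -> None:
        super().__init__(public_message)
        self.public_message = public_message


class Result:
    """Stand-in result wrapper: holds either a value or a captured exception."""

    def __init__(self, value=None, error=None):
        self._value = value
        self._error = error

    def is_err(self) -> bool:
        return self._error is not None

    def unwrap(self):
        # Return the value, or re-raise the captured exception.
        if self._error is not None:
            raise self._error
        return self._value


def as_result(*exc_types):
    """Stand-in decorator: capture the listed exceptions into a Result instead of letting them propagate."""

    def decorator(fn):
        def wrapper(*args, **kwargs) -> Result:
            try:
                return Result(value=fn(*args, **kwargs))
            except exc_types as e:
                return Result(error=e)

        return wrapper

    return decorator


# Hypothetical in-memory "stash" used only for this example.
_ENDPOINTS = {"my.endpoint": {"path": "my.endpoint"}}


@as_result(NotFoundException)
def get_by_path(path: str) -> dict:
    # Mirrors get_by_path above: raise (captured into a Result) when the path is unknown.
    if path not in _ENDPOINTS:
        raise NotFoundException(public_message=f"Endpoint path: {path} does not exist.")
    return _ENDPOINTS[path]


def path_exists(path: str) -> bool:
    # Mirrors path_exists below: unwrap() re-raises on a missing path,
    # and the caller converts that into True/False.
    try:
        get_by_path(path).unwrap()
        return True
    except NotFoundException:
        return False


print(path_exists("my.endpoint"))   # True
print(path_exists("missing.path"))  # False

In the patch itself the same shape appears with the real types: get_by_path is decorated with @as_result(StashException, NotFoundException) and raises NotFoundException, while path_exists simply unwraps the result and catches that exception.
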
+ res = self.get_one( + credentials=credentials, + filters={"path": path}, + ) + + if res.is_err(): + raise NotFoundException( + public_message=MISSING_PATH_STRING.format(path=path) + ) + return res.unwrap() + + @as_result(StashException) + def path_exists(self, credentials: SyftVerifyKey, path: str) -> bool: + try: + self.get_by_path(credentials=credentials, path=path).unwrap() + return True + except NotFoundException: + return False diff --git a/packages/syft/src/syft/service/api/utils.py b/packages/syft/src/syft/service/api/utils.py new file mode 100644 index 00000000000..8680c4512ee --- /dev/null +++ b/packages/syft/src/syft/service/api/utils.py @@ -0,0 +1,44 @@ +# stdlib +import builtins as __builtin__ +import datetime +import sys +from typing import Any + +# relative +from ...types.uid import UID +from ..action.action_object import ActionObject +from ..context import AuthedServiceContext +from ..job.job_stash import Job +from ..response import SyftError + + +def print( + context: AuthedServiceContext, + log_id: UID, + *args: Any, + sep: str = " ", + end: str = "\n", +) -> str | None: + def to_str(arg: Any) -> str: + if isinstance(arg, bytes): + return arg.decode("utf-8") + if isinstance(arg, Job): + return f"JOB: {arg.id}" + if isinstance(arg, SyftError): + return f"JOB: {arg.message}" + if isinstance(arg, ActionObject): + return str(arg.syft_action_data) + return str(arg) + + new_args = [to_str(arg) for arg in args] + new_str = sep.join(new_args) + end + if context.server is not None: + context.server.services.log.append(context=context, uid=log_id, new_str=new_str) + time = datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S") + return __builtin__.print( + f"{time} FUNCTION LOG :", + *new_args, + end=end, + sep=sep, + file=sys.stderr, + ) diff --git a/packages/syft/src/syft/service/attestation/__init__.py b/packages/syft/src/syft/service/attestation/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft/src/syft/service/attestation/attestation_constants.py b/packages/syft/src/syft/service/attestation/attestation_constants.py new file mode 100644 index 00000000000..855911e03f0 --- /dev/null +++ b/packages/syft/src/syft/service/attestation/attestation_constants.py @@ -0,0 +1,5 @@ +ATTESTATION_SERVICE_URL = ( + "http://localhost:4455" # Replace with "http://attestation:4455" +) +ATTEST_CPU_ENDPOINT = "/attest/cpu" +ATTEST_GPU_ENDPOINT = "/attest/gpu" diff --git a/packages/syft/src/syft/service/attestation/attestation_service.py b/packages/syft/src/syft/service/attestation/attestation_service.py new file mode 100644 index 00000000000..93110f72d74 --- /dev/null +++ b/packages/syft/src/syft/service/attestation/attestation_service.py @@ -0,0 +1,72 @@ +# stdlib +from collections.abc import Callable + +# third party +import requests + +# relative +from ...serde.serializable import serializable +from ...store.db.db import DBManager +from ...types.errors import SyftException +from ...types.result import as_result +from ...util.util import str_to_bool +from ..context import AuthedServiceContext +from ..response import SyftSuccess +from ..service import AbstractService +from ..service import service_method +from ..user.user_roles import GUEST_ROLE_LEVEL +from .attestation_constants import ATTESTATION_SERVICE_URL +from .attestation_constants import ATTEST_CPU_ENDPOINT +from .attestation_constants import ATTEST_GPU_ENDPOINT + + +@serializable(canonical_name="AttestationService", version=1) +class AttestationService(AbstractService): + """This service is 
responsible for getting all sorts of attestations for any client.""" + + def __init__(self, store: DBManager) -> None: + pass + + @as_result(SyftException) + def perform_request( + self, method: Callable, endpoint: str, raw: bool = False + ) -> SyftSuccess | str: + try: + response = method(f"{ATTESTATION_SERVICE_URL}{endpoint}") + response.raise_for_status() + message = response.json().get("result") + raw_token = response.json().get("token") + if raw: + return raw_token + elif str_to_bool(message): + return SyftSuccess(message=message) + else: + raise SyftException(public_message=message) + except requests.HTTPError: + raise SyftException(public_message=f"{response.json()['detail']}") + except requests.RequestException as e: + raise SyftException(public_message=f"Failed to perform request. {e}") + + @service_method( + path="attestation.get_cpu_attestation", + name="get_cpu_attestation", + roles=GUEST_ROLE_LEVEL, + ) + def get_cpu_attestation( + self, context: AuthedServiceContext, raw_token: bool = False + ) -> str | SyftSuccess: + return self.perform_request( + requests.get, ATTEST_CPU_ENDPOINT, raw_token + ).unwrap() + + @service_method( + path="attestation.get_gpu_attestation", + name="get_gpu_attestation", + roles=GUEST_ROLE_LEVEL, + ) + def get_gpu_attestation( + self, context: AuthedServiceContext, raw_token: bool = False + ) -> str | SyftSuccess: + return self.perform_request( + requests.get, ATTEST_GPU_ENDPOINT, raw_token + ).unwrap() diff --git a/packages/syft/src/syft/service/blob_storage/remote_profile.py b/packages/syft/src/syft/service/blob_storage/remote_profile.py index 7ff8f76427d..76abe869ae6 100644 --- a/packages/syft/src/syft/service/blob_storage/remote_profile.py +++ b/packages/syft/src/syft/service/blob_storage/remote_profile.py @@ -1,22 +1,22 @@ # relative from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionSettings -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...store.db.db import DBManager +from ...store.db.stash import ObjectStash +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject +from ..service import AbstractService @serializable() class RemoteProfile(SyftObject): __canonical_name__ = "RemoteConfig" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 @serializable() class AzureRemoteProfile(RemoteProfile): __canonical_name__ = "AzureRemoteConfig" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 profile_name: str # used by seaweedfs account_name: str @@ -24,12 +24,14 @@ class AzureRemoteProfile(RemoteProfile): container_name: str -@serializable() -class RemoteProfileStash(BaseUIDStoreStash): - object_type = RemoteProfile - settings: PartitionSettings = PartitionSettings( - name=RemoteProfile.__canonical_name__, object_type=RemoteProfile - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +@serializable(canonical_name="RemoteProfileSQLStash", version=1) +class RemoteProfileStash(ObjectStash[RemoteProfile]): + pass + + +@serializable(canonical_name="RemoteProfileService", version=1) +class RemoteProfileService(AbstractService): + stash: RemoteProfileStash + + def __init__(self, store: DBManager) -> None: + self.stash = RemoteProfileStash(store=store) diff --git a/packages/syft/src/syft/service/blob_storage/service.py 
b/packages/syft/src/syft/service/blob_storage/service.py index 808bbc754d8..055e4d946e4 100644 --- a/packages/syft/src/syft/service/blob_storage/service.py +++ b/packages/syft/src/syft/service/blob_storage/service.py @@ -1,32 +1,32 @@ # stdlib from pathlib import Path -from typing import cast # third party import requests # relative -from ...abstract_node import AbstractNode from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey from ...service.action.action_object import ActionObject from ...store.blob_storage import BlobRetrieval from ...store.blob_storage.on_disk import OnDiskBlobDeposit from ...store.blob_storage.seaweedfs import SeaweedFSBlobDeposit -from ...store.document_store import DocumentStore -from ...store.document_store import UIDPartitionKey +from ...store.db.db import DBManager from ...types.blob_storage import AzureSecureFilePathLocation from ...types.blob_storage import BlobFileType from ...types.blob_storage import BlobStorageEntry from ...types.blob_storage import BlobStorageMetadata from ...types.blob_storage import CreateBlobStorageEntry from ...types.blob_storage import SeaweedSecureFilePathLocation +from ...types.errors import SyftException +from ...types.result import as_result from ...types.uid import UID from ..context import AuthedServiceContext -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import TYPE_TO_SERVICE from ..service import service_method +from ..user.user_roles import ADMIN_ROLE_LEVEL from ..user.user_roles import GUEST_ROLE_LEVEL from .remote_profile import AzureRemoteProfile from .remote_profile import RemoteProfileStash @@ -35,27 +35,24 @@ BlobDepositType = OnDiskBlobDeposit | SeaweedFSBlobDeposit -@serializable() +@serializable(canonical_name="BlobStorageService", version=1) class BlobStorageService(AbstractService): - store: DocumentStore stash: BlobStorageStash remote_profile_stash: RemoteProfileStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = BlobStorageStash(store=store) self.remote_profile_stash = RemoteProfileStash(store=store) @service_method(path="blob_storage.get_all", name="get_all") def get_all_blob_storage_entries( self, context: AuthedServiceContext - ) -> list[BlobStorageEntry] | SyftError: - result = self.stash.get_all(context.credentials) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + ) -> list[BlobStorageEntry]: + return self.stash.get_all(context.credentials).unwrap() - @service_method(path="blob_storage.mount_azure", name="mount_azure") + @service_method( + path="blob_storage.mount_azure", name="mount_azure", unwrap_on_success=False + ) def mount_azure( self, context: AuthedServiceContext, @@ -64,7 +61,7 @@ def mount_azure( container_name: str, bucket_name: str, use_direct_connections: bool = True, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: # TODO: fix arguments remote_name = f"{account_name}{container_name}" @@ -83,24 +80,21 @@ def mount_azure( account_key=account_key, container_name=container_name, ) - res = self.remote_profile_stash.set(context.credentials, new_profile) - if res.is_err(): - return SyftError(message=res.value) - remote_profile = res.ok() - - context.node = cast(AbstractNode, context.node) + remote_profile = self.remote_profile_stash.set( + context.credentials, new_profile + ).unwrap() - seaweed_config = context.node.blob_storage_client.config + seaweed_config 
= context.server.blob_storage_client.config # we cache this here such that we can use it when reading a file from azure # from the remote_name seaweed_config.remote_profiles[remote_name] = remote_profile # TODO: possible wrap this in try catch - cfg = context.node.blob_store_config.client_config + cfg = context.server.blob_store_config.client_config init_request = requests.post(url=cfg.mount_url, json=args_dict) # nosec print(init_request.content) # TODO check return code - res = context.node.blob_storage_client.connect().client.list_objects( + res = context.server.blob_storage_client.connect().client.list_objects( Bucket=bucket_name ) # stdlib @@ -132,7 +126,7 @@ def mount_azure( type_=BlobFileType, bucket_name=bucket_name, ) - self.stash.set(context.credentials, blob_storage_entry) + self.stash.set(context.credentials, blob_storage_entry).unwrap() return SyftSuccess(message="Mounting Azure Successful!") @@ -141,16 +135,14 @@ def mount_azure( ) def get_files_from_bucket( self, context: AuthedServiceContext, bucket_name: str - ) -> list | SyftError: - result = self.stash.find_all(context.credentials, bucket_name=bucket_name) - if result.is_err(): - return result - bse_list = result.ok() - # stdlib + ) -> list: + bse_list = self.stash.get_all( + context.credentials, filters={"bucket_name": bucket_name} + ).unwrap() blob_files = [] for bse in bse_list: - self.stash.set(obj=bse, credentials=context.credentials) + # self.stash.set(obj=bse, credentials=context.credentials).unwrap() # We create an empty ActionObject and set its blob_storage_entry_id to bse.id # such that we can call reload_cache which creates # the BlobRetrieval (user needs permission to do this) @@ -161,8 +153,8 @@ def get_files_from_bucket( blob_file = ActionObject.empty() blob_file.syft_blob_storage_entry_id = bse.id blob_file.syft_client_verify_key = context.credentials - if context.node is not None: - blob_file.syft_node_location = context.node.id + if context.server is not None: + blob_file.syft_server_location = context.server.id blob_file.reload_cache() blob_files.append(blob_file.syft_action_data) @@ -171,21 +163,17 @@ def get_files_from_bucket( @service_method(path="blob_storage.get_by_uid", name="get_by_uid") def get_blob_storage_entry_by_uid( self, context: AuthedServiceContext, uid: UID - ) -> BlobStorageEntry | SyftError: - result = self.stash.get_by_uid(context.credentials, uid=uid) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + ) -> BlobStorageEntry: + return self.stash.get_by_uid(context.credentials, uid=uid).unwrap() @service_method(path="blob_storage.get_metadata", name="get_metadata") def get_blob_storage_metadata_by_uid( self, context: AuthedServiceContext, uid: UID - ) -> BlobStorageEntry | SyftError: - result = self.stash.get_by_uid(context.credentials, uid=uid) - if result.is_ok(): - blob_storage_entry = result.ok() - return blob_storage_entry.to(BlobStorageMetadata) - return SyftError(message=result.err()) + ) -> BlobStorageEntry: + blob_storage_entry = self.stash.get_by_uid( + context.credentials, uid=uid + ).unwrap() + return blob_storage_entry.to(BlobStorageMetadata) # TODO: replace name with `create_blob_retrieval` @service_method( @@ -193,89 +181,106 @@ def get_blob_storage_metadata_by_uid( name="read", roles=GUEST_ROLE_LEVEL, ) - def read( - self, context: AuthedServiceContext, uid: UID - ) -> BlobRetrieval | SyftError: - result = self.stash.get_by_uid(context.credentials, uid=uid) - if result.is_ok(): - obj: BlobStorageEntry | None = result.ok() - if obj is 
None: - return SyftError( - message=f"No blob storage entry exists for uid: {uid}, or you have no permissions to read it" - ) + def read(self, context: AuthedServiceContext, uid: UID) -> BlobRetrieval: + obj = self.stash.get_by_uid(context.credentials, uid=uid).unwrap() - context.node = cast(AbstractNode, context.node) - with context.node.blob_storage_client.connect() as conn: - res: BlobRetrieval = conn.read( - obj.location, obj.type_, bucket_name=obj.bucket_name - ) - res.syft_blob_storage_entry_id = uid - res.file_size = obj.file_size - return res - return SyftError(message=result.err()) + with context.server.blob_storage_client.connect() as conn: + res: BlobRetrieval = conn.read( + obj.location, obj.type_, bucket_name=obj.bucket_name + ) + res.syft_blob_storage_entry_id = uid + res.file_size = obj.file_size + return res - @service_method( - path="blob_storage.allocate", - name="allocate", - roles=GUEST_ROLE_LEVEL, - ) - def allocate( - self, context: AuthedServiceContext, obj: CreateBlobStorageEntry - ) -> BlobDepositType | SyftError: - context.node = cast(AbstractNode, context.node) - with context.node.blob_storage_client.connect() as conn: + @as_result(SyftException) + def _allocate( + self, + context: AuthedServiceContext, + obj: CreateBlobStorageEntry, + uploaded_by: SyftVerifyKey | None = None, + ) -> BlobDepositType: + """ + Allocate a secure location for the blob storage entry. + + If uploaded_by is None, the credentials of the context will be used. + + Args: + context (AuthedServiceContext): context + obj (CreateBlobStorageEntry): create blob parameters + uploaded_by (SyftVerifyKey | None, optional): Uploader credentials. + Can be used to upload on behalf of another user, needed for data migrations. + Defaults to None. + + Returns: + BlobDepositType: Blob deposit + """ + upload_credentials = uploaded_by or context.credentials + + with context.server.blob_storage_client.connect() as conn: secure_location = conn.allocate(obj) - - if isinstance(secure_location, SyftError): - return secure_location - blob_storage_entry = BlobStorageEntry( id=obj.id, location=secure_location, type_=obj.type_, mimetype=obj.mimetype, file_size=obj.file_size, - uploaded_by=context.credentials, + uploaded_by=upload_credentials, ) blob_deposit = conn.write(blob_storage_entry) - result = self.stash.set(context.credentials, blob_storage_entry) - if result.is_err(): - return SyftError(message=f"{result.err()}") + self.stash.set(context.credentials, blob_storage_entry).unwrap() return blob_deposit + @service_method( + path="blob_storage.allocate", + name="allocate", + roles=GUEST_ROLE_LEVEL, + ) + def allocate( + self, context: AuthedServiceContext, obj: CreateBlobStorageEntry + ) -> BlobDepositType: + return self._allocate(context, obj).unwrap() + + @service_method( + path="blob_storage.allocate_for_user", + name="allocate_for_user", + roles=ADMIN_ROLE_LEVEL, + ) + def allocate_for_user( + self, + context: AuthedServiceContext, + obj: CreateBlobStorageEntry, + uploaded_by: SyftVerifyKey, + ) -> BlobDepositType: + return self._allocate(context, obj, uploaded_by).unwrap() + @service_method( path="blob_storage.write_to_disk", name="write_to_disk", roles=GUEST_ROLE_LEVEL, + unwrap_on_success=False, ) def write_to_disk( self, context: AuthedServiceContext, uid: UID, data: bytes - ) -> SyftSuccess | SyftError: - result = self.stash.get_by_uid( + ) -> SyftSuccess: + obj = self.stash.get_by_uid( credentials=context.credentials, uid=uid, + ).unwrap( + public_message=f"No blob storage entry exists for uid: {uid}, or 
you have no permissions to read it" ) - if result.is_err(): - return SyftError(message=f"{result.err()}") - - obj: BlobStorageEntry | None = result.ok() - - if obj is None: - return SyftError( - message=f"No blob storage entry exists for uid: {uid}, or you have no permissions to read it" - ) try: Path(obj.location.path).write_bytes(data) return SyftSuccess(message="File successfully saved.") except Exception as e: - return SyftError(message=f"Failed to write object to disk: {e}") + raise SyftException(public_message=f"Failed to write object to disk: {e}") @service_method( path="blob_storage.mark_write_complete", name="mark_write_complete", roles=GUEST_ROLE_LEVEL, + unwrap_on_success=False, ) def mark_write_complete( self, @@ -283,64 +288,49 @@ def mark_write_complete( uid: UID, etags: list, no_lines: int | None = 0, - ) -> SyftError | SyftSuccess: - result = self.stash.get_by_uid( + ) -> SyftSuccess: + obj = self.stash.get_by_uid( credentials=context.credentials, uid=uid, + ).unwrap( + public_message=f"No blob storage entry exists for uid: {uid}, or you have no permissions to read it" ) - if result.is_err(): - return SyftError(message=f"{result.err()}") - - obj: BlobStorageEntry | None = result.ok() - - if obj is None: - return SyftError( - message=f"No blob storage entry exists for uid: {uid}, or you have no permissions to read it" - ) obj.no_lines = no_lines - result = self.stash.update( + self.stash.update( credentials=context.credentials, obj=obj, - ) - if result.is_err(): - return SyftError(message=f"{result.err()}") - context.node = cast(AbstractNode, context.node) - with context.node.blob_storage_client.connect() as conn: + ).unwrap() + + with context.server.blob_storage_client.connect() as conn: result = conn.complete_multipart_upload(obj, etags) return result - @service_method(path="blob_storage.delete", name="delete") - def delete( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - result = self.stash.get_by_uid(context.credentials, uid=uid) - if result.is_ok(): - obj = result.ok() - - if obj is None: - return SyftError( - message=f"No blob storage entry exists for uid: {uid}, or you have no permissions to read it" - ) - - context.node = cast(AbstractNode, context.node) - - try: - with context.node.blob_storage_client.connect() as conn: - file_unlinked_result = conn.delete(obj.location) - except Exception as e: - return SyftError(message=f"Failed to delete file: {e}") + @service_method(path="blob_storage.delete", name="delete", unwrap_on_success=False) + def delete(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: + obj = self.stash.get_by_uid(context.credentials, uid=uid).unwrap() - if isinstance(file_unlinked_result, SyftError): - return file_unlinked_result - blob_storage_entry_deleted = self.stash.delete( - context.credentials, UIDPartitionKey.with_obj(uid), has_permission=True + try: + with context.server.blob_storage_client.connect() as conn: + try: + conn.delete(obj.location) + except Exception as e: + raise SyftException( + public_message=f"Failed to delete blob file with id '{uid}'. Error: {e}" + ) + + self.stash.delete_by_uid( + context.credentials, uid, has_permission=True + ).unwrap() + except Exception as e: + raise SyftException( + public_message=f"Failed to delete blob file with id '{uid}'. Error: {e}" ) - if blob_storage_entry_deleted.is_ok(): - return file_unlinked_result - return SyftError(message=result.err()) + return SyftSuccess( + message=f"Blob storage entry with id '{uid}' deleted successfully." 
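Note on the pattern used throughout this file: the old `is_ok()`/`is_err()` branches that returned `SyftError` are replaced by `.unwrap()` calls plus raised `SyftException`s. The snippet below is a minimal, illustrative re-creation of that result-and-unwrap idea, assuming nothing beyond the standard library; Syft's real `Result`/`as_result` types live in `syft.types.result` and differ in detail.

```python
# Toy re-creation of the as_result / unwrap pattern -- illustrative only.
from collections.abc import Callable
from dataclasses import dataclass
from functools import wraps
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SyftException(Exception):
    # Stand-in for syft.types.errors.SyftException.
    def __init__(self, public_message: str) -> None:
        super().__init__(public_message)
        self.public_message = public_message


@dataclass
class Result(Generic[T]):
    value: T | None = None
    error: Exception | None = None

    def unwrap(self, public_message: str | None = None) -> T:
        # Re-raise the stored error (optionally with a friendlier message)
        # instead of forcing every caller to branch on ok/err.
        if self.error is not None:
            raise SyftException(public_message or str(self.error))
        return self.value  # type: ignore[return-value]


def as_result(*exc_types: type[Exception]) -> Callable:
    # Decorator: catch the listed exception types and box them in a Result.
    def decorator(func: Callable[..., T]) -> Callable[..., Result[T]]:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Result[T]:
            try:
                return Result(value=func(*args, **kwargs))
            except exc_types as exc:
                return Result(error=exc)

        return wrapper

    return decorator


@as_result(KeyError)
def get_entry(store: dict, uid: str) -> str:
    return store[uid]


print(get_entry({"abc": "blob"}, "abc").unwrap())      # blob
# get_entry({}, "missing").unwrap()  -> raises SyftException
```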
+ ) TYPE_TO_SERVICE[BlobStorageEntry] = BlobStorageEntry diff --git a/packages/syft/src/syft/service/blob_storage/stash.py b/packages/syft/src/syft/service/blob_storage/stash.py index 33456a67fe3..9cb002b7eb9 100644 --- a/packages/syft/src/syft/service/blob_storage/stash.py +++ b/packages/syft/src/syft/service/blob_storage/stash.py @@ -1,17 +1,9 @@ # relative from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionSettings +from ...store.db.stash import ObjectStash from ...types.blob_storage import BlobStorageEntry -@serializable() -class BlobStorageStash(BaseUIDStoreStash): - object_type = BlobStorageEntry - settings: PartitionSettings = PartitionSettings( - name=BlobStorageEntry.__canonical_name__, object_type=BlobStorageEntry - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +@serializable(canonical_name="BlobStorageSQLStash", version=1) +class BlobStorageStash(ObjectStash[BlobStorageEntry]): + pass diff --git a/packages/syft/src/syft/service/blob_storage/util.py b/packages/syft/src/syft/service/blob_storage/util.py new file mode 100644 index 00000000000..0c0c565f766 --- /dev/null +++ b/packages/syft/src/syft/service/blob_storage/util.py @@ -0,0 +1,27 @@ +# stdlib +from typing import Any + +# relative +from ...types.errors import SyftException +from ...types.result import as_result +from ...util.util import get_mb_serialized_size +from ..metadata.server_metadata import ServerMetadata +from ..metadata.server_metadata import ServerMetadataJSON + + +def min_size_for_blob_storage_upload( + metadata: ServerMetadata | ServerMetadataJSON, +) -> int: + return metadata.min_size_blob_storage_mb + + +@as_result(SyftException) +def can_upload_to_blob_storage( + data: Any, metadata: ServerMetadata | ServerMetadataJSON +) -> bool: + try: + return get_mb_serialized_size(data) >= min_size_for_blob_storage_upload( + metadata + ) + except TypeError as exc: + raise SyftException.from_exception(exc, public_message=str(exc)) diff --git a/packages/syft/src/syft/service/code/code_parse.py b/packages/syft/src/syft/service/code/code_parse.py index 1f04fb786c7..4cde893520d 100644 --- a/packages/syft/src/syft/service/code/code_parse.py +++ b/packages/syft/src/syft/service/code/code_parse.py @@ -19,7 +19,7 @@ def visit_Module(self, node: Module) -> Any: def visit_Call(self, node: Any) -> None: if isinstance(node.func, ast.Attribute): if ( - getattr(node.func.value, "id", None) == "domain" + getattr(node.func.value, "id", None) == "datasite" and node.func.attr == "launch_job" ): self.nested_calls.append(node.args[0].id) diff --git a/packages/syft/src/syft/service/code/status_service.py b/packages/syft/src/syft/service/code/status_service.py index dbbb028b845..e5a32f5c721 100644 --- a/packages/syft/src/syft/service/code/status_service.py +++ b/packages/syft/src/syft/service/code/status_service.py @@ -1,58 +1,44 @@ # stdlib # third party -from result import Result # relative -from ...node.credentials import SyftVerifyKey +from ...client.api import ServerIdentity from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ...store.document_store import UIDPartitionKey +from ...store.db.db import DBManager +from ...store.db.stash import 
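The new `blob_storage/util.py` helper gates blob-storage uploads on serialized size. A rough sketch of that check, with `pickle` standing in for Syft's own `get_mb_serialized_size` and a plain integer standing in for the `ServerMetadata` threshold:

```python
import pickle


def mb_serialized_size(data: object) -> float:
    # Stand-in for syft's get_mb_serialized_size: measure the serialized
    # payload rather than the in-memory object.
    return len(pickle.dumps(data)) / (1024 * 1024)


def can_upload_to_blob_storage(data: object, min_size_mb: int) -> bool:
    return mb_serialized_size(data) >= min_size_mb


print(can_upload_to_blob_storage(list(range(10)), min_size_mb=16))          # False
print(can_upload_to_blob_storage(bytes(32 * 1024 * 1024), min_size_mb=16))  # True
```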
ObjectStash +from ...types.syft_object import PartialSyftObject +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.uid import UID -from ...util.telemetry import instrument from ..context import AuthedServiceContext -from ..response import SyftError +from ..response import SyftSuccess from ..service import AbstractService from ..service import TYPE_TO_SERVICE from ..service import service_method from ..user.user_roles import ADMIN_ROLE_LEVEL from ..user.user_roles import GUEST_ROLE_LEVEL +from .user_code import ApprovalDecision from .user_code import UserCodeStatusCollection -@instrument -@serializable() -class StatusStash(BaseUIDStoreStash): - object_type = UserCodeStatusCollection - settings: PartitionSettings = PartitionSettings( - name=UserCodeStatusCollection.__canonical_name__, - object_type=UserCodeStatusCollection, - ) +@serializable(canonical_name="StatusSQLStash", version=1) +class StatusStash(ObjectStash[UserCodeStatusCollection]): + pass + - def __init__(self, store: DocumentStore) -> None: - super().__init__(store) - self.store = store - self.settings = self.settings - self._object_type = self.object_type +class CodeStatusUpdate(PartialSyftObject): + __canonical_name__ = "CodeStatusUpdate" + __version__ = SYFT_OBJECT_VERSION_1 - def get_by_uid( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[UserCodeStatusCollection, str]: - qks = QueryKeys(qks=[UIDPartitionKey.with_obj(uid)]) - return self.query_one(credentials=credentials, qks=qks) + id: UID + decision: ApprovalDecision -@instrument -@serializable() +@serializable(canonical_name="UserCodeStatusService", version=1) class UserCodeStatusService(AbstractService): - store: DocumentStore stash: StatusStash - def __init__(self, store: DocumentStore): - self.store = store + def __init__(self, store: DBManager): self.stash = StatusStash(store=store) @service_method(path="code_status.create", name="create", roles=ADMIN_ROLE_LEVEL) @@ -60,36 +46,56 @@ def create( self, context: AuthedServiceContext, status: UserCodeStatusCollection, - ) -> UserCodeStatusCollection | SyftError: - result = self.stash.set( + ) -> UserCodeStatusCollection: + res = self.stash.set( credentials=context.credentials, obj=status, - ) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + ).unwrap() + return res + + @service_method( + path="code_status.update", + name="update", + roles=ADMIN_ROLE_LEVEL, + autosplat=["code_update"], + unwrap_on_success=False, + ) + def update( + self, context: AuthedServiceContext, code_update: CodeStatusUpdate + ) -> SyftSuccess: + existing_status = self.stash.get_by_uid( + context.credentials, uid=code_update.id + ).unwrap() + server_identity = ServerIdentity.from_server(context.server) + existing_status.status_dict[server_identity] = code_update.decision + + res = self.stash.update(context.credentials, existing_status).unwrap() + return SyftSuccess(message="UserCode updated successfully", value=res) @service_method( path="code_status.get_by_uid", name="get_by_uid", roles=GUEST_ROLE_LEVEL ) def get_status( self, context: AuthedServiceContext, uid: UID - ) -> UserCodeStatusCollection | SyftError: + ) -> UserCodeStatusCollection: """Get the status of a user code item""" - result = self.stash.get_by_uid(context.credentials, uid=uid) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + return self.stash.get_by_uid(context.credentials, uid=uid).unwrap() @service_method(path="code_status.get_all", name="get_all", roles=ADMIN_ROLE_LEVEL) - def 
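`BlobStorageStash` and `StatusStash` both shrink to one-liners because the new `ObjectStash` base derives everything it needs from its type parameter. A toy in-memory version of that shape (the real class is SQL-backed and considerably richer):

```python
from typing import Generic, TypeVar
from uuid import UUID, uuid4

T = TypeVar("T")


class ToyObjectStash(Generic[T]):
    """In-memory stand-in for the SQL-backed ObjectStash base class."""

    def __init__(self) -> None:
        self._rows: dict[UUID, T] = {}

    def set(self, obj: T) -> T:
        self._rows[obj.id] = obj  # type: ignore[attr-defined]
        return obj

    def get_by_uid(self, uid: UUID) -> T | None:
        return self._rows.get(uid)

    def get_all(self) -> list[T]:
        return list(self._rows.values())


class BlobEntry:
    def __init__(self) -> None:
        self.id = uuid4()


# The concrete stash only has to name its row type -- mirroring
# `class BlobStorageStash(ObjectStash[BlobStorageEntry]): pass` above.
class ToyBlobStorageStash(ToyObjectStash[BlobEntry]):
    pass


stash = ToyBlobStorageStash()
entry = stash.set(BlobEntry())
assert stash.get_by_uid(entry.id) is entry
```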
get_all( - self, context: AuthedServiceContext - ) -> list[UserCodeStatusCollection] | SyftError: + def get_all(self, context: AuthedServiceContext) -> list[UserCodeStatusCollection]: """Get all user code item statuses""" - result = self.stash.get_all(context.credentials) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + return self.stash.get_all(context.credentials).unwrap() + + @service_method( + path="code_status.remove", + name="remove", + roles=ADMIN_ROLE_LEVEL, + unwrap_on_success=False, + ) + def remove(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: + """Remove a user code item status""" + self.stash.delete_by_uid(context.credentials, uid=uid).unwrap() + return SyftSuccess(message=f"{uid} successfully deleted", value=uid) TYPE_TO_SERVICE[UserCodeStatusCollection] = UserCodeStatusService diff --git a/packages/syft/src/syft/service/code/user_code.py b/packages/syft/src/syft/service/code/user_code.py index 9c680dd288d..c20f2c58234 100644 --- a/packages/syft/src/syft/service/code/user_code.py +++ b/packages/syft/src/syft/service/code/user_code.py @@ -4,16 +4,19 @@ # stdlib import ast from collections.abc import Callable -from collections.abc import Generator from copy import deepcopy import datetime from enum import Enum import hashlib import inspect from io import StringIO -import itertools +import json +import keyword +import logging import random +import re import sys +from textwrap import dedent from threading import Thread import time import traceback @@ -24,45 +27,53 @@ from typing import final # third party +from IPython.display import HTML +from IPython.display import Markdown from IPython.display import display +from pydantic import ValidationError from pydantic import field_validator -from result import Err from typing_extensions import Self # relative -from ...abstract_node import AbstractNode -from ...abstract_node import NodeType +from ...abstract_server import ServerSideType +from ...abstract_server import ServerType from ...client.api import APIRegistry -from ...client.api import NodeIdentity -from ...client.enclave_client import EnclaveMetadata -from ...node.credentials import SyftVerifyKey +from ...client.api import ServerIdentity from ...serde.deserialize import _deserialize from ...serde.serializable import serializable from ...serde.serialize import _serialize -from ...store.document_store import PartitionKey +from ...server.credentials import SyftVerifyKey from ...store.linked_obj import LinkedObject from ...types.datetime import DateTime +from ...types.dicttuple import DictTuple +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_migration import migrate +from ...types.syft_object import PartialSyftObject from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SYFT_OBJECT_VERSION_4 from ...types.syft_object import SyftObject from ...types.syncable_object import SyncableSyftObject from ...types.transforms import TransformContext -from ...types.transforms import add_node_uid_for_key +from ...types.transforms import add_server_uid_for_key +from ...types.transforms import drop from ...types.transforms import generate_id +from ...types.transforms import make_set_default from ...types.transforms import transform from ...types.uid import UID -from ...util import options -from ...util.colors import SURFACE +from ...util.decorators import deprecated from ...util.markdown import 
CodeMarkdown from ...util.markdown import as_markdown_code +from ...util.notebook_ui.styles import FONT_CSS +from ...util.util import prompt_warning_message +from ..action.action_endpoint import CustomEndpointActionObject from ..action.action_object import Action from ..action.action_object import ActionObject from ..context import AuthedServiceContext from ..dataset.dataset import Asset from ..job.job_stash import Job from ..output.output_service import ExecutionOutput -from ..output.output_service import OutputService +from ..policy.policy import Constant from ..policy.policy import CustomInputPolicy from ..policy.policy import CustomOutputPolicy from ..policy.policy import EmpyInputPolicy @@ -75,30 +86,38 @@ from ..policy.policy import filter_only_uids from ..policy.policy import init_policy from ..policy.policy import load_policy_code -from ..policy.policy_service import PolicyService +from ..policy.policy import partition_by_server from ..response import SyftError from ..response import SyftInfo -from ..response import SyftNotReady from ..response import SyftSuccess from ..response import SyftWarning -from .code_parse import GlobalsVisitor +from ..service import ServiceConfigRegistry +from ..user.user import UserView +from ..user.user_roles import ServiceRole from .code_parse import LaunchJobVisitor from .unparse import unparse +from .utils import check_for_global_vars +from .utils import parse_code from .utils import submit_subjobs_code +logger = logging.getLogger(__name__) + if TYPE_CHECKING: # relative from ...service.sync.diff_state import AttrDiff -UserVerifyKeyPartitionKey = PartitionKey(key="user_verify_key", type_=SyftVerifyKey) -CodeHashPartitionKey = PartitionKey(key="code_hash", type_=str) -ServiceFuncNamePartitionKey = PartitionKey(key="service_func_name", type_=str) -SubmitTimePartitionKey = PartitionKey(key="submit_time", type_=DateTime) - PyCodeObject = Any -@serializable() +def compile_byte_code(parsed_code: str) -> PyCodeObject | None: + try: + return compile(parsed_code, "", "exec") + except Exception as e: + print("WARNING: to compile byte code", e) + return None + + +@serializable(canonical_name="UserCodeStatus", version=1) class UserCodeStatus(Enum): PENDING = "pending" DENIED = "denied" @@ -109,28 +128,175 @@ def __hash__(self) -> int: @serializable() -class UserCodeStatusCollection(SyncableSyftObject): +class ApprovalDecision(SyftObject): + status: UserCodeStatus + reason: str | None = None + + __canonical_name__ = "ApprovalDecision" + __version__ = 1 + + @property + def reason_or_none(self) -> str | None: + # TODO: move to class creation + if self.reason == "": + return None + return self.reason + + +@serializable() +class UserCodeStatusCollectionV1(SyncableSyftObject): + """Currently this is a class that implements a mixed bag of two statusses + The first status is for a level 0 Request, which only uses the status dict + for denied decision. If there is no denied decision, it computes the status + by checking the backend for whether it has readable outputs. 
+ The second use case is for a level 2 Request, in this case we store the status + dict on the object and use it as is for both denied and approved status + """ + __canonical_name__ = "UserCodeStatusCollection" __version__ = SYFT_OBJECT_VERSION_1 __repr_attrs__ = ["approved", "status_dict"] - status_dict: dict[NodeIdentity, tuple[UserCodeStatus, str]] = {} + # this is empty in the case of l0 + status_dict: dict[ServerIdentity, tuple[UserCodeStatus, str]] = {} + user_code_link: LinkedObject + +@serializable() +class UserCodeStatusCollection(SyncableSyftObject): + """Currently this is a class that implements a mixed bag of two statusses + The first status is for a level 0 Request, which only uses the status dict + for denied decision. If there is no denied decision, it computes the status + by checking the backend for whether it has readable outputs. + The second use case is for a level 2 Request, in this case we store the status + dict on the object and use it as is for both denied and approved status + """ + + __canonical_name__ = "UserCodeStatusCollection" + __version__ = SYFT_OBJECT_VERSION_2 + + __repr_attrs__ = ["approved", "status_dict"] + + # this is empty in the case of l0 + status_dict: dict[ServerIdentity, ApprovalDecision] = {} + + user_code_link: LinkedObject + user_verify_key: SyftVerifyKey + + was_requested_on_lowside: bool = False + + # ugly and buggy optimization, remove at some point + _has_readable_outputs_cache: bool | None = None + + @property + def approved(self) -> bool: + # only use this on the client side, in this case we can use self.get_api instead + # of using the context + return self.get_is_approved(None) + + def get_is_approved(self, context: AuthedServiceContext | None) -> bool: + return self._compute_status(context) == UserCodeStatus.APPROVED + + def _compute_status( + self, context: AuthedServiceContext | None = None + ) -> UserCodeStatus: + if self.was_requested_on_lowside: + return self._compute_status_l0(context) + else: + return self._compute_status_l2() + + @property + def denied(self) -> bool: + # for denied we use the status dict both for level 0 and level 2 + return any( + approval_dec.status == UserCodeStatus.DENIED + for approval_dec in self.status_dict.values() + ) + + def _compute_status_l0( + self, context: AuthedServiceContext | None = None + ) -> UserCodeStatus: + # for l0, if denied in status dict, its denied + # if not, and it has readable outputs, its approved, + # else pending + + has_readable_outputs = self._has_readable_outputs(context) + + if self.denied: + if has_readable_outputs: + prompt_warning_message( + "This request already has results published to the data scientist. " + "They will still be able to access those results." 
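The two approval models described in the docstring above reduce to a small set of rules. The stand-alone restatement below uses simplified stand-ins for `UserCodeStatus` and the `_compute_status_l0`/`_compute_status_l2` helpers:

```python
from enum import Enum


class Status(Enum):
    PENDING = "pending"
    DENIED = "denied"
    APPROVED = "approved"


def combine_l2(decisions: list[Status]) -> Status:
    # Level-2 (high side): any denial wins, unanimous approval approves,
    # otherwise the request is still pending.
    if any(d is Status.DENIED for d in decisions):
        return Status.DENIED
    if all(d is Status.APPROVED for d in decisions):
        return Status.APPROVED
    return Status.PENDING


def combine_l0(denied: bool, has_readable_outputs: bool) -> Status:
    # Level-0 (low side): an explicit denial wins; otherwise published,
    # readable outputs imply approval; otherwise still pending.
    if denied:
        return Status.DENIED
    if has_readable_outputs:
        return Status.APPROVED
    return Status.PENDING


assert combine_l2([Status.APPROVED, Status.DENIED]) is Status.DENIED
assert combine_l2([Status.APPROVED, Status.APPROVED]) is Status.APPROVED
assert combine_l0(denied=False, has_readable_outputs=False) is Status.PENDING
```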
+ ) + return UserCodeStatus.DENIED + elif has_readable_outputs: + return UserCodeStatus.APPROVED + else: + return UserCodeStatus.PENDING + + def _compute_status_l2(self) -> UserCodeStatus: + any_denied = any( + approval_dec.status == UserCodeStatus.DENIED + for approval_dec in self.status_dict.values() + ) + all_approved = all( + approval_dec.status == UserCodeStatus.APPROVED + for approval_dec in self.status_dict.values() + ) + if any_denied: + return UserCodeStatus.DENIED + elif all_approved: + return UserCodeStatus.APPROVED + else: + return UserCodeStatus.PENDING + + def _has_readable_outputs( + self, context: AuthedServiceContext | None = None + ) -> bool: + if context is None: + # Clientside + api = self._get_api() + if self._has_readable_outputs_cache is None: + has_readable_outputs = api.output.has_output_read_permissions( + self.user_code_link.object_uid, self.user_verify_key + ) + self._has_readable_outputs_cache = has_readable_outputs + return has_readable_outputs + else: + return self._has_readable_outputs_cache + else: + # Serverside + return context.server.services.output.has_output_read_permissions( + context, self.user_code_link.object_uid, self.user_verify_key + ) + + @property + def first_denial_reason(self) -> str: + denial_reasons = [ + x.reason_or_none + for x in self.status_dict.values() + if x.status == UserCodeStatus.DENIED and x.reason_or_none is not None + ] + return next(iter(denial_reasons), "") + def syft_get_diffs(self, ext_obj: Any) -> list[AttrDiff]: # relative from ...service.sync.diff_state import AttrDiff diff_attrs = [] - status = list(self.status_dict.values())[0] - ext_status = list(ext_obj.status_dict.values())[0] + approval_decision = list(self.status_dict.values())[0] + ext_approval_decision = list(ext_obj.status_dict.values())[0] - if status != ext_status: + if ( + approval_decision.status != ext_approval_decision.status + or approval_decision.reason != ext_approval_decision.reason + ): diff_attr = AttrDiff( attr_name="status_dict", - low_attr=status, - high_attr=ext_status, + low_attr=approval_decision, + high_attr=ext_approval_decision, ) diff_attrs.append(diff_attr) return diff_attrs @@ -139,126 +305,190 @@ def __repr__(self) -> str: return str(self.status_dict) def _repr_html_(self) -> str: - string = f""" - + string = """

    User Code Status

    """ - for node_identity, (status, reason) in self.status_dict.items(): - node_name_str = f"{node_identity.node_name}" - uid_str = f"{node_identity.node_id}" - status_str = f"{status.value}" + for server_identity, approval_decision in self.status_dict.items(): + server_name_str = f"{server_identity.server_name}" + uid_str = f"{server_identity.server_id}" + status_str = f"{approval_decision.status.value}" string += f""" • UID: {uid_str}  - Node name: {node_name_str}  + Server name: {server_name_str}  Status: {status_str}; - Reason: {reason} + Reason: {approval_decision.reason}
    """ string += "

    " return string def __repr_syft_nested__(self) -> str: - string = "" - for node_identity, (status, reason) in self.status_dict.items(): - string += f"{node_identity.node_name}: {status}, {reason}
    " - return string + # this currently assumes that there is only one status + status_str = self._compute_status().value - def get_status_message(self) -> SyftSuccess | SyftNotReady | SyftError: - if self.approved: - return SyftSuccess(message=f"{type(self)} approved") - denial_string = "" - string = "" - for node_identity, (status, reason) in self.status_dict.items(): - denial_string += f"Code status on node '{node_identity.node_name}' is '{status}'. Reason: {reason}" - if not reason.endswith("."): - denial_string += "." - string += f"Code status on node '{node_identity.node_name}' is '{status}'." if self.denied: - return SyftError( - message=f"{type(self)} Your code cannot be run: {denial_string}" - ) - else: - return SyftNotReady( - message=f"{type(self)} Your code is waiting for approval. {string}" - ) + status_str = f"{status_str}: self.first_denial_reason" + return status_str - @property - def approved(self) -> bool: - return all(x == UserCodeStatus.APPROVED for x, _ in self.status_dict.values()) - - @property - def denied(self) -> bool: - for status, _ in self.status_dict.values(): - if status == UserCodeStatus.DENIED: - return True - return False - - def for_user_context(self, context: AuthedServiceContext) -> UserCodeStatus: - context.node = cast(AbstractNode, context.node) - if context.node.node_type == NodeType.ENCLAVE: - keys = {status for status, _ in self.status_dict.values()} - if len(keys) == 1 and UserCodeStatus.APPROVED in keys: - return UserCodeStatus.APPROVED - elif UserCodeStatus.PENDING in keys and UserCodeStatus.DENIED not in keys: - return UserCodeStatus.PENDING - elif UserCodeStatus.DENIED in keys: - return UserCodeStatus.DENIED - else: - raise Exception(f"Invalid types in {keys} for Code Submission") + def get_status_message_l2(self, context: AuthedServiceContext) -> str: + if self.get_is_approved(context): + return f"{type(self)} approved" + denial_string = "" + string = "" - elif context.node.node_type == NodeType.DOMAIN: - node_identity = NodeIdentity( - node_name=context.node.name, - node_id=context.node.id, - verify_key=context.node.signing_key.verify_key, + for server_identity, approval_decision in self.status_dict.items(): + denial_string += ( + f"Code status on server '{server_identity.server_name}' is '{approval_decision.status}'." + f" Reason: {approval_decision.reason}" ) - if node_identity in self.status_dict: - return self.status_dict[node_identity][0] - else: - raise Exception( - f"Code Object does not contain {context.node.name} Domain's data" - ) + if approval_decision.reason and not approval_decision.reason.endswith("."): # type: ignore + denial_string += "." + string += f"Code status on server '{server_identity.server_name}' is '{approval_decision.status}'." + if self.denied: + return f"{type(self)} Your code cannot be run: {denial_string}" else: - raise Exception( - f"Invalid Node Type for Code Submission:{context.node.node_type}" - ) + return f"{type(self)} Your code is waiting for approval. 
{string}" + @as_result(SyftException) def mutate( self, - value: tuple[UserCodeStatus, str], - node_name: str, - node_id: UID, + value: ApprovalDecision, + server_name: str, + server_id: UID, verify_key: SyftVerifyKey, - ) -> SyftError | Self: - node_identity = NodeIdentity( - node_name=node_name, node_id=node_id, verify_key=verify_key + ) -> Self: + server_identity = ServerIdentity( + server_name=server_name, server_id=server_id, verify_key=verify_key ) status_dict = self.status_dict - if node_identity in status_dict: - status_dict[node_identity] = value + if server_identity in status_dict: + status_dict[server_identity] = value self.status_dict = status_dict return self else: - return SyftError( - message="Cannot Modify Status as the Domain's data is not included in the request" + raise SyftException( + public_message="Cannot Modify Status as the Datasite's data is not included in the request" ) - def get_sync_dependencies(self, api: Any = None) -> list[UID]: + def get_sync_dependencies(self, context: AuthedServiceContext) -> list[UID]: return [self.user_code_link.object_uid] +@migrate(UserCodeStatusCollectionV1, UserCodeStatusCollection) +def migrate_user_code_status_to_v2() -> list[Callable]: + def update_statusdict(context: TransformContext) -> TransformContext: + res = {} + if not isinstance(context.obj, UserCodeStatusCollectionV1): + raise Exception("Invalid object type") + if context.output is None: + raise Exception("Output is None") + for server_identity, (status, reason) in context.obj.status_dict.items(): + res[server_identity] = ApprovalDecision(status=status, reason=reason) + context.output["status_dict"] = res + return context + + def set_user_verify_key(context: TransformContext) -> TransformContext: + authed_context = context.to_server_context() + if not isinstance(context.obj, UserCodeStatusCollectionV1): + raise Exception("Invalid object type") + if context.output is None: + raise Exception("Output is None") + user_code = context.obj.user_code_link.resolve_with_context( + authed_context + ).unwrap() + context.output["user_verify_key"] = user_code.user_verify_key + return context + + return [ + make_set_default("was_requested_on_lowside", False), + make_set_default("_has_readable_outputs_cache", None), + update_statusdict, + set_user_verify_key, + ] + + +@serializable() +class UserCodeV1(SyncableSyftObject): + # version + __canonical_name__ = "UserCode" + __version__ = SYFT_OBJECT_VERSION_1 + + id: UID + server_uid: UID | None = None + user_verify_key: SyftVerifyKey + raw_code: str + input_policy_type: type[InputPolicy] | UserPolicy + input_policy_init_kwargs: dict[Any, Any] | None = None + input_policy_state: bytes = b"" + output_policy_type: type[OutputPolicy] | UserPolicy + output_policy_init_kwargs: dict[Any, Any] | None = None + output_policy_state: bytes = b"" + parsed_code: str + service_func_name: str + unique_func_name: str + user_unique_func_name: str + code_hash: str + signature: inspect.Signature + status_link: LinkedObject | None = None + input_kwargs: list[str] + submit_time: DateTime | None = None + # tracks if the code calls datasite.something, variable is set during parsing + uses_datasite: bool = False + + nested_codes: dict[str, tuple[LinkedObject, dict]] | None = {} + worker_pool_name: str | None = None + origin_server_side_type: ServerSideType + l0_deny_reason: str | None = None + _has_output_read_permissions_cache: bool | None = None + + __table_coll_widths__ = [ + "min-content", + "auto", + "auto", + "auto", + "auto", + "auto", + "auto", + "auto", + 
] + + __attr_searchable__: ClassVar[list[str]] = [ + "user_verify_key", + "service_func_name", + "code_hash", + ] + __attr_unique__: ClassVar[list[str]] = [] + __repr_attrs__: ClassVar[list[str]] = [ + "service_func_name", + "input_owners", + "code_status", + "worker_pool_name", + "l0_deny_reason", + "raw_code", + ] + + __exclude_sync_diff_attrs__: ClassVar[list[str]] = [ + "server_uid", + "code_status", + "input_policy_type", + "input_policy_init_kwargs", + "input_policy_state", + "output_policy_type", + "output_policy_init_kwargs", + "output_policy_state", + ] + + @serializable() class UserCode(SyncableSyftObject): # version __canonical_name__ = "UserCode" - __version__ = SYFT_OBJECT_VERSION_4 + __version__ = SYFT_OBJECT_VERSION_2 id: UID - node_uid: UID | None = None + server_uid: UID | None = None user_verify_key: SyftVerifyKey raw_code: str input_policy_type: type[InputPolicy] | UserPolicy @@ -275,11 +505,25 @@ class UserCode(SyncableSyftObject): signature: inspect.Signature status_link: LinkedObject input_kwargs: list[str] - enclave_metadata: EnclaveMetadata | None = None submit_time: DateTime | None = None - uses_domain: bool = False # tracks if the code calls domain.something, variable is set during parsing + # tracks if the code calls datasite.something, variable is set during parsing + uses_datasite: bool = False + nested_codes: dict[str, tuple[LinkedObject, dict]] | None = {} worker_pool_name: str | None = None + origin_server_side_type: ServerSideType + # l0_deny_reason: str | None = None + + __table_coll_widths__ = [ + "min-content", + "auto", + "auto", + "auto", + "auto", + "auto", + "auto", + "auto", + ] __attr_searchable__: ClassVar[list[str]] = [ "user_verify_key", @@ -290,12 +534,15 @@ class UserCode(SyncableSyftObject): __repr_attrs__: ClassVar[list[str]] = [ "service_func_name", "input_owners", - "code_status", + "status", "worker_pool_name", + # "l0_deny_reason", + "raw_code", ] __exclude_sync_diff_attrs__: ClassVar[list[str]] = [ - "node_uid", + "server_uid", + "code_status", "input_policy_type", "input_policy_init_kwargs", "input_policy_state", @@ -304,6 +551,14 @@ class UserCode(SyncableSyftObject): "output_policy_state", ] + @field_validator("service_func_name", mode="after") + @classmethod + def service_func_name_is_valid(cls, value: str) -> str: + _ = is_valid_usercode_name( + value + ).unwrap() # this will throw an error if not valid + return value + def __setattr__(self, key: str, value: Any) -> None: # Get the attribute from the class, it might be a descriptor or None attr = getattr(type(self), key, None) @@ -317,14 +572,14 @@ def __setattr__(self, key: str, value: Any) -> None: return super().__setattr__(key, value) def _coll_repr_(self) -> dict[str, Any]: - status = [status for status, _ in self.status.status_dict.values()][0].value - if status == UserCodeStatus.PENDING.value: + status = self.status._compute_status() + if status == UserCodeStatus.PENDING: badge_color = "badge-purple" - elif status == UserCodeStatus.APPROVED.value: + elif status == UserCodeStatus.APPROVED: badge_color = "badge-green" else: badge_color = "badge-red" - status_badge = {"value": status, "type": badge_color} + status_badge = {"value": status.value, "type": badge_color} return { "Input Policy": self.input_policy_type.__canonical_name__, "Output Policy": self.output_policy_type.__canonical_name__, @@ -338,27 +593,33 @@ def _coll_repr_(self) -> dict[str, Any]: } @property - def status(self) -> UserCodeStatusCollection | SyftError: - # Clientside only - res = self.status_link.resolve 
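`service_func_name` is now validated at the model level via `is_valid_usercode_name`. A stripped-down version of the checks involved (the real helper additionally rejects names that collide with existing service paths):

```python
import keyword


def validate_func_name(func_name: str) -> str:
    # Illustrative stand-in for is_valid_usercode_name.
    if not func_name:
        raise ValueError("Function name cannot be empty")
    if func_name == "_":
        raise ValueError("Cannot use an anonymous function as a syft function")
    if not func_name.isidentifier():
        raise ValueError("Function name must be a valid Python identifier")
    if keyword.iskeyword(func_name):
        raise ValueError("Function name is a reserved Python keyword")
    return func_name


print(validate_func_name("compute_mean"))  # compute_mean
# validate_func_name("class")  -> ValueError: reserved Python keyword
```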
- return res + def is_l0_deployment(self) -> bool: + return self.origin_server_side_type == ServerSideType.LOW_SIDE - def get_status( - self, context: AuthedServiceContext - ) -> UserCodeStatusCollection | SyftError: - status = self.status_link.resolve_with_context(context) - if status.is_err(): - return SyftError(message=status.err()) - return status.ok() + @property + def is_l2_deployment(self) -> bool: + return self.origin_server_side_type == ServerSideType.HIGH_SIDE @property - def is_enclave_code(self) -> bool: - return self.enclave_metadata is not None + def user(self) -> UserView: + api = self.get_api() + return api.services.user.get_by_verify_key(self.user_verify_key) + + @property + def status(self) -> UserCodeStatusCollection: + # only use this client side + return self.get_status(None).unwrap() + + @as_result(SyftException) + def get_status( + self, context: AuthedServiceContext | None + ) -> UserCodeStatusCollection: + return self.status_link.resolve_dynamic(context, load_cached=False) @property def input_owners(self) -> list[str] | None: if self.input_policy_init_kwargs is not None: - return [str(x.node_name) for x in self.input_policy_init_kwargs.keys()] + return [str(x.server_name) for x in self.input_policy_init_kwargs.keys()] return None @property @@ -375,7 +636,7 @@ def output_reader_names(self) -> list[SyftVerifyKey] | None: ): keys = self.output_policy_init_kwargs.get("output_readers", []) inpkey2name = { - x.verify_key: x.node_name for x in self.input_policy_init_kwargs + x.verify_key: x.server_name for x in self.input_policy_init_kwargs } return [inpkey2name[k] for k in keys if k in inpkey2name] return None @@ -387,26 +648,22 @@ def output_readers(self) -> list[SyftVerifyKey] | None: return None @property - def code_status(self) -> list: - status_list = [] - for node_view, (status, _) in self.status.status_dict.items(): - status_list.append( - f"Node: {node_view.node_name}, Status: {status.value}", - ) - return status_list + def code_status_str(self) -> str: + return f"Status: {self.status._compute_status().value}" @property def input_policy(self) -> InputPolicy | None: - if not self.status.approved: - return None - return self._get_input_policy() + if self.status.approved or self.input_policy_type.has_safe_serde: + return self._get_input_policy() + return None def get_input_policy(self, context: AuthedServiceContext) -> InputPolicy | None: - status = self.get_status(context) - if not status.approved: - return None - return self._get_input_policy() + status = self.get_status(context).unwrap() + if status.get_is_approved(context) or self.input_policy_type.has_safe_serde: + return self._get_input_policy() + return None + # TODO: Change the return type to follow the enum pattern + input policy def _get_input_policy(self) -> InputPolicy | None: if len(self.input_policy_state) == 0: input_policy = None @@ -416,12 +673,12 @@ def _get_input_policy(self) -> InputPolicy | None: and self.input_policy_init_kwargs is not None ): # TODO: Tech Debt here - node_view_workaround = False - for k, _ in self.input_policy_init_kwargs.items(): - if isinstance(k, NodeIdentity): - node_view_workaround = True + server_view_workaround = False + for k in self.input_policy_init_kwargs.keys(): + if isinstance(k, ServerIdentity): + server_view_workaround = True - if node_view_workaround: + if server_view_workaround: input_policy = self.input_policy_type( init_kwargs=self.input_policy_init_kwargs ) @@ -448,8 +705,10 @@ def _get_input_policy(self) -> InputPolicy | None: print(f"Failed to deserialize 
custom input policy state. {e}") return None + @as_result(SyftException) def is_output_policy_approved(self, context: AuthedServiceContext) -> bool: - return self.get_status(context).approved + status = self.get_status(context).unwrap() + return status.approved @input_policy.setter # type: ignore def input_policy(self, value: Any) -> None: # type: ignore @@ -460,20 +719,20 @@ def input_policy(self, value: Any) -> None: # type: ignore else: raise Exception(f"You can't set {type(value)} as input_policy_state") + def get_output_policy(self, context: AuthedServiceContext) -> OutputPolicy | None: + status = self.get_status(context).unwrap() + if status.get_is_approved(context) or self.output_policy_type.has_safe_serde: + return self._get_output_policy() + return None + @property def output_policy(self) -> OutputPolicy | None: # type: ignore - if not self.status.approved: - return None - return self._get_output_policy() - - def get_output_policy(self, context: AuthedServiceContext) -> OutputPolicy | None: - if not self.get_status(context).approved: - return None - return self._get_output_policy() + if self.status.approved or self.output_policy_type.has_safe_serde: + return self._get_output_policy() + return None + # FIX: change return type like _get_input_policy def _get_output_policy(self) -> OutputPolicy | None: - # if not self.status.approved: - # return None if len(self.output_policy_state) == 0: output_policy = None if isinstance(self.output_policy_type, type) and issubclass( @@ -492,7 +751,7 @@ def _get_output_policy(self) -> OutputPolicy | None: ) if output_policy is not None: - output_policy.syft_node_location = self.syft_node_location + output_policy.syft_server_location = self.syft_server_location output_policy.syft_client_verify_key = self.syft_client_verify_key output_blob = _serialize(output_policy, to_bytes=True) self.output_policy_state = output_blob @@ -501,11 +760,26 @@ def _get_output_policy(self) -> OutputPolicy | None: raise Exception("output_policy is None during init") try: - return _deserialize(self.output_policy_state, from_bytes=True) + output_policy = _deserialize(self.output_policy_state, from_bytes=True) + output_policy.syft_server_location = self.syft_server_location + output_policy.syft_client_verify_key = self.syft_client_verify_key + return output_policy except Exception as e: print(f"Failed to deserialize custom output policy state. {e}") return None + @property + def output_policy_id(self) -> UID | None: + if self.output_policy_init_kwargs is not None: + return self.output_policy_init_kwargs.get("id", None) + return None + + @property + def input_policy_id(self) -> UID | None: + if self.input_policy_init_kwargs is not None: + return self.input_policy_init_kwargs.get("id", None) + return None + @output_policy.setter # type: ignore def output_policy(self, value: Any) -> None: # type: ignore if isinstance(value, OutputPolicy): @@ -516,116 +790,161 @@ def output_policy(self, value: Any) -> None: # type: ignore raise Exception(f"You can't set {type(value)} as output_policy_state") @property - def output_history(self) -> list[ExecutionOutput] | SyftError: - api = APIRegistry.api_for(self.syft_node_location, self.syft_client_verify_key) - if api is None: - return SyftError( - message=f"Can't access the api. 
You must login to {self.syft_node_location}" - ) + def output_history(self) -> list[ExecutionOutput]: + api = self.get_api() return api.services.output.get_by_user_code_id(self.id) + @as_result(SyftException) def get_output_history( self, context: AuthedServiceContext - ) -> list[ExecutionOutput] | SyftError: - if not self.get_status(context).approved: - return SyftError( - message="Execution denied, Please wait for the code to be approved" - ) - node = cast(AbstractNode, context.node) - output_service = cast(OutputService, node.get_service("outputservice")) - return output_service.get_by_user_code_id(context, self.id) + ) -> list[ExecutionOutput]: + return context.server.services.output.get_by_user_code_id(context, self.id) - def apply_output( + @as_result(SyftException) + def store_execution_output( self, context: AuthedServiceContext, outputs: Any, job_id: UID | None = None, input_ids: dict[str, UID] | None = None, - ) -> ExecutionOutput | SyftError: + ) -> ExecutionOutput: + is_admin = context.role == ServiceRole.ADMIN + output_policy = self.get_output_policy(context) - if output_policy is None: - return SyftError( - message="You must wait for the output policy to be approved" + + if output_policy is None and not is_admin: + raise SyftException( + public_message="You must wait for the output policy to be approved" ) output_ids = filter_only_uids(outputs) - context.node = cast(AbstractNode, context.node) - output_service = context.node.get_service("outputservice") - output_service = cast(OutputService, output_service) - execution_result = output_service.create( + return context.server.services.output.create( context, user_code_id=self.id, output_ids=output_ids, executing_user_verify_key=self.user_verify_key, job_id=job_id, - output_policy_id=output_policy.id, + output_policy_id=self.output_policy_id, input_ids=input_ids, ) - if isinstance(execution_result, SyftError): - return execution_result - - return execution_result @property def byte_code(self) -> PyCodeObject | None: return compile_byte_code(self.parsed_code) - def get_results(self) -> Any: - # relative - from ...client.api import APIRegistry - - api = APIRegistry.api_for(self.node_uid, self.syft_client_verify_key) - if api is None: - return SyftError( - message=f"Can't access the api. 
You must login to {self.node_uid}" - ) - return api.services.code.get_results(self) + @property + def assets(self) -> DictTuple[str, Asset]: + if not self.input_policy_init_kwargs: + return DictTuple({}) + + api = self._get_api() + + # get a flat dict of all inputs + all_inputs = {} + inputs = self.input_policy_init_kwargs or {} + for vals in inputs.values(): + # Only keep UIDs, filter out Constants + all_inputs.update({k: v for k, v in vals.items() if isinstance(v, UID)}) + + # map the action_id to the asset + used_assets: list[Asset] = [] + for kwarg_name, action_id in all_inputs.items(): + assets = api.dataset.get_assets_by_action_id(uid=action_id) + if isinstance(assets, SyftError): + return assets + if assets: + asset = assets[0] + asset._kwarg_name = kwarg_name + used_assets.append(asset) + + asset_dict = {asset._kwarg_name: asset for asset in used_assets} + return DictTuple(asset_dict) @property - def assets(self) -> list[Asset]: - # relative - from ...client.api import APIRegistry + def action_objects(self) -> dict: + if not self.input_policy_init_kwargs: + return {} + + all_inputs = {} + for vals in self.input_policy_init_kwargs.values(): + all_inputs.update(vals) + + # filter out the assets + action_objects = { + arg_name: str(uid) + for arg_name, uid in all_inputs.items() + if arg_name not in self.assets.keys() and isinstance(uid, UID) + } - api = APIRegistry.api_for(self.node_uid, self.syft_client_verify_key) - if api is None: - return SyftError(message=f"You must login to {self.node_uid}") + return action_objects - inputs: Generator = (x for x in range(0)) # create an empty generator - if self.input_policy_init_kwargs is not None: - inputs = ( - uids - for node_identity, uids in self.input_policy_init_kwargs.items() - if node_identity.node_name == api.node_name - ) + @property + def constants(self) -> dict[str, Constant]: + if not self.input_policy_init_kwargs: + return {} + + all_inputs = {} + for vals in self.input_policy_init_kwargs.values(): + all_inputs.update(vals) + + # filter out the assets + constants = { + arg_name: item + for arg_name, item in all_inputs.items() + if isinstance(item, Constant) + } - all_assets = [] - for uid in itertools.chain.from_iterable(x.values() for x in inputs): - if isinstance(uid, UID): - assets = api.services.dataset.get_assets_by_action_id(uid) - if not isinstance(assets, list): - return assets + return constants - all_assets += assets - return all_assets + @property + def inputs(self) -> dict: + inputs = {} + + assets = self.assets + action_objects = self.action_objects + constants = self.constants + if action_objects: + inputs["action_objects"] = action_objects + if assets: + inputs["assets"] = { + argument: asset._get_dict_for_user_code_repr() + for argument, asset in assets.items() + } + if self.constants: + inputs["constants"] = { + argument: constant._get_dict_for_user_code_repr() + for argument, constant in constants.items() + } + return inputs - def get_sync_dependencies(self, api: Any = None) -> list[UID] | SyftError: + @property + def _inputs_json(self) -> str | SyftError: + input_str = json.dumps(self.inputs, indent=2) + return input_str + + def get_sync_dependencies(self, context: AuthedServiceContext) -> list[UID]: dependencies = [] if self.nested_codes is not None: - nested_code_ids = [link.object_uid for link in self.nested_codes.values()] + nested_code_ids = [ + link.object_uid for link, _ in self.nested_codes.values() + ] dependencies.extend(nested_code_ids) + if self.status_link is not None: + 
dependencies.append(self.status_link.object_uid) + return dependencies @property - def unsafe_function(self) -> Callable | None: + def run(self) -> Callable | None: warning = SyftWarning( message="This code was submitted by a User and could be UNSAFE." ) display(warning) # 🟡 TODO: re-use the same infrastructure as the execute_byte_code function - def wrapper(*args: Any, **kwargs: Any) -> Callable | SyftError: + def wrapper(*args: Any, **kwargs: Any) -> Callable: try: filtered_kwargs = {} on_private_data, on_mock_data = False, False @@ -660,10 +979,17 @@ def wrapper(*args: Any, **kwargs: Any) -> Callable | SyftError: # return the results return result except Exception as e: - return SyftError(f"Failed to run unsafe_function. Error: {e}") + raise SyftException( + public_message=f"Failed to execute 'run'. Error: {e}" + ) return wrapper + @property + @deprecated(reason="Use 'run' instead") + def unsafe_function(self) -> Callable | None: + return self.run + def _inner_repr(self, level: int = 0) -> str: shared_with_line = "" if len(self.output_readers) > 0 and self.output_reader_names is not None: @@ -673,12 +999,28 @@ def _inner_repr(self, level: int = 0) -> str: f"outputs are *shared* with the owners of {owners_string} once computed" ) + constants_str = "" + args = [ + x + for _dict in self.input_policy_init_kwargs.values() # type: ignore + for x in _dict.values() + ] + constants = [x for x in args if isinstance(x, Constant)] + constants_str = "\n\t".join([f"{x.kw}: {x.val}" for x in constants]) + + # indent all lines except the first one + inputs_str = "\n".join( + [f" {line}" for line in self._inputs_json.split("\n")] + ).lstrip() + md = f"""class UserCode id: UID = {self.id} service_func_name: str = {self.service_func_name} shareholders: list = {self.input_owners} - status: list = {self.code_status} + status: str = {self.code_status_str} + {constants_str} {shared_with_line} + inputs: dict = {inputs_str} code: {self.raw_code} @@ -693,7 +1035,7 @@ def _inner_repr(self, level: int = 0) -> str: [f"{' '*level}{substring}" for substring in md.split("\n")[:-1]] ) if self.nested_codes is not None: - for _, (obj, _) in self.nested_codes.items(): + for obj, _ in self.nested_codes.values(): code = obj.resolve md += "\n" md += code._inner_repr(level=level + 1) @@ -703,6 +1045,55 @@ def _inner_repr(self, level: int = 0) -> str: def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: return as_markdown_code(self._inner_repr()) + def _ipython_display_(self, level: int = 0) -> None: + tabs = " " * level + shared_with_line = "" + if len(self.output_readers) > 0 and self.output_reader_names is not None: + owners_string = " and ".join([f"*{x}*" for x in self.output_reader_names]) + shared_with_line += ( + f"

    {tabs}Custom Policy: " + f"outputs are *shared* with the owners of {owners_string} once computed

    " + ) + constants_str = "" + args = [ + x + for _dict in self.input_policy_init_kwargs.values() # type: ignore + for x in _dict.values() + ] + constants = [x for x in args if isinstance(x, Constant)] + constants_str = "\n ".join([f"{x.kw}: {x.val}" for x in constants]) + # indent all lines except the first one + repr_str = f""" + +
    +

    {tabs}UserCode

    +

    {tabs}id: UID = {self.id}

    +

    {tabs}service_func_name: str = {self.service_func_name}

    +

    {tabs}shareholders: list = {self.input_owners}

    +

    {tabs}status: str = {self.code_status_str}

    + {tabs}{constants_str} + {tabs}{shared_with_line} +

    {tabs}inputs: dict =

    {self._inputs_json}

    +

    {tabs}code:

    +
    + """ + md = "\n".join( + [f"{' '*level}{substring}" for substring in self.raw_code.split("\n")[:-1]] + ) + display(HTML(repr_str), Markdown(as_markdown_code(md))) + if self.nested_codes is not None and self.nested_codes != {}: + nested_line_html = f""" +
    +

    {tabs}Nested Requests:

    +
    + """ + display(HTML(nested_line_html)) + for obj, _ in self.nested_codes.values(): + code = obj.resolve + code._ipython_display_(level=level + 1) + @property def show_code(self) -> CodeMarkdown: return CodeMarkdown(self.raw_code) @@ -717,12 +1108,23 @@ def show_code_cell(self) -> None: ip = get_ipython() ip.set_next_input(warning_message + self.raw_code) + def __call__(self, *args: Any, **kwargs: Any) -> Any: + api = self._get_api() + return getattr(api.code, self.service_func_name)(*args, **kwargs) + + +class UserCodeUpdate(PartialSyftObject): + __canonical_name__ = "UserCodeUpdate" + __version__ = SYFT_OBJECT_VERSION_1 + + l0_deny_reason: str | None + @serializable(without=["local_function"]) class SubmitUserCode(SyftObject): # version __canonical_name__ = "SubmitUserCode" - __version__ = SYFT_OBJECT_VERSION_4 + __version__ = SYFT_OBJECT_VERSION_1 id: UID | None = None # type: ignore[assignment] code: str @@ -734,11 +1136,18 @@ class SubmitUserCode(SyftObject): output_policy_init_kwargs: dict[Any, Any] | None = {} local_function: Callable | None = None input_kwargs: list[str] - enclave_metadata: EnclaveMetadata | None = None worker_pool_name: str | None = None __repr_attrs__ = ["func_name", "code"] + @field_validator("func_name", mode="after") + @classmethod + def func_name_is_valid(cls, value: str) -> str: + _ = is_valid_usercode_name( + value + ).unwrap() # this will throw an error if not valid + return value + @field_validator("output_policy_init_kwargs", mode="after") @classmethod def add_output_policy_ids(cls, values: Any) -> Any: @@ -750,20 +1159,28 @@ def add_output_policy_ids(cls, values: Any) -> Any: def kwargs(self) -> dict[Any, Any] | None: return self.input_policy_init_kwargs - def __call__(self, *args: Any, syft_no_node: bool = False, **kwargs: Any) -> Any: - if syft_no_node: + def __call__( + self, + *args: Any, + syft_no_server: bool = False, + blocking: bool = False, + time_alive: int | None = None, + n_consumers: int = 2, + **kwargs: Any, + ) -> Any: + if syft_no_server: return self.local_call(*args, **kwargs) - return self._ephemeral_node_call(*args, **kwargs) + return self._ephemeral_server_call( + *args, + time_alive=time_alive, + n_consumers=n_consumers, + blocking=blocking, + **kwargs, + ) def local_call(self, *args: Any, **kwargs: Any) -> Any: # only run this on the client side if self.local_function: - tree = ast.parse(inspect.getsource(self.local_function)) - - # check there are no globals - v = GlobalsVisitor() - v.visit(tree) - # filtered_args = [] filtered_kwargs = {} # for arg in args: @@ -781,81 +1198,74 @@ def local_call(self, *args: Any, **kwargs: Any) -> Any: else: raise NotImplementedError - def _ephemeral_node_call( + def _ephemeral_server_call( self, - time_alive: int | None = None, - n_consumers: int | None = None, *args: Any, + time_alive: int | None = None, + n_consumers: int = 2, + blocking: bool = False, **kwargs: Any, ) -> Any: # relative - from ... 
import _orchestra + from ...orchestra import Orchestra # Right now we only create a number of workers # In the future we might need to have the same pools/images as well - if n_consumers is None: + if time_alive is None and not blocking: print( SyftInfo( - message="Creating a node with n_consumers=2 (the default value)" - ) - ) - n_consumers = 2 - - if time_alive is None and "blocking" in kwargs and not kwargs["blocking"]: - print( - SyftInfo( - message="Closing the node after time_alive=300 (the default value)" + message="Closing the server after time_alive=300 (the default value)" ) ) time_alive = 300 # This could be changed given the work on containers - ep_node = _orchestra().launch( - name=f"ephemeral_node_{self.func_name}_{random.randint(a=0, b=10000)}", # nosec + ep_server = Orchestra.launch( + name=f"ephemeral_server_{self.func_name}_{random.randint(a=0, b=10000)}", # nosec reset=True, create_producer=True, n_consumers=n_consumers, deploy_to="python", ) - ep_client = ep_node.login(email="info@openmined.org", password="changethis") # nosec + ep_client = ep_server.login( + email="info@openmined.org", + password="changethis", + ) # nosec self.input_policy_init_kwargs = cast(dict, self.input_policy_init_kwargs) - for node_id, obj_dict in self.input_policy_init_kwargs.items(): - # api = APIRegistry.api_for( - # node_uid=node_id.node_id, user_verify_key=node_id.verify_key - # ) - api = APIRegistry.get_by_recent_node_uid(node_uid=node_id.node_id) + for server_id, obj_dict in self.input_policy_init_kwargs.items(): + api = APIRegistry.get_by_recent_server_uid(server_uid=server_id.server_id) if api is None: - return SyftError( - f"Can't access the api. You must login to {node_id.node_id}" + raise SyftException( + public_message=f"Can't access the api. You must login to {server_id.server_id}" ) # Creating TwinObject from the ids of the kwargs # Maybe there are some corner cases where this is not enough # And need only ActionObjects # Also, this works only on the assumption that all inputs # are ActionObjects, which might change in the future - for _, id in obj_dict.items(): - mock_obj = api.services.action.get_mock(id) - if isinstance(mock_obj, SyftError): - data_obj = api.services.action.get(id) - if isinstance(data_obj, SyftError): - return SyftError( - message="You do not have access to object you want \ + for id in obj_dict.values(): + try: + mock_obj = api.services.action.get_mock(id) + data_obj = mock_obj + except SyftException: + try: + data_obj = api.services.action.get(id) + except SyftException: + raise SyftException( + public_message="You do not have access to object you want \ to use, or the private object does not have mock \ - data. Contact the Node Admin." + data. Contact the Server Admin." 
) - else: - data_obj = mock_obj + data_obj.id = id new_obj = ActionObject.from_obj( data_obj.syft_action_data, id=id, - syft_node_location=node_id.node_id, - syft_client_verify_key=node_id.verify_key, + syft_server_location=server_id.server_id, + syft_client_verify_key=server_id.verify_key, ) - res = ep_client.api.services.action.set(new_obj) - if isinstance(res, SyftError): - return res + new_obj.send(ep_client) new_syft_func = deepcopy(self) @@ -863,20 +1273,22 @@ def _ephemeral_node_call( new_syft_func.worker_pool_name = None # We will look for subjos, and if we find any will submit them - # to the ephemeral_node + # to the ephemeral_server submit_subjobs_code(self, ep_client) ep_client.code.request_code_execution(new_syft_func) ep_client.requests[-1].approve(approve_nested=True) func_call = getattr(ep_client.code, new_syft_func.func_name) + # TODO: fix properly + func_call.unwrap_on_success = True result = func_call(*args, **kwargs) def task() -> None: if "blocking" in kwargs and not kwargs["blocking"]: time.sleep(time_alive) - print(SyftInfo(message="Landing the ephmeral node...")) - ep_node.land() - print(SyftInfo(message="Node Landed!")) + print(SyftInfo(message="Landing the ephmeral server...")) + ep_server.land() + print(SyftInfo(message="Server Landed!")) thread = Thread(target=task) thread.start() @@ -890,6 +1302,37 @@ def input_owner_verify_keys(self) -> list[str] | None: return None +def get_code_hash(code: str, user_verify_key: SyftVerifyKey) -> str: + full_str = f"{code}{user_verify_key}" + return hashlib.sha256(full_str.encode()).hexdigest() + + +@as_result(SyftException) +def is_valid_usercode_name(func_name: str) -> Any: + if len(func_name) == 0: + raise SyftException(public_message="Function name cannot be empty") + if func_name == "_": + raise SyftException( + public_message="Cannot use anonymous function as syft function" + ) + if not str.isidentifier(func_name): + raise SyftException( + public_message="Function name must be a valid Python identifier" + ) + if keyword.iskeyword(func_name): + raise SyftException(public_message="Function name is a reserved python keyword") + + service_method_path = f"code.{func_name}" + if ServiceConfigRegistry.path_exists(service_method_path): + raise SyftException( + public_message=( + f"Could not create syft function with name {func_name}:" + f" a service with the same name already exists" + ) + ) + return True + + class ArgumentType(Enum): REAL = 1 MOCK = 2 @@ -923,19 +1366,30 @@ def syft_function_single_use( ) +def replace_func_name(src: str, new_func_name: str) -> str: + pattern = r"\bdef\s+(\w+)\s*\(" + replacement = f"def {new_func_name}(" + new_src = re.sub(pattern, replacement, src, count=1) + return new_src + + def syft_function( input_policy: InputPolicy | UID | None = None, output_policy: OutputPolicy | UID | None = None, share_results_with_owners: bool = False, worker_pool_name: str | None = None, + name: str | None = None, ) -> Callable: if input_policy is None: input_policy = EmpyInputPolicy() + init_input_kwargs = None if isinstance(input_policy, CustomInputPolicy): input_policy_type = SubmitUserPolicy.from_obj(input_policy) + init_input_kwargs = partition_by_server(input_policy.init_kwargs) # type: ignore else: input_policy_type = type(input_policy) + init_input_kwargs = getattr(input_policy, "init_kwargs", {}) if output_policy is None: output_policy = SingleExecutionExactOutput() @@ -946,18 +1400,46 @@ def syft_function( output_policy_type = type(output_policy) def decorator(f: Any) -> SubmitUserCode: - res = 
SubmitUserCode( - code=inspect.getsource(f), - func_name=f.__name__, - signature=inspect.signature(f), - input_policy_type=input_policy_type, - input_policy_init_kwargs=getattr(input_policy, "init_kwargs", {}), - output_policy_type=output_policy_type, - output_policy_init_kwargs=getattr(output_policy, "init_kwargs", {}), - local_function=f, - input_kwargs=f.__code__.co_varnames[: f.__code__.co_argcount], - worker_pool_name=worker_pool_name, - ) + try: + code = dedent(inspect.getsource(f)) + + if name is not None: + fname = name + code = replace_func_name(code, fname) + else: + fname = f.__name__ + + input_kwargs = f.__code__.co_varnames[: f.__code__.co_argcount] + + parse_user_code( + raw_code=code, + func_name=fname, + original_func_name=f.__name__, + function_input_kwargs=input_kwargs, + ) + + res = SubmitUserCode( + code=code, + func_name=fname, + signature=inspect.signature(f), + input_policy_type=input_policy_type, + input_policy_init_kwargs=init_input_kwargs, + output_policy_type=output_policy_type, + output_policy_init_kwargs=getattr(output_policy, "init_kwargs", {}), + local_function=f, + input_kwargs=input_kwargs, + worker_pool_name=worker_pool_name, + ) + + except ValidationError as e: + errors = e.errors() + msg = "Failed to create syft function, encountered validation errors:\n" + for error in errors: + msg += f"\t{error['msg']}\n" + raise SyftException(public_message=msg) + + except SyftException as se: + raise SyftException(public_message=f"Error when parsing the code: {se}") if share_results_with_owners and res.output_policy_init_kwargs is not None: res.output_policy_init_kwargs["output_readers"] = ( @@ -990,26 +1472,20 @@ def generate_unique_func_name(context: TransformContext) -> TransformContext: return context -def process_code( - context: TransformContext, +def parse_user_code( raw_code: str, func_name: str, original_func_name: str, - policy_input_kwargs: list[str], function_input_kwargs: list[str], ) -> str: - tree = ast.parse(raw_code) + # parse the code, check for syntax errors and if there are global variables + tree: ast.Module = parse_code(raw_code=raw_code) + check_for_global_vars(code_tree=tree) - # check there are no globals - v = GlobalsVisitor() - v.visit(tree) - - f = tree.body[0] + f: ast.stmt = tree.body[0] f.decorator_list = [] call_args = function_input_kwargs - if "domain" in function_input_kwargs and context.output is not None: - context.output["uses_domain"] = True call_stmt_keywords = [ast.keyword(arg=i, value=[ast.Name(id=i)]) for i in call_args] call_stmt = ast.Assign( targets=[ast.Name(id="result")], @@ -1034,18 +1510,37 @@ def process_code( return unparse(wrapper_function) +def process_code( + context: TransformContext, + raw_code: str, + func_name: str, + original_func_name: str, + policy_input_kwargs: list[str], + function_input_kwargs: list[str], +) -> str: + if "datasite" in function_input_kwargs and context.output is not None: + context.output["uses_datasite"] = True + + return parse_user_code( + raw_code=raw_code, + func_name=func_name, + original_func_name=original_func_name, + function_input_kwargs=function_input_kwargs, + ) + + def new_check_code(context: TransformContext) -> TransformContext: # TODO: remove this tech debt hack if context.output is None: return context input_kwargs = context.output["input_policy_init_kwargs"] - node_view_workaround = False + server_view_workaround = False for k in input_kwargs.keys(): - if isinstance(k, NodeIdentity): - node_view_workaround = True + if isinstance(k, ServerIdentity): + 
server_view_workaround = True - if not node_view_workaround: + if not server_view_workaround: input_keys = list(input_kwargs.keys()) else: input_keys = [] @@ -1066,39 +1561,30 @@ def new_check_code(context: TransformContext) -> TransformContext: def locate_launch_jobs(context: TransformContext) -> TransformContext: - if context.node is None: - raise ValueError(f"context {context}'s node is None") + if context.server is None: + raise ValueError(f"context {context}'s server is None") if context.output is not None: nested_codes = {} tree = ast.parse(context.output["raw_code"]) - # look for domain arg - if "domain" in [arg.arg for arg in tree.body[0].args.args]: + # look for datasite arg + if "datasite" in [arg.arg for arg in tree.body[0].args.args]: v = LaunchJobVisitor() v.visit(tree) nested_calls = v.nested_calls - user_code_service = context.node.get_service("usercodeService") for call in nested_calls: - user_codes = user_code_service.get_by_service_name(context, call) - if isinstance(user_codes, SyftError): - raise Exception(user_codes.message) + user_codes = context.server.services.user_code.get_by_service_name( + context, call + ) # TODO: Not great user_code = user_codes[-1] user_code_link = LinkedObject.from_obj( - user_code, node_uid=context.node.id + user_code, server_uid=context.server.id ) nested_codes[call] = (user_code_link, user_code.nested_codes) context.output["nested_codes"] = nested_codes return context -def compile_byte_code(parsed_code: str) -> PyCodeObject | None: - try: - return compile(parsed_code, "", "exec") - except Exception as e: - print("WARNING: to compile byte code", e) - return None - - def compile_code(context: TransformContext) -> TransformContext: if context.output is None: return context @@ -1115,10 +1601,12 @@ def compile_code(context: TransformContext) -> TransformContext: def hash_code(context: TransformContext) -> TransformContext: if context.output is None: return context + if not isinstance(context.obj, SubmitUserCode): + return context code = context.output["code"] context.output["raw_code"] = code - code_hash = hashlib.sha256(code.encode("utf8")).hexdigest() + code_hash = get_code_hash(code, context.credentials) context.output["code_hash"] = code_hash return context @@ -1134,14 +1622,11 @@ def add_credentials(context: TransformContext) -> TransformContext: def check_policy(policy: Any, context: TransformContext) -> TransformContext: - if context.node is not None: - policy_service = context.node.get_service(PolicyService) + if context.server is not None: if isinstance(policy, SubmitUserPolicy): policy = policy.to(UserPolicy, context=context) elif isinstance(policy, UID): - policy = policy_service.get_policy_by_uid(context, policy) - if policy.is_ok(): - policy = policy.ok() + policy = context.server.services.policy.get_policy_by_uid(context, policy) return policy @@ -1168,53 +1653,66 @@ def create_code_status(context: TransformContext) -> TransformContext: # relative from .user_code_service import UserCodeService - if context.node is None: - raise ValueError(f"{context}'s node is None") + if context.server is None: + raise ValueError(f"{context}'s server is None") if context.output is None: return context - input_keys = list(context.output["input_policy_init_kwargs"].keys()) + # # Low side requests have a computed status + # if + # return context + + was_requested_on_lowside = ( + context.server.server_side_type == ServerSideType.LOW_SIDE + ) + code_link = LinkedObject.from_uid( context.output["id"], UserCode, service_type=UserCodeService, - 
node_uid=context.node.id, + server_uid=context.server.id, ) - if context.node.node_type == NodeType.DOMAIN: - node_identity = NodeIdentity( - node_name=context.node.name, - node_id=context.node.id, - verify_key=context.node.signing_key.verify_key, + if context.server.server_type == ServerType.DATASITE: + server_identity = ServerIdentity( + server_name=context.server.name, + server_id=context.server.id, + verify_key=context.server.signing_key.verify_key, ) status = UserCodeStatusCollection( - status_dict={node_identity: (UserCodeStatus.PENDING, "")}, + status_dict={ + server_identity: ApprovalDecision(status=UserCodeStatus.PENDING) + }, user_code_link=code_link, + user_verify_key=context.credentials, + was_requested_on_lowside=was_requested_on_lowside, ) - elif context.node.node_type == NodeType.ENCLAVE: - status_dict = {key: (UserCodeStatus.PENDING, "") for key in input_keys} + elif context.server.server_type == ServerType.ENCLAVE: + input_keys = list(context.output["input_policy_init_kwargs"].keys()) + status_dict = { + key: ApprovalDecision(status=UserCodeStatus.PENDING) for key in input_keys + } status = UserCodeStatusCollection( status_dict=status_dict, user_code_link=code_link, + user_verify_key=context.credentials, ) else: raise NotImplementedError( - f"Invalid node type:{context.node.node_type} for code submission" + f"Invalid server type:{context.server.server_type} for code submission" ) - res = context.node.get_service("usercodestatusservice").create(context, status) + res = context.server.services.user_code_status.create(context, status) # relative from .status_service import UserCodeStatusService - # TODO error handling in transform functions - if not isinstance(res, SyftError): - context.output["status_link"] = LinkedObject.from_uid( - res.id, - UserCodeStatusCollection, - service_type=UserCodeStatusService, - node_uid=context.node.id, - ) + context.output["status_link"] = LinkedObject.from_uid( + res.id, + UserCodeStatusCollection, + service_type=UserCodeStatusService, + server_uid=context.server.id, + ) return context @@ -1226,15 +1724,23 @@ def add_submit_time(context: TransformContext) -> TransformContext: def set_default_pool_if_empty(context: TransformContext) -> TransformContext: if ( - context.node + context.server and context.output and context.output.get("worker_pool_name", None) is None ): - default_pool = context.node.get_default_worker_pool() + default_pool = context.server.get_default_worker_pool().unwrap() context.output["worker_pool_name"] = default_pool.name return context +def set_origin_server_side_type(context: TransformContext) -> TransformContext: + if context.server and context.output: + context.output["origin_server_side_type"] = ( + context.server.server_side_type or ServerSideType.HIGH_SIDE + ) + return context + + @transform(SubmitUserCode, UserCode) def submit_user_code_to_user_code() -> list[Callable]: return [ @@ -1247,9 +1753,10 @@ def submit_user_code_to_user_code() -> list[Callable]: locate_launch_jobs, add_credentials_for_key("user_verify_key"), create_code_status, - add_node_uid_for_key("node_uid"), + add_server_uid_for_key("server_uid"), add_submit_time, set_default_pool_if_empty, + set_origin_server_side_type, ] @@ -1257,7 +1764,7 @@ def submit_user_code_to_user_code() -> list[Callable]: class UserCodeExecutionResult(SyftObject): # version __canonical_name__ = "UserCodeExecutionResult" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID user_code_id: UID @@ -1267,7 +1774,7 @@ class 
UserCodeExecutionResult(SyftObject): @serializable() -class UserCodeExecutionOutput(SyftObject): +class UserCodeExecutionOutputV1(SyftObject): # version __canonical_name__ = "UserCodeExecutionOutput" __version__ = SYFT_OBJECT_VERSION_1 @@ -1279,49 +1786,41 @@ class UserCodeExecutionOutput(SyftObject): result: Any = None +@serializable() +class UserCodeExecutionOutput(SyftObject): + # version + __canonical_name__ = "UserCodeExecutionOutput" + __version__ = SYFT_OBJECT_VERSION_2 + + id: UID + user_code_id: UID + errored: bool = False + stdout: str + stderr: str + result: Any = None + safe_error_message: str | None = None + + class SecureContext: def __init__(self, context: AuthedServiceContext) -> None: - node = context.node - if node is None: - raise ValueError(f"{context}'s node is None") - - job_service = node.get_service("jobservice") - action_service = node.get_service("actionservice") - # user_service = node.get_service("userservice") + server = context.server + if server is None: + raise ValueError(f"{context}'s server is None") def job_set_n_iters(n_iters: int) -> None: job = context.job job.n_iters = n_iters - job_service.update(context, job) + server.services.job.update(context, job) def job_set_current_iter(current_iter: int) -> None: job = context.job job.current_iter = current_iter - job_service.update(context, job) + server.services.job.update(context, job) def job_increase_current_iter(current_iter: int) -> None: job = context.job job.current_iter += current_iter - job_service.update(context, job) - - # def set_api_registry(): - # user_signing_key = [ - # x.signing_key - # for x in user_service.stash.partition.data.values() - # if x.verify_key == context.credentials - # ][0] - # data_protcol = get_data_protocol() - # user_api = node.get_api(context.credentials, data_protcol.latest_version) - # user_api.signing_key = user_signing_key - # # We hardcode a python connection here since we have access to the node - # # TODO: this is not secure - # user_api.connection = PythonConnection(node=node) - - # APIRegistry.set_api_for( - # node_uid=node.id, - # user_verify_key=context.credentials, - # api=user_api, - # ) + server.services.job.update(context, job) def launch_job(func: UserCode, **kwargs: Any) -> Job | None: # relative @@ -1329,24 +1828,23 @@ def launch_job(func: UserCode, **kwargs: Any) -> Job | None: kw2id = {} for k, v in kwargs.items(): value = ActionObject.from_obj(v) - ptr = action_service._set(context, value) - ptr = ptr.ok() + ptr = server.services.action.set_result_to_store( + value, context, has_result_read_permission=False + ).unwrap() kw2id[k] = ptr.id try: # TODO: check permissions here action = Action.syft_function_action_from_kwargs_and_id(kw2id, func.id) - job = node.add_action_to_queue( + return server.add_action_to_queue( action=action, credentials=context.credentials, parent_job_id=context.job_id, has_execute_permissions=True, worker_pool_name=func.worker_pool_name, - ) + ).unwrap() # # set api in global scope to enable using .get(), .wait()) # set_api_registry() - - return job except Exception as e: print(f"ERROR {e}") raise ValueError(f"error while launching job:\n{e}") @@ -1372,7 +1870,7 @@ def execute_byte_code( safe_context = SecureContext(context=context) - class LocalDomainClient: + class LocalDatasiteClient: def init_progress(self, n_iters: int) -> None: if safe_context.is_async: safe_context.job_set_current_iter(0) @@ -1420,9 +1918,10 @@ def to_str(arg: Any) -> str: new_args = [to_str(arg) for arg in args] new_str = sep.join(new_args) + end - if 
context.node is not None: - log_service = context.node.get_service("LogService") - log_service.append(context=context, uid=log_id, new_str=new_str) + if context.server is not None: + context.server.services.log.append( + context=context, uid=log_id, new_str=new_str + ) time = datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S") return __builtin__.print( f"{time} FUNCTION LOG ({job_id}):", @@ -1435,8 +1934,13 @@ def to_str(arg: Any) -> str: else: print = original_print - if code_item.uses_domain: - kwargs["domain"] = LocalDomainClient() + if code_item.uses_datasite: + kwargs["datasite"] = LocalDatasiteClient() + + job_log_id = context.job.log_id if context.job else None + for k, v in kwargs.items(): + if isinstance(v, CustomEndpointActionObject): + kwargs[k] = v.add_context(context=context, log_id=job_log_id) stdout = StringIO() stderr = StringIO() @@ -1447,38 +1951,54 @@ def to_str(arg: Any) -> str: # We only need access to local kwargs _locals = {"kwargs": kwargs} _globals = {} + if code_item.nested_codes is not None: for service_func_name, (linked_obj, _) in code_item.nested_codes.items(): - code_obj = linked_obj.resolve_with_context(context=context) - if isinstance(code_obj, Err): - raise Exception(code_obj.err()) - _globals[service_func_name] = code_obj.ok() + _globals[service_func_name] = linked_obj.resolve_with_context( + context=context + ).unwrap() + _globals["print"] = print exec(code_item.parsed_code, _globals, _locals) # nosec evil_string = f"{code_item.unique_func_name}(**kwargs)" + + result_message = "" + try: result = eval(evil_string, _globals, _locals) # nosec + errored = False except Exception as e: + errored = True error_msg = traceback_from_error(e, code_item) + if context.job is not None: time = datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S") - original_print( - f"{time} EXCEPTION LOG ({job_id}):\n{error_msg}", file=sys.stderr - ) - if context.node is not None: + logger.error(f"{time} EXCEPTION LOG ({job_id}):\n{error_msg}") + else: + # for local execution + time = datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S") + logger.error(f"{time} EXCEPTION LOG:\n{error_msg}\n") + + if ( + context.server is not None + and context.job is not None + and context.job.log_id is not None + ): log_id = context.job.log_id - log_service = context.node.get_service("LogService") - log_service.append(context=context, uid=log_id, new_err=error_msg) + context.server.services.log.append( + context=context, uid=log_id, new_err=error_msg + ) result_message = ( f"Exception encountered while running {code_item.service_func_name}" - ", please contact the Node Admin for more info." + ", please contact the Server Admin for more info." 
) + if context.dev_mode: result_message += error_msg - result = Err(result_message) + result = SyftError(message=result_message) # reset print print = original_print @@ -1492,8 +2012,9 @@ def to_str(arg: Any) -> str: stdout=str(stdout.getvalue()), stderr=str(stderr.getvalue()), result=result, + errored=errored, + safe_error_message=result_message, ) - except Exception as e: # stdlib @@ -1501,6 +2022,7 @@ def to_str(arg: Any) -> str: # print("execute_byte_code failed", e, file=stderr_) print(traceback.format_exc()) print("execute_byte_code failed", e) + raise finally: sys.stdout = stdout_ sys.stderr = stderr_ @@ -1540,17 +2062,27 @@ def load_approved_policy_code( user_code_items: list[UserCode], context: AuthedServiceContext | None ) -> Any: """Reload the policy code in memory for user code that is approved.""" - try: - for user_code in user_code_items: + for user_code in user_code_items: + try: if context is None: status = user_code.status else: - status = user_code.get_status(context) + status = user_code.get_status(context).unwrap() + except SyftException: + display( + SyftWarning( + message=f"Failed to load UserCode {user_code.id.no_dash} {user_code.service_func_name=}" + ) + ) + continue - if status.approved: - if isinstance(user_code.input_policy_type, UserPolicy): - load_policy_code(user_code.input_policy_type) - if isinstance(user_code.output_policy_type, UserPolicy): - load_policy_code(user_code.output_policy_type) - except Exception as e: - raise Exception(f"Failed to load code: {user_code}: {e}") + if status.approved: + if isinstance(user_code.input_policy_type, UserPolicy): + load_policy_code(user_code.input_policy_type) + if isinstance(user_code.output_policy_type, UserPolicy): + load_policy_code(user_code.output_policy_type) + + +@migrate(UserCodeV1, UserCode) +def migrate_user_code_to_v2() -> list[Callable]: + return [drop("l0_deny_reason"), drop("_has_output_read_permissions_cache")] diff --git a/packages/syft/src/syft/service/code/user_code_service.py b/packages/syft/src/syft/service/code/user_code_service.py index 0b543ba266d..5ba617ef62e 100644 --- a/packages/syft/src/syft/service/code/user_code_service.py +++ b/packages/syft/src/syft/service/code/user_code_service.py @@ -1,37 +1,30 @@ # stdlib +from enum import Enum from typing import Any from typing import TypeVar -from typing import cast - -# third party -from result import Err -from result import Ok -from result import Result # relative -from ...abstract_node import AbstractNode -from ...abstract_node import NodeType -from ...client.enclave_client import EnclaveClient from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException from ...store.linked_obj import LinkedObject -from ...types.cache_object import CachedSyftObject +from ...types.errors import SyftException +from ...types.result import Err +from ...types.result import as_result from ...types.twin_object import TwinObject from ...types.uid import UID -from ...util.telemetry import instrument from ..action.action_object import ActionObject from ..action.action_permissions import ActionObjectPermission from ..action.action_permissions import ActionPermission from ..context import AuthedServiceContext -from ..network.routes import route_to_connection from ..output.output_service import ExecutionOutput +from ..policy.policy import InputPolicyValidEnum from ..policy.policy 
import OutputPolicy from ..request.request import Request from ..request.request import SubmitRequest +from ..request.request import SyncedUserCodeStatusChange from ..request.request import UserCodeStatusChange -from ..request.request_service import RequestService -from ..response import SyftError -from ..response import SyftNotReady from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES @@ -44,48 +37,129 @@ from .user_code import SubmitUserCode from .user_code import UserCode from .user_code import UserCodeStatus +from .user_code import UserCodeUpdate +from .user_code import get_code_hash from .user_code import load_approved_policy_code from .user_code_stash import UserCodeStash -@instrument -@serializable() +class HasCodePermissionEnum(str, Enum): + ACCEPTED = "Has permission" + DENIED = "Permission denied" + + +class IsExecutionAllowedEnum(str, Enum): + ALLOWED = "Execution allowed" + NO_PERMISSION = "Execution denied: You do not have permission to execute code" + NOT_APPROVED = "Execution denied: Your code is waiting for approval" + OUTPUT_POLICY_NONE = "Execution denied: Output policy is not set" + INVALID_OUTPUT_POLICY = "Execution denied: Output policy is not valid" + OUTPUT_POLICY_NOT_APPROVED = "Execution denied: Output policy not approved" + + +@serializable(canonical_name="UserCodeService", version=1) class UserCodeService(AbstractService): - store: DocumentStore stash: UserCodeStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = UserCodeStash(store=store) - @service_method(path="code.submit", name="submit", roles=GUEST_ROLE_LEVEL) + @service_method( + path="code.submit", + name="submit", + roles=GUEST_ROLE_LEVEL, + unwrap_on_success=False, + ) def submit( - self, context: AuthedServiceContext, code: UserCode | SubmitUserCode - ) -> UserCode | SyftError: + self, context: AuthedServiceContext, code: SubmitUserCode + ) -> SyftSuccess: """Add User Code""" - result = self._submit(context=context, code=code) - if result.is_err(): - return SyftError(message=str(result.err())) - return SyftSuccess(message="User Code Submitted", require_api_update=True) + user_code = self._submit(context, code, exists_ok=False).unwrap() + return SyftSuccess( + message="User Code Submitted", require_api_update=True, value=user_code + ) + @as_result(SyftException) def _submit( - self, context: AuthedServiceContext, code: UserCode | SubmitUserCode - ) -> Result[UserCode, str]: - if not isinstance(code, UserCode): - code = code.to(UserCode, context=context) # type: ignore[unreachable] + self, + context: AuthedServiceContext, + submit_code: SubmitUserCode, + exists_ok: bool = False, + ) -> UserCode: + """ + Submit a UserCode. + + If exists_ok is True, the function will return the existing code if it exists. + + Args: + context (AuthedServiceContext): context + submit_code (SubmitUserCode): UserCode to submit + exists_ok (bool, optional): If True, return the existing code if it exists. + If false, existing codes returns Err. Defaults to False. 
+ + Returns: + Result[UserCode, str]: New UserCode or error + """ + try: + existing_code = self.stash.get_by_code_hash( + context.credentials, + code_hash=get_code_hash(submit_code.code, context.credentials), + ).unwrap() + # no exception, code exists + if exists_ok: + return existing_code + else: + raise SyftException( + public_message="UserCode with this code already exists" + ) + except NotFoundException: + pass - result = self.stash.set(context.credentials, code) - return result + code = submit_code.to(UserCode, context=context) + result = self._post_user_code_transform_ops(context, code) - @service_method(path="code.delete", name="delete", roles=ADMIN_ROLE_LEVEL) - def delete( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - """Delete User Code""" - result = self.stash.delete_by_uid(context.credentials, uid) if result.is_err(): - return SyftError(message=str(result.err())) - return SyftSuccess(message="User Code Deleted") + # if the validation fails, we should remove the user code status + # and code version to prevent dangling status + root_context = AuthedServiceContext( + credentials=context.server.verify_key, server=context.server + ) + + if code.status_link is not None: + _ = context.server.services.user_code_status.remove( + root_context, code.status_link.object_uid + ) + + # result.unwrap() will raise any exceptions from post_user_code_transform_ops + result.unwrap() + + return self.stash.set(context.credentials, code).unwrap() + + @service_method( + path="code.update", + name="update", + roles=ADMIN_ROLE_LEVEL, + autosplat=["code_update"], + unwrap_on_success=False, + ) + def update( + self, + context: AuthedServiceContext, + code_update: UserCodeUpdate, + ) -> SyftSuccess: + updated_code = self.stash.update(context.credentials, code_update).unwrap() + return SyftSuccess(message="UserCode updated successfully", value=updated_code) + + @service_method( + path="code.delete", + name="delete", + roles=ADMIN_ROLE_LEVEL, + unwrap_on_success=False, + ) + def delete(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: + """Delete User Code""" + self.stash.delete_by_uid(context.credentials, uid).unwrap() + return SyftSuccess(message=f"User Code {uid} deleted", value=uid) @service_method( path="code.get_by_service_func_name", @@ -94,76 +168,64 @@ def delete( ) def get_by_service_name( self, context: AuthedServiceContext, service_func_name: str - ) -> list[UserCode] | SyftError: - result = self.stash.get_by_service_func_name( + ) -> list[UserCode]: + return self.stash.get_by_service_func_name( context.credentials, service_func_name=service_func_name - ) - if result.is_err(): - return SyftError(message=str(result.err())) - return result.ok() + ).unwrap() - def _request_code_execution( - self, - context: AuthedServiceContext, - code: SubmitUserCode, - reason: str | None = "", - ) -> Request | SyftError: - user_code: UserCode = code.to(UserCode, context=context) - return self._request_code_execution_inner(context, user_code, reason) - - def _request_code_execution_inner( + # TODO: Add usercode errors + @as_result(SyftException) + def _post_user_code_transform_ops( self, context: AuthedServiceContext, user_code: UserCode, - reason: str | None = "", - ) -> Request | SyftError: + ) -> UserCode: if user_code.output_readers is None: - return SyftError( - message=f"there is no verified output readers for {user_code}" + raise SyftException( + public_message=f"there is no verified output readers for {user_code}" ) if user_code.input_owner_verify_keys is 
None: - return SyftError( - message=f"there is no verified input owners for {user_code}" + raise SyftException( + public_message=f"there is no verified input owners for {user_code}" ) if not all( x in user_code.input_owner_verify_keys for x in user_code.output_readers ): - raise ValueError("outputs can only be distributed to input owners") - - # check if the code with the same name and content already exists in the stash - - find_results = self.stash.get_by_code_hash( - context.credentials, code_hash=user_code.code_hash - ) - if find_results.is_err(): - return SyftError(message=str(find_results.err())) - find_results = find_results.ok() - - if find_results is not None: - return SyftError( - message="The code to be submitted (name and content) already exists" + raise SyftException( + public_message="outputs can only be distributed to input owners" ) - - context.node = cast(AbstractNode, context.node) - - worker_pool_service = context.node.get_service("SyftWorkerPoolService") - pool_result = worker_pool_service._get_worker_pool( + context.server.services.syft_worker_pool._get_worker_pool( context, pool_name=user_code.worker_pool_name, ) - if isinstance(pool_result, SyftError): - return pool_result + # Create a code history + context.server.services.code_history.submit_version( + context=context, code=user_code + ) - result = self.stash.set(context.credentials, user_code) - if result.is_err(): - return SyftError(message=str(result.err())) + return user_code - # Create a code history - code_history_service = context.node.get_service("codehistoryservice") - result = code_history_service.submit_version(context=context, code=user_code) - if isinstance(result, SyftError): - return result + @as_result(SyftException) + def _request_code_execution( + self, + context: AuthedServiceContext, + user_code: UserCode, + reason: str | None = "", + ) -> Request: + # Cannot make multiple requests for the same code + # FIX: Change requestservice result type + existing_requests = context.server.services.request.get_by_usercode_id( + context, user_code.id + ) + + if len(existing_requests) > 0: + raise SyftException( + public_message=( + f"Request {existing_requests[0].id} already exists for this UserCode." + f" Please use the existing request, or submit a new UserCode to create a new request." + ) + ) # Users that have access to the output also have access to the code item if user_code.output_readers is not None: @@ -174,22 +236,48 @@ def _request_code_execution_inner( ] ) - code_link = LinkedObject.from_obj(user_code, node_uid=context.node.id) + code_link = LinkedObject.from_obj(user_code, server_uid=context.server.id) - CODE_EXECUTE = UserCodeStatusChange( - value=UserCodeStatus.APPROVED, - linked_obj=user_code.status_link, - linked_user_code=code_link, - ) - changes = [CODE_EXECUTE] + # Requests made on low side are synced, and have their status computed instead of set manually. 
+ if user_code.is_l0_deployment: + status_change = SyncedUserCodeStatusChange( + value=UserCodeStatus.APPROVED, + linked_obj=user_code.status_link, + linked_user_code=code_link, + ) + else: + status_change = UserCodeStatusChange( + value=UserCodeStatus.APPROVED, + linked_obj=user_code.status_link, + linked_user_code=code_link, + ) + changes = [status_change] request = SubmitRequest(changes=changes) - method = context.node.get_service_method(RequestService.submit) - result = method(context=context, request=request, reason=reason) + result = context.server.services.request.submit( + context=context, request=request, reason=reason + ) - # The Request service already returns either a SyftSuccess or SyftError return result + @as_result(SyftException, NotFoundException, StashException) + def _get_or_submit_user_code( + self, + context: AuthedServiceContext, + code: SubmitUserCode | UserCode, + ) -> UserCode: + """ + - If the code is a UserCode, check if it exists and return + - If the code is a SubmitUserCode and the same code hash exists, return the existing code + - If the code is a SubmitUserCode and the code hash does not exist, submit the code + """ + if isinstance(code, UserCode): + return self.stash.get_by_uid(context.credentials, code.id).unwrap() + else: # code: SubmitUserCode + # Submit new UserCode, or get existing UserCode with the same code hash + # TODO: Why is this tagged as unreachable? + return self._submit(context, code, exists_ok=True).unwrap() # type: ignore[unreachable] + @service_method( path="code.request_code_execution", name="request_code_execution", @@ -198,163 +286,101 @@ def _request_code_execution_inner( def request_code_execution( self, context: AuthedServiceContext, - code: SubmitUserCode, + code: SubmitUserCode | UserCode, reason: str | None = "", - ) -> SyftSuccess | SyftError: + ) -> Request: """Request Code execution on user code""" - return self._request_code_execution(context=context, code=code, reason=reason) + user_code = self._get_or_submit_user_code(context, code).unwrap() + + result = self._request_code_execution( + context, + user_code, + reason, + ).unwrap() + + return result @service_method(path="code.get_all", name="get_all", roles=GUEST_ROLE_LEVEL) - def get_all(self, context: AuthedServiceContext) -> list[UserCode] | SyftError: + def get_all(self, context: AuthedServiceContext) -> list[UserCode]: """Get a Dataset""" - result = self.stash.get_all(context.credentials) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + return self.stash.get_all(context.credentials).unwrap() @service_method( path="code.get_by_id", name="get_by_id", roles=DATA_SCIENTIST_ROLE_LEVEL ) - def get_by_uid( - self, context: AuthedServiceContext, uid: UID - ) -> UserCode | SyftError: + def get_by_uid(self, context: AuthedServiceContext, uid: UID) -> UserCode: """Get a User Code Item""" - result = self.stash.get_by_uid(context.credentials, uid=uid) - if result.is_ok(): - user_code = result.ok() - if user_code and user_code.input_policy_state and context.node is not None: - # TODO replace with LinkedObject Context - user_code.node_uid = context.node.id - return user_code - return SyftError(message=result.err()) - - @service_method(path="code.get_all_for_user", name="get_all_for_user") - def get_all_for_user( - self, context: AuthedServiceContext - ) -> SyftSuccess | SyftError: + user_code = self.stash.get_by_uid(context.credentials, uid=uid).unwrap() + if user_code and user_code.input_policy_state and context.server is not None: + # TODO replace 
with LinkedObject Context + user_code.server_uid = context.server.id + return user_code + + @service_method( + path="code.get_all_for_user", + name="get_all_for_user", + roles=DATA_SCIENTIST_ROLE_LEVEL, + ) + def get_all_for_user(self, context: AuthedServiceContext) -> list[UserCode]: """Get All User Code Items for User's VerifyKey""" # TODO: replace with incoming user context and key - result = self.stash.get_all(context.credentials) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + return self.stash.get_all(context.credentials).unwrap() def update_code_state( self, context: AuthedServiceContext, code_item: UserCode - ) -> SyftSuccess | SyftError: - result = self.stash.update(context.credentials, code_item) - if result.is_ok(): - return SyftSuccess(message="Code State Updated") - return SyftError(message="Unable to Update Code State") + ) -> UserCode: + context = context.as_root_context() + return self.stash.update(context.credentials, code_item).unwrap() + @as_result(SyftException) def load_user_code(self, context: AuthedServiceContext) -> None: - result = self.stash.get_all(credentials=context.credentials) - if result.is_ok(): - user_code_items = result.ok() - load_approved_policy_code(user_code_items=user_code_items, context=context) - - @service_method(path="code.get_results", name="get_results", roles=GUEST_ROLE_LEVEL) - def get_results( - self, context: AuthedServiceContext, inp: UID | UserCode - ) -> list[UserCode] | SyftError: - context.node = cast(AbstractNode, context.node) - uid = inp.id if isinstance(inp, UserCode) else inp - code_result = self.stash.get_by_uid(context.credentials, uid=uid) - - if code_result.is_err(): - return SyftError(message=code_result.err()) - code = code_result.ok() - - if code.is_enclave_code: - # if the current node is not the enclave - if not context.node.node_type == NodeType.ENCLAVE: - connection = route_to_connection(code.enclave_metadata.route) - enclave_client = EnclaveClient( - connection=connection, - credentials=context.node.signing_key, - ) - if enclave_client.code is None: - return SyftError( - message=f"{enclave_client} can't access the user code api" - ) - outputs = enclave_client.code.get_results(code.id) - if isinstance(outputs, list): - for output in outputs: - output.syft_action_data # noqa: B018 - else: - outputs.syft_action_data # noqa: B018 - return outputs - - # if the current node is the enclave - else: - if not code.get_status(context.as_root_context()).approved: - return code.status.get_status_message() - - output_history = code.get_output_history( - context=context.as_root_context() - ) - if isinstance(output_history, SyftError): - return output_history - - if len(output_history) > 0: - res = resolve_outputs( - context=context, - output_ids=output_history[-1].output_ids, - ) - if res.is_err(): - return res - res = delist_if_single(res.ok()) - return Ok(res) - else: - return SyftError(message="No results available") - else: - return SyftError(message="Endpoint only supported for enclave code") + user_code_items = self.stash.get_all(credentials=context.credentials).unwrap() + load_approved_policy_code(user_code_items=user_code_items, context=context) + # FIX: Exceptions etc def is_execution_allowed( self, code: UserCode, context: AuthedServiceContext, output_policy: OutputPolicy | None, - ) -> bool | SyftSuccess | SyftError | SyftNotReady: - if not code.get_status(context).approved: - return code.status.get_status_message() - # Check if the user has permission to execute the code. 
- elif not (has_code_permission := self.has_code_permission(code, context)): - return has_code_permission + ) -> IsExecutionAllowedEnum: + status = code.get_status(context).unwrap() + if not status.get_is_approved(context): + return IsExecutionAllowedEnum.NOT_APPROVED + elif self.has_code_permission(code, context) is HasCodePermissionEnum.DENIED: + # TODO: Check enum above + return IsExecutionAllowedEnum.NO_PERMISSION elif not code.is_output_policy_approved(context): - return SyftError("Output policy not approved", code) + return IsExecutionAllowedEnum.OUTPUT_POLICY_NOT_APPROVED - policy_is_valid = output_policy is not None and output_policy._is_valid(context) - if not policy_is_valid: - return policy_is_valid - else: - return True + if output_policy is None: + return IsExecutionAllowedEnum.OUTPUT_POLICY_NONE - def is_execution_on_owned_args_allowed( - self, context: AuthedServiceContext - ) -> bool | SyftError: + try: + output_policy.is_valid(context) + except Exception: + return IsExecutionAllowedEnum.INVALID_OUTPUT_POLICY + + return IsExecutionAllowedEnum.ALLOWED + + def is_execution_on_owned_args_allowed(self, context: AuthedServiceContext) -> bool: if context.role == ServiceRole.ADMIN: return True - context.node = cast(AbstractNode, context.node) - user_service = context.node.get_service("userservice") - current_user = user_service.get_current_user(context=context) + current_user = context.server.services.user.get_current_user(context=context) return current_user.mock_execution_permission def keep_owned_kwargs( self, kwargs: dict[str, Any], context: AuthedServiceContext - ) -> dict[str, Any] | SyftError: + ) -> dict[str, Any]: """Return only the kwargs that are owned by the user""" - context.node = cast(AbstractNode, context.node) - - action_service = context.node.get_service("actionservice") - mock_kwargs = {} for k, v in kwargs.items(): if isinstance(v, UID): # Jobs have UID kwargs instead of ActionObject - v = action_service.get(context, uid=v) - if v.is_ok(): - v = v.ok() + try: + v = context.server.services.action.get(context, uid=v) + except Exception: # nosec: we are skipping when dont find it + pass if ( isinstance(v, ActionObject) and v.syft_client_verify_key == context.credentials @@ -363,234 +389,290 @@ def keep_owned_kwargs( return mock_kwargs def is_execution_on_owned_args( - self, kwargs: dict[str, Any], context: AuthedServiceContext + self, + context: AuthedServiceContext, + user_code_id: UID, + passed_kwargs: dict[str, Any], ) -> bool: - return len(self.keep_owned_kwargs(kwargs, context)) == len(kwargs) + # Check if all kwargs are owned by the user + all_kwargs_are_owned = len( + self.keep_owned_kwargs(passed_kwargs, context) + ) == len(passed_kwargs) + + if not all_kwargs_are_owned: + return False + + # Check if the kwargs match the code signature + try: + code = self.stash.get_by_uid(context.credentials, user_code_id).unwrap() + except SyftException: + return False + + # Skip the datasite and context kwargs, they are passed by the backend + code_kwargs = set(code.signature.parameters.keys()) - {"datasite", "context"} + + passed_kwarg_keys = set(passed_kwargs.keys()) + return passed_kwarg_keys == code_kwargs @service_method(path="code.call", name="call", roles=GUEST_ROLE_LEVEL) def call( self, context: AuthedServiceContext, uid: UID, **kwargs: Any - ) -> CachedSyftObject | ActionObject | SyftSuccess | SyftError: + ) -> ActionObject: """Call a User Code Function""" kwargs.pop("result_id", None) - result = self._call(context, uid, **kwargs) - if result.is_err(): - 
return SyftError(message=result.err()) + return self._call(context, uid, **kwargs).unwrap() + + def valid_worker_pool_for_context( + self, context: AuthedServiceContext, user_code: UserCode + ) -> bool: + """This is a temporary fix that is needed until every function is always just ran as job""" + # relative + from ...server.server import get_default_worker_pool_name + + has_custom_worker_pool = ( + user_code.worker_pool_name is not None + ) and user_code.worker_pool_name != get_default_worker_pool_name() + if has_custom_worker_pool and context.is_blocking_api_call: + return False else: - return result.ok() + return True + @as_result(SyftException) def _call( self, context: AuthedServiceContext, uid: UID, result_id: UID | None = None, **kwargs: Any, - ) -> Result[ActionObject, Err]: + ) -> ActionObject: """Call a User Code Function""" - try: - code_result = self.stash.get_by_uid(context.credentials, uid=uid) - if code_result.is_err(): - return code_result - code: UserCode = code_result.ok() - - # Set Permissions - if self.is_execution_on_owned_args(kwargs, context): - if self.is_execution_on_owned_args_allowed(context): - context.has_execute_permissions = True - else: - return Err( - "You do not have the permissions for mock execution, please contact the admin" - ) - override_execution_permission = ( - context.has_execute_permissions or context.role == ServiceRole.ADMIN + code: UserCode = self.stash.get_by_uid(context.credentials, uid=uid).unwrap() + + # Set Permissions + if self.is_execution_on_owned_args(context, uid, kwargs): + if self.is_execution_on_owned_args_allowed(context): + # handles the case: if we have 1 or more owned args and execution permission + # handles the case: if we have 0 owned args and execution permission + context.has_execute_permissions = True + elif len(kwargs) == 0: + # handles the case: if we have 0 owned args and execution permission + pass + else: + raise SyftException( + public_message="You do not have the permissions for mock execution, please contact the admin" + ) + + override_execution_permission = ( + context.has_execute_permissions or context.role == ServiceRole.ADMIN + ) + + # Override permissions bypasses the cache, since we do not check in/out policies + skip_fill_cache = override_execution_permission + # We do not read from output policy cache if there are mock arguments + skip_read_cache = len(self.keep_owned_kwargs(kwargs, context)) > 0 + + # Extract ids from kwargs + kwarg2id = map_kwargs_to_id(kwargs) + + input_policy = code.get_input_policy(context) + output_policy = code.get_output_policy(context) + + # Check output policy + if not override_execution_permission: + output_history = code.get_output_history(context=context).unwrap() + + is_execution_allowed = self.is_execution_allowed( + code=code, + context=context, + output_policy=output_policy, ) - # Override permissions bypasses the cache, since we do not check in/out policies - skip_fill_cache = override_execution_permission - # We do not read from output policy cache if there are mock arguments - skip_read_cache = len(self.keep_owned_kwargs(kwargs, context)) > 0 - - # Extract ids from kwargs - kwarg2id = map_kwargs_to_id(kwargs) - - input_policy = code.get_input_policy(context) - - # Check output policy - output_policy = code.get_output_policy(context) - if not override_execution_permission: - output_history = code.get_output_history(context=context) - if isinstance(output_history, SyftError): - return Err(output_history.message) - can_execute = self.is_execution_allowed( - code=code, - 
context=context, - output_policy=output_policy, - ) - if not can_execute: - if not code.is_output_policy_approved(context): - return Err( - "Execution denied: Your code is waiting for approval" - ) - if not (is_valid := output_policy._is_valid(context)): # type: ignore - if len(output_history) > 0 and not skip_read_cache: - last_executed_output = output_history[-1] - # Check if the inputs of the last executed output match - # against the current input - if ( - input_policy is not None - and not last_executed_output.check_input_ids( - kwargs=kwarg2id - ) - ): - inp_policy_validation = input_policy._is_valid( - context, - usr_input_kwargs=kwarg2id, - code_item_id=code.id, - ) - if inp_policy_validation.is_err(): - return inp_policy_validation + if ( + is_execution_allowed is not IsExecutionAllowedEnum.ALLOWED + or context.is_l0_lowside + ): + # We check output policy only in l2 deployment. + # code is from low side (L0 setup) + status = code.get_status(context).unwrap() + + if ( + context.server_allows_execution_for_ds + and not status.get_is_approved(context) + ): + raise SyftException( + public_message=status.get_status_message_l2(context) + ) - result: Result[ActionObject, str] = resolve_outputs( - context=context, - output_ids=last_executed_output.output_ids, + output_policy_is_valid = False + try: + if output_policy: + output_policy_is_valid = output_policy.is_valid(context) + except SyftException: + pass + + # if you cant run it or the results are being sycned from l0 + # lets have a look at the output history and possibly return that + if not output_policy_is_valid or code.is_l0_deployment: + if len(output_history) > 0 and not skip_read_cache: + last_executed_output = output_history[-1] + # Check if the inputs of the last executed output match + # against the current input + if ( + input_policy is not None + and not last_executed_output.check_input_ids( + kwargs=kwarg2id ) - if result.is_err(): - return result - - res = delist_if_single(result.ok()) - return Ok( - CachedSyftObject( - result=res, - error_msg=is_valid.message, - ) + ): + inp_policy_validation = input_policy.is_valid( + context, + usr_input_kwargs=kwarg2id, ) - else: - return cast(Err, is_valid.to_result()) - return can_execute.to_result() # type: ignore - # Execute the code item - context.node = cast(AbstractNode, context.node) + if not inp_policy_validation: + raise SyftException( + # TODO: Print what's inside + public_message=InputPolicyValidEnum.INVALID + ) - action_service = context.node.get_service("actionservice") + outputs = resolve_outputs( + context=context, + output_ids=last_executed_output.output_ids, + ).unwrap() - result_action_object: Result[ActionObject | TwinObject, str] = ( - action_service._user_code_execute( - context, code, kwarg2id, result_id=result_id - ) - ) - if result_action_object.is_err(): - return result_action_object - else: - result_action_object = result_action_object.ok() + if outputs: + outputs = delist_if_single(outputs) - output_result = action_service.set_result_to_store( - result_action_object, context, code.get_output_policy(context) - ) - - if output_result.is_err(): - return output_result - result = output_result.ok() + if code.is_l2_deployment: + # Skip output policy warning in L0 setup; + # admin overrides policy checks. + output_policy_message = ( + "Your result has been fetched from output_history, " + "because your OutputPolicy is no longer valid." 
+ ) + context.add_warning(output_policy_message) + return outputs # type: ignore - # Apply Output Policy to the results and update the OutputPolicyState + raise SyftException(public_message=is_execution_allowed.value) - # this currently only works for nested syft_functions - # and admins executing on high side (TODO, decide if we want to increment counter) - if not skip_fill_cache and output_policy is not None: - res = code.apply_output( - context=context, - outputs=result, - job_id=context.job_id, - input_ids=kwarg2id, - ) - if isinstance(res, SyftError): - return Err(res.message) - has_result_read_permission = context.extra_kwargs.get( - "has_result_read_permission", False + # Execute the code item + if not self.valid_worker_pool_for_context(context, code): + raise SyftException( + public_message="You tried to run a syft function attached to a worker pool in blocking mode," + "which is currently not supported. Run your function with `blocking=False` to run" + " as a job on your worker pool." ) - if isinstance(result, TwinObject): - if has_result_read_permission: - return Ok(result.private) - else: - return Ok(result.mock) - elif result.is_mock: - return Ok(result) - elif result.syft_action_data_type is Err: - # result contains the error but the request was handled correctly - return result.syft_action_data - elif has_result_read_permission: - return Ok(result) - else: - return Ok(result.as_empty()) - except Exception as e: - # stdlib - import traceback + action_obj = context.server.services.action._user_code_execute( + context, code, kwarg2id, result_id + ).unwrap() + + result = context.server.services.action.set_result_to_store( + action_obj, context, code.get_output_policy(context) + ).unwrap() + + # Apply Output Policy to the results and update the OutputPolicyState + + # this currently only works for nested syft_functions + # and admins executing on high side (TODO, decide if we want to increment counter) + # always store_execution_output on l0 setup + is_l0_request = context.role == ServiceRole.ADMIN and code.is_l0_deployment + + if not skip_fill_cache and output_policy is not None or is_l0_request: + code.store_execution_output( + context=context, + outputs=result, + job_id=context.job_id, + input_ids=kwarg2id, + ).unwrap() + + has_result_read_permission = context.extra_kwargs.get( + "has_result_read_permission", False + ) - return Err(value=f"Failed to run. 
{e}, {traceback.format_exc()}") + # TODO: Just to fix the issue with the current implementation + if context.role == ServiceRole.ADMIN: + has_result_read_permission = True + + if isinstance(result, TwinObject): + if has_result_read_permission: + return result.private + else: + return result.mock + elif result.is_mock: # type: ignore[unreachable] + return result # type: ignore[return-value] + # TODO: Check this part after error handling PR + elif result.syft_action_data_type is Err: + # result contains the error but the request was handled correctly + return result + elif has_result_read_permission: + return result + else: + return result.as_empty() def has_code_permission( self, code_item: UserCode, context: AuthedServiceContext - ) -> SyftSuccess | SyftError: - context.node = cast(AbstractNode, context.node) + ) -> HasCodePermissionEnum: if not ( - context.credentials == context.node.verify_key + context.credentials == context.server.verify_key or context.credentials == code_item.user_verify_key ): - return SyftError( - message=f"Code Execution Permission: {context.credentials} denied" - ) - return SyftSuccess(message="you have permission") + return HasCodePermissionEnum.DENIED + return HasCodePermissionEnum.ACCEPTED @service_method( - path="code.apply_output", name="apply_output", roles=GUEST_ROLE_LEVEL + path="code.store_execution_output", + name="store_execution_output", + roles=GUEST_ROLE_LEVEL, ) - def apply_output( + def store_execution_output( self, context: AuthedServiceContext, user_code_id: UID, outputs: Any, input_ids: dict[str, UID] | None = None, job_id: UID | None = None, - ) -> ExecutionOutput | SyftError: - code_result = self.stash.get_by_uid(context.credentials, user_code_id) - if code_result.is_err(): - return SyftError(message=code_result.err()) + ) -> ExecutionOutput: + code: UserCode = self.stash.get_by_uid( + context.credentials, user_code_id + ).unwrap() - code: UserCode = code_result.ok() - if not code.get_status(context).approved: - return SyftError(message="Code is not approved") + is_admin = context.role == ServiceRole.ADMIN - res = code.apply_output( + if ( + not code.get_status(context).unwrap().get_is_approved(context) + and not is_admin + ): + raise SyftException(public_message="This UserCode is not approved") + + return code.store_execution_output( context=context, outputs=outputs, job_id=job_id, input_ids=input_ids, - ) - return res + ).unwrap() +@as_result(SyftException) def resolve_outputs( context: AuthedServiceContext, output_ids: list[UID], -) -> Result[list[ActionObject], str]: +) -> list[ActionObject] | None: # relative from ...service.action.action_object import TwinMode if isinstance(output_ids, list): if len(output_ids) == 0: return None + outputs = [] for output_id in output_ids: - if context.node is not None: - action_service = context.node.get_service("actionservice") - result = action_service.get( + if context.server is not None: + output = context.server.services.action.get( context, uid=output_id, twin_mode=TwinMode.PRIVATE ) - if result.is_err(): - return result - outputs.append(result.ok()) - return Ok(outputs) + outputs.append(output) + return outputs else: - raise NotImplementedError + raise SyftException(public_message="Cannot resolve type of output_ids") T = TypeVar("T") diff --git a/packages/syft/src/syft/service/code/user_code_stash.py b/packages/syft/src/syft/service/code/user_code_stash.py index fa9fad49b82..4ba67e3633b 100644 --- a/packages/syft/src/syft/service/code/user_code_stash.py +++ 
b/packages/syft/src/syft/service/code/user_code_stash.py @@ -1,50 +1,27 @@ -# stdlib - -# third party -from result import Result - # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ...util.telemetry import instrument -from .user_code import CodeHashPartitionKey -from .user_code import ServiceFuncNamePartitionKey -from .user_code import SubmitTimePartitionKey +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.result import as_result from .user_code import UserCode -from .user_code import UserVerifyKeyPartitionKey - - -@instrument -@serializable() -class UserCodeStash(BaseUIDStoreStash): - object_type = UserCode - settings: PartitionSettings = PartitionSettings( - name=UserCode.__canonical_name__, object_type=UserCode - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) - def get_all_by_user_verify_key( - self, credentials: SyftVerifyKey, user_verify_key: SyftVerifyKey - ) -> Result[list[UserCode], str]: - qks = QueryKeys(qks=[UserVerifyKeyPartitionKey.with_obj(user_verify_key)]) - return self.query_one(credentials=credentials, qks=qks) - def get_by_code_hash( - self, credentials: SyftVerifyKey, code_hash: str - ) -> Result[UserCode | None, str]: - qks = QueryKeys(qks=[CodeHashPartitionKey.with_obj(code_hash)]) - return self.query_one(credentials=credentials, qks=qks) +@serializable(canonical_name="UserCodeSQLStash", version=1) +class UserCodeStash(ObjectStash[UserCode]): + @as_result(StashException, NotFoundException) + def get_by_code_hash(self, credentials: SyftVerifyKey, code_hash: str) -> UserCode: + return self.get_one( + credentials=credentials, + filters={"code_hash": code_hash}, + ).unwrap() + @as_result(StashException) def get_by_service_func_name( self, credentials: SyftVerifyKey, service_func_name: str - ) -> Result[list[UserCode], str]: - qks = QueryKeys(qks=[ServiceFuncNamePartitionKey.with_obj(service_func_name)]) - return self.query_all( - credentials=credentials, qks=qks, order_by=SubmitTimePartitionKey - ) + ) -> list[UserCode]: + return self.get_all( + credentials=credentials, + filters={"service_func_name": service_func_name}, + ).unwrap() diff --git a/packages/syft/src/syft/service/code/utils.py b/packages/syft/src/syft/service/code/utils.py index fccc5314c43..1b26d44e054 100644 --- a/packages/syft/src/syft/service/code/utils.py +++ b/packages/syft/src/syft/service/code/utils.py @@ -6,17 +6,20 @@ from IPython import get_ipython # relative +from ...types.errors import SyftException +from ..response import SyftWarning +from .code_parse import GlobalsVisitor from .code_parse import LaunchJobVisitor def submit_subjobs_code(submit_user_code, ep_client) -> None: # type: ignore # TODO: fix the mypy issue. 
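A minimal sketch of how the filter-based ObjectStash API above is typically called; the `stash` instance, `verify_key` credential, and `"<hash>"` value here are placeholders, not part of this changeset:

    # look up a single UserCode by hash using the new `filters` dict
    code = stash.get_by_code_hash(
        credentials=verify_key, code_hash="<hash>"
    ).unwrap()

    # list every submitted version sharing a service function name
    versions = stash.get_by_service_func_name(
        credentials=verify_key, service_func_name="my_func"
    ).unwrap()

Both helpers are wrapped with @as_result, so callers either .unwrap() the value or handle StashException / NotFoundException explicitly.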
Reason: circular import # We are exploring the source code to automatically upload - # subjobs in the ephemeral node + # subjobs in the ephemeral server # Usually, a DS would manually submit the code for subjobs, - # but because we dont allow them to interact with the ephemeral node + # but because we dont allow them to interact with the ephemeral server # that would not be possible - if "domain" in submit_user_code.input_kwargs: + if "datasite" in submit_user_code.input_kwargs: tree = ast.parse(inspect.getsource(submit_user_code.local_function)) v = LaunchJobVisitor() v.visit(tree) @@ -36,3 +39,28 @@ def submit_subjobs_code(submit_user_code, ep_client) -> None: # type: ignore # fetch if specs["type_name"] == "SubmitUserCode": ep_client.code.submit(ipython.ev(call)) + + +def check_for_global_vars(code_tree: ast.Module) -> GlobalsVisitor | SyftWarning: + """ + Check that the code does not contain any global variables + """ + v = GlobalsVisitor() + try: + v.visit(code_tree) + except Exception: + raise SyftException( + public_message="Your code contains (a) global variable(s), which is not allowed" + ) + return v + + +def parse_code(raw_code: str) -> ast.Module | SyftWarning: + """ + Parse the code into an AST tree and return a warning if there are syntax errors + """ + try: + tree = ast.parse(raw_code) + except SyntaxError as e: + raise SyftException(public_message=f"Your code contains syntax error: {e}") + return tree diff --git a/packages/syft/src/syft/service/code_history/code_history.py b/packages/syft/src/syft/service/code_history/code_history.py index b4a44911868..2f38fc21493 100644 --- a/packages/syft/src/syft/service/code_history/code_history.py +++ b/packages/syft/src/syft/service/code_history/code_history.py @@ -3,30 +3,29 @@ from typing import Any # relative -from ...client.api import APIRegistry -from ...client.enclave_client import EnclaveMetadata from ...serde.serializable import serializable from ...service.user.user_roles import ServiceRole -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.errors import SyftException +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject from ...types.syft_object import SyftVerifyKey -from ...types.syft_object import get_repr_values_table from ...types.uid import UID -from ...util.notebook_ui.notebook_addons import create_table_template +from ...util.notebook_ui.components.tabulator_template import ( + build_tabulator_table_with_data, +) +from ...util.table import prepare_table_data from ..code.user_code import UserCode -from ..response import SyftError @serializable() class CodeHistory(SyftObject): # version __canonical_name__ = "CodeHistory" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID - node_uid: UID + server_uid: UID user_verify_key: SyftVerifyKey - enclave_metadata: EnclaveMetadata | None = None user_code_history: list[UID] = [] service_func_name: str comment_history: list[str] = [] @@ -44,7 +43,7 @@ def add_code(self, code: UserCode, comment: str | None = None) -> None: class CodeHistoryView(SyftObject): # version __canonical_name__ = "CodeHistoryView" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID user_code_history: list[UserCode] = [] @@ -54,8 +53,9 @@ class CodeHistoryView(SyftObject): def _coll_repr_(self) -> dict[str, int]: return {"Number of versions": len(self.user_code_history)} - def _repr_html_(self) -> str: - rows = get_repr_values_table(self.user_code_history, True) + def 
_repr_html_(self) -> str | None: + rows, metadata = prepare_table_data(self.user_code_history) + for i, r in enumerate(rows): r["Version"] = f"v{i}" raw_code = self.user_code_history[i].raw_code @@ -63,20 +63,23 @@ def _repr_html_(self) -> str: if n_code_lines > 5: raw_code = "\n".join(raw_code.split("\n", 5)) r["Code"] = raw_code - # rows = sorted(rows, key=lambda x: x["Version"]) - return create_table_template(rows, "CodeHistory", table_icon=None) - def __getitem__(self, index: int | str) -> UserCode | SyftError: + metadata["name"] = "Code History" + metadata["columns"] += ["Version", "Code"] + + return build_tabulator_table_with_data(rows, metadata) + + def __getitem__(self, index: int | str) -> UserCode: if isinstance(index, str): raise TypeError(f"index {index} must be an integer, not a string") - api = APIRegistry.api_for(self.syft_node_location, self.syft_client_verify_key) - if api is None: - return SyftError( - message=f"Can't access the api. You must login to {self.node_uid}" - ) - if api.user_role.value >= ServiceRole.DATA_OWNER.value and index < 0: - return SyftError( - message="For security concerns we do not allow negative indexing. \ + api = self.get_api() + if ( + api.user.get_current_user().role.value >= ServiceRole.DATA_OWNER.value + and index < 0 + ): + # negative index would dynamically resolve to a different version + raise SyftException( + public_message="For security concerns we do not allow negative indexing. \ Try using absolute values when indexing" ) return self.user_code_history[index] @@ -86,7 +89,7 @@ def __getitem__(self, index: int | str) -> UserCode | SyftError: class CodeHistoriesDict(SyftObject): # version __canonical_name__ = "CodeHistoriesDict" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID code_versions: dict[str, CodeHistoryView] = {} @@ -115,10 +118,10 @@ def __getattr__(self, name: str) -> Any: class UsersCodeHistoriesDict(SyftObject): # version __canonical_name__ = "UsersCodeHistoriesDict" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID - node_uid: UID + server_uid: UID user_dict: dict[str, list[str]] = {} __repr_attrs__ = ["available_keys"] @@ -127,16 +130,17 @@ class UsersCodeHistoriesDict(SyftObject): def available_keys(self) -> str: return json.dumps(self.user_dict, sort_keys=True, indent=4) - def __getitem__(self, key: str | int) -> CodeHistoriesDict | SyftError: - api = APIRegistry.api_for(self.node_uid, self.syft_client_verify_key) - if api is None: - return SyftError( - message=f"Can't access the api. 
You must login to {self.node_uid}" - ) - return api.services.code_history.get_history_for_user(key) - - def _repr_html_(self) -> str: - rows = [] - for user, funcs in self.user_dict.items(): - rows += [{"user": user, "UserCodes": funcs}] - return create_table_template(rows, "UserCodeHistory", table_icon=None) + def __getitem__(self, key: str | int) -> CodeHistoriesDict: + return self.get_api().services.code_history.get_history_for_user(key) + + def _repr_html_(self) -> str | None: + rows = [ + {"User": user, "UserCodes": ", ".join(funcs)} + for user, funcs in self.user_dict.items() + ] + metadata = { + "name": "UserCode Histories", + "columns": ["User", "UserCodes"], + "icon": None, + } + return build_tabulator_table_with_data(rows, metadata) diff --git a/packages/syft/src/syft/service/code_history/code_history_service.py b/packages/syft/src/syft/service/code_history/code_history_service.py index ba751467aa9..ff2967f169b 100644 --- a/packages/syft/src/syft/service/code_history/code_history_service.py +++ b/packages/syft/src/syft/service/code_history/code_history_service.py @@ -1,22 +1,20 @@ # stdlib -from typing import cast # relative -from ...abstract_node import AbstractNode -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...server.credentials import SyftVerifyKey +from ...store.db.db import DBManager +from ...store.document_store_errors import NotFoundException from ...types.uid import UID -from ...util.telemetry import instrument from ..code.user_code import SubmitUserCode from ..code.user_code import UserCode from ..context import AuthedServiceContext -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import service_method from ..user.user_roles import DATA_OWNER_ROLE_LEVEL from ..user.user_roles import DATA_SCIENTIST_ROLE_LEVEL +from ..user.user_roles import ServiceRole from .code_history import CodeHistoriesDict from .code_history import CodeHistory from .code_history import CodeHistoryView @@ -24,131 +22,99 @@ from .code_history_stash import CodeHistoryStash -@instrument -@serializable() +@serializable(canonical_name="CodeHistoryService", version=1) class CodeHistoryService(AbstractService): - store: DocumentStore stash: CodeHistoryStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = CodeHistoryStash(store=store) @service_method( path="code_history.submit_version", name="submit_version", roles=DATA_SCIENTIST_ROLE_LEVEL, + unwrap_on_success=False, ) def submit_version( self, context: AuthedServiceContext, code: SubmitUserCode | UserCode, comment: str | None = None, - ) -> SyftSuccess | SyftError: - context.node = cast(AbstractNode, context.node) - user_code_service = context.node.get_service("usercodeservice") + ) -> SyftSuccess: if isinstance(code, SubmitUserCode): - result = user_code_service._submit(context=context, code=code) - if result.is_err(): - return SyftError(message=str(result.err())) - code = result.ok() - elif isinstance(code, UserCode): # type: ignore[unreachable] - result = user_code_service.get_by_uid(context=context, uid=code.id) - if isinstance(result, SyftError): - return result - code = result - - result = self.stash.get_by_service_func_name_and_verify_key( - credentials=context.credentials, - service_func_name=code.service_func_name, - user_verify_key=context.credentials, - ) - - if result.is_err(): - 
return SyftError(message=result.err()) - - code_history: CodeHistory | None = result.ok() + code = context.server.services.user_code._submit( + context=context, submit_code=code + ).unwrap() - if code_history is None: + try: + code_history = self.stash.get_by_service_func_name_and_verify_key( + credentials=context.credentials, + service_func_name=code.service_func_name, + user_verify_key=context.credentials, + ).unwrap() + except NotFoundException: code_history = CodeHistory( id=UID(), - node_uid=context.node.id, + server_uid=context.server.id, user_verify_key=context.credentials, service_func_name=code.service_func_name, ) - result = self.stash.set(credentials=context.credentials, obj=code_history) - if result.is_err(): - return SyftError(message=result.err()) + self.stash.set(credentials=context.credentials, obj=code_history).unwrap() code_history.add_code(code=code, comment=comment) - result = self.stash.update(credentials=context.credentials, obj=code_history) - if result.is_err(): - return SyftError(message=result.err()) - - return SyftSuccess(message="Code version submit success") + res = self.stash.update( + credentials=context.credentials, obj=code_history + ).unwrap() + return SyftSuccess(message="Code version submit success", value=res) @service_method( path="code_history.get_all", name="get_all", roles=DATA_SCIENTIST_ROLE_LEVEL ) - def get_all(self, context: AuthedServiceContext) -> list[CodeHistory] | SyftError: + def get_all(self, context: AuthedServiceContext) -> list[CodeHistory]: """Get a Dataset""" - result = self.stash.get_all(context.credentials) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + return self.stash.get_all(context.credentials).unwrap() @service_method( path="code_history.get", name="get", roles=DATA_SCIENTIST_ROLE_LEVEL ) - def get_code_by_uid( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: + def get_code_by_uid(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: """Get a User Code Item""" - result = self.stash.get_by_uid(context.credentials, uid=uid) - if result.is_ok(): - code_history = result.ok() - return code_history - return SyftError(message=result.err()) - - @service_method(path="code_history.delete", name="delete") - def delete( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - result = self.stash.delete_by_uid(context.credentials, uid) - if result.is_ok(): - return result.ok() - else: - return SyftError(message=result.err()) + return self.stash.get_by_uid(context.credentials, uid=uid).unwrap() + + @service_method(path="code_history.delete", name="delete", unwrap_on_success=False) + def delete(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: + res = self.stash.delete_by_uid(context.credentials, uid).unwrap() + return SyftSuccess(message="Succesfully deleted", value=res) def fetch_histories_for_user( self, context: AuthedServiceContext, user_verify_key: SyftVerifyKey - ) -> CodeHistoriesDict | SyftError: - result = self.stash.get_by_verify_key( - credentials=context.credentials, user_verify_key=user_verify_key - ) - context.node = cast(AbstractNode, context.node) - user_code_service = context.node.get_service("usercodeservice") - - def get_code(uid: UID) -> UserCode | SyftError: - return user_code_service.get_by_uid(context=context, uid=uid) - - if result.is_ok(): - code_histories = result.ok() - code_versions_dict = {} - - for code_history in code_histories: - user_code_list = [] - for uid in code_history.user_code_history: 
- user_code_list.append(get_code(uid)) - code_versions = CodeHistoryView( - user_code_history=user_code_list, - service_func_name=code_history.service_func_name, - comment_history=code_history.comment_history, - ) - code_versions_dict[code_history.service_func_name] = code_versions - return CodeHistoriesDict(code_versions=code_versions_dict) + ) -> CodeHistoriesDict: + if context.role in [ServiceRole.DATA_OWNER, ServiceRole.ADMIN]: + code_histories = self.stash.get_by_verify_key( + credentials=context.server.verify_key, user_verify_key=user_verify_key + ).unwrap() else: - return SyftError(message=result.err()) + code_histories = self.stash.get_by_verify_key( + credentials=context.credentials, user_verify_key=user_verify_key + ).unwrap() + + def get_code(uid: UID) -> UserCode: + return context.server.services.user_code.stash.get_by_uid( + credentials=context.server.verify_key, + uid=uid, + ).unwrap() + + code_versions_dict = {} + + for code_history in code_histories: + user_code_list = [get_code(uid) for uid in code_history.user_code_history] + code_versions = CodeHistoryView( + user_code_history=user_code_list, + service_func_name=code_history.service_func_name, + comment_history=code_history.comment_history, + ) + code_versions_dict[code_history.service_func_name] = code_versions + return CodeHistoriesDict(code_versions=code_versions_dict) @service_method( path="code_history.get_history", @@ -157,7 +123,7 @@ def get_code(uid: UID) -> UserCode | SyftError: ) def get_histories_for_current_user( self, context: AuthedServiceContext - ) -> CodeHistoriesDict | SyftError: + ) -> CodeHistoriesDict: return self.fetch_histories_for_user( context=context, user_verify_key=context.credentials ) @@ -169,18 +135,13 @@ def get_histories_for_current_user( ) def get_history_for_user( self, context: AuthedServiceContext, email: str - ) -> CodeHistoriesDict | SyftError: - context.node = cast(AbstractNode, context.node) - user_service = context.node.get_service("userservice") - result = user_service.stash.get_by_email( + ) -> CodeHistoriesDict: + user = context.server.services.user.stash.get_by_email( credentials=context.credentials, email=email + ).unwrap() + return self.fetch_histories_for_user( + context=context, user_verify_key=user.verify_key ) - if result.is_ok(): - user = result.ok() - return self.fetch_histories_for_user( - context=context, user_verify_key=user.verify_key - ) - return SyftError(message=result.err()) @service_method( path="code_history.get_histories", @@ -189,20 +150,16 @@ def get_history_for_user( ) def get_histories_group_by_user( self, context: AuthedServiceContext - ) -> UsersCodeHistoriesDict | SyftError: - result = self.stash.get_all(credentials=context.credentials) - if result.is_err(): - return SyftError(message=result.err()) - code_histories: list[CodeHistory] = result.ok() - - context.node = cast(AbstractNode, context.node) - user_service = context.node.get_service("userservice") - result = user_service.stash.get_all(context.credentials) - if result.is_err(): - return SyftError(message=result.err()) - users = result.ok() + ) -> UsersCodeHistoriesDict: + if context.role in [ServiceRole.DATA_OWNER, ServiceRole.ADMIN]: + code_histories = self.stash.get_all( + context.credentials, has_permission=True + ).unwrap() + else: + code_histories = self.stash.get_all(context.credentials).unwrap() - user_code_histories = UsersCodeHistoriesDict(node_uid=context.node.id) + users = context.server.services.user.stash.get_all(context.credentials).unwrap() + user_code_histories = 
UsersCodeHistoriesDict(server_uid=context.server.id) verify_key_2_user_email = {} for user in users: @@ -227,23 +184,16 @@ def get_by_func_name_and_user_email( service_func_name: str, user_email: str, user_id: UID, - ) -> list[CodeHistory] | SyftError: - context.node = cast(AbstractNode, context.node) - user_service = context.node.get_service("userservice") - user_verify_key = user_service.user_verify_key(user_email) + ) -> list[CodeHistory]: + user_verify_key = context.server.services.user.user_verify_key(user_email) - if isinstance(user_verify_key, SyftError): - return user_verify_key - - kwargs = { + filters = { "id": user_id, "email": user_email, "verify_key": user_verify_key, "service_func_name": service_func_name, } - result = self.stash.find_all(credentials=context.credentials, **kwargs) - if result.is_err(): # or len(result) > 1 - return result - - return result.ok() + return self.stash.get_all( + credentials=context.credentials, filters=filters + ).unwrap() diff --git a/packages/syft/src/syft/service/code_history/code_history_stash.py b/packages/syft/src/syft/service/code_history/code_history_stash.py index b4d93aa4f1b..69dfd272717 100644 --- a/packages/syft/src/syft/service/code_history/code_history_stash.py +++ b/packages/syft/src/syft/service/code_history/code_history_stash.py @@ -1,62 +1,43 @@ -# stdlib - -# third party -from result import Result - # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import StashException +from ...types.result import as_result from .code_history import CodeHistory -NamePartitionKey = PartitionKey(key="service_func_name", type_=str) -VerifyKeyPartitionKey = PartitionKey(key="user_verify_key", type_=SyftVerifyKey) - - -@serializable() -class CodeHistoryStash(BaseUIDStoreStash): - object_type = CodeHistory - settings: PartitionSettings = PartitionSettings( - name=CodeHistory.__canonical_name__, object_type=CodeHistory - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +@serializable(canonical_name="CodeHistoryStashSQL", version=1) +class CodeHistoryStash(ObjectStash[CodeHistory]): + @as_result(StashException) def get_by_service_func_name_and_verify_key( self, credentials: SyftVerifyKey, service_func_name: str, user_verify_key: SyftVerifyKey, - ) -> Result[list[CodeHistory], str]: - qks = QueryKeys( - qks=[ - NamePartitionKey.with_obj(service_func_name), - VerifyKeyPartitionKey.with_obj(user_verify_key), - ] - ) - return self.query_one(credentials=credentials, qks=qks) - + ) -> CodeHistory: + return self.get_one( + credentials=credentials, + filters={ + "user_verify_key": user_verify_key, + "service_func_name": service_func_name, + }, + ).unwrap() + + @as_result(StashException) def get_by_service_func_name( self, credentials: SyftVerifyKey, service_func_name: str - ) -> Result[list[CodeHistory], str]: - qks = QueryKeys(qks=[NamePartitionKey.with_obj(service_func_name)]) - return self.query_all(credentials=credentials, qks=qks) + ) -> list[CodeHistory]: + return self.get_all( + credentials=credentials, + filters={"service_func_name": service_func_name}, + ).unwrap() + 
@as_result(StashException) def get_by_verify_key( self, credentials: SyftVerifyKey, user_verify_key: SyftVerifyKey - ) -> Result[CodeHistory | None, str]: - if isinstance(user_verify_key, str): - user_verify_key = SyftVerifyKey.from_string(user_verify_key) - qks = QueryKeys(qks=[VerifyKeyPartitionKey.with_obj(user_verify_key)]) - return self.query_all(credentials=credentials, qks=qks) - - # def get_version(self, name:str, version:int) -> Optional[UserCode]: - # for obj in self.objs.values(): - # if obj.name == name and obj.version == version: - # return obj - # return None + ) -> list[CodeHistory]: + return self.get_all( + credentials=credentials, + filters={"user_verify_key": user_verify_key}, + ).unwrap() diff --git a/packages/syft/src/syft/service/context.py b/packages/syft/src/syft/service/context.py index d4b31c72fa6..4ce07c67982 100644 --- a/packages/syft/src/syft/service/context.py +++ b/packages/syft/src/syft/service/context.py @@ -1,16 +1,16 @@ # stdlib from typing import Any -from typing import cast # third party from typing_extensions import Self # relative -from ..abstract_node import AbstractNode -from ..node.credentials import SyftVerifyKey -from ..node.credentials import UserLoginCredentials +from ..abstract_server import AbstractServer +from ..abstract_server import ServerSideType +from ..server.credentials import SyftVerifyKey +from ..server.credentials import UserLoginCredentials from ..types.syft_object import Context -from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from ..types.syft_object import SyftBaseObject from ..types.syft_object import SyftObject from ..types.uid import UID @@ -19,38 +19,56 @@ from .user.user_roles import ServiceRoleCapability -class NodeServiceContext(Context, SyftObject): - __canonical_name__ = "NodeServiceContext" - __version__ = SYFT_OBJECT_VERSION_2 +class ServerServiceContext(Context, SyftObject): + __canonical_name__ = "ServerServiceContext" + __version__ = SYFT_OBJECT_VERSION_1 id: UID | None = None # type: ignore[assignment] - node: AbstractNode | None = None + server: AbstractServer -class AuthedServiceContext(NodeServiceContext): +class AuthedServiceContext(ServerServiceContext): __canonical_name__ = "AuthedServiceContext" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 credentials: SyftVerifyKey role: ServiceRole = ServiceRole.NONE job_id: UID | None = None extra_kwargs: dict = {} has_execute_permissions: bool = False + is_blocking_api_call: bool = False + client_warnings: list[str] = [] @property def dev_mode(self) -> Any: - return self.node.dev_mode # type: ignore + return self.server.dev_mode # type: ignore + + def add_warning(self, message: str) -> None: + self.client_warnings.append(message) def capabilities(self) -> list[ServiceRoleCapability]: return ROLE_TO_CAPABILITIES.get(self.role, []) def with_credentials(self, credentials: SyftVerifyKey, role: ServiceRole) -> Self: - return AuthedServiceContext(credentials=credentials, role=role, node=self.node) + return AuthedServiceContext( + credentials=credentials, role=role, server=self.server + ) + + @property + def is_l0_lowside(self) -> bool: + """Returns True if this is a low side of a Level 0 deployment""" + return self.server.server_side_type == ServerSideType.LOW_SIDE + + @property + def server_allows_execution_for_ds(self) -> bool: + """Returns True if this is a low side of a Level 0 deployment""" + return not self.is_l0_lowside def as_root_context(self) -> Self: - self.node = 
cast(AbstractNode, self.node) return AuthedServiceContext( - credentials=self.node.verify_key, role=ServiceRole.ADMIN, node=self.node + credentials=self.server.verify_key, + role=ServiceRole.ADMIN, + server=self.server, ) @property @@ -59,27 +77,25 @@ def job(self): # type: ignore # but we can't import Job since it's a circular import if self.job_id is None: return None - res = self.node.job_stash.get_by_uid(self.credentials, self.job_id) - if res.is_err(): - return None - else: - return res.ok() + return self.server.job_stash.get_by_uid( + self.credentials, self.job_id + ).ok() # if this fails, it will return None -class UnauthedServiceContext(NodeServiceContext): +class UnauthedServiceContext(ServerServiceContext): __canonical_name__ = "UnauthedServiceContext" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 - login_credentials: UserLoginCredentials - node: AbstractNode | None = None + server: AbstractServer + login_credentials: UserLoginCredentials | None = None role: ServiceRole = ServiceRole.NONE class ChangeContext(SyftBaseObject): __canonical_name__ = "ChangeContext" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 - node: AbstractNode | None = None + server: AbstractServer approving_user_credentials: SyftVerifyKey | None = None requesting_user_credentials: SyftVerifyKey | None = None extra_kwargs: dict = {} @@ -87,14 +103,14 @@ class ChangeContext(SyftBaseObject): @classmethod def from_service(cls, context: AuthedServiceContext) -> Self: return cls( - node=context.node, + server=context.server, approving_user_credentials=context.credentials, extra_kwargs=context.extra_kwargs, ) def to_service_ctx(self) -> AuthedServiceContext: return AuthedServiceContext( - node=self.node, + server=self.server, credentials=self.approving_user_credentials, extra_kwargs=self.extra_kwargs, ) diff --git a/packages/syft/src/syft/service/data_subject/__init__.py b/packages/syft/src/syft/service/data_subject/__init__.py index f628bc5d753..f232044493c 100644 --- a/packages/syft/src/syft/service/data_subject/__init__.py +++ b/packages/syft/src/syft/service/data_subject/__init__.py @@ -1,2 +1,2 @@ # relative -from .data_subject import DataSubjectCreate # noqa: F401 +from .data_subject import DataSubjectCreate diff --git a/packages/syft/src/syft/service/data_subject/data_subject.py b/packages/syft/src/syft/service/data_subject/data_subject.py index cadcf0e1f52..f85f80b0069 100644 --- a/packages/syft/src/syft/service/data_subject/data_subject.py +++ b/packages/syft/src/syft/service/data_subject/data_subject.py @@ -7,27 +7,23 @@ # relative from ...serde.serializable import serializable -from ...store.document_store import PartitionKey -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject from ...types.transforms import TransformContext -from ...types.transforms import add_node_uid_for_key +from ...types.transforms import add_server_uid_for_key from ...types.transforms import generate_id from ...types.transforms import transform from ...types.uid import UID from ...util.markdown import as_markdown_python_code -from ..response import SyftError - -NamePartitionKey = PartitionKey(key="name", type_=str) @serializable() class DataSubject(SyftObject): # version __canonical_name__ = "DataSubject" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 - node_uid: UID + server_uid: UID name: str description: str | None = None aliases: list[str] = 
[] @@ -35,13 +31,7 @@ class DataSubject(SyftObject): @property def members(self) -> list: # relative - from ...client.api import APIRegistry - - api = APIRegistry.api_for(self.node_uid, self.syft_client_verify_key) - if api is None: - return SyftError(message=f"You must login to {self.node_uid}") - members = api.services.data_subject.members_for(self.name) - return members + return self.get_api().services.data_subject.members_for(self.name) __attr_searchable__ = ["name", "description"] __repr_attrs__ = ["name", "description"] @@ -71,7 +61,7 @@ def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: class DataSubjectCreate(SyftObject): # version __canonical_name__ = "DataSubjectCreate" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID | None = None # type: ignore[assignment] name: str @@ -131,4 +121,4 @@ def remove_members_list(context: TransformContext) -> TransformContext: @transform(DataSubjectCreate, DataSubject) def create_data_subject_to_data_subject() -> list[Callable]: - return [generate_id, remove_members_list, add_node_uid_for_key("node_uid")] + return [generate_id, remove_members_list, add_server_uid_for_key("server_uid")] diff --git a/packages/syft/src/syft/service/data_subject/data_subject_member.py b/packages/syft/src/syft/service/data_subject/data_subject_member.py index 06e25b11d5b..83704fc95ab 100644 --- a/packages/syft/src/syft/service/data_subject/data_subject_member.py +++ b/packages/syft/src/syft/service/data_subject/data_subject_member.py @@ -3,18 +3,14 @@ # relative from ...serde.serializable import serializable -from ...store.document_store import PartitionKey -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject -ParentPartitionKey = PartitionKey(key="parent", type_=str) -ChildPartitionKey = PartitionKey(key="child", type_=str) - @serializable() class DataSubjectMemberRelationship(SyftObject): __canonical_name__ = "DataSubjectMemberRelationship" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 parent: str child: str diff --git a/packages/syft/src/syft/service/data_subject/data_subject_member_service.py b/packages/syft/src/syft/service/data_subject/data_subject_member_service.py index 57f38f445ec..e7e482a4337 100644 --- a/packages/syft/src/syft/service/data_subject/data_subject_member_service.py +++ b/packages/syft/src/syft/service/data_subject/data_subject_member_service.py @@ -1,83 +1,63 @@ # stdlib -# third party -from result import Result - # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ...util.telemetry import instrument +from ...server.credentials import SyftVerifyKey +from ...store.db.db import DBManager +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import StashException +from ...types.result import as_result from ..context import AuthedServiceContext -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES from ..service import TYPE_TO_SERVICE -from .data_subject_member import ChildPartitionKey from .data_subject_member import DataSubjectMemberRelationship -from .data_subject_member 
import ParentPartitionKey - - -@instrument -@serializable() -class DataSubjectMemberStash(BaseUIDStoreStash): - object_type = DataSubjectMemberRelationship - settings: PartitionSettings = PartitionSettings( - name=DataSubjectMemberRelationship.__canonical_name__, - object_type=DataSubjectMemberRelationship, - ) - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +@serializable(canonical_name="DataSubjectMemberSQLStash", version=1) +class DataSubjectMemberStash(ObjectStash[DataSubjectMemberRelationship]): + @as_result(StashException) def get_all_for_parent( self, credentials: SyftVerifyKey, name: str - ) -> Result[DataSubjectMemberRelationship | None, str]: - qks = QueryKeys(qks=[ParentPartitionKey.with_obj(name)]) - return self.query_all(credentials=credentials, qks=qks) + ) -> list[DataSubjectMemberRelationship]: + return self.get_all( + credentials=credentials, + filters={"parent": name}, + ).unwrap() + @as_result(StashException) def get_all_for_child( self, credentials: SyftVerifyKey, name: str - ) -> Result[DataSubjectMemberRelationship | None, str]: - qks = QueryKeys(qks=[ChildPartitionKey.with_obj(name)]) - return self.query_all(credentials=credentials, qks=qks) + ) -> list[DataSubjectMemberRelationship]: + return self.get_all( + credentials=credentials, + filters={"child": name}, + ).unwrap() -@instrument -@serializable() +@serializable(canonical_name="DataSubjectMemberService", version=1) class DataSubjectMemberService(AbstractService): - store: DocumentStore stash: DataSubjectMemberStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = DataSubjectMemberStash(store=store) def add( self, context: AuthedServiceContext, parent: str, child: str - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: """Register relationship between data subject and it's member.""" relation = DataSubjectMemberRelationship(parent=parent, child=child) - result = self.stash.set(context.credentials, relation, ignore_duplicates=True) - if result.is_err(): - return SyftError(result.err()) + self.stash.set(context.credentials, relation, ignore_duplicates=True).unwrap() return SyftSuccess(message=f"Relationship added for: {parent} -> {child}") def get_relatives( self, context: AuthedServiceContext, data_subject_name: str - ) -> list[str] | SyftError: + ) -> list[DataSubjectMemberRelationship]: """Get all Members for given data subject""" - result = self.stash.get_all_for_parent( + return self.stash.get_all_for_parent( context.credentials, name=data_subject_name - ) - if result.is_ok(): - data_subject_members = result.ok() - return data_subject_members - return SyftError(message=result.err()) + ).unwrap() TYPE_TO_SERVICE[DataSubjectMemberRelationship] = DataSubjectMemberService diff --git a/packages/syft/src/syft/service/data_subject/data_subject_service.py b/packages/syft/src/syft/service/data_subject/data_subject_service.py index 5aacd15eb3d..ecde100edf5 100644 --- a/packages/syft/src/syft/service/data_subject/data_subject_service.py +++ b/packages/syft/src/syft/service/data_subject/data_subject_service.py @@ -1,20 +1,15 @@ # stdlib -from typing import cast # third party -from result import Result # relative -from ...abstract_node import AbstractNode -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import 
PartitionSettings -from ...store.document_store import QueryKeys -from ...util.telemetry import instrument +from ...server.credentials import SyftVerifyKey +from ...store.db.db import DBManager +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import StashException +from ...types.result import as_result from ..context import AuthedServiceContext -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES @@ -22,122 +17,79 @@ from ..service import service_method from .data_subject import DataSubject from .data_subject import DataSubjectCreate -from .data_subject import NamePartitionKey -from .data_subject_member_service import DataSubjectMemberService - - -@instrument -@serializable() -class DataSubjectStash(BaseUIDStoreStash): - object_type = DataSubject - settings: PartitionSettings = PartitionSettings( - name=DataSubject.__canonical_name__, object_type=DataSubject - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) - - def get_by_name( - self, credentials: SyftVerifyKey, name: str - ) -> Result[DataSubject | None, str]: - qks = QueryKeys(qks=[NamePartitionKey.with_obj(name)]) - return self.query_one(credentials, qks=qks) - - def update( - self, - credentials: SyftVerifyKey, - data_subject: DataSubject, - has_permission: bool = False, - ) -> Result[DataSubject, str]: - res = self.check_type(data_subject, DataSubject) - # we dont use and_then logic here as it is hard because of the order of the arguments - if res.is_err(): - return res - return super().update(credentials=credentials, obj=res.ok()) - - -@instrument -@serializable() + + +@serializable(canonical_name="DataSubjectSQLStash", version=1) +class DataSubjectStash(ObjectStash[DataSubject]): + @as_result(StashException) + def get_by_name(self, credentials: SyftVerifyKey, name: str) -> DataSubject: + return self.get_one( + credentials=credentials, + filters={"name": name}, + ).unwrap() + + +@serializable(canonical_name="DataSubjectService", version=1) class DataSubjectService(AbstractService): - store: DocumentStore stash: DataSubjectStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = DataSubjectStash(store=store) @service_method(path="data_subject.add", name="add_data_subject") def add( self, context: AuthedServiceContext, data_subject: DataSubjectCreate - ) -> SyftSuccess | SyftError: - """Register a data subject.""" - - context.node = cast(AbstractNode, context.node) - member_relationship_add = context.node.get_service_method( - DataSubjectMemberService.add - ) - - member_relationships = data_subject.member_relationships - for member_relationship in member_relationships: - parent_ds, child_ds = member_relationship - for ds in [parent_ds, child_ds]: - result = self.stash.set( - context.credentials, - ds.to(DataSubject, context=context), - ignore_duplicates=True, + ) -> SyftSuccess: + """Register a data subject.""" # + + member_relationships: set[tuple[str, str]] = data_subject.member_relationships + if len(member_relationships) == 0: + self.stash.set( + context.credentials, + data_subject.to(DataSubject, context=context), + ).unwrap() + else: + for member_relationship in member_relationships: + parent_ds, child_ds = member_relationship + for ds in [parent_ds, child_ds]: + self.stash.set( + context.credentials, + ds.to(DataSubject, context=context), + ignore_duplicates=True, + ).unwrap() + 
context.server.services.data_subject_member.add( + context, parent_ds.name, child_ds.name ) - if result.is_err(): - return SyftError(message=str(result.err())) - result = member_relationship_add(context, parent_ds.name, child_ds.name) - if isinstance(result, SyftError): - return result return SyftSuccess( - message=f"{len(member_relationships)+1} Data Subjects Registered" + message=f"{len(member_relationships)+1} Data Subjects Registered", + value=member_relationships, ) @service_method(path="data_subject.get_all", name="get_all") - def get_all(self, context: AuthedServiceContext) -> list[DataSubject] | SyftError: + def get_all(self, context: AuthedServiceContext) -> list[DataSubject]: """Get all Data subjects""" - result = self.stash.get_all(context.credentials) - if result.is_ok(): - data_subjects = result.ok() - return data_subjects - return SyftError(message=result.err()) + return self.stash.get_all(context.credentials).unwrap() @service_method(path="data_subject.get_members", name="members_for") def get_members( self, context: AuthedServiceContext, data_subject_name: str - ) -> list[DataSubject] | SyftError: - context.node = cast(AbstractNode, context.node) - get_relatives = context.node.get_service_method( - DataSubjectMemberService.get_relatives + ) -> list[DataSubject]: + relatives = context.server.services.data_subject.get_relatives( + context, data_subject_name ) - relatives = get_relatives(context, data_subject_name) - - if isinstance(relatives, SyftError): - return relatives - members = [] for relative in relatives: result = self.get_by_name(context=context, name=relative.child) - if isinstance(result, SyftError): - return result members.append(result) return members @service_method(path="data_subject.get_by_name", name="get_by_name") - def get_by_name( - self, context: AuthedServiceContext, name: str - ) -> SyftSuccess | SyftError: + def get_by_name(self, context: AuthedServiceContext, name: str) -> DataSubject: """Get a Data Subject by its name.""" - result = self.stash.get_by_name(context.credentials, name=name) - if result.is_ok(): - data_subject = result.ok() - return data_subject - return SyftError(message=result.err()) + return self.stash.get_by_name(context.credentials, name=name).unwrap() TYPE_TO_SERVICE[DataSubject] = DataSubjectService diff --git a/packages/syft/src/syft/service/dataset/dataset.py b/packages/syft/src/syft/service/dataset/dataset.py index daf92ecdbcb..185774fa86c 100644 --- a/packages/syft/src/syft/service/dataset/dataset.py +++ b/packages/syft/src/syft/service/dataset/dataset.py @@ -2,59 +2,55 @@ from collections.abc import Callable from datetime import datetime from enum import Enum +import logging +import textwrap from typing import Any # third party -from IPython.display import HTML from IPython.display import display -import itables +import markdown import pandas as pd from pydantic import ConfigDict from pydantic import field_validator from pydantic import model_validator -from result import Err -from result import Ok -from result import Result from typing_extensions import Self # relative from ...serde.serializable import serializable -from ...store.document_store import PartitionKey from ...types.datetime import DateTime from ...types.dicttuple import DictTuple +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_object import PartialSyftObject +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SYFT_OBJECT_VERSION_2 from ...types.syft_object import 
SyftObject from ...types.transforms import TransformContext from ...types.transforms import generate_id +from ...types.transforms import make_set_default from ...types.transforms import transform from ...types.transforms import validate_url from ...types.uid import UID -from ...util import options -from ...util.colors import ON_SURFACE_HIGHEST -from ...util.colors import SURFACE -from ...util.colors import SURFACE_SURFACE -from ...util.fonts import ITABLES_CSS -from ...util.fonts import fonts_css from ...util.markdown import as_markdown_python_code -from ...util.notebook_ui.notebook_addons import FOLDER_ICON -from ...util.util import get_mb_size +from ...util.misc_objs import MarkdownDescription +from ...util.notebook_ui.icons import Icon +from ...util.table import itable_template_from_df +from ...util.util import repr_truncation +from ..action.action_data_empty import ActionDataEmpty +from ..action.action_object import ActionObject from ..data_subject.data_subject import DataSubject from ..data_subject.data_subject import DataSubjectCreate -from ..data_subject.data_subject_service import DataSubjectService from ..response import SyftError -from ..response import SyftException from ..response import SyftSuccess +from ..response import SyftWarning -DATA_SIZE_WARNING_LIMIT = 512 - - -NamePartitionKey = PartitionKey(key="name", type_=str) +logger = logging.getLogger(__name__) @serializable() class Contributor(SyftObject): __canonical_name__ = "Contributor" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 name: str role: str | None = None @@ -66,15 +62,12 @@ class Contributor(SyftObject): def _repr_html_(self) -> Any: return f""" - -
-            Contributor
-            Name: {self.name}
-            Role: {self.role}
-            Email: {self.email}
    + + Contributor + Name: {self.name} + Role: {self.role} + Email: {self.email} + """ def __eq__(self, value: object) -> bool: @@ -88,39 +81,14 @@ def __hash__(self) -> int: return hash(self.email) -@serializable() -class MarkdownDescription(SyftObject): - # version - __canonical_name__ = "MarkdownDescription" - __version__ = SYFT_OBJECT_VERSION_2 - - text: str - - def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: - style = """ - - """ - display(HTML(style)) - return self.text - - @serializable() class Asset(SyftObject): # version __canonical_name__ = "Asset" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 action_id: UID - node_uid: UID + server_uid: UID name: str description: MarkdownDescription | None = None contributors: set[Contributor] = set() @@ -130,7 +98,10 @@ class Asset(SyftObject): created_at: DateTime = DateTime.now() uploader: Contributor | None = None - __repr_attrs__ = ["name", "shape"] + # _kwarg_name and _dataset_name are set by the UserCode.assets + _kwarg_name: str | None = None + _dataset_name: str | None = None + __syft_include_id_coll_repr__ = False def __init__( self, @@ -142,57 +113,64 @@ def __init__( super().__init__(**data, description=description) def _repr_html_(self) -> Any: - itables_css = f""" - .itables table {{ - margin: 0 auto; - float: left; - color: {ON_SURFACE_HIGHEST[options.color_theme]}; - }} - .itables table th {{color: {SURFACE_SURFACE[options.color_theme]};}} - """ - # relative from ...service.action.action_object import ActionObject uploaded_by_line = ( - f"

    Uploaded by: {self.uploader.name} ({self.uploader.email})

    " + f"Uploaded by: {self.uploader.name} ({self.uploader.email})" if self.uploader else "" ) - if isinstance(self.data, ActionObject): - data_table_line = itables.to_html_datatable( - df=self.data.syft_action_data, css=itables_css - ) - elif isinstance(self.data, pd.DataFrame): - data_table_line = itables.to_html_datatable(df=self.data, css=itables_css) + mock = self.mock + private_data_res = self._private_data() + if private_data_res.is_err(): + data_table_line = "You have no permission to the private data" else: - data_table_line = self.data + private_data_obj = private_data_res.ok() + if isinstance(private_data_obj, ActionObject): + if isinstance(private_data_obj.syft_action_data, ActionDataEmpty): + data_table_line = "No data" + else: + df = pd.DataFrame(private_data_obj) + data_table_line = itable_template_from_df( + df=private_data_obj.head(5) + ) - if isinstance(self.mock, ActionObject): - mock_table_line = itables.to_html_datatable( - df=self.mock.syft_action_data, css=itables_css - ) - elif isinstance(self.mock, pd.DataFrame): - mock_table_line = itables.to_html_datatable(df=self.mock, css=itables_css) + elif isinstance(private_data_obj, pd.DataFrame): + data_table_line = itable_template_from_df(df=private_data_obj.head(5)) + else: + try: + data_table_line = repr_truncation(private_data_obj) + except Exception as e: + error_msg = ( + e.public_message if isinstance(e, SyftException) else str(e) + ) + logger.debug(f"Failed to truncate private data repr. {error_msg}") + data_table_line = private_data_res.ok() # type: ignore + + if isinstance(mock, ActionObject): + if isinstance(mock.syft_action_data, ActionDataEmpty): + mock_table_line = "No data" + else: + df = pd.DataFrame(mock.syft_action_data) + mock_table_line = itable_template_from_df(df=df.head(5)) + elif isinstance(mock, pd.DataFrame): + mock_table_line = itable_template_from_df(df=self.mock.head(5)) else: - mock_table_line = self.mock + try: + mock_table_line = repr_truncation(self.mock) + except Exception as e: + logger.debug(f"Failed to truncate mock data repr. {e}") + mock_table_line = self.mock + if isinstance(mock_table_line, SyftError): mock_table_line = mock_table_line.message return f""" - -

             {self.name}
-            {self.description}
+            {self.description or ""}
             Asset ID: {self.id}
             Action Object ID: {self.action_id}
             {uploaded_by_line}
@@ -203,6 +181,9 @@ def _repr_html_(self) -> Any:
             {mock_table_line}
    """ + def __repr__(self) -> str: + return f"Asset(name='{self.name}', server_uid='{self.server_uid}', action_id='{self.action_id}')" + def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: _repr_str = f"Asset: {self.name}\n" _repr_str += f"Pointer Id: {self.action_id}\n" @@ -214,6 +195,30 @@ def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: _repr_str += f"\t{contributor.name}: {contributor.email}\n" return as_markdown_python_code(_repr_str) + def _coll_repr_(self) -> dict[str, Any]: + base_dict = { + "Parameter": self._kwarg_name, + "Action ID": self.action_id, + "Asset Name": self.name, + "Dataset Name": self._dataset_name, + "Server UID": self.server_uid, + } + + # _kwarg_name and _dataset_name are set by the UserCode.assets + # if they are None, we remove them from the dict + filtered_dict = { + key: value for key, value in base_dict.items() if value is not None + } + return filtered_dict + + def _get_dict_for_user_code_repr(self) -> dict[str, Any]: + return { + "action_id": self.action_id.no_dash, + "source_asset": self.name, + "source_dataset": self._dataset_name, + "source_server": self.server_uid.no_dash, + } + def __eq__(self, other: object) -> bool: if not isinstance(other, Asset): return False @@ -231,34 +236,24 @@ def __eq__(self, other: object) -> bool: @property def pointer(self) -> Any: - # relative - from ...client.api import APIRegistry - - api = APIRegistry.api_for( - node_uid=self.node_uid, - user_verify_key=self.syft_client_verify_key, - ) - if api is not None and api.services is not None: + api = self.get_api() + if api.services is not None: return api.services.action.get_pointer(self.action_id) @property - def mock(self) -> SyftError | Any: + def mock(self) -> Any: # relative - from ...client.api import APIRegistry - - api = APIRegistry.api_for( - node_uid=self.node_uid, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError(message=f"You must login to {self.node_uid}") - result = api.services.action.get_mock(self.action_id) + api = self.get_api() try: + result = api.services.action.get_mock(self.action_id) if isinstance(result, SyftObject): return result.syft_action_data - return result + else: + return result except Exception as e: - return SyftError(message=f"Failed to get mock. {e}") + raise SyftException.from_exception( + e, public_message=f"Failed to get mock. {e}" + ) def has_data_permission(self) -> bool: return self.data is not None @@ -271,21 +266,32 @@ def has_permission(self, data_result: Any) -> bool: and data_result.endswith("denied") ) - @property - def data(self) -> Any: - # relative - from ...client.api import APIRegistry + @as_result(SyftException) + def _private_data(self) -> Any: + """ + Retrieves the private data associated with this asset. - api = APIRegistry.api_for( - node_uid=self.node_uid, - user_verify_key=self.syft_client_verify_key, - ) - if api is None or api.services is None: + Returns: + Result[Any, str]: A Result object containing the private data if the user has permission + otherwise an Err object with the message "You do not have permission to access private data." 
+ """ + + # TODO: split this out in permission logic and existence logic + api = self.get_api_wrapped() + if api.is_err(): return None - res = api.services.action.get(self.action_id) + res = api.unwrap().services.action.get(self.action_id) if self.has_permission(res): return res.syft_action_data else: + raise SyftException(public_message="You have no access to the private data") + + @property + def data(self) -> Any: + try: + return self._private_data().unwrap() + except SyftException: + display(SyftError(message="You have no access to the private data")) return None @@ -310,14 +316,14 @@ def check_mock(data: Any, mock: Any) -> bool: class CreateAsset(SyftObject): # version __canonical_name__ = "CreateAsset" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID | None = None # type:ignore[assignment] name: str description: MarkdownDescription | None = None contributors: set[Contributor] = set() data_subjects: list[DataSubjectCreate] = [] - node_uid: UID | None = None + server_uid: UID | None = None action_id: UID | None = None data: Any | None = None mock: Any | None = None @@ -327,10 +333,12 @@ class CreateAsset(SyftObject): uploader: Contributor | None = None __repr_attrs__ = ["name"] - model_config = ConfigDict(validate_assignment=True) + model_config = ConfigDict(validate_assignment=True, extra="forbid") - def __init__(self, description: str | None = "", **data: Any) -> None: - super().__init__(**data, description=MarkdownDescription(text=str(description))) + def __init__(self, description: str | None = None, **data: Any) -> None: + if isinstance(description, str): + description = MarkdownDescription(text=description) + super().__init__(**data, description=description) @model_validator(mode="after") def __mock_is_real_for_empty_mock_must_be_false(self) -> Self: @@ -341,6 +349,17 @@ def __mock_is_real_for_empty_mock_must_be_false(self) -> Self: return self + def contains_empty(self) -> bool: + if isinstance(self.mock, ActionObject) and isinstance( + self.mock.syft_action_data_cache, ActionDataEmpty + ): + return True + if isinstance(self.data, ActionObject) and isinstance( + self.data.syft_action_data_cache, ActionDataEmpty + ): + return True + return False + def add_data_subject(self, data_subject: DataSubject) -> None: self.data_subjects.append(data_subject) @@ -351,15 +370,15 @@ def add_contributor( role: Enum | str | None = None, phone: str | None = None, note: str | None = None, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: try: _role_str = role.value if isinstance(role, Enum) else role contributor = Contributor( name=name, role=_role_str, email=email, phone=phone, note=note ) if contributor in self.contributors: - return SyftError( - message=f"Contributor with email: '{email}' already exists in '{self.name}' Asset." + raise SyftException( + public_message=f"Contributor with email: '{email}' already exists in '{self.name}' Asset." ) self.contributors.add(contributor) @@ -367,22 +386,24 @@ def add_contributor( message=f"Contributor '{name}' added to '{self.name}' Asset." ) except Exception as e: - return SyftError(message=f"Failed to add contributor. Error: {e}") + raise SyftException(public_message=f"Failed to add contributor. 
Error: {e}") def set_description(self, description: str) -> None: self.description = MarkdownDescription(text=description) def set_obj(self, data: Any) -> None: if isinstance(data, SyftError): - raise SyftException(data) + raise SyftException(public_message=data) self.data = data def set_mock(self, mock_data: Any, mock_is_real: bool) -> None: if isinstance(mock_data, SyftError): - raise SyftException(mock_data) + raise SyftException(public_message=mock_data) if mock_is_real and (mock_data is None or _is_action_data_empty(mock_data)): - raise SyftException("`mock_is_real` must be False if mock is empty") + raise SyftException( + public_message="`mock_is_real` must be False if mock is empty" + ) self.mock = mock_data self.mock_is_real = mock_is_real @@ -396,24 +417,10 @@ def no_mock(self) -> None: def set_shape(self, shape: tuple) -> None: self.shape = shape - def check(self) -> SyftSuccess | SyftError: + def check(self) -> SyftSuccess: if not check_mock(self.data, self.mock): - return SyftError( - message=f"set_obj type {type(self.data)} must match set_mock type {type(self.mock)}" - ) - # if not _is_action_data_empty(self.mock): - # data_shape = get_shape_or_len(self.data) - # mock_shape = get_shape_or_len(self.mock) - # if data_shape != mock_shape: - # return SyftError( - # message=f"set_obj shape {data_shape} must match set_mock shape {mock_shape}" - # ) - total_size_mb = get_mb_size(self.data) + get_mb_size(self.mock) - if total_size_mb > DATA_SIZE_WARNING_LIMIT: - print( - f"**WARNING**: The total size for asset: '{self.name}' exceeds '{DATA_SIZE_WARNING_LIMIT} MB'. " - "This might result in failure to upload dataset. " - "Please contact #support on OpenMined slack for further assistance.", + raise SyftException( + public_message=f"set_obj type {type(self.data)} must match set_mock type {type(self.mock)}" ) return SyftSuccess(message="Dataset is Valid") @@ -437,11 +444,11 @@ def get_shape_or_len(obj: Any) -> tuple[int, ...] | int | None: class Dataset(SyftObject): # version __canonical_name__: str = "Dataset" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID name: str - node_uid: UID | None = None + server_uid: UID | None = None asset_list: list[Asset] = [] contributors: set[Contributor] = set() citation: str | None = None @@ -452,10 +459,20 @@ class Dataset(SyftObject): mb_size: float | None = None created_at: DateTime = DateTime.now() uploader: Contributor - - __attr_searchable__ = ["name", "citation", "url", "description", "action_ids"] + summary: str | None = None + to_be_deleted: bool = False + + __attr_searchable__ = [ + "name", + "citation", + "url", + "description", + "action_ids", + "summary", + ] __attr_unique__ = ["name"] - __repr_attrs__ = ["name", "url", "created_at"] + __repr_attrs__ = ["name", "summary", "url", "created_at"] + __table_sort_attr__ = "Created at" def __init__( self, @@ -468,85 +485,70 @@ def __init__( @property def icon(self) -> str: - return FOLDER_ICON + return Icon.FOLDER.svg def _coll_repr_(self) -> dict[str, Any]: return { "Name": self.name, + "Summary": self.summary, "Assets": len(self.asset_list), "Size": f"{self.mb_size} (MB)", "Url": self.url, - "created at": str(self.created_at), + "Created at": str(self.created_at), } def _repr_html_(self) -> Any: uploaded_by_line = ( ( "

    " - + f"Uploaded by:{self.uploader.name} ({self.uploader.email})

    " + + f"Uploaded by:
    {self.uploader.name} ({self.uploader.email})

    " ) if self.uploader else "" ) - description_text: str = self.description.text if self.description else "" + if self.description is not None and self.description.text: + description_info_message = f""" +

    Description

    + {markdown.markdown(self.description.text, extensions=["extra"])} + """ + else: + description_info_message = "" + if self.to_be_deleted: + return "This dataset has been marked for deletion. The underlying data may be not available." return f""" -
    -

    {self.name}

    -

    {description_text}

    +

    {self.name}

    +

    Summary

    + {f"

    {self.summary}

    " if self.summary else ""} + {description_info_message} +

    Dataset Details

    {uploaded_by_line}

    Created on: {self.created_at}

    URL: {self.url}

    Contributors: - to see full details call dataset.contributors

    + To see full details call dataset.contributors.

    +

    Assets

    {self.assets._repr_html_()} """ + @property def action_ids(self) -> list[UID]: - data = [] - for asset in self.asset_list: - if asset.action_id: - data.append(asset.action_id) - return data + return [asset.action_id for asset in self.asset_list if asset.action_id] @property def assets(self) -> DictTuple[str, Asset]: return DictTuple((asset.name, asset) for asset in self.asset_list) - def _old_repr_markdown_(self) -> str: - _repr_str = f"Syft Dataset: {self.name}\n" - _repr_str += "Assets:\n" - for asset in self.asset_list: - if asset.description is not None: - _repr_str += f"\t{asset.name}: {asset.description.text}\n\n" - else: - _repr_str += f"\t{asset.name}\n\n" - if self.citation: - _repr_str += f"Citation: {self.citation}\n" - if self.url: - _repr_str += f"URL: {self.url}\n" - if self.description: - _repr_str += f"Description: {self.description.text}\n" - return as_markdown_python_code(_repr_str) - def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: - # return self._old_repr_markdown_() - return self._markdown_() - - def _markdown_(self) -> str: _repr_str = f"Syft Dataset: {self.name}\n\n" _repr_str += "Assets:\n\n" for asset in self.asset_list: if asset.description is not None: - _repr_str += f"\t{asset.name}: {asset.description.text}\n\n" + description_text = textwrap.shorten( + asset.description.text, width=100, placeholder="..." + ) + _repr_str += f"\t{asset.name}: {description_text}\n\n" else: _repr_str += f"\t{asset.name}\n\n" if self.citation: @@ -562,10 +564,10 @@ def client(self) -> Any | None: # relative from ...client.client import SyftClientSessionCache - client = SyftClientSessionCache.get_client_for_node_uid(self.node_uid) + client = SyftClientSessionCache.get_client_for_server_uid(self.server_uid) if client is None: - return SyftError( - message=f"No clients for {self.node_uid} in memory. Please login with sy.login" + raise SyftException( + public_message=f"No clients for {self.server_uid} in memory. 
Please login with sy.login" ) return client @@ -598,31 +600,35 @@ def _check_asset_must_contain_mock(asset_list: list[CreateAsset]) -> None: @serializable() class DatasetPageView(SyftObject): - # version __canonical_name__ = "DatasetPageView" __version__ = SYFT_OBJECT_VERSION_2 + datasets: DictTuple[str, Dataset] + total: int + + +@serializable() +class DatasetPageViewV1(SyftObject): + __canonical_name__ = "DatasetPageView" + __version__ = SYFT_OBJECT_VERSION_1 + datasets: DictTuple total: int @serializable() class CreateDataset(Dataset): - # version __canonical_name__ = "CreateDataset" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 asset_list: list[CreateAsset] = [] - __repr_attrs__ = ["name", "url"] + __repr_attrs__ = ["name", "summary", "url"] id: UID | None = None # type: ignore[assignment] created_at: DateTime | None = None # type: ignore[assignment] uploader: Contributor | None = None # type: ignore[assignment] - model_config = ConfigDict(validate_assignment=True) - - def _check_asset_must_contain_mock(self) -> None: - _check_asset_must_contain_mock(self.asset_list) + model_config = ConfigDict(validate_assignment=True, extra="forbid") @field_validator("asset_list") @classmethod @@ -632,9 +638,19 @@ def __assets_must_contain_mock( _check_asset_must_contain_mock(asset_list) return asset_list + @field_validator("to_be_deleted") + @classmethod + def __to_be_deleted_must_be_false(cls, v: bool) -> bool: + if v is True: + raise ValueError("to_be_deleted must be False") + return v + def set_description(self, description: str) -> None: self.description = MarkdownDescription(text=description) + def set_summary(self, summary: str) -> None: + self.summary = summary + def add_citation(self, citation: str) -> None: self.citation = citation @@ -648,40 +664,40 @@ def add_contributor( role: Enum | str | None = None, phone: str | None = None, note: str | None = None, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: try: _role_str = role.value if isinstance(role, Enum) else role contributor = Contributor( name=name, role=_role_str, email=email, phone=phone, note=note ) if contributor in self.contributors: - return SyftError( - message=f"Contributor with email: '{email}' already exists in '{self.name}' Dataset." + raise SyftException( + public_message=f"Contributor with email: '{email}' already exists in '{self.name}' Dataset." ) self.contributors.add(contributor) return SyftSuccess( message=f"Contributor '{name}' added to '{self.name}' Dataset." ) except Exception as e: - return SyftError(message=f"Failed to add contributor. Error: {e}") + raise SyftException(public_message=f"Failed to add contributor. Error: {e}") - def add_asset( - self, asset: CreateAsset, force_replace: bool = False - ) -> SyftSuccess | SyftError: + def add_asset(self, asset: CreateAsset, force_replace: bool = False) -> SyftSuccess: if asset.mock is None: raise ValueError(_ASSET_WITH_NONE_MOCK_ERROR_MESSAGE) for i, existing_asset in enumerate(self.asset_list): if existing_asset.name == asset.name: if not force_replace: - return SyftError( - message=f"""Asset "{asset.name}" already exists in '{self.name}' Dataset.""" - """ Use add_asset(asset, force_replace=True) to replace.""" + raise SyftException( + public_message=( + f"Asset '{asset.name}' already exists in '{self.name}' Dataset." + "\nUse add_asset(asset, force_replace=True) to replace." + ) ) else: self.asset_list[i] = asset return SyftSuccess( - f"Asset {asset.name} has been successfully replaced." 
+ message=f"Asset {asset.name} has been successfully replaced." ) self.asset_list.append(asset) @@ -690,10 +706,10 @@ def add_asset( message=f"Asset '{asset.name}' added to '{self.name}' Dataset." ) - def replace_asset(self, asset: CreateAsset) -> SyftSuccess | SyftError: + def replace_asset(self, asset: CreateAsset) -> SyftSuccess: return self.add_asset(asset=asset, force_replace=True) - def remove_asset(self, name: str) -> SyftSuccess | SyftError: + def remove_asset(self, name: str) -> SyftSuccess: asset_to_remove = None for asset in self.asset_list: if asset.name == name: @@ -701,21 +717,21 @@ def remove_asset(self, name: str) -> SyftSuccess | SyftError: break if asset_to_remove is None: - return SyftError(message=f"No asset exists with name: {name}") + raise SyftException(public_message=f"No asset exists with name: {name}") self.asset_list.remove(asset_to_remove) return SyftSuccess( message=f"Asset '{self.name}' removed from '{self.name}' Dataset." ) - def check(self) -> Result[SyftSuccess, list[SyftError]]: + def check(self) -> SyftSuccess: errors = [] for asset in self.asset_list: result = asset.check() if not result: errors.append(result) if len(errors): - return Err(errors) - return Ok(SyftSuccess(message="Dataset is Valid")) + raise SyftException(public_message=f"Errors: {errors}") + return SyftSuccess(message="Dataset is Valid") def create_and_store_twin(context: TransformContext) -> TransformContext: @@ -732,21 +748,26 @@ def create_and_store_twin(context: TransformContext) -> TransformContext: if private_obj is None and mock_obj is None: raise ValueError("No data and no action_id means this asset has no data") + asset = context.obj # type: ignore + contains_empty = asset.contains_empty() # type: ignore twin = TwinObject( - private_obj=private_obj, - mock_obj=mock_obj, + private_obj=asset.data, # type: ignore + mock_obj=asset.mock, # type: ignore + syft_server_location=asset.syft_server_location, # type: ignore + syft_client_verify_key=asset.syft_client_verify_key, # type: ignore ) - if context.node is None: + res = twin._save_to_blob_storage(allow_empty=contains_empty).unwrap() + if isinstance(res, SyftWarning): + logger.debug(res.message) + # TODO, upload to blob storage here + if context.server is None: raise ValueError( - "f{context}'s node is None, please log in. No trasformation happened" + "f{context}'s server is None, please log in. No trasformation happened" ) - action_service = context.node.get_service("actionservice") - result = action_service.set( - context=context.to_node_context(), action_object=twin - ) - if result.is_err(): - raise RuntimeError(f"Failed to create and store twin. Error: {result}") - + context.server.services.action._set( + context=context.to_server_context(), + action_object=twin, + ).unwrap(public_message="Failed to create and store twin") context.output["action_id"] = twin.id else: private_obj = context.output.pop("data", None) @@ -764,20 +785,19 @@ def infer_shape(context: TransformContext) -> TransformContext: return context -def set_data_subjects(context: TransformContext) -> TransformContext | SyftError: +def set_data_subjects(context: TransformContext) -> TransformContext: if context.output is None: raise ValueError(f"{context}'s output is None. No transformation happened") - if context.node is None: - return SyftError( - "f{context}'s node is None, please log in. No trasformation happened" + if context.server is None: + raise SyftException( + public_message="f{context}'s server is None, please log in. 
No trasformation happened" ) data_subjects = context.output["data_subjects"] - get_data_subject = context.node.get_service_method(DataSubjectService.get_by_name) resultant_data_subjects = [] for data_subject in data_subjects: - result = get_data_subject(context=context, name=data_subject.name) - if isinstance(result, SyftError): - return result + result = context.server.services.data_subject.get_by_name( + context=context, name=data_subject.name + ) resultant_data_subjects.append(result) context.output["data_subjects"] = resultant_data_subjects return context @@ -791,10 +811,10 @@ def add_msg_creation_time(context: TransformContext) -> TransformContext: return context -def add_default_node_uid(context: TransformContext) -> TransformContext: +def add_default_server_uid(context: TransformContext) -> TransformContext: if context.output is not None: - if context.output["node_uid"] is None and context.node is not None: - context.output["node_uid"] = context.node.id + if context.output["server_uid"] is None and context.server is not None: + context.output["server_uid"] = context.server.id else: raise ValueError(f"{context}'s output is None. No transformation happened") return context @@ -808,7 +828,7 @@ def createasset_to_asset() -> list[Callable]: infer_shape, create_and_store_twin, set_data_subjects, - add_default_node_uid, + add_default_server_uid, ] @@ -844,8 +864,13 @@ def createdataset_to_dataset() -> list[Callable]: validate_url, convert_asset, add_current_date, + make_set_default("to_be_deleted", False), # explicitly set it to False ] -class DatasetUpdate: - pass +class DatasetUpdate(PartialSyftObject): + __canonical_name__ = "DatasetUpdate" + __version__ = SYFT_OBJECT_VERSION_1 + + name: str + to_be_deleted: bool diff --git a/packages/syft/src/syft/service/dataset/dataset_service.py b/packages/syft/src/syft/service/dataset/dataset_service.py index cc2f280cb89..a25a49ee60d 100644 --- a/packages/syft/src/syft/service/dataset/dataset_service.py +++ b/packages/syft/src/syft/service/dataset/dataset_service.py @@ -1,17 +1,16 @@ # stdlib from collections.abc import Collection from collections.abc import Sequence +import logging # relative from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager from ...types.dicttuple import DictTuple from ...types.uid import UID -from ...util.telemetry import instrument from ..action.action_permissions import ActionObjectPermission from ..action.action_permissions import ActionPermission from ..context import AuthedServiceContext -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES @@ -28,6 +27,8 @@ from .dataset import DatasetPageView from .dataset_stash import DatasetStash +logger = logging.getLogger(__name__) + def _paginate_collection( collection: Collection, @@ -65,26 +66,23 @@ def _paginate_dataset_collection( ) -@instrument -@serializable() +@serializable(canonical_name="DatasetService", version=1) class DatasetService(AbstractService): - store: DocumentStore stash: DatasetStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = DatasetStash(store=store) @service_method( path="dataset.add", name="add", roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, ) - def add( - self, context: AuthedServiceContext, dataset: CreateDataset - ) -> SyftSuccess | SyftError: + def add(self, context: 
AuthedServiceContext, dataset: CreateDataset) -> SyftSuccess: """Add a Dataset""" dataset = dataset.to(Dataset, context=context) + result = self.stash.set( context.credentials, dataset, @@ -93,19 +91,15 @@ def add( uid=dataset.id, permission=ActionPermission.ALL_READ ), ], + ).unwrap() + + return SyftSuccess( + message=( + f"Dataset uploaded to '{context.server.name}'." + f" To see the datasets uploaded by a client on this server, use command `[your_client].datasets`" + ), + value=result, ) - if result.is_err(): - return SyftError(message=str(result.err())) - if context.node is not None: - return SyftSuccess( - message=f"Dataset uploaded to '{context.node.name}'. " - f"To see the datasets uploaded by a client on this node, use command `[your_client].datasets`" - ) - else: - return SyftSuccess( - message="Dataset uploaded not to a node." - "To see the datasets uploaded by a client on this node, use command `[your_client].datasets`" - ) @service_method( path="dataset.get_all", @@ -118,40 +112,33 @@ def get_all( context: AuthedServiceContext, page_size: int | None = 0, page_index: int | None = 0, - ) -> DatasetPageView | DictTuple[str, Dataset] | SyftError: + ) -> DatasetPageView | DictTuple[str, Dataset]: """Get a Dataset""" - result = self.stash.get_all(context.credentials) - if not result.is_ok(): - return SyftError(message=result.err()) - - datasets = result.ok() + datasets = self.stash.get_all_active(context.credentials).unwrap() for dataset in datasets: - if context.node is not None: - dataset.node_uid = context.node.id + if context.server is not None: + dataset.server_uid = context.server.id return _paginate_dataset_collection( datasets=datasets, page_size=page_size, page_index=page_index ) - @service_method( - path="dataset.search", name="search", roles=DATA_SCIENTIST_ROLE_LEVEL - ) + @service_method(path="dataset.search", name="search", roles=GUEST_ROLE_LEVEL) def search( self, context: AuthedServiceContext, name: str, page_size: int | None = 0, page_index: int | None = 0, - ) -> DatasetPageView | SyftError: + ) -> DatasetPageView | DictTuple[str, Dataset]: """Search a Dataset by name""" results = self.get_all(context) - if isinstance(results, SyftError): - return results - filtered_results = [ - dataset for dataset_name, dataset in results.items() if name in dataset_name + dataset + for dataset_name, dataset in results.items() + if name in dataset_name and not dataset.to_be_deleted ] return _paginate_dataset_collection( @@ -159,31 +146,27 @@ def search( ) @service_method(path="dataset.get_by_id", name="get_by_id") - def get_by_id( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: + def get_by_id(self, context: AuthedServiceContext, uid: UID) -> Dataset: """Get a Dataset""" - result = self.stash.get_by_uid(context.credentials, uid=uid) - if result.is_ok(): - dataset = result.ok() - if context.node is not None: - dataset.node_uid = context.node.id - return dataset - return SyftError(message=result.err()) + dataset = self.stash.get_by_uid(context.credentials, uid=uid).unwrap() + + if context.server is not None: + dataset.server_uid = context.server.id + + return dataset @service_method(path="dataset.get_by_action_id", name="get_by_action_id") def get_by_action_id( self, context: AuthedServiceContext, uid: UID - ) -> list[Dataset] | SyftError: + ) -> list[Dataset]: """Get Datasets by an Action ID""" - result = self.stash.search_action_ids(context.credentials, uid=uid) - if result.is_ok(): - datasets = result.ok() - for dataset in datasets: - if context.node 
is not None: - dataset.node_uid = context.node.id - return datasets - return SyftError(message=result.err()) + datasets = self.stash.search_action_ids(context.credentials, uid=uid).unwrap() + + for dataset in datasets: + if context.server is not None: + dataset.server_uid = context.server.id + + return datasets @service_method( path="dataset.get_assets_by_action_id", @@ -192,33 +175,65 @@ def get_by_action_id( ) def get_assets_by_action_id( self, context: AuthedServiceContext, uid: UID - ) -> list[Asset] | SyftError: + ) -> list[Asset]: """Get Assets by an Action ID""" datasets = self.get_by_action_id(context=context, uid=uid) - assets = [] - if isinstance(datasets, list): - for dataset in datasets: - for asset in dataset.asset_list: - if asset.action_id == uid: - assets.append(asset) - return assets - elif isinstance(datasets, SyftError): - return datasets - return [] + + for dataset in datasets: + if dataset.to_be_deleted: + datasets.remove(dataset) + + return [ + asset + for dataset in datasets + for asset in dataset.asset_list + if asset.action_id == uid + ] @service_method( - path="dataset.delete_by_id", - name="dataset_delete_by_id", + path="dataset.delete", + name="delete", + roles=DATA_OWNER_ROLE_LEVEL, warning=HighSideCRUDWarning(confirmation=True), + unwrap_on_success=False, ) - def delete_dataset( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - result = self.stash.delete_by_uid(context.credentials, uid) - if result.is_ok(): - return result.ok() - else: - return SyftError(message=result.err()) + def delete( + self, context: AuthedServiceContext, uid: UID, delete_assets: bool = True + ) -> SyftSuccess: + """ + Soft delete: keep the dataset object, only remove the blob store entries + After soft deleting a dataset, the user will not be able to + see it using the `datasets.get_all` endpoint. + Delete unique `dataset.name` key and leave UID, just rename it in case the + user upload a new dataset with the same name. 
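+
+        Example (illustrative sketch; assumes a data-owner client and an already
+        uploaded dataset named "my-dataset"):
+
+            dataset = client.datasets["my-dataset"]
+            client.api.services.dataset.delete(uid=dataset.id)
+            # the dataset no longer appears in client.datasets, but its UID
+            # stays in the stash under a renamed "_deleted_..." entry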
+ """ + # check if the dataset exists + dataset = self.get_by_id(context=context, uid=uid) + + return_msg = [] + + if delete_assets: + # delete the dataset's assets + for asset in dataset.asset_list: + msg = ( + f"ActionObject {asset.action_id} " + f"linked with Assset {asset.id} " + f"in Dataset {uid}" + ) + + context.server.services.action.delete( + context=context, uid=asset.action_id, soft_delete=True + ) + + logger.info(f"Successfully deleted {msg}") + return_msg.append(f"Asset with id '{asset.id}' successfully deleted.") + + # soft delete the dataset object from the store + dataset.name = f"_deleted_{dataset.name}_{uid}" + dataset.to_be_deleted = True + self.stash.update(context.credentials, dataset).unwrap() + return_msg.append(f"Dataset with id '{uid}' successfully deleted.") + return SyftSuccess(message="\n".join(return_msg)) TYPE_TO_SERVICE[Dataset] = DatasetService diff --git a/packages/syft/src/syft/service/dataset/dataset_stash.py b/packages/syft/src/syft/service/dataset/dataset_stash.py index ee99a4411c7..aee2a280372 100644 --- a/packages/syft/src/syft/service/dataset/dataset_stash.py +++ b/packages/syft/src/syft/service/dataset/dataset_stash.py @@ -1,56 +1,56 @@ -# stdlib - -# third party -from result import Result - # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.result import as_result from ...types.uid import UID -from ...util.telemetry import instrument from .dataset import Dataset -from .dataset import DatasetUpdate - -NamePartitionKey = PartitionKey(key="name", type_=str) -ActionIDsPartitionKey = PartitionKey(key="action_ids", type_=list[UID]) -@instrument -@serializable() -class DatasetStash(BaseUIDStoreStash): - object_type = Dataset - settings: PartitionSettings = PartitionSettings( - name=Dataset.__canonical_name__, object_type=Dataset - ) +@serializable(canonical_name="DatasetStashSQL", version=1) +class DatasetStash(ObjectStash[Dataset]): + @as_result(StashException, NotFoundException) + def get_by_name(self, credentials: SyftVerifyKey, name: str) -> Dataset: + return self.get_one(credentials=credentials, filters={"name": name}).unwrap() - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) + @as_result(StashException) + def search_action_ids(self, credentials: SyftVerifyKey, uid: UID) -> list[Dataset]: + return self.get_all_active( + credentials=credentials, + filters={"action_ids__contains": uid}, + ).unwrap() - def get_by_name( - self, credentials: SyftVerifyKey, name: str - ) -> Result[Dataset | None, str]: - qks = QueryKeys(qks=[NamePartitionKey.with_obj(name)]) - return self.query_one(credentials=credentials, qks=qks) - - def update( + @as_result(StashException) + def get_all_active( self, credentials: SyftVerifyKey, - dataset_update: DatasetUpdate, has_permission: bool = False, - ) -> Result[Dataset, str]: - res = self.check_type(dataset_update, DatasetUpdate) - # we dont use and_then logic here as it is hard because of the order of the arguments - if res.is_err(): - return res - return 
super().update(credentials=credentials, obj=res.ok()) - - def search_action_ids( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[list[Dataset], str]: - qks = QueryKeys(qks=[ActionIDsPartitionKey.with_obj(uid)]) - return self.query_all(credentials=credentials, qks=qks) + order_by: str | None = None, + sort_order: str | None = None, + limit: int | None = None, + offset: int | None = None, + filters: dict | None = None, + ) -> list[Dataset]: + # TODO standardize soft delete and move to ObjectStash.get_all + default_filters = {"to_be_deleted": False} + filters = filters or {} + filters.update(default_filters) + + if offset is None: + offset = 0 + + return ( + super() + .get_all( + credentials=credentials, + filters=filters, + has_permission=has_permission, + order_by=order_by, + sort_order=sort_order, + limit=limit, + offset=offset, + ) + .unwrap() + ) diff --git a/packages/syft/src/syft/service/enclave/enclave_service.py b/packages/syft/src/syft/service/enclave/enclave_service.py index 052b81efa32..064c8806f91 100644 --- a/packages/syft/src/syft/service/enclave/enclave_service.py +++ b/packages/syft/src/syft/service/enclave/enclave_service.py @@ -1,182 +1,12 @@ # stdlib # relative -from ...client.enclave_client import EnclaveClient -from ...client.enclave_client import EnclaveMetadata from ...serde.serializable import serializable -from ...service.response import SyftError -from ...service.response import SyftSuccess -from ...service.user.user_roles import GUEST_ROLE_LEVEL -from ...store.document_store import DocumentStore -from ...types.twin_object import TwinObject -from ...types.uid import UID -from ..action.action_object import ActionObject -from ..code.user_code import UserCode -from ..code.user_code import UserCodeStatus -from ..context import AuthedServiceContext -from ..context import ChangeContext -from ..network.routes import route_to_connection -from ..policy.policy import InputPolicy +from ...store.db.db import DBManager from ..service import AbstractService -from ..service import service_method -# TODO 🟣 Created a generic Enclave Service -# Currently it mainly works only for Azure -@serializable() +@serializable(canonical_name="EnclaveService", version=1) class EnclaveService(AbstractService): - store: DocumentStore - - def __init__(self, store: DocumentStore) -> None: - self.store = store - - @service_method( - path="enclave.send_user_code_inputs_to_enclave", - name="send_user_code_inputs_to_enclave", - roles=GUEST_ROLE_LEVEL, - ) - def send_user_code_inputs_to_enclave( - self, - context: AuthedServiceContext, - user_code_id: UID, - inputs: dict, - node_name: str, - node_id: UID, - ) -> SyftSuccess | SyftError: - if not context.node or not context.node.signing_key: - return SyftError(message=f"{type(context)} has no node") - - root_context = AuthedServiceContext( - credentials=context.node.verify_key, node=context.node - ) - - user_code_service = context.node.get_service("usercodeservice") - action_service = context.node.get_service("actionservice") - user_code = user_code_service.get_by_uid(context=root_context, uid=user_code_id) - if isinstance(user_code, SyftError): - return user_code - - reason: str = context.extra_kwargs.get("reason", "") - status_update = user_code.get_status(root_context).mutate( - value=(UserCodeStatus.APPROVED, reason), - node_name=node_name, - node_id=node_id, - verify_key=context.credentials, - ) - if isinstance(status_update, SyftError): - return status_update - - res = user_code.status_link.update_with_context(root_context, status_update) 
- if isinstance(res, SyftError): - return res - - root_context = context.as_root_context() - if not action_service.exists(context=context, obj_id=user_code_id): - dict_object = ActionObject.from_obj({}) - dict_object.id = user_code_id - dict_object[str(context.credentials)] = inputs - root_context.extra_kwargs = {"has_result_read_permission": True} - # TODO: Instead of using the action store, modify to - # use the action service directly to store objects - action_service.set(root_context, dict_object) - - else: - res = action_service.get(uid=user_code_id, context=root_context) - if res.is_ok(): - dict_object = res.ok() - dict_object[str(context.credentials)] = inputs - action_service.set(root_context, dict_object) - else: - return SyftError( - message=f"Error while fetching the object on Enclave: {res.err()}" - ) - - return SyftSuccess(message="Enclave Code Status Updated Successfully") - - -def get_oblv_service() -> type[AbstractService] | SyftError: - # relative - from ...external import OBLV_ENABLED - - if OBLV_ENABLED: - # relative - from ...external.oblv.oblv_service import OblvService - - return OblvService - else: - return SyftError( - message="Oblivious is not enabled." - "To enable oblivious package, set sy.enable_external_lib('oblv') " - "on the client side" - "Or add --oblv when launching by hagrid" - ) - - -# Checks if the given user code would propogate value to enclave on acceptance -def propagate_inputs_to_enclave( - user_code: UserCode, context: ChangeContext -) -> SyftSuccess | SyftError: - # Temporarily disable Oblivious Enclave - # from ...external.oblv.deployment_client import OblvMetadata - - # if isinstance(user_code.enclave_metadata, OblvMetadata): - # # relative - # oblv_service_class = get_oblv_service() - # if isinstance(oblv_service_class, SyftError): - # return oblv_service_class - # method = context.node.get_service_method(oblv_service_class.get_api_for) - - # api = method( - # user_code.enclave_metadata, - # context.node.signing_key, - # worker_name=context.node.name, - # ) - # send_method = api.services.oblv.send_user_code_inputs_to_enclave - if context.node is None: - return SyftError(message=f"context {context}'s node is None") - - if isinstance(user_code.enclave_metadata, EnclaveMetadata): - # TODO 🟣 Restructure url it work for local mode host.docker.internal - - connection = route_to_connection(user_code.enclave_metadata.route) - enclave_client = EnclaveClient( - connection=connection, - credentials=context.node.signing_key, - ) - - send_method = ( - enclave_client.api.services.enclave.send_user_code_inputs_to_enclave - ) - - else: - return SyftSuccess(message="Current Request does not require Enclave Transfer") - - input_policy: InputPolicy | None = user_code.get_input_policy( - context.to_service_ctx() - ) - if input_policy is None: - return SyftError(message=f"{user_code}'s input policy is None") - inputs = input_policy._inputs_for_context(context) - if isinstance(inputs, SyftError): - return inputs - - # Save inputs to blob store - for var_name, var_value in inputs.items(): - if isinstance(var_value, ActionObject | TwinObject): - # Set the obj location to enclave - var_value._set_obj_location_( - enclave_client.api.node_uid, - enclave_client.verify_key, - ) - var_value._save_to_blob_storage() - - inputs[var_name] = var_value - - # send data of the current node to enclave - res = send_method( - user_code_id=user_code.id, - inputs=inputs, - node_name=context.node.name, - node_id=context.node.id, - ) - return res + def __init__(self, store: DBManager) -> 
None: + pass diff --git a/packages/syft/src/syft/service/job/html_template.py b/packages/syft/src/syft/service/job/html_template.py new file mode 100644 index 00000000000..5ce2cfd2dd6 --- /dev/null +++ b/packages/syft/src/syft/service/job/html_template.py @@ -0,0 +1,204 @@ +# relative +from ...util.notebook_ui.styles import CSS_CODE +from ...util.notebook_ui.styles import JS_DOWNLOAD_FONTS + +type_html = """ +
+    ${job_type}
+"""
+
+header_line_html = (
+    """
+    ${api_header}
+    """
+    + type_html
+    + """
+    ${user_code_name}
+    ${button_html}
+    """
+)  # noqa: E501
+
+attrs_html = """
+    UserCode: ${user_code_name}
+    Status: ${status}
+    Started At: ${creation_time} by ${user_repr}
+    Updated At: ${updated_at}
+    ${worker_attr}
+    Subjobs: ${no_subjobs}
+"""
+
+logs_html = """
+    ${logs_lines_html}
+"""
+
+# TODO: add style change for selected tab
+onclick_html = """
+"""
+
+tabs_html = """
+"""
+
+result_html = """
+    ${result}
+"""
+
+job_repr_template = f"""
+{JS_DOWNLOAD_FONTS}
+{CSS_CODE}
+{header_line_html}
+{attrs_html}
+{tabs_html}
+{result_html}
+{logs_html}
+{onclick_html}
    + +""" diff --git a/packages/syft/src/syft/service/job/job_service.py b/packages/syft/src/syft/service/job/job_service.py index 4f22c69b34a..5cff02ecb74 100644 --- a/packages/syft/src/syft/service/job/job_service.py +++ b/packages/syft/src/syft/service/job/job_service.py @@ -1,21 +1,20 @@ # stdlib -from typing import Any -from typing import cast +from collections.abc import Callable +import inspect +import time # relative -from ...abstract_node import AbstractNode -from ...node.worker_settings import WorkerSettings from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...server.worker_settings import WorkerSettings +from ...store.db.db import DBManager +from ...types.errors import SyftException from ...types.uid import UID -from ...util.telemetry import instrument +from ..action.action_object import ActionObject from ..action.action_permissions import ActionObjectPermission from ..action.action_permissions import ActionPermission from ..code.user_code import UserCode from ..context import AuthedServiceContext -from ..log.log_service import LogService from ..queue.queue_stash import ActionQueueItem -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import TYPE_TO_SERVICE @@ -29,14 +28,21 @@ from .job_stash import JobStatus -@instrument -@serializable() +def wait_until(predicate: Callable[[], bool], timeout: int = 10) -> SyftSuccess: + start = time.time() + code_string = inspect.getsource(predicate).strip() + while time.time() - start < timeout: + if predicate(): + return SyftSuccess(message=f"Predicate {code_string} is True") + time.sleep(1) + raise SyftException(public_message=f"Timeout reached for predicate {code_string}") + + +@serializable(canonical_name="JobService", version=1) class JobService(AbstractService): - store: DocumentStore stash: JobStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = JobStash(store=store) @service_method( @@ -44,25 +50,12 @@ def __init__(self, store: DocumentStore) -> None: name="get", roles=GUEST_ROLE_LEVEL, ) - def get(self, context: AuthedServiceContext, uid: UID) -> list[Job] | SyftError: - res = self.stash.get_by_uid(context.credentials, uid=uid) - if res.is_err(): - return SyftError(message=res.err()) - else: - res = res.ok() - return res + def get(self, context: AuthedServiceContext, uid: UID) -> Job: + return self.stash.get_by_uid(context.credentials, uid=uid).unwrap() - @service_method( - path="job.get_all", - name="get_all", - ) - def get_all(self, context: AuthedServiceContext) -> list[Job] | SyftError: - res = self.stash.get_all(context.credentials) - if res.is_err(): - return SyftError(message=res.err()) - else: - res = res.ok() - return res + @service_method(path="job.get_all", name="get_all", roles=DATA_SCIENTIST_ROLE_LEVEL) + def get_all(self, context: AuthedServiceContext) -> list[Job]: + return self.stash.get_all(context.credentials).unwrap() @service_method( path="job.get_by_user_code_id", @@ -71,66 +64,74 @@ def get_all(self, context: AuthedServiceContext) -> list[Job] | SyftError: ) def get_by_user_code_id( self, context: AuthedServiceContext, user_code_id: UID - ) -> list[Job] | SyftError: - res = self.stash.get_by_user_code_id(context.credentials, user_code_id) - if res.is_err(): - return SyftError(message=res.err()) - - res = res.ok() - return res + ) -> list[Job]: + return self.stash.get_by_user_code_id( + 
context.credentials, user_code_id + ).unwrap() @service_method( path="job.delete", name="delete", roles=ADMIN_ROLE_LEVEL, ) - def delete( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - res = self.stash.delete_by_uid(context.credentials, uid) - if res.is_err(): - return SyftError(message=res.err()) + def delete(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: + self.stash.delete_by_uid(context.credentials, uid).unwrap() return SyftSuccess(message="Great Success!") + @service_method( + path="job.get_by_result_id", + name="get_by_result_id", + roles=ADMIN_ROLE_LEVEL, + ) + def get_by_result_id(self, context: AuthedServiceContext, result_id: UID) -> Job: + return self.stash.get_by_result_id(context.credentials, result_id).unwrap() + @service_method( path="job.restart", name="restart", roles=DATA_SCIENTIST_ROLE_LEVEL, ) - def restart( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - res = self.stash.get_by_uid(context.credentials, uid=uid) - if res.is_err(): - return SyftError(message=res.err()) + def restart(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: + job = self.stash.get_by_uid(context.credentials, uid=uid).unwrap() - context.node = cast(AbstractNode, context.node) + if job.parent_job_id is not None: + raise SyftException( + public_message="Not possible to restart subjobs. Please restart the parent job." + ) + if job.status == JobStatus.PROCESSING: + raise SyftException( + public_message="Jobs in progress cannot be restarted. " + "Please wait for completion or cancel the job via .cancel() to proceed." + ) - job = res.ok() job.status = JobStatus.CREATED - self.update(context=context, job=job) + self.update(context=context, job=job).unwrap() task_uid = UID() - worker_settings = WorkerSettings.from_node(context.node) + worker_settings = WorkerSettings.from_server(context.server) + # TODO, fix return type of get_worker_pool_ref_by_name + worker_pool_ref = context.server.get_worker_pool_ref_by_name( + context.credentials + ) queue_item = ActionQueueItem( id=task_uid, - node_uid=context.node.id, + server_uid=context.server.id, syft_client_verify_key=context.credentials, - syft_node_location=context.node.id, + syft_server_location=context.server.id, job_id=job.id, worker_settings=worker_settings, args=[], kwargs={"action": job.action}, + worker_pool=worker_pool_ref, ) - context.node.queue_stash.set_placeholder(context.credentials, queue_item) - context.node.job_stash.set(context.credentials, job) + context.server.queue_stash.set_placeholder( + context.credentials, queue_item + ).unwrap() - log_service = context.node.get_service("logservice") - result = log_service.restart(context, job.log_id) - if result.is_err(): - return SyftError(message=str(result.err())) + self.stash.set(context.credentials, job).unwrap() + context.server.services.log.restart(context, job.log_id) return SyftSuccess(message="Great Success!") @@ -139,60 +140,69 @@ def restart( name="update", roles=DATA_SCIENTIST_ROLE_LEVEL, ) - def update( - self, context: AuthedServiceContext, job: Job - ) -> SyftSuccess | SyftError: - res = self.stash.update(context.credentials, obj=job) - if res.is_err(): - return SyftError(message=res.err()) - res = res.ok() - return SyftSuccess(message="Great Success!") + def update(self, context: AuthedServiceContext, job: Job) -> SyftSuccess: + res = self.stash.update(context.credentials, obj=job).unwrap() + return SyftSuccess(message="Job updated!", value=res) + + def _kill(self, context: 
AuthedServiceContext, job: Job) -> SyftSuccess: + # set job and subjobs status to TERMINATING + # so that MonitorThread can kill them + job.status = JobStatus.TERMINATING + res = self.stash.update(context.credentials, obj=job).unwrap() + results = [res] + + # attempt to kill all subjobs + subjobs = self.stash.get_by_parent_id(context.credentials, uid=job.id).unwrap() + if subjobs is not None: + for subjob in subjobs: + subjob.status = JobStatus.TERMINATING + res = self.stash.update(context.credentials, obj=subjob).unwrap() + results.append(res) + + # wait for job and subjobs to be killed by MonitorThread + wait_until(lambda: job.fetched_status == JobStatus.INTERRUPTED) + wait_until( + lambda: all( + subjob.fetched_status == JobStatus.INTERRUPTED for subjob in job.subjobs + ) + ) + + return SyftSuccess(message="Job killed successfully!") @service_method( path="job.kill", name="kill", roles=DATA_SCIENTIST_ROLE_LEVEL, ) - def kill(self, context: AuthedServiceContext, id: UID) -> SyftSuccess | SyftError: - res = self.stash.get_by_uid(context.credentials, uid=id) - if res.is_err(): - return SyftError(message=res.err()) - - job = res.ok() - if job.job_pid is not None and job.status == JobStatus.PROCESSING: - job.status = JobStatus.INTERRUPTED - res = self.stash.update(context.credentials, obj=job) - if res.is_err(): - return SyftError(message=res.err()) - return SyftSuccess(message="Job killed successfully!") - else: - return SyftError( - message="Job is not running or isn't running in multiprocessing mode." - "Killing threads is currently not supported" + def kill(self, context: AuthedServiceContext, id: UID) -> SyftSuccess: + job = self.stash.get_by_uid(context.credentials, uid=id).unwrap() + if job.parent_job_id is not None: + raise SyftException( + public_message="Not possible to cancel subjobs. To stop execution, please cancel the parent job." + ) + if job.status != JobStatus.PROCESSING: + raise SyftException(public_message="Job is not running") + if job.job_pid is None: + raise SyftException( + public_message="Job termination disabled in dev mode. " + "Set 'dev_mode=False' or 'thread_workers=False' to enable." 
) + return self._kill(context, job) + @service_method( path="job.get_subjobs", name="get_subjobs", roles=DATA_SCIENTIST_ROLE_LEVEL, ) - def get_subjobs( - self, context: AuthedServiceContext, uid: UID - ) -> list[Job] | SyftError: - res = self.stash.get_by_parent_id(context.credentials, uid=uid) - if res.is_err(): - return SyftError(message=res.err()) - else: - return res.ok() + def get_subjobs(self, context: AuthedServiceContext, uid: UID) -> list[Job]: + return self.stash.get_by_parent_id(context.credentials, uid=uid).unwrap() @service_method( path="job.get_active", name="get_active", roles=DATA_SCIENTIST_ROLE_LEVEL ) - def get_active(self, context: AuthedServiceContext) -> list[Job] | SyftError: - res = self.stash.get_active(context.credentials) - if res.is_err(): - return SyftError(message=res.err()) - return res.ok() + def get_active(self, context: AuthedServiceContext) -> list[Job]: + return self.stash.get_active(context.credentials).unwrap() @service_method( path="job.add_read_permission_job_for_code_owner", @@ -205,7 +215,8 @@ def add_read_permission_job_for_code_owner( permission = ActionObjectPermission( job.id, ActionPermission.READ, user_code.user_verify_key ) - return self.stash.add_permission(permission=permission) + # TODO: make add_permission wrappable + return self.stash.add_permission(permission=permission).unwrap() @service_method( path="job.add_read_permission_log_for_code_owner", @@ -214,15 +225,12 @@ def add_read_permission_job_for_code_owner( ) def add_read_permission_log_for_code_owner( self, context: AuthedServiceContext, log_id: UID, user_code: UserCode - ) -> Any: - context.node = cast(AbstractNode, context.node) - log_service = context.node.get_service("logservice") - log_service = cast(LogService, log_service) - return log_service.stash.add_permission( + ) -> None: + return context.server.services.log.stash.add_permission( ActionObjectPermission( log_id, ActionPermission.READ, user_code.user_verify_key ) - ) + ).unwrap() @service_method( path="job.create_job_for_user_code_id", @@ -230,39 +238,45 @@ def add_read_permission_log_for_code_owner( roles=DATA_OWNER_ROLE_LEVEL, ) def create_job_for_user_code_id( - self, context: AuthedServiceContext, user_code_id: UID - ) -> Job | SyftError: - context.node = cast(AbstractNode, context.node) + self, + context: AuthedServiceContext, + user_code_id: UID, + result: ActionObject | None = None, + log_stdout: str = "", + log_stderr: str = "", + status: JobStatus = JobStatus.CREATED, + add_code_owner_read_permissions: bool = True, + ) -> Job: + is_resolved = status in [JobStatus.COMPLETED, JobStatus.ERRORED] job = Job( id=UID(), - node_uid=context.node.id, + server_uid=context.server.id, action=None, - result_id=None, + result=result, + status=status, parent_id=None, log_id=UID(), job_pid=None, user_code_id=user_code_id, + resolved=is_resolved, + ) + user_code = context.server.services.user_code.get_by_uid( + context=context, uid=user_code_id ) - user_code_service = context.node.get_service("usercodeservice") - user_code = user_code_service.get_by_uid(context=context, uid=user_code_id) - if isinstance(user_code, SyftError): - return user_code # The owner of the code should be able to read the job - self.stash.set(context.credentials, job) - self.add_read_permission_job_for_code_owner(context, job, user_code) - - log_service = context.node.get_service("logservice") - res = log_service.add(context, job.log_id) - if isinstance(res, SyftError): - return res - # The owner of the code should be able to read the job log - 
self.add_read_permission_log_for_code_owner(context, job.log_id, user_code) - # log_service.stash.add_permission( - # ActionObjectPermission( - # job.log_id, ActionPermission.READ, user_code.user_verify_key - # ) - # ) + self.stash.set(context.credentials, job).unwrap() + context.server.services.log.add( + context, + job.log_id, + job.id, + stdout=log_stdout, + stderr=log_stderr, + ) + + if add_code_owner_read_permissions: + self.add_read_permission_job_for_code_owner(context, job, user_code) + self.add_read_permission_log_for_code_owner(context, job.log_id, user_code) return job diff --git a/packages/syft/src/syft/service/job/job_stash.py b/packages/syft/src/syft/service/job/job_stash.py index b9b832bcbe8..36cb42de1c4 100644 --- a/packages/syft/src/syft/service/job/job_stash.py +++ b/packages/syft/src/syft/service/job/job_stash.py @@ -1,89 +1,142 @@ # stdlib from datetime import datetime from datetime import timedelta +from datetime import timezone from enum import Enum +import random +from string import Template +from time import sleep from typing import Any # third party -from pydantic import field_validator +from pydantic import Field from pydantic import model_validator -from result import Err -from result import Ok -from result import Result from typing_extensions import Self # relative -from ...client.api import APIRegistry from ...client.api import SyftAPICall -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...service.queue.queue_stash import QueueItem +from ...server.credentials import SyftVerifyKey +from ...service.context import AuthedServiceContext from ...service.worker.worker_pool import SyftWorker -from ...store.document_store import BaseStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ...store.document_store import UIDPartitionKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException from ...types.datetime import DateTime +from ...types.datetime import format_timedelta +from ...types.errors import SyftException +from ...types.result import Err +from ...types.result import as_result +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SYFT_OBJECT_VERSION_4 from ...types.syft_object import SyftObject -from ...types.syft_object import short_uid from ...types.syncable_object import SyncableSyftObject from ...types.uid import UID -from ...util import options -from ...util.colors import SURFACE from ...util.markdown import as_markdown_code -from ...util.telemetry import instrument -from ..action.action_data_empty import ActionDataLink +from ...util.util import prompt_warning_message from ..action.action_object import Action from ..action.action_object import ActionObject from ..action.action_permissions import ActionObjectPermission +from ..log.log import SyftLog from ..response import SyftError from ..response import SyftNotReady from ..response import SyftSuccess from ..user.user import UserView +from .html_template import job_repr_template -@serializable() +@serializable(canonical_name="JobStatus", version=1) class JobStatus(str, Enum): CREATED = "created" PROCESSING = "processing" ERRORED = "errored" COMPLETED = "completed" + TERMINATING = "terminating" 
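+    # Set by JobService._kill(); the server-side MonitorThread picks up jobs in the
+    # TERMINATING state, kills them and their subjobs, and marks them INTERRUPTED.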
INTERRUPTED = "interrupted" +def center_content(text: Any) -> str: + if isinstance(text, str): + text = text.replace("\n", "
    ") + center_div = f""" +
    + {text} +
    + """ + center_div = center_div.replace("\n", "") + return center_div + + +@serializable(canonical_name="JobType", version=1) +class JobType(str, Enum): + JOB = "job" + TWINAPIJOB = "twinapijob" + + def __str__(self) -> str: + return self.value + + @serializable() class Job(SyncableSyftObject): __canonical_name__ = "JobItem" - __version__ = SYFT_OBJECT_VERSION_4 + __version__ = SYFT_OBJECT_VERSION_2 id: UID - node_uid: UID - result: Any | None = None + server_uid: UID + result: Any | None = ( + None # Currently result can either have the error or the result + ) + # we should split this out into two different fields resolved: bool = False status: JobStatus = JobStatus.CREATED log_id: UID | None = None parent_job_id: UID | None = None n_iters: int | None = 0 current_iter: int | None = None - creation_time: str | None = None + creation_time: str | None = Field( + default_factory=lambda: str(datetime.now(tz=timezone.utc)) + ) action: Action | None = None job_pid: int | None = None job_worker_id: UID | None = None updated_at: DateTime | None = None user_code_id: UID | None = None + requested_by: UID | None = None + job_type: JobType = JobType.JOB + # used by JobType.TWINAPIJOB + endpoint: str | None = None + + __attr_searchable__ = [ + "parent_job_id", + "job_worker_id", + "status", + "user_code_id", + "result_id", + ] - __attr_searchable__ = ["parent_job_id", "job_worker_id", "status", "user_code_id"] - __repr_attrs__ = ["id", "result", "resolved", "progress", "creation_time"] - __exclude_sync_diff_attrs__ = ["action"] + __repr_attrs__ = [ + "id", + "result", + "resolved", + "progress", + "creation_time", + "user_code_name", + ] - @field_validator("creation_time") - @classmethod - def check_time(cls, time: Any) -> Any: - return str(datetime.now()) if time is None else time + __exclude_sync_diff_attrs__ = ["action", "server_uid"] + __table_coll_widths__ = [ + "min-content", + "auto", + "auto", + "auto", + "auto", + "auto", + "auto", + ] + __syft_include_id_coll_repr__ = False @model_validator(mode="after") def check_user_code_id(self) -> Self: @@ -98,16 +151,34 @@ def check_user_code_id(self) -> Self: return self + @property + def result_id(self) -> UID | None: + if isinstance(self.result, ActionObject): + return self.result.id.id + return None + @property def action_display_name(self) -> str: if self.action is None: return "action" else: # hacky - self.action.syft_node_location = self.syft_node_location + self.action.syft_server_location = self.syft_server_location self.action.syft_client_verify_key = self.syft_client_verify_key return self.action.job_display_name + @property + def user_code_name(self) -> str | None: + if self.user_code_id is not None: + api = self.get_api_wrapped() + if api.is_err(): + return None + else: + api = api.unwrap() + user_code = api.services.code.get_by_id(self.user_code_id) + return user_code.service_func_name + return None + @property def time_remaining_string(self) -> str | None: # update state @@ -126,15 +197,8 @@ def time_remaining_string(self) -> str | None: return None @property - def worker(self) -> SyftWorker | SyftError: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError( - message=f"Can't access Syft API. 
You must login to {self.syft_node_location}" - ) + def worker(self) -> SyftWorker: + api = self.get_api() return api.services.worker.get(self.job_worker_id) @property @@ -147,18 +211,7 @@ def eta_string(self) -> str | None: ): return None - def format_timedelta(local_timedelta: timedelta) -> str: - total_seconds = int(local_timedelta.total_seconds()) - hours, leftover = divmod(total_seconds, 3600) - minutes, seconds = divmod(leftover, 60) - - hours_string = f"{hours}:" if hours != 0 else "" - minutes_string = f"{minutes}:".zfill(3) - seconds_string = f"{seconds}".zfill(2) - - return f"{hours_string}{minutes_string}{seconds_string}" - - now = datetime.now() + now = datetime.now(tz=timezone.utc) time_passed = now - datetime.fromisoformat(self.creation_time) iter_duration_seconds: float = time_passed.total_seconds() / self.current_iter iters_remaining = self.n_iters - self.current_iter @@ -211,80 +264,34 @@ def apply_info(self, info: "JobInfo") -> None: self.result = info.result def restart(self, kill: bool = False) -> None: - if kill: - self.kill() + api = self.get_api() + call = SyftAPICall( + server_uid=self.server_uid, + path="job.restart", + args=[], + kwargs={"uid": self.id}, + blocking=True, + ) + res = api.make_call(call) self.fetch() - if not self.has_parent: - # this is currently the limitation, we will need to implement - # killing toplevel jobs later - print("Can only kill nested jobs") - elif kill or ( - self.status != JobStatus.PROCESSING and self.status != JobStatus.CREATED - ): - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - raise ValueError( - f"Can't access Syft API. You must login to {self.syft_node_location}" - ) - call = SyftAPICall( - node_uid=self.node_uid, - path="job.restart", - args=[], - kwargs={"uid": self.id}, - blocking=True, - ) + return res - api.make_call(call) - else: - print( - "Job is running or scheduled, if you want to kill it use job.kill() first" - ) - return None - - def kill(self) -> SyftError | None: - if self.job_pid is not None: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError( - message=f"Can't access Syft API. You must login to {self.syft_node_location}" - ) - call = SyftAPICall( - node_uid=self.node_uid, - path="job.kill", - args=[], - kwargs={"id": self.id}, - blocking=True, - ) - api.make_call(call) - return None - else: - return SyftError( - message="Job is not running or isn't running in multiprocessing mode." - ) - - def fetch(self) -> None: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - raise ValueError( - f"Can't access Syft API. 
You must login to {self.syft_node_location}" - ) + def kill(self) -> SyftSuccess: + api = self.get_api() call = SyftAPICall( - node_uid=self.node_uid, - path="job.get", + server_uid=self.server_uid, + path="job.kill", args=[], - kwargs={"uid": self.id}, + kwargs={"id": self.id}, blocking=True, ) - job: Job = api.make_call(call) + res = api.make_call(call) + self.fetch() + return res + + def fetch(self) -> None: + api = self.get_api() + job = api.job.get(self.id) self.resolved = job.resolved if job.resolved: self.result = job.result @@ -294,59 +301,44 @@ def fetch(self) -> None: self.current_iter = job.current_iter @property - def subjobs(self) -> list[QueueItem] | SyftError: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError( - message=f"Can't access Syft API. You must login to {self.syft_node_location}" - ) + def subjobs(self) -> list["Job"]: + api = self.get_api() return api.services.job.get_subjobs(self.id) + def get_subjobs(self, context: AuthedServiceContext) -> list["Job"]: + return context.server.services.job.get_subjobs(context, self.id) + @property - def owner(self) -> UserView | SyftError: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError( - message=f"Can't access Syft API. You must login to {self.syft_node_location}" - ) - return api.services.user.get_current_user(self.id) + def owner(self) -> UserView: + return self.get_api().services.user.get_current_user(self.id) - def _get_log_objs(self) -> SyftObject | SyftError: - api = APIRegistry.api_for( - node_uid=self.node_uid, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - raise ValueError(f"api is None. You must login to {self.node_uid}") - return api.services.log.get(self.log_id) + def _get_log_objs(self) -> SyftLog: + return self.get_api().services.log.get(self.log_id) def logs( self, stdout: bool = True, stderr: bool = True, _print: bool = True ) -> str | None: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return f"Can't access Syft API. You must login to {self.syft_node_location}" + api = self.get_api() + + has_permissions = True + results = [] if stdout: stdout_log = api.services.log.get_stdout(self.log_id) if isinstance(stdout_log, SyftError): results.append(f"Log {self.log_id} not available") + has_permissions = False else: results.append(stdout_log) if stderr: try: - std_err_log = api.services.log.get_error(self.log_id) - results.append(std_err_log) + stderr_log = api.services.log.get_stderr(self.log_id) + if isinstance(stderr_log, SyftError): + results.append(f"Error log {self.log_id} not available") + has_permissions = False + else: + results.append(stderr_log) except Exception: # no access if isinstance(self.result, Err): @@ -356,6 +348,15 @@ def logs( if isinstance(self.result, Err): results.append(self.result.value) + if has_permissions: + has_storage_permission = api.services.log.has_storage_permission( + self.log_id + ) + if not has_storage_permission: + prompt_warning_message( + message="This is a placeholder object, the real data lives on a different server and is not synced." 
+ ) + results_str = "\n".join(results) if not _print: return results_str @@ -366,29 +367,78 @@ def logs( # def __repr__(self) -> str: # return f": {self.status}" + def status_badge(self) -> dict[str, str]: + status = self.status + if status in [JobStatus.COMPLETED]: + badge_color = "label-green" + elif status in [JobStatus.PROCESSING]: + badge_color = "label-orange" + elif status in [JobStatus.CREATED]: + badge_color = "label-gray" + elif status in [JobStatus.ERRORED, JobStatus.INTERRUPTED]: + badge_color = "label-red" + else: + badge_color = "label-orange" + return {"value": status.upper(), "type": badge_color} + + def summary_html(self) -> str: + # TODO: Fix id for buttons + # relative + from ...util.notebook_ui.components.sync import CopyIDButton + + try: + # type_html = f'
    {self.object_type_name.upper()}
    ' + job_name = self.user_code_name or self.endpoint or "Job" + description_html = f"{job_name}" + worker_summary = "" + if self.job_worker_id: + worker_copy_button = CopyIDButton( + copy_text=str(self.job_worker_id), max_width=60 + ) + worker_summary = f""" +
    + {'on worker'} + {worker_copy_button.to_html()} +
    + """ + + summary_html = f""" +
    + {description_html} +
    + {CopyIDButton(copy_text=str(self.id), max_width=60).to_html()} +
    +
    +
    + {self.creation_time[:-7] if self.creation_time else ''} +
    + {worker_summary} + """ + summary_html = summary_html.replace("\n", "") + except Exception as e: + print("Failed to build table", e) + raise + return summary_html + def _coll_repr_(self) -> dict[str, Any]: - logs = self.logs(_print=False, stderr=False) - if logs is not None: - log_lines = logs.split("\n") + # [Note]: Disable logs in table, to improve performance + # logs = self.logs(_print=False, stderr=False) + # if logs is not None: + # log_lines = logs.split("\n") + # if len(log_lines) > 2: + # logs = f"... ({len(log_lines)} lines)\n" + "\n".join(log_lines[-2:]) + subjobs = self.subjobs - if len(log_lines) > 2: - logs = f"... ({len(log_lines)} lines)\n" + "\n".join(log_lines[-2:]) - created_time = self.creation_time[:-7] if self.creation_time is not None else "" + def default_value(value: str) -> str: + return value if value else "--" + return { - "status": f"{self.action_display_name}: {self.status}" - + ( - f"\non worker {short_uid(self.job_worker_id)}" - if self.job_worker_id - else "" - ), - "progress": self.progress, - "eta": self.eta_string, - "created": f"{created_time} by {self.owner.email}", - "logs": logs, - # "result": result, - # "parent_id": str(self.parent_job_id) if self.parent_job_id else "-", - "subjobs": len(subjobs), + "Status": self.status_badge(), + "Job": self.summary_html(), + "# Subjobs": default_value(len(subjobs)), + "Progress": default_value(self.progress), + "ETA": default_value(self.eta_string), } @property @@ -417,49 +467,151 @@ def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: """ return as_markdown_code(md) + @property + def fetched_status(self) -> JobStatus: + self.fetch() + return self.status + + @property + def requesting_user(self) -> UserView | None: + try: + return self.get_api().services.user.view(self.requested_by) + except SyftException: + return None + + @property + def server_name(self) -> str | None: + return self.get_api().server_name + + @property + def parent(self) -> Self: + return self.get_api().services.job.get(self.parent_job_id) + + @property + def ancestors_name_list(self) -> list[str]: + if self.parent_job_id: + parent = self.parent + parent_name_list = parent.ancestors_name_list + parent_name_list.append(parent.user_code_name) + return parent_name_list + return [] + + def _repr_html_(self) -> str: + # relative + from ...util.notebook_ui.components.sync import CopyIDButton + + identifier = random.randint(1, 2**32) # nosec + result_tab_id = f"Result_{identifier}" + logs_tab_id = f"Logs_{identifier}" + job_type = "JOB" if not self.parent_job_id else "SUBJOB" + ancestor_name_list = self.ancestors_name_list + api_header = f"{self.server_name}/jobs/" + "/".join(ancestor_name_list) + copy_id_button = CopyIDButton(copy_text=str(self.id), max_width=60) + button_html = copy_id_button.to_html() + creation_time = self.creation_time[:-7] if self.creation_time else "--" + updated_at = str(self.updated_at)[:-7] if self.updated_at else "--" + + user_repr = "--" + if self.requested_by and (requesting_user := self.requesting_user) is not None: + user_repr = f"{requesting_user.name} {requesting_user.email}" + + worker_attr = "" + if self.job_worker_id: + try: + worker = self.worker + except SyftException: + worker = None + if worker is not None: + worker_pool_id_button = CopyIDButton( + copy_text=str(worker.worker_pool_name), max_width=60 + ) + worker_attr = f""" +
    + + Worker Pool: + {worker.name} on worker {worker_pool_id_button.to_html()} +
    + """ + + logs = self.logs(_print=False) + logs_lines = logs.strip().split("\n") if logs else [] + logs_lines.insert(0, "Message") + + logs_lines = [f"{line}" for line in logs_lines] + logs_lines_html = "\n".join(logs_lines) + + template = Template(job_repr_template) + return template.substitute( + job_type=job_type, + api_header=api_header, + user_code_name=self.user_code_name, + button_html=button_html, + status=self.status.value.title(), + creation_time=creation_time, + updated_at=updated_at, + worker_attr=worker_attr, + no_subjobs=len(self.subjobs), + logs_tab_id=logs_tab_id, + result_tab_id=result_tab_id, + identifier=identifier, + logs_lines_html=logs_lines_html, + result=self.result, + user_repr=user_repr, + ) + def wait( self, job_only: bool = False, timeout: int | None = None ) -> Any | SyftNotReady: - # stdlib - from time import sleep - - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) + self.fetch() if self.resolved: return self.resolve - - if not job_only and self.result is not None: - self.result.wait() - - if api is None: - raise ValueError( - f"Can't access Syft API. You must login to {self.syft_node_location}" + api = self.get_api() + workers = api.services.worker.get_all() + if len(workers) == 0: + raise SyftException( + public_message=f"Server {self.syft_server_location} has no workers. " + f"You need to start a worker to run jobs " + f"by setting n_consumers > 0." ) + print_warning = True counter = 0 while True: self.fetch() + if self.resolved: + if isinstance(self.result, SyftError | Err) or self.status in [ # type: ignore[unreachable] + JobStatus.ERRORED, + JobStatus.INTERRUPTED, + ]: + return self.result + break if print_warning and self.result is not None: - result_obj = api.services.action.get( + result_obj = api.services.action.get( # type: ignore[unreachable] self.result.id, resolve_nested=False ) - if isinstance(result_obj.syft_action_data, ActionDataLink) and job_only: + if isinstance(result_obj, SyftError | Err): + return result_obj + if result_obj.is_link and job_only: # type: ignore[unreachable] print( "You're trying to wait on a job that has a link as a result." "This means that the job may be ready but the linked result may not." "Use job.wait().get() instead to wait for the linked result." 
) print_warning = False + sleep(1) - if self.resolved: - break # type: ignore[unreachable] - # TODO: fix the mypy issue + if timeout is not None: counter += 1 if counter > timeout: - return SyftError(message="Reached Timeout!") + raise SyftException(public_message="Reached Timeout!") + + # if self.resolve returns self.result as error, then we + # raise SyftException and not wait for the result + # otherwise if a job is resolved and not errored out, we wait for the result + if not job_only and self.result is not None: # type: ignore[unreachable] + self.result.wait(timeout) + return self.resolve # type: ignore[unreachable] @property @@ -471,27 +623,36 @@ def resolve(self) -> Any | SyftNotReady: return self.result return SyftNotReady(message=f"{self.id} not ready yet.") - def get_sync_dependencies(self, **kwargs: dict) -> list[UID]: # type: ignore + def get_sync_dependencies(self, context: AuthedServiceContext) -> list[UID]: # type: ignore dependencies = [] - if self.result is not None: + if self.result is not None and isinstance(self.result, ActionObject): dependencies.append(self.result.id.id) if self.log_id: dependencies.append(self.log_id) - subjob_ids = [subjob.id for subjob in self.subjobs] + subjobs = self.get_subjobs(context) + subjob_ids = [subjob.id for subjob in subjobs] dependencies.extend(subjob_ids) if self.user_code_id is not None: dependencies.append(self.user_code_id) + try: + output = context.server.services.output.get_by_job_id( # type: ignore + context, self.id + ) + if output is not None: + dependencies.append(output.id) + except NotFoundException: + pass + return dependencies -@serializable() class JobInfo(SyftObject): __canonical_name__ = "JobInfo" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __repr_attrs__ = [ "resolved", @@ -513,6 +674,7 @@ class JobInfo(SyftObject): includes_result: bool # TODO add logs (error reporting PRD) + user_code_id: UID | None = None resolved: bool | None = None status: JobStatus | None = None n_iters: int | None = None @@ -537,9 +699,6 @@ def _repr_html_(self) -> str: result_str += "

    No result included

    " return f""" -

    JobInfo

    {metadata_str} @@ -557,6 +716,7 @@ def from_job( info = cls( includes_metadata=metadata, includes_result=result, + user_code_id=job.user_code_id, ) if metadata: @@ -573,92 +733,56 @@ def from_job( return info -@instrument -@serializable() -class JobStash(BaseStash): - object_type = Job - settings: PartitionSettings = PartitionSettings( - name=Job.__canonical_name__, object_type=Job - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) - +@serializable(canonical_name="JobStashSQL", version=1) +class JobStash(ObjectStash[Job]): + @as_result(StashException) def set_result( self, credentials: SyftVerifyKey, item: Job, add_permissions: list[ActionObjectPermission] | None = None, - ) -> Result[Job | None, str]: - valid = self.check_type(item, self.object_type) - if valid.is_err(): - return SyftError(message=valid.err()) - return super().update(credentials, item, add_permissions) - - def set_placeholder( - self, - credentials: SyftVerifyKey, - item: Job, - add_permissions: list[ActionObjectPermission] | None = None, - ) -> Result[Job, str]: - # 🟡 TODO 36: Needs distributed lock - if not item.resolved: - exists = self.get_by_uid(credentials, item.id) - if exists.is_ok() and exists.ok() is None: - valid = self.check_type(item, self.object_type) - if valid.is_err(): - return SyftError(message=valid.err()) - return super().set(credentials, item, add_permissions) - return item - - def get_by_uid( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[Job | None, str]: - qks = QueryKeys(qks=[UIDPartitionKey.with_obj(uid)]) - item = self.query_one(credentials=credentials, qks=qks) - return item - - def get_by_parent_id( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[Job | None, str]: - qks = QueryKeys( - qks=[PartitionKey(key="parent_job_id", type_=UID).with_obj(uid)] - ) - item = self.query_all(credentials=credentials, qks=qks) - return item - - def delete_by_uid( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[SyftSuccess, str]: - qk = UIDPartitionKey.with_obj(uid) - result = super().delete(credentials=credentials, qk=qk) - if result.is_ok(): - return Ok(SyftSuccess(message=f"ID: {uid} deleted")) - return result - - def get_active(self, credentials: SyftVerifyKey) -> Result[SyftSuccess, str]: - qks = QueryKeys( - qks=[ - PartitionKey(key="status", type_=JobStatus).with_obj( - JobStatus.PROCESSING - ) - ] + ) -> Job: + # raises + self.check_type(item, self.object_type).unwrap() + if ( + isinstance(item.result, ActionObject) + and item.result.syft_blob_storage_entry_id is not None + ): + item.result._clear_cache() + return self.update(credentials, item, add_permissions).unwrap( + public_message="Failed to update" ) - return self.query_all(credentials=credentials, qks=qks) - def get_by_worker( - self, credentials: SyftVerifyKey, worker_id: str - ) -> Result[list[Job], str]: - qks = QueryKeys( - qks=[PartitionKey(key="job_worker_id", type_=str).with_obj(worker_id)] - ) - return self.query_all(credentials=credentials, qks=qks) + def get_active(self, credentials: SyftVerifyKey) -> list[Job]: + return self.get_all( + credentials=credentials, + filters={"status": JobStatus.CREATED}, + ).unwrap() + + def get_by_worker(self, credentials: SyftVerifyKey, worker_id: str) -> list[Job]: + return self.get_all( + credentials=credentials, + filters={"job_worker_id": worker_id}, + ).unwrap() + @as_result(StashException) def get_by_user_code_id( self, credentials: SyftVerifyKey, user_code_id: UID - ) -> Result[list[Job], str]: - qks = QueryKeys( - 
qks=[PartitionKey(key="user_code_id", type_=UID).with_obj(user_code_id)] - ) - - return self.query_all(credentials=credentials, qks=qks) + ) -> list[Job]: + return self.get_all( + credentials=credentials, + filters={"user_code_id": user_code_id}, + ).unwrap() + + @as_result(StashException) + def get_by_parent_id(self, credentials: SyftVerifyKey, uid: UID) -> list[Job]: + return self.get_all( + credentials=credentials, + filters={"parent_job_id": uid}, + ).unwrap() + + @as_result(StashException) + def get_by_result_id(self, credentials: SyftVerifyKey, uid: UID) -> Job: + return self.get_one( + credentials=credentials, filters={"result_id": uid} + ).unwrap() diff --git a/packages/syft/src/syft/service/log/log.py b/packages/syft/src/syft/service/log/log.py index f787aa8b81b..204409b0079 100644 --- a/packages/syft/src/syft/service/log/log.py +++ b/packages/syft/src/syft/service/log/log.py @@ -4,14 +4,16 @@ # relative from ...serde.serializable import serializable -from ...types.syft_object import SYFT_OBJECT_VERSION_3 +from ...service.context import AuthedServiceContext +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syncable_object import SyncableSyftObject +from ...types.uid import UID @serializable() class SyftLog(SyncableSyftObject): __canonical_name__ = "SyftLog" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 __repr_attrs__ = ["stdout", "stderr"] __exclude_sync_diff_attrs__: list[str] = [] @@ -22,6 +24,7 @@ class SyftLog(SyncableSyftObject): stdout: str = "" stderr: str = "" + job_id: UID def append(self, new_str: str) -> None: self.stdout += new_str @@ -32,3 +35,8 @@ def append_error(self, new_str: str) -> None: def restart(self) -> None: self.stderr = "" self.stdout = "" + + def get_sync_dependencies( + self, context: AuthedServiceContext, **kwargs: dict + ) -> list[UID]: # type: ignore + return [self.job_id] diff --git a/packages/syft/src/syft/service/log/log_service.py b/packages/syft/src/syft/service/log/log_service.py index f551addd2b1..d4b96a0deed 100644 --- a/packages/syft/src/syft/service/log/log_service.py +++ b/packages/syft/src/syft/service/log/log_service.py @@ -1,15 +1,9 @@ -# stdlib - -# third party -from result import Ok - # relative from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager from ...types.uid import UID -from ...util.telemetry import instrument +from ..action.action_permissions import StoragePermission from ..context import AuthedServiceContext -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import TYPE_TO_SERVICE @@ -20,23 +14,24 @@ from .log_stash import LogStash -@instrument -@serializable() +@serializable(canonical_name="LogService", version=1) class LogService(AbstractService): - store: DocumentStore stash: LogStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = LogStash(store=store) @service_method(path="log.add", name="add", roles=DATA_SCIENTIST_ROLE_LEVEL) - def add(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess | SyftError: - new_log = SyftLog(id=uid) - result = self.stash.set(context.credentials, new_log) - if result.is_err(): - return SyftError(message=str(result.err())) - return result + def add( + self, + context: AuthedServiceContext, + uid: UID, + job_id: UID, + stdout: str = "", + stderr: str = "", + ) -> SyftSuccess: + new_log = 
SyftLog(id=uid, job_id=job_id, stdout=stdout, stderr=stderr) + return self.stash.set(context.credentials, new_log).unwrap() @service_method(path="log.append", name="append", roles=DATA_SCIENTIST_ROLE_LEVEL) def append( @@ -45,85 +40,62 @@ def append( uid: UID, new_str: str = "", new_err: str = "", - ) -> SyftSuccess | SyftError: - result = self.stash.get_by_uid(context.credentials, uid) - if result.is_err(): - return SyftError(message=str(result.err())) - new_log = result.ok() + ) -> SyftSuccess: + new_log = self.stash.get_by_uid(context.credentials, uid).unwrap() if new_str: new_log.append(new_str) if new_err: new_log.append_error(new_err) - result = self.stash.update(context.credentials, new_log) - if result.is_err(): - return SyftError(message=str(result.err())) + self.stash.update(context.credentials, new_log).unwrap() return SyftSuccess(message="Log Append successful!") @service_method(path="log.get", name="get", roles=DATA_SCIENTIST_ROLE_LEVEL) - def get(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess | SyftError: - result = self.stash.get_by_uid(context.credentials, uid) - if result.is_err(): - return SyftError(message=str(result.err())) - - return result + def get(self, context: AuthedServiceContext, uid: UID) -> SyftLog: + return self.stash.get_by_uid(context.credentials, uid).unwrap() @service_method( path="log.get_stdout", name="get_stdout", roles=DATA_SCIENTIST_ROLE_LEVEL ) - def get_stdout( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - result = self.stash.get_by_uid(context.credentials, uid) - if result.is_err(): - return SyftError(message=str(result.err())) + def get_stdout(self, context: AuthedServiceContext, uid: UID) -> str: + result = self.get(context, uid) + return result.stdout - return Ok(result.ok().stdout) + @service_method(path="log.get_stderr", name="get_stderr", roles=ADMIN_ROLE_LEVEL) + def get_stderr(self, context: AuthedServiceContext, uid: UID) -> str: + result = self.get(context, uid) + return result.stderr @service_method(path="log.restart", name="restart", roles=DATA_SCIENTIST_ROLE_LEVEL) def restart( self, context: AuthedServiceContext, uid: UID, - ) -> SyftSuccess | SyftError: - result = self.stash.get_by_uid(context.credentials, uid) - if result.is_err(): - return SyftError(message=str(result.err())) - - log = result.ok() + ) -> SyftSuccess: + log = self.stash.get_by_uid(context.credentials, uid).unwrap() log.restart() - result = self.stash.update(context.credentials, log) - if result.is_err(): - return SyftError(message=str(result.err())) + self.stash.update(context.credentials, log).unwrap() return SyftSuccess(message="Log Restart successful!") - @service_method(path="log.get_error", name="get_error", roles=ADMIN_ROLE_LEVEL) - def get_error( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - result = self.stash.get_by_uid(context.credentials, uid) - if result.is_err(): - return SyftError(message=str(result.err())) - - return Ok(result.ok().stderr) - @service_method(path="log.get_all", name="get_all", roles=DATA_SCIENTIST_ROLE_LEVEL) - def get_all(self, context: AuthedServiceContext) -> SyftSuccess | SyftError: - result = self.stash.get_all(context.credentials) - if result.is_err(): - return SyftError(message=str(result.err())) - return result.ok() + def get_all(self, context: AuthedServiceContext) -> list[SyftLog]: + return self.stash.get_all(context.credentials).unwrap() # type: ignore @service_method(path="log.delete", name="delete", roles=DATA_SCIENTIST_ROLE_LEVEL) - 
def delete( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - result = self.stash.delete_by_uid(context.credentials, uid) - if result.is_ok(): - return result.ok() - else: - return SyftError(message=result.err()) + def delete(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: + self.stash.delete_by_uid(context.credentials, uid).unwrap() + return SyftSuccess(message=f"log {uid} succesfully deleted") + + @service_method( + path="log.has_storage_permission", + name="has_storage_permission", + roles=DATA_SCIENTIST_ROLE_LEVEL, + ) + def has_storage_permission(self, context: AuthedServiceContext, uid: UID) -> bool: + permission = StoragePermission(uid=uid, server_uid=context.server.id) + result = self.stash.has_storage_permission(permission) + return result TYPE_TO_SERVICE[SyftLog] = LogService diff --git a/packages/syft/src/syft/service/log/log_stash.py b/packages/syft/src/syft/service/log/log_stash.py index f1c37d9f6b2..ef50b081c24 100644 --- a/packages/syft/src/syft/service/log/log_stash.py +++ b/packages/syft/src/syft/service/log/log_stash.py @@ -1,19 +1,9 @@ # relative from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionSettings -from ...util.telemetry import instrument +from ...store.db.stash import ObjectStash from .log import SyftLog -@instrument -@serializable() -class LogStash(BaseUIDStoreStash): - object_type = SyftLog - settings: PartitionSettings = PartitionSettings( - name=SyftLog.__canonical_name__, object_type=SyftLog - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +@serializable(canonical_name="LogStash", version=1) +class LogStash(ObjectStash[SyftLog]): + pass diff --git a/packages/syft/src/syft/service/metadata/metadata_service.py b/packages/syft/src/syft/service/metadata/metadata_service.py index 4a33c9f8b80..b7b450b037b 100644 --- a/packages/syft/src/syft/service/metadata/metadata_service.py +++ b/packages/syft/src/syft/service/metadata/metadata_service.py @@ -1,38 +1,26 @@ # stdlib -from typing import cast # relative -from ...abstract_node import AbstractNode from ...serde.serializable import serializable -from ...store.document_store import DocumentStore -from ...util.telemetry import instrument +from ...store.db.db import DBManager from ..context import AuthedServiceContext from ..service import AbstractService from ..service import service_method from ..user.user_roles import GUEST_ROLE_LEVEL -from .node_metadata import NodeMetadataV3 +from .server_metadata import ServerMetadata -@instrument -@serializable() +@serializable(canonical_name="MetadataService", version=1) class MetadataService(AbstractService): - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: + pass @service_method( path="metadata.get_metadata", name="get_metadata", roles=GUEST_ROLE_LEVEL ) - def get_metadata(self, context: AuthedServiceContext) -> NodeMetadataV3: - context.node = cast(AbstractNode, context.node) - return context.node.metadata # type: ignore - - # @service_method(path="metadata.get_admin", name="get_admin", roles=GUEST_ROLE_LEVEL) - # def get_admin(self, context: AuthedServiceContext): - # user_service = context.node.get_service("userservice") - # admin_user = user_service.get_all(context=context)[0] - # return admin_user + def get_metadata(self, context: AuthedServiceContext) -> 
ServerMetadata: + return context.server.metadata # type: ignore @service_method(path="metadata.get_env", name="get_env", roles=GUEST_ROLE_LEVEL) def get_env(self, context: AuthedServiceContext) -> str: - context.node = cast(AbstractNode, context.node) - return context.node.packages + return context.server.packages diff --git a/packages/syft/src/syft/service/metadata/migrations.py b/packages/syft/src/syft/service/metadata/migrations.py index 0ecd89fd95d..bebaa12aede 100644 --- a/packages/syft/src/syft/service/metadata/migrations.py +++ b/packages/syft/src/syft/service/metadata/migrations.py @@ -7,13 +7,15 @@ def _downgrade_metadata_v3_to_v2() -> Callable: def set_defaults_from_settings(context: TransformContext) -> TransformContext: - # Extract from settings if node is attached to context + # Extract from settings if server is attached to context if context.output is not None: - if context.node is not None: - context.output["deployed_on"] = context.node.settings.deployed_on - context.output["on_board"] = context.node.settings.on_board - context.output["signup_enabled"] = context.node.settings.signup_enabled - context.output["admin_email"] = context.node.settings.admin_email + if context.server is not None: + context.output["deployed_on"] = context.server.settings.deployed_on + context.output["on_board"] = context.server.settings.on_board + context.output["signup_enabled"] = ( + context.server.settings.signup_enabled + ) + context.output["admin_email"] = context.server.settings.admin_email else: # Else set default value context.output["signup_enabled"] = False diff --git a/packages/syft/src/syft/service/metadata/node_metadata.py b/packages/syft/src/syft/service/metadata/node_metadata.py deleted file mode 100644 index 746e3336cd5..00000000000 --- a/packages/syft/src/syft/service/metadata/node_metadata.py +++ /dev/null @@ -1,141 +0,0 @@ -# future -from __future__ import annotations - -# stdlib -from collections.abc import Callable - -# third party -from packaging import version -from pydantic import BaseModel -from pydantic import model_validator - -# relative -from ...abstract_node import NodeType -from ...node.credentials import SyftVerifyKey -from ...protocol.data_protocol import get_data_protocol -from ...serde.serializable import serializable -from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SYFT_OBJECT_VERSION_4 -from ...types.syft_object import StorableObjectType -from ...types.syft_object import SyftObject -from ...types.transforms import convert_types -from ...types.transforms import drop -from ...types.transforms import rename -from ...types.transforms import transform -from ...types.uid import UID - - -def check_version( - client_version: str, server_version: str, server_name: str, silent: bool = False -) -> bool: - client_syft_version = version.parse(client_version) - node_syft_version = version.parse(server_version) - msg = ( - f"You are running syft=={client_version} but " - f"{server_name} node requires {server_version}" - ) - if client_syft_version.base_version != node_syft_version.base_version: - raise ValueError(msg) - if client_syft_version.pre != node_syft_version.pre: - if not silent: - print(f"Warning: {msg}") - return False - return True - - -@serializable() -class NodeMetadataUpdate(SyftObject): - __canonical_name__ = "NodeMetadataUpdate" - __version__ = SYFT_OBJECT_VERSION_2 - - name: str | None = None - organization: str | None = None - description: str | None = None - on_board: bool | None = None - id: UID | None = None # type: 
ignore[assignment] - verify_key: SyftVerifyKey | None = None - highest_object_version: int | None = None - lowest_object_version: int | None = None - syft_version: str | None = None - admin_email: str | None = None - - -@serializable() -class NodeMetadataV3(SyftObject): - __canonical_name__ = "NodeMetadata" - __version__ = SYFT_OBJECT_VERSION_4 - - name: str - id: UID - verify_key: SyftVerifyKey - highest_version: int - lowest_version: int - syft_version: str - node_type: NodeType = NodeType.DOMAIN - organization: str = "OpenMined" - description: str = "Text" - node_side_type: str - show_warnings: bool - - def check_version(self, client_version: str) -> bool: - return check_version( - client_version=client_version, - server_version=self.syft_version, - server_name=self.name, - ) - - -@serializable() -class NodeMetadataJSON(BaseModel, StorableObjectType): - metadata_version: int - name: str - id: str - verify_key: str - highest_object_version: int | None = None - lowest_object_version: int | None = None - syft_version: str - node_type: str = NodeType.DOMAIN.value - organization: str = "OpenMined" - description: str = "My cool domain" - signup_enabled: bool = False - admin_email: str = "" - node_side_type: str - show_warnings: bool - supported_protocols: list = [] - - @model_validator(mode="before") - @classmethod - def add_protocol_versions(cls, values: dict) -> dict: - if "supported_protocols" not in values: - data_protocol = get_data_protocol() - values["supported_protocols"] = data_protocol.supported_protocols - return values - - def check_version(self, client_version: str) -> bool: - return check_version( - client_version=client_version, - server_version=self.syft_version, - server_name=self.name, - ) - - -@transform(NodeMetadataV3, NodeMetadataJSON) -def metadata_to_json() -> list[Callable]: - return [ - drop(["__canonical_name__"]), - rename("__version__", "metadata_version"), - convert_types(["id", "verify_key", "node_type"], str), - rename("highest_version", "highest_object_version"), - rename("lowest_version", "lowest_object_version"), - ] - - -@transform(NodeMetadataJSON, NodeMetadataV3) -def json_to_metadata() -> list[Callable]: - return [ - drop(["metadata_version", "supported_protocols"]), - convert_types(["id", "verify_key"], [UID, SyftVerifyKey]), - convert_types(["node_type"], NodeType), - rename("highest_object_version", "highest_version"), - rename("lowest_object_version", "lowest_version"), - ] diff --git a/packages/syft/src/syft/service/metadata/server_metadata.py b/packages/syft/src/syft/service/metadata/server_metadata.py new file mode 100644 index 00000000000..c56eb8a49ae --- /dev/null +++ b/packages/syft/src/syft/service/metadata/server_metadata.py @@ -0,0 +1,127 @@ +# future +from __future__ import annotations + +# stdlib +from collections.abc import Callable + +# third party +from packaging import version +from pydantic import BaseModel +from pydantic import model_validator + +# relative +from ...abstract_server import ServerType +from ...protocol.data_protocol import get_data_protocol +from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey +from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ...types.syft_object import StorableObjectType +from ...types.syft_object import SyftObject +from ...types.transforms import convert_types +from ...types.transforms import drop +from ...types.transforms import rename +from ...types.transforms import transform +from ...types.uid import UID + + +def check_version( + 
client_version: str, server_version: str, server_name: str, silent: bool = False +) -> bool: + client_syft_version = version.parse(client_version) + server_syft_version = version.parse(server_version) + msg = ( + f"You are running syft=={client_version} but " + f"{server_name} server requires {server_version}" + ) + if client_syft_version.base_version != server_syft_version.base_version: + raise ValueError(msg) + if client_syft_version.pre != server_syft_version.pre: + if not silent: + print(f"Warning: {msg}") + return False + return True + + +@serializable() +class ServerMetadata(SyftObject): + __canonical_name__ = "ServerMetadata" + __version__ = SYFT_OBJECT_VERSION_1 + + name: str + id: UID + verify_key: SyftVerifyKey + highest_version: int + lowest_version: int + syft_version: str + server_type: ServerType = ServerType.DATASITE + organization: str = "OpenMined" + description: str = "Text" + server_side_type: str + show_warnings: bool + eager_execution_enabled: bool + min_size_blob_storage_mb: int + + def check_version(self, client_version: str) -> bool: + return check_version( + client_version=client_version, + server_version=self.syft_version, + server_name=self.name, + ) + + +@serializable(canonical_name="ServerMetadataJSON", version=1) +class ServerMetadataJSON(BaseModel, StorableObjectType): + metadata_version: int + name: str + id: str + verify_key: str + highest_object_version: int | None = None + lowest_object_version: int | None = None + syft_version: str + server_type: str = ServerType.DATASITE.value + organization: str = "OpenMined" + description: str = "My cool datasite" + signup_enabled: bool = False + eager_execution_enabled: bool = False + admin_email: str = "" + server_side_type: str + show_warnings: bool + supported_protocols: list = [] + min_size_blob_storage_mb: int + + @model_validator(mode="before") + @classmethod + def add_protocol_versions(cls, values: dict) -> dict: + if "supported_protocols" not in values: + data_protocol = get_data_protocol() + values["supported_protocols"] = data_protocol.supported_protocols + return values + + def check_version(self, client_version: str) -> bool: + return check_version( + client_version=client_version, + server_version=self.syft_version, + server_name=self.name, + ) + + +@transform(ServerMetadata, ServerMetadataJSON) +def metadata_to_json() -> list[Callable]: + return [ + drop(["__canonical_name__"]), + rename("__version__", "metadata_version"), + convert_types(["id", "verify_key", "server_type"], str), + rename("highest_version", "highest_object_version"), + rename("lowest_version", "lowest_object_version"), + ] + + +@transform(ServerMetadataJSON, ServerMetadata) +def json_to_metadata() -> list[Callable]: + return [ + drop(["metadata_version", "supported_protocols"]), + convert_types(["id", "verify_key"], [UID, SyftVerifyKey]), + convert_types(["server_type"], ServerType), + rename("highest_object_version", "highest_version"), + rename("lowest_object_version", "lowest_version"), + ] diff --git a/packages/syft/src/syft/service/migration/__init__.py b/packages/syft/src/syft/service/migration/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft/src/syft/service/migration/migration_service.py b/packages/syft/src/syft/service/migration/migration_service.py new file mode 100644 index 00000000000..4a346ebde9d --- /dev/null +++ b/packages/syft/src/syft/service/migration/migration_service.py @@ -0,0 +1,526 @@ +# stdlib +from collections import defaultdict +import logging +from typing import Any + 
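The new migration service below leans heavily on the as_result / .unwrap() result-wrapping pattern imported from ...types.result. As a rough mental model only (a simplified sketch, not Syft's actual implementation), the decorator turns the listed exceptions into an Err-like value that callers either inspect with is_err() or re-raise through unwrap():

# Simplified illustration of the result-wrapping style used by the stashes and
# services in this patch. The names mirror syft.types.result, but this toy
# version only shows the control flow, not the real API surface.
from collections.abc import Callable
from typing import Any


class Ok:
    def __init__(self, value: Any) -> None:
        self.value = value

    def is_err(self) -> bool:
        return False

    def unwrap(self, public_message: str | None = None) -> Any:
        return self.value


class Err:
    def __init__(self, exc: Exception) -> None:
        self.exc = exc

    def is_err(self) -> bool:
        return True

    def unwrap(self, public_message: str | None = None) -> Any:
        # Surface the captured exception, optionally with a friendlier message.
        if public_message is not None:
            raise type(self.exc)(public_message) from self.exc
        raise self.exc


def as_result(*exc_types: type[Exception]) -> Callable:
    # Decorate a function so the listed exceptions become Err values instead of
    # propagating; anything not listed still raises normally.
    def decorator(func: Callable) -> Callable:
        def wrapper(*args: Any, **kwargs: Any) -> Ok | Err:
            try:
                return Ok(func(*args, **kwargs))
            except exc_types as e:
                return Err(e)

        return wrapper

    return decorator

Calls such as self.stash.get_by_name(...).unwrap() in the service below follow exactly this shape.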
+# syft absolute +import syft + +# relative +from ...serde.serializable import serializable +from ...store.db.db import DBManager +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...types.blob_storage import BlobStorageEntry +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_object import SyftObject +from ...types.syft_object_registry import SyftObjectRegistry +from ...types.twin_object import TwinObject +from ...types.uid import UID +from ..action.action_object import Action +from ..action.action_object import ActionObject +from ..action.action_permissions import ActionObjectPermission +from ..action.action_permissions import StoragePermission +from ..action.action_store import ActionObjectStash +from ..context import AuthedServiceContext +from ..response import SyftError +from ..response import SyftSuccess +from ..service import AbstractService +from ..service import service_method +from ..sync.sync_service import get_store +from ..sync.sync_service import get_store_by_type +from ..user.user_roles import ADMIN_ROLE_LEVEL +from ..user.user_roles import DATA_SCIENTIST_ROLE_LEVEL +from ..worker.utils import DEFAULT_WORKER_POOL_NAME +from .object_migration_state import MigrationData +from .object_migration_state import StoreMetadata +from .object_migration_state import SyftMigrationStateStash +from .object_migration_state import SyftObjectMigrationState + +logger = logging.getLogger(__name__) + + +@serializable(canonical_name="MigrationService", version=1) +class MigrationService(AbstractService): + stash: SyftMigrationStateStash + + def __init__(self, store: DBManager) -> None: + self.stash = SyftMigrationStateStash(store=store) + + @service_method(path="migration", name="get_version") + def get_version(self, context: AuthedServiceContext, canonical_name: str) -> int: + """Search for the metadata for an object.""" + + migration_state = self.stash.get_by_name( + canonical_name=canonical_name, credentials=context.credentials + ).unwrap() + + if migration_state is None: + raise SyftException( + public_message=f"No migration state exists for canonical name: {canonical_name}" + ) + + return migration_state.current_version + + @service_method(path="migration", name="get_state") + @as_result(SyftException, NotFoundException) + def get_state( + self, context: AuthedServiceContext, canonical_name: str + ) -> SyftObjectMigrationState: + return self.stash.get_by_name( + canonical_name=canonical_name, credentials=context.credentials + ).unwrap() + + @service_method(path="migration", name="register_migration_state") + def register_migration_state( + self, + context: AuthedServiceContext, + current_version: int, + canonical_name: str, + ) -> SyftObjectMigrationState: + obj = SyftObjectMigrationState( + current_version=current_version, canonical_name=canonical_name + ) + return self.stash.set(obj=obj, credentials=context.credentials).unwrap() + + @as_result(SyftException, NotFoundException) + def _find_klasses_pending_for_migration( + self, context: AuthedServiceContext, object_types: list[type[SyftObject]] + ) -> list[type[SyftObject]]: + klasses_to_be_migrated = [] + + for object_type in object_types: + canonical_name = object_type.__canonical_name__ + object_version = object_type.__version__ + + try: + migration_state = self.get_state(context, canonical_name).unwrap( + public_message=f"Failed to get migration state for {canonical_name}." 
+ ) + if int(migration_state.current_version) != int( + migration_state.latest_version + ): + klasses_to_be_migrated.append(object_type) + except NotFoundException: + self.register_migration_state( + context, + current_version=object_version, + canonical_name=canonical_name, + ) + + return klasses_to_be_migrated + + @service_method( + path="migration.get_all_store_metadata", + name="get_all_store_metadata", + roles=ADMIN_ROLE_LEVEL, + ) + def get_all_store_metadata( + self, + context: AuthedServiceContext, + document_store_object_types: list[type[SyftObject]] | None = None, + include_action_store: bool = True, + ) -> dict[type[SyftObject], StoreMetadata]: + return self._get_all_store_metadata( + context, + document_store_object_types=document_store_object_types, + ).unwrap() + + @as_result(SyftException) + def _get_all_store_metadata( + self, + context: AuthedServiceContext, + document_store_object_types: list[type[SyftObject]] | None = None, + ) -> dict[type[SyftObject], StoreMetadata]: + # metadata = permissions + storage permissions + stashes = context.server.services.stashes + store_metadata = {} + + for klass, stash in stashes.items(): + if ( + document_store_object_types is not None + and klass not in document_store_object_types + ): + continue + store_metadata[klass] = StoreMetadata( + object_type=klass, + permissions=stash.get_all_permissions().unwrap(), + storage_permissions=stash.get_all_storage_permissions().unwrap(), + ) + + return store_metadata + + @as_result(SyftException) + def _update_store_metadata_for_klass( + self, context: AuthedServiceContext, metadata: StoreMetadata + ) -> None: + stash = self._search_stash_for_klass(context, metadata.object_type).unwrap() + permissions = [ + ActionObjectPermission.from_permission_string(uid, perm_str) + for uid, perm_strs in metadata.permissions.items() + for perm_str in perm_strs + ] + + storage_permissions = [ + StoragePermission(uid, server_uid) + for uid, server_uids in metadata.storage_permissions.items() + for server_uid in server_uids + ] + + stash.add_permissions(permissions, ignore_missing=True).unwrap() + stash.add_storage_permissions(storage_permissions, ignore_missing=True).unwrap() + + @as_result(SyftException) + def _update_store_metadata( + self, context: AuthedServiceContext, store_metadata: dict[type, StoreMetadata] + ) -> None: + print("Updating store metadata") + for metadata in store_metadata.values(): + self._update_store_metadata_for_klass(context, metadata).unwrap() + + @as_result(SyftException) + def _get_migration_objects( + self, + context: AuthedServiceContext, + document_store_object_types: list[type[SyftObject]] | None = None, + get_all: bool = False, + ) -> dict[type[SyftObject], list[SyftObject]]: + if document_store_object_types is None: + document_store_object_types = list(context.server.services.stashes.keys()) + + if get_all: + klasses_to_migrate = document_store_object_types + else: + klasses_to_migrate = self._find_klasses_pending_for_migration( + context=context, object_types=document_store_object_types + ).unwrap() + + result = defaultdict(list) + + for klass in klasses_to_migrate: + stash_or_err = self._search_stash_for_klass(context, klass) + if stash_or_err.is_err(): + continue + stash = stash_or_err.unwrap() + + for object in stash._data: + actual_klass = type(object) + use_klass = ( + klass + if actual_klass.__canonical_name__ == klass.__canonical_name__ + else actual_klass + ) + result[use_klass].append(object) + + return dict(result) + + @as_result(SyftException) + def 
_search_stash_for_klass( + self, context: AuthedServiceContext, klass: type[SyftObject] + ) -> ObjectStash: + if issubclass(klass, ActionObject | TwinObject | Action): + return context.server.services.action.stash + + stashes: dict[str, ObjectStash] = { # type: ignore + t.__canonical_name__: stash + for t, stash in context.server.services.stashes.items() + } + + mro = klass.__mro__ + class_index = 0 + object_stash = None + while len(mro) > class_index: + try: + canonical_name = mro[class_index].__canonical_name__ + except AttributeError: + # Classes without cname dont have a stash + break + object_stash = stashes.get(canonical_name) + if object_stash is not None: + break + class_index += 1 + if object_stash is None: + raise SyftException(public_message=f"Object stash not found for {klass}") + return object_stash + + @service_method( + path="migration.create_migrated_objects", + name="create_migrated_objects", + roles=ADMIN_ROLE_LEVEL, + ) + def create_migrated_objects( + self, + context: AuthedServiceContext, + migrated_objects: list[SyftObject], + ignore_existing: bool = True, + ) -> SyftSuccess: + self._create_migrated_objects( + context, migrated_objects, ignore_existing=ignore_existing + ).unwrap() + return SyftSuccess(message="Created migration objects!") + + @as_result(SyftException) + def _create_migrated_objects( + self, + context: AuthedServiceContext, + migrated_objects: dict[type[SyftObject], list[SyftObject]], + ignore_existing: bool = True, + skip_check_type: bool = False, + ) -> dict[type[SyftObject], list[SyftObject]]: + created_objects: dict[type[SyftObject], list[SyftObject]] = {} + + for key, objects in migrated_objects.items(): + created_objects[key] = [] + for migrated_object in objects: + stash = self._search_stash_for_klass( + context, type(migrated_object) + ).unwrap() + + result = stash.set( + context.credentials, + obj=migrated_object, + skip_check_type=skip_check_type, + ) + # Exception from the new Error Handling pattern, no need to change + if result.is_err(): + # TODO: subclass a DuplicationKeyError + if ignore_existing and ( + "Duplication Key Error" in result.err()._private_message # type: ignore + or "Duplication Key Error" in result.err().public_message # type: ignore + ): + print( + f"{type(migrated_object)} #{migrated_object.id} already exists" + ) + continue + else: + result.unwrap() # this will raise the exception inside the wrapper + created_objects[key].append(result.unwrap()) + return created_objects + + @as_result(SyftException) + def _update_migrated_objects( + self, context: AuthedServiceContext, migrated_objects: list[SyftObject] + ) -> SyftSuccess: + for migrated_object in migrated_objects: + stash = self._search_stash_for_klass( + context, type(migrated_object) + ).unwrap() + + stash.update( + context.credentials, + obj=migrated_object, + ).unwrap() + + return SyftSuccess(message="Updated migration objects!") + + @as_result(SyftException) + def _migrate_objects( + self, + context: AuthedServiceContext, + migration_objects: dict[type[SyftObject], list[SyftObject]], + ) -> list[SyftObject]: + migrated_objects = [] + + for klass, objects in migration_objects.items(): + canonical_name = klass.__canonical_name__ + latest_version = SyftObjectRegistry.get_latest_version(canonical_name) + + # Migrate data for objects in document store + logger.info( + f"Migrating data for: {canonical_name} table to version {latest_version}" + ) + for object in objects: + try: + migrated_value = object.migrate_to(latest_version, context) + 
migrated_objects.append(migrated_value) + except Exception: + raise SyftException( + public_message=f"Failed to migrate data to {klass} for qk {klass.__version__}: {object.id}" + ) + return migrated_objects + + @service_method( + path="migration.migrate_data", + name="migrate_data", + roles=ADMIN_ROLE_LEVEL, + ) + def migrate_data( + self, + context: AuthedServiceContext, + document_store_object_types: list[type[SyftObject]] | None = None, + ) -> SyftSuccess: + migration_objects = self._get_migration_objects( + context, document_store_object_types + ).unwrap() + migrated_objects = self._migrate_objects(context, migration_objects).unwrap() + self._update_migrated_objects(context, migrated_objects).unwrap() + + migration_actionobjects = self._get_migration_actionobjects(context).unwrap() + migrated_actionobjects = self._migrate_objects( + context, migration_actionobjects + ).unwrap() + self._update_migrated_actionobjects(context, migrated_actionobjects).unwrap() + + return SyftSuccess(message="Data upgraded to the latest version") + + @service_method( + path="migration.get_migration_actionobjects", + name="get_migration_actionobjects", + roles=ADMIN_ROLE_LEVEL, + ) + def get_migration_actionobjects( + self, context: AuthedServiceContext, get_all: bool = False + ) -> dict: + return self._get_migration_actionobjects(context, get_all=get_all).unwrap() + + @as_result(SyftException) + def _get_migration_actionobjects( + self, context: AuthedServiceContext, get_all: bool = False + ) -> dict[type[SyftObject], list[SyftObject]]: + # Track all object types from action store + action_object_types = [Action, ActionObject, TwinObject] + action_object_types.extend(ActionObject.__subclasses__()) + klass_by_canonical_name: dict[str, type[SyftObject]] = { + klass.__canonical_name__: klass for klass in action_object_types + } + + action_object_pending_migration = self._find_klasses_pending_for_migration( + context=context, object_types=action_object_types + ).unwrap() + result_dict: dict[type[SyftObject], list[SyftObject]] = defaultdict(list) + action_stash = context.server.services.action.stash + action_store_objects = action_stash.get_all(context.credentials).unwrap() + + for obj in action_store_objects: + if get_all or type(obj) in action_object_pending_migration: + klass = klass_by_canonical_name.get(obj.__canonical_name__, type(obj)) + result_dict[klass].append(obj) # type: ignore + return dict(result_dict) + + @as_result(SyftException) + def _update_migrated_actionobjects( + self, context: AuthedServiceContext, objects: list[SyftObject] + ) -> str: + action_store: ActionObjectStash = context.server.services.action.stash + for obj in objects: + action_store.set_or_update( + uid=obj.id, + credentials=context.credentials, + syft_object=obj, + ).unwrap() + return "success" + + @service_method( + path="migration.get_migration_data", + name="get_migration_data", + roles=ADMIN_ROLE_LEVEL, + ) + def get_migration_data(self, context: AuthedServiceContext) -> MigrationData: + store_objects = self._get_migration_objects(context, get_all=True).unwrap() + action_objects = self._get_migration_actionobjects( + context, get_all=True + ).unwrap() + blob_storage_objects = store_objects.pop(BlobStorageEntry, []) + store_metadata = self._get_all_store_metadata(context).unwrap() + return MigrationData( + server_uid=context.server.id, + signing_key=context.server.signing_key, + syft_version=syft.__version__, + default_pool_name=DEFAULT_WORKER_POOL_NAME, + store_objects=store_objects, + metadata=store_metadata, + 
action_objects=action_objects, + blob_storage_objects=blob_storage_objects, + ) + + @service_method( + path="migration.apply_migration_data", + name="apply_migration_data", + roles=ADMIN_ROLE_LEVEL, + unwrap_on_success=False, + ) + def apply_migration_data( + self, + context: AuthedServiceContext, + migration_data: MigrationData, + ) -> SyftSuccess: + # NOTE blob storage is migrated via client, + # it needs access to both source and destination blob storages. + if len(migration_data.blobs): + raise SyftException( + public_message="Blob storage migration is not supported by this endpoint, " + "please use 'client.load_migration_data' instead." + ) + + created_objects = self._create_migrated_objects( + context, migration_data.store_objects, skip_check_type=True + ).unwrap() + + # migrate + apply store objects + migrated_objects = self._migrate_objects( + context, + created_objects, + ).unwrap() + self._update_migrated_objects(context, migrated_objects).unwrap() + + # migrate+apply action objects + migrated_actionobjects = self._migrate_objects( + context, migration_data.action_objects + ).unwrap() + self._update_migrated_actionobjects(context, migrated_actionobjects).unwrap() + + # apply metadata + self._update_store_metadata(context, migration_data.metadata).unwrap() + return SyftSuccess(message="Migration completed successfully") + + @service_method( + path="migration.reset_and_restore", + name="reset_and_restore", + roles=ADMIN_ROLE_LEVEL, + unwrap_on_success=False, + ) + def reset_and_restore( + self, + context: AuthedServiceContext, + migration_data: MigrationData, + ) -> SyftSuccess | SyftError: + try: + root_verify_key = context.server.verify_key + context.server.db.init_tables(reset=True) + context.credentials = root_verify_key + self.apply_migration_data(context, migration_data) + except Exception as e: + return SyftError.from_exception( + context=context, + exc=e, + include_traceback=True, + ) + + return SyftSuccess(message="Database reset successfully.") + + @service_method( + path="migration._get_object", + name="_get_object", + roles=DATA_SCIENTIST_ROLE_LEVEL, + ) + def _get_object( + self, context: AuthedServiceContext, uid: UID, object_type: type + ) -> Any: + return ( + get_store_by_type(context, object_type) + .get_by_uid(credentials=context.credentials, uid=uid) + .unwrap() + ) + + @service_method( + path="migration._update_object", + name="_update_object", + roles=ADMIN_ROLE_LEVEL, + ) + def _update_object(self, context: AuthedServiceContext, object: Any) -> Any: + return ( + get_store(context, object) + .update(credentials=context.credentials, obj=object) + .unwrap() + ) diff --git a/packages/syft/src/syft/service/migration/object_migration_state.py b/packages/syft/src/syft/service/migration/object_migration_state.py new file mode 100644 index 00000000000..9ded4cd497b --- /dev/null +++ b/packages/syft/src/syft/service/migration/object_migration_state.py @@ -0,0 +1,312 @@ +# stdlib +from collections.abc import Callable +from io import BytesIO +from pathlib import Path +import sys +from typing import Any + +# third party +from typing_extensions import Self +import yaml + +# relative +from ...serde.deserialize import _deserialize +from ...serde.serializable import serializable +from ...serde.serialize import _serialize +from ...server.credentials import SyftSigningKey +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...types.blob_storage import BlobStorageEntry +from 
...types.blob_storage import CreateBlobStorageEntry +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_migration import migrate +from ...types.syft_object import Context +from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SyftBaseObject +from ...types.syft_object import SyftObject +from ...types.syft_object_registry import SyftObjectRegistry +from ...types.transforms import make_set_default +from ...types.uid import UID +from ...util.util import prompt_warning_message +from ..response import SyftSuccess +from ..worker.utils import DEFAULT_WORKER_POOL_NAME +from ..worker.worker_image import SyftWorkerImage +from ..worker.worker_pool import SyftWorker +from ..worker.worker_pool import WorkerPool + + +@serializable() +class SyftObjectMigrationState(SyftObject): + __canonical_name__ = "SyftObjectMigrationState" + __version__ = SYFT_OBJECT_VERSION_1 + + __attr_unique__ = ["canonical_name"] + + canonical_name: str + current_version: int + + @property + def latest_version(self) -> int | None: + available_versions = SyftObjectRegistry.get_versions( + canonical_name=self.canonical_name, + ) + if not available_versions: + return None + + return sorted(available_versions, reverse=True)[0] + + @property + def supported_versions(self) -> list: + return SyftObjectRegistry.get_versions(self.canonical_name) + + +@serializable(canonical_name="SyftMigrationStateSQLStash", version=1) +class SyftMigrationStateStash(ObjectStash[SyftObjectMigrationState]): + @as_result(SyftException, NotFoundException) + def get_by_name( + self, canonical_name: str, credentials: SyftVerifyKey + ) -> SyftObjectMigrationState: + return self.get_one( + credentials=credentials, + filters={"canonical_name": canonical_name}, + ).unwrap() + + +@serializable() +class StoreMetadata(SyftBaseObject): + __canonical_name__ = "StoreMetadata" + __version__ = SYFT_OBJECT_VERSION_1 + + object_type: type + permissions: dict[UID, set[str]] + storage_permissions: dict[UID, set[UID]] + + +@serializable() +class MigrationData(SyftObject): + __canonical_name__ = "MigrationData" + __version__ = SYFT_OBJECT_VERSION_2 + syft_version: str = "" + default_pool_name: str = DEFAULT_WORKER_POOL_NAME + server_uid: UID + signing_key: SyftSigningKey + store_objects: dict[type[SyftObject], list[SyftObject]] + metadata: dict[type[SyftObject], StoreMetadata] + action_objects: dict[type[SyftObject], list[SyftObject]] + blob_storage_objects: list[SyftObject] + blobs: dict[UID, Any] = {} + + __repr_attrs__ = [ + "server_uid", + "root_verify_key", + "num_objects", + "num_action_objects", + "includes_blobs", + ] + + @property + def root_verify_key(self) -> SyftVerifyKey: + return self.signing_key.verify_key + + @property + def num_objects(self) -> int: + return sum(len(objs) for objs in self.store_objects.values()) + + @property + def num_action_objects(self) -> int: + return sum(len(objs) for objs in self.action_objects.values()) + + @property + def includes_blobs(self) -> bool: + blob_ids = [obj.id for obj in self.blob_storage_objects] + return set(self.blobs.keys()) == set(blob_ids) + + @property + def includes_custom_workerpools(self) -> bool: + cname = WorkerPool.__canonical_name__ + worker_pools = None + for k, v in self.store_objects.items(): + if k.__canonical_name__ == cname: + worker_pools = v + + if worker_pools is None: + return False + + custom_pools = [ + pool + for pool in worker_pools + if getattr(pool, "name", 
None) != self.default_pool_name + ] + return len(custom_pools) > 0 + + def make_migration_config(self) -> dict[str, Any]: + server_uid = self.server_uid.to_string() + server_private_key = str(self.signing_key) + migration_config = { + "server": { + "env": [ + {"name": "SERVER_UID", "value": server_uid}, + {"name": "SERVER_PRIVATE_KEY", "value": server_private_key}, + ] + } + } + return migration_config + + @classmethod + def from_file(self, path: str | Path) -> Self: + path = Path(path) + if not path.exists(): + raise SyftException(f"File {str(path)} does not exist.") + + with open(path, "rb") as f: + res: SyftObject = _deserialize(f.read(), from_bytes=True) + + if not isinstance(res, MigrationData): + latest_version = SyftObjectRegistry.get_latest_version( # type: ignore[unreachable] + MigrationData.__canonical_name__ + ) + print("Upgrading MigrationData object to latest version...") + res = res.migrate_to(latest_version) + + return res + + def save(self, path: str | Path, yaml_path: str | Path) -> SyftSuccess: + if not self.includes_blobs: + proceed = prompt_warning_message( + "You are saving migration data without blob storage data. " + "This means that any existing blobs will be missing when you load this data." + "\nTo include blobs, call `download_blobs()` before saving.", + confirm=True, + ) + if not proceed: + raise SyftException(message="Migration data not saved.") + + path = Path(path) + with open(path, "wb") as f: + f.write(_serialize(self, to_bytes=True)) + + yaml_path = Path(yaml_path) + migration_config = self.make_migration_config() + with open(yaml_path, "w") as f: + yaml.dump(migration_config, f) + + return SyftSuccess(message=f"Migration data saved to {str(path)}.") + + def download_blobs(self) -> None: + for obj in self.blob_storage_objects: + blob = self.download_blob(obj.id) + self.blobs[obj.id] = blob + return None + + def download_blob(self, obj_id: str) -> Any: + api = self._get_api() + blob_retrieval = api.services.blob_storage.read(obj_id) + return blob_retrieval.read() + + def migrate_and_upload_blobs(self) -> SyftSuccess: + for obj in self.blob_storage_objects: + self.migrate_and_upload_blob(obj) + return SyftSuccess(message="All blobs uploaded successfully.") + + def migrate_and_upload_blob(self, obj: BlobStorageEntry) -> SyftSuccess: + api = self._get_api() + + if obj.id not in self.blobs: + raise SyftException( + public_message=f"Blob {obj.id} not found in migration data." 
+ ) + data = self.blobs[obj.id] + + migrated_obj = obj.migrate_to(BlobStorageEntry.__version__, Context()) + serialized = _serialize(data, to_bytes=True) + size = sys.getsizeof(serialized) + blob_create = CreateBlobStorageEntry.from_blob_storage_entry(migrated_obj) + blob_create.file_size = size + blob_deposit_object = api.services.blob_storage.allocate_for_user( + blob_create, migrated_obj.uploaded_by + ) + return blob_deposit_object.write(BytesIO(serialized)).unwrap() + + def get_items_by_canonical_name(self, canonical_name: str) -> list[SyftObject]: + for k, v in self.store_objects.items(): + if k.__canonical_name__ == canonical_name: + return v + + for k, v in self.action_objects.items(): + if k.__canonical_name__ == canonical_name: + return v + return [] + + def get_metadata_by_canonical_name(self, canonical_name: str) -> StoreMetadata: + for k, v in self.metadata.items(): + if k.__canonical_name__ == canonical_name: + return v + return StoreMetadata( + object_type=SyftObject, permissions={}, storage_permissions={} + ) + + def copy_without_workerpools(self) -> "MigrationData": + items_to_exclude = [ + WorkerPool.__canonical_name__, + SyftWorkerImage.__canonical_name__, + SyftWorker.__canonical_name__, + ] + + store_objects = { + k: v + for k, v in self.store_objects.items() + if k.__canonical_name__ not in items_to_exclude + } + metadata = { + k: v + for k, v in self.metadata.items() + if k.__canonical_name__ not in items_to_exclude + } + return self.__class__( + server_uid=self.server_uid, + signing_key=self.signing_key, + store_objects=store_objects, + metadata=metadata, + action_objects=self.action_objects, + blob_storage_objects=self.blob_storage_objects, + blobs=self.blobs, + ) + + def copy_without_blobs(self) -> "MigrationData": + # Create a shallow copy of the MigrationData instance, removing blob-related data + # This is required for sending the MigrationData to the backend. 
+ copy_data = self.__class__( + server_uid=self.server_uid, + signing_key=self.signing_key, + store_objects=self.store_objects.copy(), + metadata=self.metadata.copy(), + action_objects=self.action_objects.copy(), + blob_storage_objects=[], + blobs={}, + ) + return copy_data + + +@serializable() +class MigrationDataV1(SyftObject): + __canonical_name__ = "MigrationData" + __version__ = SYFT_OBJECT_VERSION_1 + + server_uid: UID + signing_key: SyftSigningKey + store_objects: dict[type[SyftObject], list[SyftObject]] + metadata: dict[type[SyftObject], StoreMetadata] + action_objects: dict[type[SyftObject], list[SyftObject]] + blob_storage_objects: list[SyftObject] + blobs: dict[UID, Any] = {} + + +@migrate(MigrationDataV1, MigrationData) +def migrate_migrationdata_v1_to_v2() -> list[Callable]: + return [ + make_set_default("default_pool_name", DEFAULT_WORKER_POOL_NAME), + make_set_default("syft_version", ""), + ] diff --git a/packages/syft/src/syft/service/network/association_request.py b/packages/syft/src/syft/service/network/association_request.py new file mode 100644 index 00000000000..49071512ff1 --- /dev/null +++ b/packages/syft/src/syft/service/network/association_request.py @@ -0,0 +1,114 @@ +# stdlib +import secrets + +# relative +from ...client.client import SyftClient +from ...serde.serializable import serializable +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ..context import ChangeContext +from ..request.request import Change +from ..response import SyftSuccess +from .routes import ServerRoute +from .server_peer import ServerPeer + + +@serializable() +class AssociationRequestChange(Change): + __canonical_name__ = "AssociationRequestChange" + __version__ = SYFT_OBJECT_VERSION_1 + + self_server_route: ServerRoute + remote_peer: ServerPeer + challenge: bytes + + __repr_attrs__ = ["self_server_route", "remote_peer"] + + @as_result(SyftException) + def _run( + self, context: ChangeContext, apply: bool + ) -> SyftSuccess: # tuple[bytes, ServerPeer]: + """ + Executes the association request. + + Args: + context (ChangeContext): The change context. + apply (bool): A flag indicating whether to apply the association request. + + Returns: + tuple[bytes, ServerPeer]: The result of the association request. + Raises on errors. + """ + if not apply: + # TODO: implement undo for AssociationRequestChange + raise SyftException( + public_message="Undo not supported for AssociationRequestChange" + ) + + # Get the network service + service_ctx = context.to_service_ctx() + + # Check if remote peer to be added is via reverse tunnel + rtunnel_route = self.remote_peer.get_rtunnel_route() + add_rtunnel_route = ( + rtunnel_route is not None + and self.remote_peer.latest_added_route == rtunnel_route + ) + + # If the remote peer is added via reverse tunnel, we skip ping to peer + if add_rtunnel_route: + service_ctx.server.services.network.set_reverse_tunnel_config( + context=context, + remote_server_peer=self.remote_peer, + ) + else: + # Pinging the remote peer to verify the connection + try: + # FIX: unwrap client_with_context? + remote_client: SyftClient = self.remote_peer.client_with_context( + context=service_ctx + ).unwrap( + public_message=f"Failed to create remote client for peer: {self.remote_peer.id}." 
+ ) + random_challenge = secrets.token_bytes(16) + remote_res = remote_client.api.services.network.ping( + challenge=random_challenge + ) + except Exception as e: + raise SyftException( + public_message="Remote Peer cannot ping peer:" + str(e) + ) + + challenge_signature = remote_res + + # Verifying if the challenge is valid + try: + self.remote_peer.verify_key.verify_key.verify( + random_challenge, challenge_signature + ) + except Exception as e: + raise SyftException(public_message=str(e)) + + # Adding the remote peer to the network stash + service_ctx.server.services.network.stash.create_or_update_peer( + service_ctx.server.verify_key, self.remote_peer + ) + # this way they can match up who we are with who they think we are + # Sending a signed messages for the peer to verify + self.self_server_route.validate_with_context(context=service_ctx) + return SyftSuccess( + message=f"Routes successfully added for peer: {self.remote_peer.name}", + value=self, + ) + + @as_result(SyftException) + def apply(self, context: ChangeContext) -> SyftSuccess: + return self._run(context, apply=True).unwrap() + + @as_result(SyftException) + def undo(self, context: ChangeContext) -> SyftSuccess: + return self._run(context, apply=False).unwrap() + + def __repr_syft_nested__(self) -> str: + return f"Request for connection from : {self.remote_peer.name}" diff --git a/packages/syft/src/syft/service/network/network_service.py b/packages/syft/src/syft/service/network/network_service.py index 768f1f49631..aa2264dbe10 100644 --- a/packages/syft/src/syft/service/network/network_service.py +++ b/packages/syft/src/syft/service/network/network_service.py @@ -1,39 +1,43 @@ # stdlib from collections.abc import Callable +from enum import Enum +import logging import secrets from typing import Any from typing import cast -# third party -from result import Result - # relative -from ...abstract_node import AbstractNode -from ...abstract_node import NodeType +from ...abstract_server import ServerType from ...client.client import HTTPConnection from ...client.client import PythonConnection from ...client.client import SyftClient -from ...client.client import VeilidConnection -from ...node.credentials import SyftVerifyKey -from ...node.worker_settings import WorkerSettings from ...serde.serializable import serializable -from ...service.settings.settings import NodeSettingsV2 -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ...types.grid_url import GridURL +from ...server.credentials import SyftVerifyKey +from ...server.worker_settings import WorkerSettings +from ...service.settings.settings import ServerSettings +from ...store.db.db import DBManager +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.server_url import ServerURL from ...types.transforms import TransformContext from ...types.transforms import keep +from ...types.transforms import make_set_default from ...types.transforms import transform from ...types.transforms import transform_method from ...types.uid import UID -from ...util.telemetry import instrument +from ...util.util import generate_token +from ...util.util import get_env +from ...util.util 
import prompt_warning_message +from ...util.util import str_to_bool from ..context import AuthedServiceContext -from ..data_subject.data_subject import NamePartitionKey -from ..metadata.node_metadata import NodeMetadataV3 -from ..response import SyftError +from ..metadata.server_metadata import ServerMetadata +from ..request.request import Request +from ..request.request import RequestStatus +from ..request.request import SubmitRequest +from ..response import SyftInfo from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES @@ -42,314 +46,350 @@ from ..user.user_roles import DATA_OWNER_ROLE_LEVEL from ..user.user_roles import GUEST_ROLE_LEVEL from ..warnings import CRUDWarning -from .node_peer import NodePeer -from .routes import HTTPNodeRoute -from .routes import NodeRoute -from .routes import PythonNodeRoute -from .routes import VeilidNodeRoute - -VerifyKeyPartitionKey = PartitionKey(key="verify_key", type_=SyftVerifyKey) -NodeTypePartitionKey = PartitionKey(key="node_type", type_=NodeType) -OrderByNamePartitionKey = PartitionKey(key="name", type_=str) - - -@instrument -@serializable() -class NetworkStash(BaseUIDStoreStash): - object_type = NodePeer - settings: PartitionSettings = PartitionSettings( - name=NodePeer.__canonical_name__, object_type=NodePeer - ) +from .association_request import AssociationRequestChange +from .reverse_tunnel_service import ReverseTunnelService +from .routes import HTTPServerRoute +from .routes import PythonServerRoute +from .routes import ServerRoute +from .routes import ServerRouteType +from .server_peer import ServerPeer +from .server_peer import ServerPeerUpdate - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +logger = logging.getLogger(__name__) - def get_by_name( - self, credentials: SyftVerifyKey, name: str - ) -> Result[NodePeer | None, str]: - qks = QueryKeys(qks=[NamePartitionKey.with_obj(name)]) - return self.query_one(credentials=credentials, qks=qks) +REVERSE_TUNNEL_ENABLED = "REVERSE_TUNNEL_ENABLED" - def update( - self, - credentials: SyftVerifyKey, - peer: NodePeer, - has_permission: bool = False, - ) -> Result[NodePeer, str]: - valid = self.check_type(peer, NodePeer) - if valid.is_err(): - return SyftError(message=valid.err()) - return super().update(credentials, peer) - def update_peer( - self, credentials: SyftVerifyKey, peer: NodePeer - ) -> Result[NodePeer, str]: - valid = self.check_type(peer, NodePeer) - if valid.is_err(): - return SyftError(message=valid.err()) - existing: Result | NodePeer = self.get_by_uid( - credentials=credentials, uid=peer.id - ) - if existing.is_ok() and existing.ok(): - existing = existing.ok() - existing.update_routes(peer.node_routes) - result = self.update(credentials, existing) - return result - else: - result = self.set(credentials, peer) - return result +def reverse_tunnel_enabled() -> bool: + return str_to_bool(get_env(REVERSE_TUNNEL_ENABLED, "false")) - def get_for_verify_key( - self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey - ) -> Result[NodePeer, SyftError]: - qks = QueryKeys(qks=[VerifyKeyPartitionKey.with_obj(verify_key)]) - return self.query_one(credentials, qks) - - def get_by_node_type( - self, credentials: SyftVerifyKey, node_type: NodeType - ) -> Result[list[NodePeer], SyftError]: - qks = QueryKeys(qks=[NodeTypePartitionKey.with_obj(node_type)]) - return self.query_all( - credentials=credentials, qks=qks, order_by=OrderByNamePartitionKey - ) 
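The NetworkStash changes in this hunk drop the old result.Result convention (query_one, is_err, ok) in favour of syft's @as_result decorator plus .unwrap(), so failed lookups raise typed errors (StashException, NotFoundException) instead of returning SyftError values. A minimal, self-contained sketch of that calling pattern, using toy names rather than the real syft internals:

    class NotFound(Exception):
        pass

    def as_result(*error_types):
        # Wrap a function so callers receive an object exposing .unwrap(),
        # which either returns the value or re-raises the captured error.
        def decorator(fn):
            class _Result:
                def __init__(self, value=None, error=None):
                    self.value, self.error = value, error

                def unwrap(self):
                    if self.error is not None:
                        raise self.error
                    return self.value

            def wrapper(*args, **kwargs):
                try:
                    return _Result(value=fn(*args, **kwargs))
                except error_types as exc:
                    return _Result(error=exc)

            return wrapper

        return decorator

    @as_result(NotFound)
    def get_by_name(db, name):
        if name not in db:
            raise NotFound(name)
        return db[name]

    peers = {"gateway-1": {"host_or_ip": "10.0.0.2"}}
    print(get_by_name(peers, "gateway-1").unwrap())  # {'host_or_ip': '10.0.0.2'}
    # get_by_name(peers, "missing").unwrap() would re-raise NotFound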
+@serializable(canonical_name="ServerPeerAssociationStatus", version=1) +class ServerPeerAssociationStatus(Enum): + PEER_ASSOCIATED = "PEER_ASSOCIATED" + PEER_ASSOCIATION_PENDING = "PEER_ASSOCIATION_PENDING" + PEER_NOT_FOUND = "PEER_NOT_FOUND" + + +@serializable(canonical_name="NetworkSQLStash", version=1) +class NetworkStash(ObjectStash[ServerPeer]): + @as_result(StashException, NotFoundException) + def get_by_name(self, credentials: SyftVerifyKey, name: str) -> ServerPeer: + try: + return self.get_one( + credentials=credentials, + filters={"name": name}, + ).unwrap() + except NotFoundException as e: + raise NotFoundException.from_exception( + e, public_message=f"ServerPeer with {name} not found" + ) + + @as_result(StashException) + def create_or_update_peer( + self, credentials: SyftVerifyKey, peer: ServerPeer + ) -> ServerPeer: + """ + Update the selected peer and its route priorities if the peer already exists + If the peer does not exist, simply adds it to the database. + + Args: + credentials (SyftVerifyKey): The credentials used to authenticate the request. + peer (ServerPeer): The peer to be updated or added. + + Returns: + ServerPeer: The updated or added peer if the operation was successful. + Raises an exception if the operation failed. + """ + self.check_type(peer, ServerPeer).unwrap() + + try: + existing_peer: ServerPeer = self.get_by_uid( + credentials=credentials, uid=peer.id + ).unwrap() + except SyftException: + return self.set(credentials, obj=peer).unwrap() + + existing_peer.update_routes(peer.server_routes) + peer_update = ServerPeerUpdate( + id=peer.id, server_routes=existing_peer.server_routes + ) + return self.update(credentials, peer_update).unwrap() -@instrument -@serializable() + @as_result(StashException, NotFoundException) + def get_by_verify_key( + self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey + ) -> ServerPeer: + return self.get_one( + credentials=credentials, + filters={"verify_key": verify_key}, + ).unwrap() + + @as_result(StashException) + def get_by_server_type( + self, credentials: SyftVerifyKey, server_type: ServerType + ) -> list[ServerPeer]: + return self.get_all( + credentials=credentials, + filters={"server_type": server_type}, + ).unwrap() + + +@serializable(canonical_name="NetworkService", version=1) class NetworkService(AbstractService): - store: DocumentStore stash: NetworkStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = NetworkStash(store=store) + if reverse_tunnel_enabled(): + self.rtunnel_service = ReverseTunnelService() - # TODO: Check with MADHAVA, can we even allow guest user to introduce routes to - # domain nodes? @service_method( path="network.exchange_credentials_with", name="exchange_credentials_with", roles=GUEST_ROLE_LEVEL, warning=CRUDWarning(confirmation=True), + unwrap_on_success=False, ) def exchange_credentials_with( self, context: AuthedServiceContext, - self_node_route: NodeRoute, - remote_node_route: NodeRoute, - remote_node_verify_key: SyftVerifyKey, - ) -> SyftSuccess | SyftError: - """Exchange Route With Another Node""" + self_server_route: ServerRoute, + remote_server_route: ServerRoute, + remote_server_verify_key: SyftVerifyKey, + reverse_tunnel: bool = False, + ) -> Request | SyftSuccess: + """ + Exchange Route With Another Server. 
If there is a pending association request, return it + """ # Step 1: Validate the Route - self_node_peer = self_node_route.validate_with_context(context=context) + self_server_peer = self_server_route.validate_with_context( + context=context + ).unwrap() - if isinstance(self_node_peer, SyftError): - return self_node_peer + if reverse_tunnel and not reverse_tunnel_enabled(): + raise SyftException( + public_message="Reverse tunneling is not enabled on this server." + ) - # Step 2: Send the Node Peer to the remote node + elif reverse_tunnel: + _rtunnel_route = self_server_peer.server_routes[-1] + _rtunnel_route.rtunnel_token = generate_token() + _rtunnel_route.host_or_ip = f"{self_server_peer.name}.syft.local" + self_server_peer.server_routes[-1] = _rtunnel_route + + # Step 2: Send the Server Peer to the remote server # Also give them their own to validate that it belongs to them # random challenge prevents replay attacks - remote_client: SyftClient = remote_node_route.client_with_context( + remote_client: SyftClient = remote_server_route.client_with_context( context=context ) - random_challenge = secrets.token_bytes(16) + remote_server_peer = ServerPeer.from_client(remote_client) - remote_res = remote_client.api.services.network.add_peer( - peer=self_node_peer, - challenge=random_challenge, - self_node_route=remote_node_route, - verify_key=remote_node_verify_key, - ) - - if isinstance(remote_res, SyftError): - return remote_res - - challenge_signature, remote_node_peer = remote_res + # Step 3: Check remotely if the self server already exists as a peer + # Update the peer if it exists, otherwise add it + try: + remote_self_server_peer = ( + remote_client.api.services.network.get_peer_by_name( + name=self_server_peer.name + ) + ) + except SyftException: + remote_self_server_peer = None - # Verifying if the challenge is valid + association_request_approved = True + if isinstance(remote_self_server_peer, ServerPeer): + updated_peer = ServerPeerUpdate( + id=self_server_peer.id, server_routes=self_server_peer.server_routes + ) + try: + remote_client.api.services.network.update_peer(peer_update=updated_peer) + except Exception as e: + logger.error(f"Failed to update peer information on remote client. 
{e}") + raise SyftException.from_exception( + e, + public_message=f"Failed to add peer information on remote client : {remote_client.id}", + private_message=f"Error: {e}", + ) - try: - remote_node_verify_key.verify_key.verify( - random_challenge, challenge_signature + # If peer does not exist, ask the remote client to add this server + # (represented by `self_server_peer`) as a peer + if remote_self_server_peer is None: + random_challenge = secrets.token_bytes(16) + remote_res = remote_client.api.services.network.add_peer( + peer=self_server_peer, + challenge=random_challenge, + self_server_route=remote_server_route, + verify_key=remote_server_verify_key, + ) + association_request_approved = not isinstance(remote_res, Request) + + # Step 4: Save the remote peer for later + self.stash.create_or_update_peer( + context.server.verify_key, + remote_server_peer, + ).unwrap(public_message=f"Failed to save peer: {remote_server_peer}.") + # Step 5: Save config to enable reverse tunneling + if reverse_tunnel and reverse_tunnel_enabled(): + self.set_reverse_tunnel_config( + context=context, + self_server_peer=self_server_peer, + remote_server_peer=remote_server_peer, ) - except Exception as e: - return SyftError(message=str(e)) - - # save the remote peer for later - context.node = cast(AbstractNode, context.node) - result = self.stash.update_peer( - context.node.verify_key, - remote_node_peer, - ) - if result.is_err(): - return SyftError(message=str(result.err())) - return SyftSuccess(message="Routes Exchanged") + return ( + SyftSuccess(message="Routes Exchanged") + if association_request_approved + else remote_res + ) @service_method(path="network.add_peer", name="add_peer", roles=GUEST_ROLE_LEVEL) def add_peer( self, context: AuthedServiceContext, - peer: NodePeer, + peer: ServerPeer, challenge: bytes, - self_node_route: NodeRoute, + self_server_route: ServerRoute, verify_key: SyftVerifyKey, - ) -> list | SyftError: - """Add a Network Node Peer""" + ) -> Request | SyftSuccess: + """Add a Network Server Peer. Called by a remote server to add + itself as a peer for the current server. 
+ """ # Using the verify_key of the peer to verify the signature # It is also our single source of truth for the peer if peer.verify_key != context.credentials: - return SyftError( - message=( - f"The {type(peer)}.verify_key: " + raise SyftException( + public_message=( + f"The {type(peer).__name__}.verify_key: " f"{peer.verify_key} does not match the signature of the message" ) ) - context.node = cast(AbstractNode, context.node) - if verify_key != context.node.verify_key: - return SyftError( - message="verify_key does not match the remote node's verify_key for add_peer" + if verify_key != context.server.verify_key: + raise SyftException( + public_message="verify_key does not match the remote server's verify_key for add_peer" ) - try: - remote_client: SyftClient = peer.client_with_context(context=context) - random_challenge = secrets.token_bytes(16) - remote_res = remote_client.api.services.network.ping( - challenge=random_challenge - ) - except Exception as e: - return SyftError(message="Remote Peer cannot ping peer:" + str(e)) - - if isinstance(remote_res, SyftError): - return remote_res - - challenge_signature = remote_res - - # Verifying if the challenge is valid - try: - peer.verify_key.verify_key.verify(random_challenge, challenge_signature) - except Exception as e: - return SyftError(message=str(e)) - - result = self.stash.update_peer(context.node.verify_key, peer) - if result.is_err(): - return SyftError(message=str(result.err())) + # check if the peer already is a server peer + existing_peer_res = self.stash.get_by_uid(context.server.verify_key, peer.id) + + if existing_peer_res.is_ok() and isinstance( + existing_peer := existing_peer_res.ok(), ServerPeer + ): + msg = [ + f"The peer '{peer.name}' is already associated with '{context.server.name}'" + ] + + if existing_peer != peer: + msg.append("Peer information change detected.") + self.stash.create_or_update_peer( + context.server.verify_key, + peer, + ).unwrap( + public_message="\n".join( + msg + ["Attempt to update peer information failed."] + ) + ) - # this way they can match up who we are with who they think we are - # Sending a signed messages for the peer to verify - self_node_peer = self_node_route.validate_with_context(context=context) + msg.append("Peer information successfully updated.") + return SyftSuccess(message="\n".join(msg)) - if isinstance(self_node_peer, SyftError): - return self_node_peer + return SyftSuccess(message="\n".join(msg)) - # Q,TODO: Should the returned node peer also be signed - # as the challenge is already signed - challenge_signature = context.node.signing_key.signing_key.sign( - challenge - ).signature + # check if the peer already submitted an association request + association_requests: list[Request] = self._get_association_requests_by_peer_id( + context=context, peer_id=peer.id + ) + if ( + association_requests + and (association_request := association_requests[-1]).status + == RequestStatus.PENDING + ): + return association_request + # only create and submit a new request if there is no requests yet + # or all previous requests have been rejected + association_request_change = AssociationRequestChange( + self_server_route=self_server_route, challenge=challenge, remote_peer=peer + ) + submit_request = SubmitRequest( + changes=[association_request_change], + requesting_user_verify_key=context.credentials, + ) + request = context.server.services.request.submit(context, submit_request) + if ( + isinstance(request, Request) + and context.server.settings.association_request_auto_approval + ): + 
return context.server.services.request.apply(context, uid=request.id) - return [challenge_signature, self_node_peer] + return request @service_method(path="network.ping", name="ping", roles=GUEST_ROLE_LEVEL) - def ping( - self, context: AuthedServiceContext, challenge: bytes - ) -> bytes | SyftError: + def ping(self, context: AuthedServiceContext, challenge: bytes) -> bytes: """To check alivesness/authenticity of a peer""" - # # Only the root user can ping the node to check its state - # if context.node.verify_key != context.credentials: + # # Only the root user can ping the server to check its state + # if context.server.verify_key != context.credentials: # return SyftError(message=("Only the root user can access ping endpoint")) # this way they can match up who we are with who they think we are # Sending a signed messages for the peer to verify - context.node = cast(AbstractNode, context.node) - challenge_signature = context.node.signing_key.signing_key.sign( + + challenge_signature = context.server.signing_key.signing_key.sign( challenge ).signature return challenge_signature - @service_method(path="network.add_route_for", name="add_route_for") - def add_route_for( - self, - context: AuthedServiceContext, - route: NodeRoute, - peer: NodePeer, - ) -> SyftSuccess | SyftError: - """Add Route for this Node to another Node""" - # check root user is asking for the exchange - client = peer.client_with_context(context=context) - result = client.api.services.network.verify_route(route) - - if not isinstance(result, SyftSuccess): - return result - return SyftSuccess(message="Route Verified") - @service_method( - path="network.verify_route", name="verify_route", roles=GUEST_ROLE_LEVEL + path="network.check_peer_association", + name="check_peer_association", + roles=GUEST_ROLE_LEVEL, ) - def verify_route( - self, context: AuthedServiceContext, route: NodeRoute - ) -> SyftSuccess | SyftError: - """Add a Network Node Route""" - # get the peer asking for route verification from its verify_key - context.node = cast(AbstractNode, context.node) - peer = self.stash.get_for_verify_key( - context.node.verify_key, - context.credentials, - ) - if peer.is_err(): - return SyftError(message=peer.err()) - peer = peer.ok() + def check_peer_association( + self, context: AuthedServiceContext, peer_id: UID + ) -> ServerPeerAssociationStatus: + """Check if a peer exists in the network stash""" - if peer.verify_key != context.credentials: - return SyftError( - message=( - f"verify_key: {context.credentials} at route {route} " - f"does not match listed peer: {peer}" + # get the server peer for the given sender peer_id + try: + self.stash.get_by_uid(context.server.verify_key, peer_id).unwrap() + return ServerPeerAssociationStatus.PEER_ASSOCIATED + except SyftException: + association_requests: list[Request] = ( + self._get_association_requests_by_peer_id( + context=context, peer_id=peer_id ) ) - peer.update_routes([route]) - result = self.stash.update_peer(context.node.verify_key, peer) - if result.is_err(): - return SyftError(message=str(result.err())) - return SyftSuccess(message="Network Route Verified") + if ( + association_requests + and association_requests[-1].status == RequestStatus.PENDING + ): + return ServerPeerAssociationStatus.PEER_ASSOCIATION_PENDING + + return ServerPeerAssociationStatus.PEER_NOT_FOUND @service_method( path="network.get_all_peers", name="get_all_peers", roles=GUEST_ROLE_LEVEL ) - def get_all_peers( - self, context: AuthedServiceContext - ) -> list[NodePeer] | SyftError: + def 
get_all_peers(self, context: AuthedServiceContext) -> list[ServerPeer]: """Get all Peers""" - context.node = cast(AbstractNode, context.node) - result = self.stash.get_all( - credentials=context.node.verify_key, - order_by=OrderByNamePartitionKey, - ) - if result.is_ok(): - peers = result.ok() - return peers - return SyftError(message=result.err()) + return self.stash.get_all( + credentials=context.server.verify_key, + order_by="name", + sort_order="asc", + ).unwrap() @service_method( path="network.get_peer_by_name", name="get_peer_by_name", roles=GUEST_ROLE_LEVEL ) - def get_peer_by_name( - self, context: AuthedServiceContext, name: str - ) -> NodePeer | None | SyftError: + def get_peer_by_name(self, context: AuthedServiceContext, name: str) -> ServerPeer: """Get Peer by Name""" - context.node = cast(AbstractNode, context.node) - result = self.stash.get_by_name( - credentials=context.node.verify_key, + return self.stash.get_by_name( + credentials=context.server.verify_key, name=name, - ) - if result.is_ok(): - peer = result.ok() - return peer - return SyftError(message=str(result.err())) + ).unwrap() @service_method( path="network.get_peers_by_type", @@ -357,123 +397,452 @@ def get_peer_by_name( roles=GUEST_ROLE_LEVEL, ) def get_peers_by_type( - self, context: AuthedServiceContext, node_type: NodeType - ) -> list[NodePeer] | SyftError: - context.node = cast(AbstractNode, context.node) - result = self.stash.get_by_node_type( - credentials=context.node.verify_key, - node_type=node_type, - ) + self, context: AuthedServiceContext, server_type: ServerType + ) -> list[ServerPeer]: + return self.stash.get_by_server_type( + credentials=context.server.verify_key, + server_type=server_type, + ).unwrap() - if result.is_err(): - return SyftError(message=str(result.err())) + @service_method( + path="network.update_peer", + name="update_peer", + roles=GUEST_ROLE_LEVEL, + ) + def update_peer( + self, + context: AuthedServiceContext, + peer_update: ServerPeerUpdate, + ) -> SyftSuccess: + # try setting all fields of ServerPeerUpdate according to ServerPeer + + peer = self.stash.update( + credentials=context.server.verify_key, + obj=peer_update, + ).unwrap() + + self.set_reverse_tunnel_config(context=context, remote_server_peer=peer) + return SyftSuccess( + message=f"Peer '{peer.name}' information successfully updated." 
+ ) - # Return peers or an empty list when result is None - return result.ok() or [] + def set_reverse_tunnel_config( + self, + context: AuthedServiceContext, + remote_server_peer: ServerPeer, + self_server_peer: ServerPeer | None = None, + ) -> None: + server_type = cast(ServerType, context.server.server_type) + if server_type.value == ServerType.GATEWAY.value: + rtunnel_route = remote_server_peer.get_rtunnel_route() + ( + self.rtunnel_service.set_server_config(remote_server_peer) + if rtunnel_route + else None + ) + else: + self_server_peer = ( + context.server.settings.to(ServerPeer) + if self_server_peer is None + else self_server_peer + ) + rtunnel_route = self_server_peer.get_rtunnel_route() + ( + self.rtunnel_service.set_client_config( + self_server_peer=self_server_peer, + remote_server_route=remote_server_peer.pick_highest_priority_route(), + ) + if rtunnel_route + else None + ) @service_method( path="network.delete_peer_by_id", name="delete_peer_by_id", roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, ) - def delete_peer_by_id( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - """Delete Node Peer""" - result = self.stash.delete_by_uid(context.credentials, uid) - if result.is_err(): - return SyftError(message=str(result.err())) - return SyftSuccess(message="Node Peer Deleted") + def delete_peer_by_id(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: + """Delete Server Peer""" + peer_to_delete = self.stash.get_by_uid(context.credentials, uid).unwrap() + + server_side_type = cast(ServerType, context.server.server_type) + if server_side_type.value == ServerType.GATEWAY.value: + rtunnel_route = peer_to_delete.get_rtunnel_route() + ( + self.rtunnel_service.clear_server_config(peer_to_delete) + if rtunnel_route + else None + ) + + # TODO: Handle the case when peer is deleted from datasite server + + self.stash.delete_by_uid(context.credentials, uid).unwrap() + + # Delete all the association requests from this peer + association_requests: list[Request] = self._get_association_requests_by_peer_id( + context=context, peer_id=uid + ) + for request in association_requests: + context.server.services.request.delete_by_uid(context, request.id) + # TODO: Notify the peer (either by email or by other form of notifications) + # that it has been deleted from the network + return SyftSuccess(message=f"Server Peer with id {uid} deleted.") @service_method( - path="network.exchange_veilid_route", - name="exchange_veilid_route", - roles=DATA_OWNER_ROLE_LEVEL, + path="network.add_route_on_peer", + name="add_route_on_peer", + unwrap_on_success=False, ) - def exchange_veilid_route( + def add_route_on_peer( self, context: AuthedServiceContext, - remote_node_route: NodeRoute, - ) -> SyftSuccess | SyftError: - """Exchange Route With Another Node""" - context.node = cast(AbstractNode, context.node) - # Step 1: Get our own Veilid Node Peer to send to the remote node - self_node_peer: NodePeer = context.node.settings.to(NodePeer) - - veilid_service = context.node.get_service("veilidservice") - veilid_route = veilid_service.get_veilid_route(context=context) - - if isinstance(veilid_route, SyftError): - return veilid_route - - self_node_peer.node_routes = [veilid_route] + peer: ServerPeer, + route: ServerRoute, + ) -> SyftSuccess: + """ + Add or update the route information on the remote peer. + + Args: + context (AuthedServiceContext): The authentication context. + peer (ServerPeer): The peer representing the remote server. 
+ route (ServerRoute): The route to be added. + + Returns: + SyftSuccess: A success message if the route is verified, + otherwise an error message. + """ + # creates a client on the remote server based on the credentials + # of the current server's client + remote_client = peer.client_with_context(context=context).unwrap() + # ask the remote server to add the route to the self server + result = remote_client.api.services.network.add_route( + peer_verify_key=context.credentials, + route=route, + called_by_peer=True, + ) - # Step 2: Create a Remote Client - remote_client: SyftClient = remote_node_route.client_with_context( - context=context + return SyftSuccess( + message="Route information added to remote peer", value=result ) - # Step 3: Send the Node Peer to the remote node - remote_node_peer: NodePeer | SyftError = ( - remote_client.api.services.network.add_veilid_peer( - peer=self_node_peer, + @service_method( + path="network.add_route", + name="add_route", + roles=GUEST_ROLE_LEVEL, + unwrap_on_success=False, + ) + def add_route( + self, + context: AuthedServiceContext, + peer_verify_key: SyftVerifyKey, + route: ServerRoute, + called_by_peer: bool = False, + ) -> SyftSuccess: + """ + Add a route to the peer. If the route already exists, update its priority. + + Args: + context (AuthedServiceContext): The authentication context of the remote server. + peer_verify_key (SyftVerifyKey): The verify key of the remote server peer. + route (ServerRoute): The route to be added. + called_by_peer (bool): The flag to indicate that it's called by a remote peer. + + Returns: + SyftSuccess + """ + # verify if the peer is truly the one sending the request to add the route to itself + if called_by_peer and peer_verify_key != context.credentials: + raise SyftException( + public_message=( + f"The {type(peer_verify_key).__name__}: " + f"{peer_verify_key} does not match the signature of the message" + ) + ) + # get the full peer object from the store to update its routes + remote_server_peer: ServerPeer = ( + self._get_remote_server_peer_by_verify_key(context, peer_verify_key) + ).unwrap() + # add and update the priority for the peer + if route in remote_server_peer.server_routes: + return SyftSuccess( + message=f"The route already exists between '{context.server.name}' and " + f"peer '{remote_server_peer.name}'." ) - ) - if not isinstance(remote_node_peer, NodePeer): - return remote_node_peer + remote_server_peer.update_route(route=route) - # Step 4: Add the remote Node Peer to our stash - result = self.stash.update_peer(context.node.verify_key, remote_node_peer) - if result.is_err(): - return SyftError(message=str(result.err())) + # update the peer in the store with the updated routes + peer_update = ServerPeerUpdate( + id=remote_server_peer.id, server_routes=remote_server_peer.server_routes + ) + self.stash.update( + credentials=context.server.verify_key, + obj=peer_update, + ).unwrap() + + return SyftSuccess( + message=f"New route ({str(route)}) with id '{route.id}' " + f"to peer {remote_server_peer.server_type.value} '{remote_server_peer.name}' " + f"was added for {str(context.server.server_type)} '{context.server.name}'" + ) - return SyftSuccess(message="Routes Exchanged") + @service_method( + path="network.delete_route_on_peer", + name="delete_route_on_peer", + unwrap_on_success=False, + ) + def delete_route_on_peer( + self, + context: AuthedServiceContext, + peer: ServerPeer, + route: ServerRoute, + ) -> SyftSuccess | SyftInfo: + """ + Delete the route on the remote peer. 
+ + Args: + context (AuthedServiceContext): The authentication context for the service. + peer (ServerPeer): The peer for which the route will be deleted. + route (ServerRoute): The route to be deleted. + + Returns: + SyftSuccess: If the route is successfully deleted. + SyftInfo: If there is only one route left for the peer and + the admin chose not to remove it + """ + # creates a client on the remote server based on the credentials + # of the current server's client + remote_client = peer.client_with_context(context=context).unwrap() + # ask the remote server to delete the route to the self server + return remote_client.api.services.network.delete_route( + peer_verify_key=context.credentials, + route=route, + called_by_peer=True, + ) @service_method( - path="network.add_veilid_peer", name="add_veilid_peer", roles=GUEST_ROLE_LEVEL + path="network.delete_route", + name="delete_route", + roles=GUEST_ROLE_LEVEL, + unwrap_on_success=False, ) - def add_veilid_peer( + def delete_route( self, context: AuthedServiceContext, - peer: NodePeer, - ) -> NodePeer | SyftError: - """Add a Veilid Node Peer""" - context.node = cast(AbstractNode, context.node) - # Step 1: Using the verify_key of the peer to verify the signature - # It is also our single source of truth for the peer - if peer.verify_key != context.credentials: - return SyftError( - message=( - f"The {type(peer)}.verify_key: " - f"{peer.verify_key} does not match the signature of the message" + peer_verify_key: SyftVerifyKey, + route: ServerRoute | None = None, + called_by_peer: bool = False, + ) -> SyftSuccess | SyftInfo: + """ + Delete a route for a given peer. + If a peer has no routes left, there will be a prompt asking if the user want to remove it. + If the answer is yes, it will be removed from the stash and will no longer be a peer. + + Args: + context (AuthedServiceContext): The authentication context for the service. + peer_verify_key (SyftVerifyKey): The verify key of the remote server peer. + route (ServerRoute): The route to be deleted. + called_by_peer (bool): The flag to indicate that it's called by a remote peer. + + Returns: + SyftSuccess: If the route is successfully deleted. + SyftInfo: If there is only one route left for the peer and + the admin chose not to remove it + """ + if called_by_peer and peer_verify_key != context.credentials: + # verify if the peer is truly the one sending the request to delete the route to itself + raise SyftException( + public_message=( + f"The {type(peer_verify_key).__name__}: " + f"{peer_verify_key} does not match the signature of the message" ) ) + remote_server_peer: ServerPeer = self._get_remote_server_peer_by_verify_key( + context=context, peer_verify_key=peer_verify_key + ).unwrap() + + if len(remote_server_peer.server_routes) == 1: + warning_message = ( + f"There is only one route left to peer " + f"{remote_server_peer.server_type.value} '{remote_server_peer.name}'. " + f"Removing this route will remove the peer for " + f"{str(context.server.server_type)} '{context.server.name}'." + ) + response: bool = prompt_warning_message( + message=warning_message, + confirm=False, + ) + if not response: + return SyftInfo( + message=f"The last route to {remote_server_peer.server_type.value} " + f"'{remote_server_peer.name}' with id " + f"'{remote_server_peer.server_routes[0].id}' was not deleted." 
+ ) - # Step 2: Save the remote peer to our stash - result = self.stash.update_peer(context.node.verify_key, peer) - if result.is_err(): - return SyftError(message=str(result.err())) + if route is not None: + remote_server_peer.delete_route(route) - # Step 3: Get our own Veilid Node Peer to send to the remote node - self_node_peer: NodePeer = context.node.settings.to(NodePeer) + return_message = ( + f"Route '{str(route)}' to peer " + f"{remote_server_peer.server_type.value} '{remote_server_peer.name}' " + f"was deleted for {str(context.server.server_type)} '{context.server.name}'." + ) - veilid_service = context.node.get_service("veilidservice") - veilid_route = veilid_service.get_veilid_route(context=context) + if len(remote_server_peer.server_routes) == 0: + # remove the peer + # TODO: should we do this as we are deleting the peer with a guest role level? + self.stash.delete_by_uid( + credentials=context.server.verify_key, uid=remote_server_peer.id + ).unwrap() + return_message += ( + f" There is no routes left to connect to peer " + f"{remote_server_peer.server_type.value} '{remote_server_peer.name}', so it is deleted for " + f"{str(context.server.server_type)} '{context.server.name}'." + ) + else: + # update the peer with the route removed + peer_update = ServerPeerUpdate( + id=remote_server_peer.id, server_routes=remote_server_peer.server_routes + ) + self.stash.update( + credentials=context.server.verify_key, obj=peer_update + ).unwrap() - if isinstance(veilid_route, SyftError): - return veilid_route + return SyftSuccess(message=return_message) - self_node_peer.node_routes = [veilid_route] + @service_method( + path="network.update_route_priority_on_peer", + name="update_route_priority_on_peer", + unwrap_on_success=False, + ) + def update_route_priority_on_peer( + self, + context: AuthedServiceContext, + peer: ServerPeer, + route: ServerRoute, + priority: int | None = None, + ) -> SyftSuccess: + """ + Update the route priority on the remote peer. + + Args: + context (AuthedServiceContext): The authentication context. + peer (ServerPeer): The peer representing the remote server. + route (ServerRoute): The route to be added. + priority (int | None): The new priority value for the route. If not + provided, it will be assigned the highest priority among all peers + + Returns: + SyftSuccess: A success message if the route is verified, + otherwise an error message. + """ + # creates a client on the remote server based on the credentials + # of the current server's client + remote_client = peer.client_with_context(context=context).unwrap() + result = remote_client.api.services.network.update_route_priority( + peer_verify_key=context.credentials, + route=route, + priority=priority, + called_by_peer=True, + ) + return result - return self_node_peer + @service_method( + path="network.update_route_priority", + name="update_route_priority", + roles=GUEST_ROLE_LEVEL, + unwrap_on_success=False, + ) + def update_route_priority( + self, + context: AuthedServiceContext, + peer_verify_key: SyftVerifyKey, + route: ServerRoute, + priority: int | None = None, + called_by_peer: bool = False, + ) -> SyftSuccess: + """ + Updates a route's priority for the given peer + + Args: + context (AuthedServiceContext): The authentication context for the service. + peer_verify_key (SyftVerifyKey): The verify key of the peer whose route priority needs to be updated. + route (ServerRoute): The route for which the priority needs to be updated. + priority (int | None): The new priority value for the route. 
If not + provided, it will be assigned the highest priority among all peers + + Returns: + SyftSuccess : Successful response + """ + if called_by_peer and peer_verify_key != context.credentials: + raise SyftException( + public_message=( + f"The {type(peer_verify_key).__name__}: " + f"{peer_verify_key} does not match the signature of the message" + ) + ) + # get the full peer object from the store to update its routes + remote_server_peer: ServerPeer = ( + self._get_remote_server_peer_by_verify_key(context, peer_verify_key) + ).unwrap() + # update the route's priority for the peer + updated_server_route: ServerRouteType = ( + remote_server_peer.update_existed_route_priority( + route=route, priority=priority + ).unwrap() + ) + new_priority: int = updated_server_route.priority + # update the peer in the store + peer_update = ServerPeerUpdate( + id=remote_server_peer.id, server_routes=remote_server_peer.server_routes + ) + self.stash.update(context.server.verify_key, peer_update).unwrap() + return SyftSuccess( + message=f"Route {route.id}'s priority updated to " + f"{new_priority} for peer {remote_server_peer.name}" + ) + + @as_result(SyftException) + def _get_remote_server_peer_by_verify_key( + self, context: AuthedServiceContext, peer_verify_key: SyftVerifyKey + ) -> ServerPeer: + """ + Helper function to get the full server peer object from t + he stash using its verify key + """ + return self.stash.get_by_verify_key( + credentials=context.server.verify_key, + verify_key=peer_verify_key, + ).unwrap() + + def _get_association_requests_by_peer_id( + self, context: AuthedServiceContext, peer_id: UID + ) -> list[Request]: + """ + Get all the association requests from a peer. The association requests are sorted by request_time. + """ + all_requests: list[Request] = context.server.services.request.get_all(context) + association_requests: list[Request] = [ + request + for request in all_requests + if any( + isinstance(change, AssociationRequestChange) + and change.remote_peer.id == peer_id + for change in request.changes + ) + ] + + return sorted( + association_requests, key=lambda request: request.request_time.utc_timestamp + ) -TYPE_TO_SERVICE[NodePeer] = NetworkService -SERVICE_TO_TYPES[NetworkService].update({NodePeer}) +TYPE_TO_SERVICE[ServerPeer] = NetworkService +SERVICE_TO_TYPES[NetworkService].update({ServerPeer}) -def from_grid_url(context: TransformContext) -> TransformContext: +def from_server_url(context: TransformContext) -> TransformContext: if context.obj is not None and context.output is not None: url = context.obj.url.as_container_host() context.output["host_or_ip"] = url.host_or_ip @@ -482,68 +851,69 @@ def from_grid_url(context: TransformContext) -> TransformContext: context.output["private"] = False context.output["proxy_target_uid"] = context.obj.proxy_target_uid context.output["priority"] = 1 + context.output["rtunnel_token"] = context.obj.rtunnel_token return context -@transform(HTTPConnection, HTTPNodeRoute) -def http_connection_to_node_route() -> list[Callable]: - return [from_grid_url] +@transform(HTTPConnection, HTTPServerRoute) +def http_connection_to_server_route() -> list[Callable]: + return [from_server_url] -def get_python_node_route(context: TransformContext) -> TransformContext: +def get_python_server_route(context: TransformContext) -> TransformContext: if context.output is not None and context.obj is not None: - context.output["id"] = context.obj.node.id - context.output["worker_settings"] = WorkerSettings.from_node(context.obj.node) + context.output["id"] = 
context.obj.server.id + context.output["worker_settings"] = WorkerSettings.from_server( + context.obj.server + ) context.output["proxy_target_uid"] = context.obj.proxy_target_uid return context -@transform(PythonConnection, PythonNodeRoute) -def python_connection_to_node_route() -> list[Callable]: - return [get_python_node_route] +@transform(PythonConnection, PythonServerRoute) +def python_connection_to_server_route() -> list[Callable]: + return [get_python_server_route] -@transform_method(PythonNodeRoute, PythonConnection) -def node_route_to_python_connection( +@transform_method(PythonServerRoute, PythonConnection) +def server_route_to_python_connection( obj: Any, context: TransformContext | None = None ) -> list[Callable]: - return PythonConnection(node=obj.node, proxy_target_uid=obj.proxy_target_uid) + return PythonConnection(server=obj.server, proxy_target_uid=obj.proxy_target_uid) -@transform_method(HTTPNodeRoute, HTTPConnection) -def node_route_to_http_connection( +@transform_method(HTTPServerRoute, HTTPConnection) +def server_route_to_http_connection( obj: Any, context: TransformContext | None = None ) -> list[Callable]: - url = GridURL( + url = ServerURL( protocol=obj.protocol, host_or_ip=obj.host_or_ip, port=obj.port ).as_container_host() - return HTTPConnection(url=url, proxy_target_uid=obj.proxy_target_uid) - - -@transform_method(VeilidNodeRoute, VeilidConnection) -def node_route_to_veilid_connection( - obj: VeilidNodeRoute, context: TransformContext | None = None -) -> list[Callable]: - return VeilidConnection(vld_key=obj.vld_key, proxy_target_uid=obj.proxy_target_uid) - - -@transform_method(VeilidConnection, VeilidNodeRoute) -def veilid_connection_to_node_route( - obj: VeilidConnection, context: TransformContext | None = None -) -> list[Callable]: - return VeilidNodeRoute(vld_key=obj.vld_key, proxy_target_uid=obj.proxy_target_uid) + return HTTPConnection( + url=url, + proxy_target_uid=obj.proxy_target_uid, + rtunnel_token=obj.rtunnel_token, + ) -@transform(NodeMetadataV3, NodePeer) +@transform(ServerMetadata, ServerPeer) def metadata_to_peer() -> list[Callable]: return [ - keep(["id", "name", "verify_key", "node_type", "admin_email"]), + keep( + [ + "id", + "name", + "verify_key", + "server_type", + ] + ), + make_set_default("admin_email", ""), ] -@transform(NodeSettingsV2, NodePeer) +@transform(ServerSettings, ServerPeer) def settings_to_peer() -> list[Callable]: return [ - keep(["id", "name", "verify_key", "node_type", "admin_email"]), + keep(["id", "name", "verify_key", "server_type", "admin_email"]), ] diff --git a/packages/syft/src/syft/service/network/node_peer.py b/packages/syft/src/syft/service/network/node_peer.py deleted file mode 100644 index 0f4a8a0b448..00000000000 --- a/packages/syft/src/syft/service/network/node_peer.py +++ /dev/null @@ -1,164 +0,0 @@ -# stdlib - -# relative -from ...abstract_node import NodeType -from ...client.client import SyftClient -from ...node.credentials import SyftSigningKey -from ...node.credentials import SyftVerifyKey -from ...serde.serializable import serializable -from ...service.response import SyftError -from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SyftObject -from ...types.uid import UID -from ..context import NodeServiceContext -from ..metadata.node_metadata import NodeMetadataV3 -from .routes import HTTPNodeRoute -from .routes import NodeRoute -from .routes import NodeRouteType -from .routes import PythonNodeRoute -from .routes import VeilidNodeRoute -from .routes import 
connection_to_route -from .routes import route_to_connection - - -@serializable() -class NodePeer(SyftObject): - # version - __canonical_name__ = "NodePeer" - __version__ = SYFT_OBJECT_VERSION_2 - - __attr_searchable__ = ["name", "node_type"] - __attr_unique__ = ["verify_key"] - __repr_attrs__ = ["name", "node_type", "admin_email"] - - id: UID | None = None # type: ignore[assignment] - name: str - verify_key: SyftVerifyKey - node_routes: list[NodeRouteType] = [] - node_type: NodeType - admin_email: str - - def update_routes(self, new_routes: list[NodeRoute]) -> None: - add_routes = [] - new_routes = self.update_route_priorities(new_routes) - for new_route in new_routes: - existed, index = self.existed_route(new_route) - if existed and index is not None: - # if the route already exists, we do not append it to self.new_route, - # but update its priority - self.node_routes[index].priority = new_route.priority - else: - add_routes.append(new_route) - - self.node_routes += add_routes - - def update_route_priorities(self, new_routes: list[NodeRoute]) -> list[NodeRoute]: - """ - Since we pick the newest route has the highest priority, we - update the priority of the newly added routes here to be increments of - current routes' highest priority. - """ - current_max_priority = max(route.priority for route in self.node_routes) - for route in new_routes: - route.priority = current_max_priority + 1 - current_max_priority += 1 - return new_routes - - def existed_route(self, route: NodeRoute) -> tuple[bool, int | None]: - """Check if a route exists in self.node_routes - - For HTTPNodeRoute: check based on protocol, host_or_ip (url) and port - - For PythonNodeRoute: check if the route exists in the set of all node_routes - Args: - route: the route to be checked - Returns: - if the route exists, returns (True, index of the existed route in self.node_routes) - if the route does not exist returns (False, None) - """ - if isinstance(route, HTTPNodeRoute): - for i, r in enumerate(self.node_routes): - if ( - (route.host_or_ip == r.host_or_ip) - and (route.port == r.port) - and (route.protocol == r.protocol) - ): - return (True, i) - return (False, None) - elif isinstance(route, PythonNodeRoute): # PythonNodeRoute - for i, r in enumerate(self.node_routes): # something went wrong here - if ( - (route.worker_settings.id == r.worker_settings.id) - and (route.worker_settings.name == r.worker_settings.name) - and (route.worker_settings.node_type == r.worker_settings.node_type) - and ( - route.worker_settings.node_side_type - == r.worker_settings.node_side_type - ) - and ( - route.worker_settings.signing_key - == r.worker_settings.signing_key - ) - ): - return (True, i) - return (False, None) - elif isinstance(route, VeilidNodeRoute): - for i, r in enumerate(self.node_routes): - if ( - route.vld_key == r.vld_key - and route.proxy_target_uid == r.proxy_target_uid - ): - return (True, i) - - return (False, None) - else: - raise ValueError(f"Unsupported route type: {type(route)}") - - @staticmethod - def from_client(client: SyftClient) -> "NodePeer": - if not client.metadata: - raise Exception("Client has to have metadata first") - - peer = client.metadata.to(NodeMetadataV3).to(NodePeer) - route = connection_to_route(client.connection) - peer.node_routes.append(route) - return peer - - def client_with_context(self, context: NodeServiceContext) -> SyftClient: - if len(self.node_routes) < 1: - raise Exception(f"No routes to peer: {self}") - # select the latest added route - final_route = self.pick_highest_priority_route() 
- connection = route_to_connection(route=final_route) - - client_type = connection.get_client_type() - if isinstance(client_type, SyftError): - return client_type - if context.node is None: - return SyftError(message=f"context {context}'s node is None") - return client_type(connection=connection, credentials=context.node.signing_key) - - def client_with_key(self, credentials: SyftSigningKey) -> SyftClient: - if len(self.node_routes) < 1: - raise Exception(f"No routes to peer: {self}") - # select the latest added route - final_route = self.pick_highest_priority_route() - connection = route_to_connection(route=final_route) - client_type = connection.get_client_type() - if isinstance(client_type, SyftError): - return client_type - - return client_type(connection=connection, credentials=credentials) - - @property - def guest_client(self) -> SyftClient: - guest_key = SyftSigningKey.generate() - return self.client_with_key(credentials=guest_key) - - def proxy_from(self, client: SyftClient) -> SyftClient: - return client.proxy_to(self) - - def pick_highest_priority_route(self) -> NodeRoute: - final_route: NodeRoute = self.node_routes[-1] - for route in self.node_routes: - if route.priority > final_route.priority: - final_route = route - return final_route diff --git a/packages/syft/src/syft/service/network/rathole.py b/packages/syft/src/syft/service/network/rathole.py new file mode 100644 index 00000000000..96abcd08af6 --- /dev/null +++ b/packages/syft/src/syft/service/network/rathole.py @@ -0,0 +1,43 @@ +# third party +from typing_extensions import Self + +# relative +from ...serde.serializable import serializable +from ...types.base import SyftBaseModel +from ...util.util import get_env +from .server_peer import ServerPeer + + +def get_rathole_port() -> int: + return int(get_env("RATHOLE_PORT", "2333")) + + +@serializable(canonical_name="RatholeConfig", version=1) +class RatholeConfig(SyftBaseModel): + uuid: str + secret_token: str + local_addr_host: str + local_addr_port: int + server_name: str | None = None + + @property + def local_address(self) -> str: + return f"{self.local_addr_host}:{self.local_addr_port}" + + @classmethod + def from_peer(cls, peer: ServerPeer) -> Self: + # relative + from .routes import HTTPServerRoute + + high_priority_route = peer.pick_highest_priority_route() + + if not isinstance(high_priority_route, HTTPServerRoute): + raise ValueError("Rathole only supports HTTPServerRoute") + + return cls( + uuid=peer.id, + secret_token=peer.rtunnel_token, + local_addr_host=high_priority_route.host_or_ip, + local_addr_port=high_priority_route.port, + server_name=peer.name, + ) diff --git a/packages/syft/src/syft/service/network/rathole_config_builder.py b/packages/syft/src/syft/service/network/rathole_config_builder.py new file mode 100644 index 00000000000..fb0ef01d798 --- /dev/null +++ b/packages/syft/src/syft/service/network/rathole_config_builder.py @@ -0,0 +1,308 @@ +# stdlib +import secrets +from typing import cast + +# third party +from kr8s.objects import Service +import yaml + +# relative +from ...custom_worker.k8s import KubeUtils +from ...custom_worker.k8s import get_kr8s_client +from ...types.uid import UID +from .rathole import RatholeConfig +from .rathole import get_rathole_port +from .rathole_toml import RatholeClientToml +from .rathole_toml import RatholeServerToml +from .server_peer import ServerPeer + +RATHOLE_TOML_CONFIG_MAP = "rathole-config" +RATHOLE_PROXY_CONFIG_MAP = "proxy-config-dynamic" +PROXY_CONFIG_MAP = "proxy-config" +DEFAULT_LOCAL_ADDR_HOST = 
"0.0.0.0" # nosec + + +class RatholeConfigBuilder: + def __init__(self) -> None: + self.k8rs_client = get_kr8s_client() + + def add_host_to_server(self, peer: ServerPeer) -> None: + """Add a host to the rathole server toml file. + + Args: + peer (ServerPeer): The peer to be added to the rathole server. + + Returns: + None + """ + + rathole_route = peer.get_rtunnel_route() + if not rathole_route: + raise Exception(f"Peer: {peer} has no rathole route: {rathole_route}") + + random_port = self._get_random_port() + + peer_id = cast(UID, peer.id) + + config = RatholeConfig( + uuid=peer_id.to_string(), + secret_token=rathole_route.rtunnel_token, + local_addr_host=DEFAULT_LOCAL_ADDR_HOST, + local_addr_port=random_port, + server_name=peer.name, + ) + + # Get rathole toml config map + rathole_config_map = KubeUtils.get_configmap( + client=self.k8rs_client, name=RATHOLE_TOML_CONFIG_MAP + ) + + if rathole_config_map is None: + raise Exception("Rathole config map not found.") + + client_filename = RatholeServerToml.filename + + toml_str = rathole_config_map.data[client_filename] + + # Add the peer info to the toml file + rathole_toml = RatholeServerToml(toml_str) + rathole_toml.add_config(config=config) + + # First time adding a peer + if not rathole_toml.get_rathole_listener_addr(): + bind_addr = f"localhost:{get_rathole_port()}" + rathole_toml.set_rathole_listener_addr(bind_addr) + + data = {client_filename: rathole_toml.toml_str} + + # Update the rathole config map + KubeUtils.update_configmap(config_map=rathole_config_map, patch={"data": data}) + + # Add the peer info to the proxy config map + self._add_dynamic_addr_to_rathole(config) + + def remove_host_from_server(self, peer_id: str, server_name: str) -> None: + """Remove a host from the rathole server toml file. + + Args: + peer_id (str): The id of the peer to be removed. + server_name (str): The name of the peer to be removed. 
+ + Returns: + None + """ + + rathole_config_map = KubeUtils.get_configmap( + client=self.k8rs_client, name=RATHOLE_TOML_CONFIG_MAP + ) + + if rathole_config_map is None: + raise Exception("Rathole config map not found.") + + client_filename = RatholeServerToml.filename + + toml_str = rathole_config_map.data[client_filename] + + rathole_toml = RatholeServerToml(toml_str=toml_str) + + rathole_toml.remove_config(peer_id) + + data = {client_filename: rathole_toml.toml_str} + + # Update the rathole config map + KubeUtils.update_configmap(config_map=rathole_config_map, patch={"data": data}) + + # Remove the peer info from the proxy config map + self._remove_dynamic_addr_from_rathole(server_name) + + def _get_random_port(self) -> int: + """Get a random port number.""" + return secrets.randbits(15) + + def add_host_to_client( + self, peer_name: str, peer_id: str, rtunnel_token: str, remote_addr: str + ) -> None: + """Add a host to the rathole client toml file.""" + + config = RatholeConfig( + uuid=peer_id, + secret_token=rtunnel_token, + local_addr_host="proxy", + local_addr_port=80, + server_name=peer_name, + ) + + # Get rathole toml config map + rathole_config_map = KubeUtils.get_configmap( + client=self.k8rs_client, name=RATHOLE_TOML_CONFIG_MAP + ) + + if rathole_config_map is None: + raise Exception("Rathole config map not found.") + + client_filename = RatholeClientToml.filename + + toml_str = rathole_config_map.data[client_filename] + + rathole_toml = RatholeClientToml(toml_str=toml_str) + + rathole_toml.add_config(config=config) + + rathole_toml.set_remote_addr(remote_addr) + + data = {client_filename: rathole_toml.toml_str} + + # Update the rathole config map + KubeUtils.update_configmap(config_map=rathole_config_map, patch={"data": data}) + + def remove_host_from_client(self, peer_id: str) -> None: + """Remove a host from the rathole client toml file.""" + + rathole_config_map = KubeUtils.get_configmap( + client=self.k8rs_client, name=RATHOLE_TOML_CONFIG_MAP + ) + + if rathole_config_map is None: + raise Exception("Rathole config map not found.") + + client_filename = RatholeClientToml.filename + + toml_str = rathole_config_map.data[client_filename] + + rathole_toml = RatholeClientToml(toml_str=toml_str) + + rathole_toml.remove_config(peer_id) + + rathole_toml.clear_remote_addr() + + data = {client_filename: rathole_toml.toml_str} + + # Update the rathole config map + KubeUtils.update_configmap(config_map=rathole_config_map, patch={"data": data}) + + def _add_dynamic_addr_to_rathole( + self, config: RatholeConfig, entrypoint: str = "web" + ) -> None: + """Add a port to the rathole proxy config map.""" + + rathole_proxy_config_map = KubeUtils.get_configmap( + self.k8rs_client, RATHOLE_PROXY_CONFIG_MAP + ) + + if rathole_proxy_config_map is None: + raise Exception("Rathole proxy config map not found.") + + rathole_proxy = rathole_proxy_config_map.data["rathole-dynamic.yml"] + + if not rathole_proxy: + rathole_proxy = {"http": {"routers": {}, "services": {}, "middlewares": {}}} + else: + rathole_proxy = yaml.safe_load(rathole_proxy) + + rathole_proxy["http"]["services"][config.server_name] = { + "loadBalancer": { + "servers": [{"url": f"http://rathole:{config.local_addr_port}"}] + } + } + + rathole_proxy["http"]["middlewares"]["strip-rathole-prefix"] = { + "replacePathRegex": {"regex": "^/rathole/(.*)", "replacement": "/$1"} + } + + proxy_rule = ( + f"Host(`{config.server_name}.syft.local`) || " + f"HostHeader(`{config.server_name}.syft.local`) && PathPrefix(`/rtunnel`)" + ) + + 
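        # Illustrative sketch (not part of the patch itself): assuming a peer named
        # "alice" mapped to local port 12345 and the default "web" entrypoint, the
        # Traefik dynamic configuration this method writes into rathole-dynamic.yml
        # would look roughly like:
        #
        #   http:
        #     routers:
        #       alice:
        #         rule: Host(`alice.syft.local`) || HostHeader(`alice.syft.local`) && PathPrefix(`/rtunnel`)
        #         service: alice
        #         entryPoints: ["web"]
        #         middlewares: ["strip-rathole-prefix"]
        #     services:
        #       alice:
        #         loadBalancer:
        #           servers:
        #             - url: http://rathole:12345
        #     middlewares:
        #       strip-rathole-prefix:
        #         replacePathRegex:
        #           regex: ^/rathole/(.*)
        #           replacement: /$1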
rathole_proxy["http"]["routers"][config.server_name] = { + "rule": proxy_rule, + "service": config.server_name, + "entryPoints": [entrypoint], + "middlewares": ["strip-rathole-prefix"], + } + + KubeUtils.update_configmap( + config_map=rathole_proxy_config_map, + patch={"data": {"rathole-dynamic.yml": yaml.safe_dump(rathole_proxy)}}, + ) + + self._expose_port_on_rathole_service(config.server_name, config.local_addr_port) + + def _remove_dynamic_addr_from_rathole(self, server_name: str) -> None: + """Remove a port from the rathole proxy config map.""" + + rathole_proxy_config_map = KubeUtils.get_configmap( + self.k8rs_client, RATHOLE_PROXY_CONFIG_MAP + ) + + if rathole_proxy_config_map is None: + raise Exception("Rathole proxy config map not found.") + + rathole_proxy = rathole_proxy_config_map.data["rathole-dynamic.yml"] + + if not rathole_proxy: + return + + rathole_proxy = yaml.safe_load(rathole_proxy) + + if server_name in rathole_proxy["http"]["routers"]: + del rathole_proxy["http"]["routers"][server_name] + + if server_name in rathole_proxy["http"]["services"]: + del rathole_proxy["http"]["services"][server_name] + + KubeUtils.update_configmap( + config_map=rathole_proxy_config_map, + patch={"data": {"rathole-dynamic.yml": yaml.safe_dump(rathole_proxy)}}, + ) + + self._remove_port_on_rathole_service(server_name) + + def _expose_port_on_rathole_service(self, port_name: str, port: int) -> None: + """Expose a port on the rathole service.""" + + rathole_service = KubeUtils.get_service(self.k8rs_client, "rathole") + + rathole_service = cast(Service, rathole_service) + + config = rathole_service.raw + + existing_port_idx = None + for idx, existing_port in enumerate(config["spec"]["ports"]): + if existing_port["name"] == port_name: + print("Port already exists.", existing_port_idx, port_name) + existing_port_idx = idx + break + + if existing_port_idx is not None: + config["spec"]["ports"][existing_port_idx]["port"] = port + config["spec"]["ports"][existing_port_idx]["targetPort"] = port + else: + config["spec"]["ports"].append( + { + "name": port_name, + "port": port, + "targetPort": port, + "protocol": "TCP", + } + ) + + rathole_service.patch(config) + + def _remove_port_on_rathole_service(self, port_name: str) -> None: + """Remove a port from the rathole service.""" + + rathole_service = KubeUtils.get_service(self.k8rs_client, "rathole") + + rathole_service = cast(Service, rathole_service) + + config = rathole_service.raw + + ports = config["spec"]["ports"] + + for port in ports: + if port["name"] == port_name: + ports.remove(port) + break + + rathole_service.patch(config) diff --git a/packages/syft/src/syft/service/network/rathole_toml.py b/packages/syft/src/syft/service/network/rathole_toml.py new file mode 100644 index 00000000000..8ded821279e --- /dev/null +++ b/packages/syft/src/syft/service/network/rathole_toml.py @@ -0,0 +1,247 @@ +# third party +import tomli +import tomli_w + +# relative +from .rathole import RatholeConfig + + +class TomlReaderWriter: + @staticmethod + def load(toml_str: str) -> dict: + return tomli.loads(toml_str) + + @staticmethod + def dump(toml_dict: str) -> str: + return tomli_w.dumps(toml_dict) + + +class RatholeBaseToml: + filename: str + + def __init__(self, toml_str: str) -> None: + self.toml_writer = TomlReaderWriter + self.toml_str = toml_str + + def read(self) -> dict: + return self.toml_writer.load(self.toml_str) + + def save(self, toml_dict: dict) -> None: + self.toml_str = self.toml_writer.dump(toml_dict) + + def _validate(self) -> bool: + raise 
NotImplementedError + + @property + def is_valid(self) -> bool: + return self._validate() + + +class RatholeClientToml(RatholeBaseToml): + filename: str = "client.toml" + + def set_remote_addr(self, remote_host: str) -> None: + """Add a new remote address to the client toml file.""" + + toml = self.read() + + # Add the new remote address + if "client" not in toml: + toml["client"] = {} + + toml["client"]["remote_addr"] = remote_host + + self.save(toml) + + def clear_remote_addr(self) -> None: + """Clear the remote address from the client toml file.""" + + toml = self.read() + + # Clear the remote address + if "client" not in toml: + return + + toml["client"]["remote_addr"] = "" + + self.save(toml) + + def add_config(self, config: RatholeConfig) -> None: + """Add a new config to the toml file.""" + + toml = self.read() + + # Add the new config + if "services" not in toml["client"]: + toml["client"]["services"] = {} + + if config.uuid not in toml["client"]["services"]: + toml["client"]["services"][config.uuid] = {} + + toml["client"]["services"][config.uuid] = { + "token": config.secret_token, + "local_addr": config.local_address, + } + + self.save(toml) + + def remove_config(self, uuid: str) -> None: + """Remove a config from the toml file.""" + + toml = self.read() + + # Remove the config + if "services" not in toml["client"]: + return + + if uuid not in toml["client"]["services"]: + return + + del toml["client"]["services"][uuid] + + self.save(toml) + + def update_config(self, config: RatholeConfig) -> None: + """Update a config in the toml file.""" + + toml = self.read() + + # Update the config + if "services" not in toml["client"]: + return + + if config.uuid not in toml["client"]["services"]: + return + + toml["client"]["services"][config.uuid] = { + "token": config.secret_token, + "local_addr": config.local_address, + } + + self.save(toml) + + def get_config(self, uuid: str) -> RatholeConfig | None: + """Get a config from the toml file.""" + + toml = self.read() + + # Get the config + if "services" not in toml["client"]: + return None + + if uuid not in toml["client"]["services"]: + return None + + service = toml["client"]["services"][uuid] + + return RatholeConfig( + uuid=uuid, + secret_token=service["token"], + local_addr_host=service["local_addr"].split(":")[0], + local_addr_port=service["local_addr"].split(":")[1], + ) + + def _validate(self) -> bool: + toml = self.read() + + if not toml["client"]["remote_addr"]: + return False + + for uuid, config in toml["client"]["services"].items(): + if not uuid: + return False + + if not config["token"] or not config["local_addr"]: + return False + + return True + + +class RatholeServerToml(RatholeBaseToml): + filename: str = "server.toml" + + def set_rathole_listener_addr(self, bind_addr: str) -> None: + """Set the bind address in the server toml file.""" + + toml = self.read() + + # Set the bind address + toml["server"]["bind_addr"] = bind_addr + + self.save(toml) + + def get_rathole_listener_addr(self) -> str: + """Get the bind address from the server toml file.""" + + toml = self.read() + + return toml["server"]["bind_addr"] + + def add_config(self, config: RatholeConfig) -> None: + """Add a new config to the toml file.""" + + toml = self.read() + + # Add the new config + if "services" not in toml["server"]: + toml["server"]["services"] = {} + + if config.uuid not in toml["server"]["services"]: + toml["server"]["services"][config.uuid] = {} + + toml["server"]["services"][config.uuid] = { + "token": config.secret_token, + "bind_addr": 
config.local_address, + } + + self.save(toml) + + def remove_config(self, uuid: str) -> None: + """Remove a config from the toml file.""" + + toml = self.read() + + # Remove the config + if "services" not in toml["server"]: + return + + if uuid not in toml["server"]["services"]: + return + + del toml["server"]["services"][uuid] + + self.save(toml) + + def update_config(self, config: RatholeConfig) -> None: + """Update a config in the toml file.""" + + toml = self.read() + + # Update the config + if "services" not in toml["server"]: + return + + if config.uuid not in toml["server"]["services"]: + return + + toml["server"]["services"][config.uuid] = { + "token": config.secret_token, + "bind_addr": config.local_address, + } + + self.save(toml) + + def _validate(self) -> bool: + toml = self.read() + + if not toml["server"]["bind_addr"]: + return False + + for uuid, config in toml["server"]["services"].items(): + if not uuid: + return False + + if not config["token"] or not config["bind_addr"]: + return False + + return True diff --git a/packages/syft/src/syft/service/network/reverse_tunnel_service.py b/packages/syft/src/syft/service/network/reverse_tunnel_service.py new file mode 100644 index 00000000000..e155dd3f5b5 --- /dev/null +++ b/packages/syft/src/syft/service/network/reverse_tunnel_service.py @@ -0,0 +1,48 @@ +# relative +from ...types.server_url import ServerURL +from .rathole_config_builder import RatholeConfigBuilder +from .routes import ServerRoute +from .server_peer import ServerPeer + + +class ReverseTunnelService: + def __init__(self) -> None: + self.builder = RatholeConfigBuilder() + + def set_client_config( + self, + self_server_peer: ServerPeer, + remote_server_route: ServerRoute, + ) -> None: + rathole_route = self_server_peer.get_rtunnel_route() + if not rathole_route: + raise Exception( + "Failed to exchange routes via . 
" + + f"Peer: {self_server_peer} has no rathole route: {rathole_route}" + ) + + remote_url = ServerURL( + host_or_ip=remote_server_route.host_or_ip, port=remote_server_route.port + ) + rathole_remote_addr = remote_url.as_container_host() + + remote_addr = rathole_remote_addr.url_no_protocol + + self.builder.add_host_to_client( + peer_name=self_server_peer.name, + peer_id=str(self_server_peer.id), + rtunnel_token=rathole_route.rtunnel_token, + remote_addr=remote_addr, + ) + + def set_server_config(self, remote_peer: ServerPeer) -> None: + rathole_route = remote_peer.get_rtunnel_route() + self.builder.add_host_to_server(remote_peer) if rathole_route else None + + def clear_client_config(self, self_server_peer: ServerPeer) -> None: + self.builder.remove_host_from_client(str(self_server_peer.id)) + + def clear_server_config(self, remote_peer: ServerPeer) -> None: + self.builder.remove_host_from_server( + str(remote_peer.id), server_name=remote_peer.name + ) diff --git a/packages/syft/src/syft/service/network/routes.py b/packages/syft/src/syft/service/network/routes.py index 95f3eeec9ab..f6de35f75fd 100644 --- a/packages/syft/src/syft/service/network/routes.py +++ b/packages/syft/src/syft/service/network/routes.py @@ -5,176 +5,196 @@ import secrets from typing import Any from typing import TYPE_CHECKING -from typing import cast # third party from typing_extensions import Self # relative -from ...abstract_node import AbstractNode +from ...abstract_server import AbstractServer from ...client.client import HTTPConnection -from ...client.client import NodeConnection from ...client.client import PythonConnection +from ...client.client import ServerConnection from ...client.client import SyftClient -from ...client.client import VeilidConnection -from ...node.worker_settings import WorkerSettings from ...serde.serializable import serializable +from ...server.worker_settings import WorkerSettings +from ...types.errors import SyftException +from ...types.result import as_result from ...types.syft_object import SYFT_OBJECT_VERSION_1 -from ...types.syft_object import SYFT_OBJECT_VERSION_2 from ...types.syft_object import SyftObject from ...types.transforms import TransformContext from ...types.uid import UID from ..context import AuthedServiceContext -from ..context import NodeServiceContext -from ..response import SyftError +from ..context import ServerServiceContext if TYPE_CHECKING: # relative - from .node_peer import NodePeer + from .server_peer import ServerPeer -class NodeRoute: - def client_with_context(self, context: NodeServiceContext) -> SyftClient: +@serializable(canonical_name="ServerRoute", version=1) +class ServerRoute: + def client_with_context(self, context: ServerServiceContext) -> SyftClient: + """ + Convert the current route (self) to a connection (either HTTP, Veilid or Python) + and create a SyftClient from the connection. + + Args: + context (ServerServiceContext): The ServerServiceContext containing the server information. 
+ + Returns: + SyftClient: Returns the created SyftClient + """ connection = route_to_connection(route=self, context=context) - client_type = connection.get_client_type() - if isinstance(client_type, SyftError): - return client_type - if context.node is None: - return SyftError(message=f"context {context}'s node is None") - return client_type(connection=connection, credentials=context.node.signing_key) - - def validate_with_context(self, context: AuthedServiceContext) -> NodePeer: + client_type = connection.get_client_type().unwrap() + return client_type( + connection=connection, credentials=context.server.signing_key + ) + + @as_result(SyftException) + def validate_with_context(self, context: AuthedServiceContext) -> ServerPeer: # relative - from .node_peer import NodePeer + from .server_peer import ServerPeer - # Step 1: Check if the given route is able to reach the given node + # Step 1: Check if the given route is able to reach the given server # As we allow the user to give custom routes, we need to check the reachability of the route self_client = self.client_with_context(context=context) # generating a random challenge random_challenge = secrets.token_bytes(16) challenge_signature = self_client.api.services.network.ping(random_challenge) - - if isinstance(challenge_signature, SyftError): - return challenge_signature - try: - context.node = cast(AbstractNode, context.node) # Verifying if the challenge is valid - context.node.verify_key.verify_key.verify( + context.server.verify_key.verify_key.verify( random_challenge, challenge_signature ) except Exception: - return SyftError(message="Signature Verification Failed in ping") + raise SyftException(public_message="Signature Verification Failed in ping") - # Step 2: Create a Node Peer with the given route - self_node_peer = context.node.settings.to(NodePeer) - self_node_peer.node_routes.append(self) + # Step 2: Create a Server Peer with the given route + self_server_peer: ServerPeer = context.server.settings.to(ServerPeer) + self_server_peer.server_routes.append(self) - return self_node_peer + return self_server_peer @serializable() -class HTTPNodeRoute(SyftObject, NodeRoute): - __canonical_name__ = "HTTPNodeRoute" - __version__ = SYFT_OBJECT_VERSION_2 +class HTTPServerRoute(SyftObject, ServerRoute): + __canonical_name__ = "HTTPServerRoute" + __version__ = SYFT_OBJECT_VERSION_1 + id: UID | None = None # type: ignore host_or_ip: str private: bool = False protocol: str = "http" port: int = 80 proxy_target_uid: UID | None = None priority: int = 1 + rtunnel_token: str | None = None def __eq__(self, other: Any) -> bool: - if isinstance(other, HTTPNodeRoute): - return hash(self) == hash(other) - return self == other + if not isinstance(other, HTTPServerRoute): + return False + return hash(self) == hash(other) def __hash__(self) -> int: - return hash(self.host_or_ip) + hash(self.port) + hash(self.protocol) - - -@serializable() -class VeilidNodeRoute(SyftObject, NodeRoute): - __canonical_name__ = "VeilidNodeRoute" - __version__ = SYFT_OBJECT_VERSION_1 - - vld_key: str - proxy_target_uid: UID | None = None - priority: int = 1 - - def __eq__(self, other: Any) -> bool: - if isinstance(other, VeilidNodeRoute): - return hash(self) == hash(other) - return self == other + return ( + hash(self.host_or_ip) + + hash(self.port) + + hash(self.protocol) + + hash(self.proxy_target_uid) + + hash(self.rtunnel_token) + ) - def __hash__(self) -> int: - return hash(self.vld_key) + def __str__(self) -> str: + return 
f"{self.protocol}://{self.host_or_ip}:{self.port}" @serializable() -class PythonNodeRoute(SyftObject, NodeRoute): - __canonical_name__ = "PythonNodeRoute" - __version__ = SYFT_OBJECT_VERSION_2 +class PythonServerRoute(SyftObject, ServerRoute): + __canonical_name__ = "PythonServerRoute" + __version__ = SYFT_OBJECT_VERSION_1 + id: UID | None = None # type: ignore worker_settings: WorkerSettings proxy_target_uid: UID | None = None priority: int = 1 @property - def node(self) -> AbstractNode | None: + def server(self) -> AbstractServer | None: # relative - from ...node.worker import Worker + from ...server.worker import Worker - node = Worker( + server = Worker( id=self.worker_settings.id, name=self.worker_settings.name, - node_type=self.worker_settings.node_type, - node_side_type=self.worker_settings.node_side_type, + server_type=self.worker_settings.server_type, + server_side_type=self.worker_settings.server_side_type, signing_key=self.worker_settings.signing_key, - document_store_config=self.worker_settings.document_store_config, - action_store_config=self.worker_settings.action_store_config, + db_config=self.worker_settings.db_config, processes=1, ) - return node + return server @classmethod - def with_node(cls, node: AbstractNode) -> Self: - worker_settings = WorkerSettings.from_node(node) + def with_server(cls, server: AbstractServer) -> Self: + worker_settings = WorkerSettings.from_server(server) return cls(id=worker_settings.id, worker_settings=worker_settings) def __eq__(self, other: Any) -> bool: - if isinstance(other, PythonNodeRoute): - return hash(self) == hash(other) - return self == other + if not isinstance(other, PythonServerRoute): + return False + return hash(self) == hash(other) + + def __hash__(self) -> int: + return ( + hash(self.worker_settings.id) + + hash(self.worker_settings.name) + + hash(self.worker_settings.server_type) + + hash(self.worker_settings.server_side_type) + + hash(self.worker_settings.signing_key) + ) + + def __str__(self) -> str: + return "PythonServerRoute" + + +@serializable() +class VeilidServerRoute(SyftObject, ServerRoute): + __canonical_name__ = "VeilidServerRoute" + __version__ = SYFT_OBJECT_VERSION_1 + + vld_key: str + proxy_target_uid: UID | None = None + priority: int = 1 + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, VeilidServerRoute): + return False + return hash(self) == hash(other) def __hash__(self) -> int: - return hash(self.worker_settings.id) + return hash(self.vld_key) + hash(self.proxy_target_uid) -NodeRouteType = HTTPNodeRoute | PythonNodeRoute | VeilidNodeRoute +ServerRouteTypeV1 = HTTPServerRoute | PythonServerRoute | VeilidServerRoute +ServerRouteType = HTTPServerRoute | PythonServerRoute def route_to_connection( - route: NodeRoute, context: TransformContext | None = None -) -> NodeConnection: - if isinstance(route, HTTPNodeRoute): + route: ServerRoute, context: TransformContext | None = None +) -> ServerConnection: + if isinstance(route, HTTPServerRoute): return route.to(HTTPConnection, context=context) - elif isinstance(route, PythonNodeRoute): + elif isinstance(route, PythonServerRoute): return route.to(PythonConnection, context=context) - elif isinstance(route, VeilidNodeRoute): - return route.to(VeilidConnection, context=context) else: raise ValueError(f"Route {route} is not supported.") -def connection_to_route(connection: NodeConnection) -> NodeRoute: +def connection_to_route(connection: ServerConnection) -> ServerRoute: if isinstance(connection, HTTPConnection): - return 
connection.to(HTTPNodeRoute) + return connection.to(HTTPServerRoute) elif isinstance(connection, PythonConnection): # type: ignore[unreachable] - return connection.to(PythonNodeRoute) - elif isinstance(connection, VeilidConnection): - return connection.to(VeilidNodeRoute) + return connection.to(PythonServerRoute) else: raise ValueError(f"Connection {connection} is not supported.") diff --git a/packages/syft/src/syft/service/network/server_peer.py b/packages/syft/src/syft/service/network/server_peer.py new file mode 100644 index 00000000000..594c5fe19dc --- /dev/null +++ b/packages/syft/src/syft/service/network/server_peer.py @@ -0,0 +1,311 @@ +# stdlib +from collections.abc import Callable +from enum import Enum +import logging + +# relative +from ...abstract_server import ServerType +from ...client.client import ServerConnection +from ...client.client import SyftClient +from ...serde.serializable import serializable +from ...server.credentials import SyftSigningKey +from ...server.credentials import SyftVerifyKey +from ...types.datetime import DateTime +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_object import PartialSyftObject +from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ...types.syft_object import SyftObject +from ...types.transforms import TransformContext +from ...types.uid import UID +from ..context import ServerServiceContext +from ..metadata.server_metadata import ServerMetadata +from .routes import HTTPServerRoute +from .routes import PythonServerRoute +from .routes import ServerRoute +from .routes import ServerRouteType +from .routes import VeilidServerRoute +from .routes import connection_to_route +from .routes import route_to_connection + +logger = logging.getLogger(__name__) + + +@serializable(canonical_name="ServerPeerConnectionStatus", version=1) +class ServerPeerConnectionStatus(Enum): + ACTIVE = "ACTIVE" + INACTIVE = "INACTIVE" + TIMEOUT = "TIMEOUT" + + +@serializable() +class ServerPeer(SyftObject): + # version + __canonical_name__ = "ServerPeer" + __version__ = SYFT_OBJECT_VERSION_1 + + __attr_searchable__ = ["name", "server_type"] + __attr_unique__ = ["verify_key"] + __repr_attrs__ = [ + "name", + "server_type", + "admin_email", + "ping_status", + "ping_status_message", + "pinged_timestamp", + ] + + id: UID | None = None # type: ignore[assignment] + name: str + verify_key: SyftVerifyKey + server_routes: list[ServerRouteType] = [] + server_type: ServerType + admin_email: str + ping_status: ServerPeerConnectionStatus | None = None + ping_status_message: str | None = None + pinged_timestamp: DateTime | None = None + + def existed_route(self, route: ServerRouteType) -> tuple[bool, int | None]: + """Check if a route exists in self.server_routes + + Args: + route: the route to be checked. For now it can be either + HTTPServerRoute or PythonServerRoute + + Returns: + if the route exists, returns (True, index of the existed route in self.server_routes) + if the route does not exist returns (False, None) + """ + + if route: + if not isinstance( + route, HTTPServerRoute | PythonServerRoute | VeilidServerRoute + ): + raise ValueError(f"Unsupported route type: {type(route)}") + for i, r in enumerate(self.server_routes): + if route == r: + return (True, i) + + return (False, None) + + def update_route_priority(self, route: ServerRoute) -> ServerRoute: + """ + Assign the new_route's priority to be current max + 1 + + Args: + route (ServerRoute): The new route whose priority is to be updated. 
+ + Returns: + ServerRoute: The new route with the updated priority + """ + current_max_priority: int = max(route.priority for route in self.server_routes) + route.priority = current_max_priority + 1 + return route + + def pick_highest_priority_route(self, oldest: bool = True) -> ServerRoute: + """ + Picks the route with the highest priority from the list of server routes. + + Args: + oldest (bool): + If True, picks the oldest route to have the highest priority, + meaning the route with min priority value. + If False, picks the most recent route with the highest priority, + meaning the route with max priority value. + + Returns: + ServerRoute: The route with the highest priority. + + """ + highest_priority_route: ServerRoute = self.server_routes[-1] + for route in self.server_routes[:-1]: + if oldest: + if route.priority < highest_priority_route.priority: + highest_priority_route = route + else: + if route.priority > highest_priority_route.priority: + highest_priority_route = route + return highest_priority_route + + def update_route(self, route: ServerRoute) -> None: + """ + Update the route for the server. + If the route already exists, return it. + If the route is new, assign it to have the priority of (current_max + 1) + + Args: + route (ServerRoute): The new route to be added to the peer. + """ + existed, idx = self.existed_route(route) + if existed: + self.server_routes[idx] = route # type: ignore + else: + new_route = self.update_route_priority(route) + self.server_routes.append(new_route) + + def update_routes(self, new_routes: list[ServerRoute]) -> None: + """ + Update multiple routes of the server peer. + + This method takes a list of new routes as input. + It first updates the priorities of the new routes. + Then, for each new route, it checks if the route already exists for the server peer. + If it does, it updates the priority of the existing route. + If it doesn't, it adds the new route to the server. + + Args: + new_routes (list[ServerRoute]): The new routes to be added to the server. + + Returns: + None + """ + for new_route in new_routes: + self.update_route(new_route) + + @as_result(SyftException) + def update_existed_route_priority( + self, route: ServerRoute, priority: int | None = None + ) -> ServerRouteType: + """ + Update the priority of an existed route. + + Args: + route (ServerRoute): The route whose priority is to be updated. + priority (int | None): The new priority of the route. If not given, + the route will be assigned with the highest priority. + + Returns: + ServerRoute: The route with updated priority if the route exists + """ + if priority is not None and priority <= 0: + raise SyftException( + public_message="Priority must be greater than 0. Now it is {priority}." + ) + + existed, index = self.existed_route(route=route) + + if not existed or index is None: + raise SyftException( + public_message=f"Route with id {route.id} does not exist." 
+ ) + + if priority is not None: + self.server_routes[index].priority = priority + else: + self.server_routes[index].priority = self.update_route_priority( + route + ).priority + + return self.server_routes[index] + + @staticmethod + def from_client(client: SyftClient) -> "ServerPeer": + if not client.metadata: + raise ValueError("Client has to have metadata first") + + peer = client.metadata.to(ServerMetadata).to(ServerPeer) + route = connection_to_route(client.connection) + peer.server_routes.append(route) + return peer + + @property + def latest_added_route(self) -> ServerRoute | None: + """ + Returns the latest added route from the list of server routes. + + Returns: + ServerRoute | None: The latest added route, or None if there are no routes. + """ + return self.server_routes[-1] if self.server_routes else None + + @as_result(SyftException) + def client_with_context(self, context: ServerServiceContext) -> SyftClient: + # third party + + if len(self.server_routes) < 1: + raise ValueError(f"No routes to peer: {self}") + # select the route with highest priority to connect to the peer + final_route: ServerRoute = self.pick_highest_priority_route() + connection: ServerConnection = route_to_connection(route=final_route) + client_type = connection.get_client_type().unwrap( + public_message=f"Failed to establish a connection with {self.server_type} '{self.name}'" + ) + + return client_type( + connection=connection, credentials=context.server.signing_key + ) + + @as_result(SyftException) + def client_with_key(self, credentials: SyftSigningKey) -> SyftClient: + if len(self.server_routes) < 1: + raise SyftException(public_message=f"No routes to peer: {self}") + + final_route: ServerRoute = self.pick_highest_priority_route() + + connection = route_to_connection(route=final_route) + client_type = connection.get_client_type().unwrap() + return client_type(connection=connection, credentials=credentials) + + @property + def guest_client(self) -> SyftClient: + guest_key = SyftSigningKey.generate() + return self.client_with_key(credentials=guest_key).unwrap() + + def proxy_from(self, client: SyftClient) -> SyftClient: + return client.proxy_to(self) + + def get_rtunnel_route(self) -> HTTPServerRoute | None: + for route in self.server_routes: + if hasattr(route, "rtunnel_token") and route.rtunnel_token: + return route + return None + + def delete_route(self, route: ServerRouteType) -> None: + """ + Deletes a route from the peer's route list. + Takes O(n) where is n is the number of routes in self.server_routes. + + Args: + route (ServerRouteType): The route to be deleted; + + Returns: + None + """ + if route: + try: + self.server_routes = [r for r in self.server_routes if r != route] + except Exception as e: + raise SyftException( + public_message=f"Error deleting route with id {route.id}. 
Exception: {e}" + ) + + return None + + +@serializable() +class ServerPeerUpdate(PartialSyftObject): + __canonical_name__ = "ServerPeerUpdate" + __version__ = SYFT_OBJECT_VERSION_1 + + id: UID + name: str + server_routes: list[ServerRouteType] + admin_email: str + ping_status: ServerPeerConnectionStatus + ping_status_message: str + pinged_timestamp: DateTime + + +def drop_veilid_route() -> Callable: + def _drop_veilid_route(context: TransformContext) -> TransformContext: + if context.output: + server_routes = context.output["server_routes"] + new_routes = [ + server_route + for server_route in server_routes + if not isinstance(server_route, VeilidServerRoute) + ] + context.output["server_routes"] = new_routes + return context + + return _drop_veilid_route diff --git a/packages/syft/src/syft/service/network/utils.py b/packages/syft/src/syft/service/network/utils.py new file mode 100644 index 00000000000..655d40b9b3e --- /dev/null +++ b/packages/syft/src/syft/service/network/utils.py @@ -0,0 +1,128 @@ +# stdlib +import logging +import threading +import time + +# relative +from ...serde.serializable import serializable +from ...types.datetime import DateTime +from ...types.errors import SyftException +from ..context import AuthedServiceContext +from ..response import SyftError +from .network_service import ServerPeerAssociationStatus +from .server_peer import ServerPeer +from .server_peer import ServerPeerConnectionStatus +from .server_peer import ServerPeerUpdate + +logger = logging.getLogger(__name__) + + +@serializable(without=["thread"], canonical_name="PeerHealthCheckTask", version=1) +class PeerHealthCheckTask: + repeat_time = 10 # in seconds + + def __init__(self) -> None: + self.thread: threading.Thread | None = None + self.started_time = None + self._stop = False + + def peer_route_heathcheck(self, context: AuthedServiceContext) -> None: + """ + Perform a health check on the peers in the network stash. + - If peer is accessible, ping the peer. + - Peer is connected to the network. + + Args: + context (AuthedServiceContext): The authenticated service context. 
+ + Returns: + None + """ + network_stash = context.server.services.network.stash + + try: + all_peers: list[ServerPeer] = network_stash.get_all( + context.server.verify_key + ).unwrap() # type: ignore + except SyftException as exc: + msg = exc._private_message or exc.public_message + logger.error(f"Failed to fetch peers from stash: {msg}") + raise SyftException(message="Failed to fetch peers from stash") + + for peer in all_peers: + peer_update = ServerPeerUpdate(id=peer.id) + peer_update.pinged_timestamp = DateTime.now() + try: + peer_client = peer.client_with_context(context=context) + if peer_client.is_err(): + logger.error( + f"Failed to create client for peer: {peer}: {peer_client.err()}" + ) + peer_update.ping_status = ServerPeerConnectionStatus.TIMEOUT + peer_client = None # type: ignore [assignment] + except Exception as e: + logger.error(f"Failed to create client for peer: {peer}", exc_info=e) + + peer_update.ping_status = ServerPeerConnectionStatus.TIMEOUT + peer_client = None + + if peer_client is not None: + peer_client = peer_client.ok() # type: ignore [assignment] + peer_status = peer_client.api.services.network.check_peer_association( # type: ignore [union-attr] + peer_id=context.server.id + ) + peer_update.ping_status = ( + ServerPeerConnectionStatus.ACTIVE + if peer_status == ServerPeerAssociationStatus.PEER_ASSOCIATED + else ServerPeerConnectionStatus.INACTIVE + ) + if isinstance(peer_status, SyftError): + peer_update.ping_status_message = ( + f"Error `{peer_status.message}` when pinging peer '{peer.name}'" + ) + else: + peer_update.ping_status_message = ( + f"Peer '{peer.name}''s ping status: " + f"{peer_update.ping_status.value.lower()}" + ) + + result = network_stash.update( + credentials=context.server.verify_key, + obj=peer_update, + has_permission=True, + ) + + if result.is_err(): + logger.error(f"Failed to update peer in stash: {result.err()}") + + return None + + def _run(self, context: AuthedServiceContext) -> None: + self.started_time = DateTime.now() + while True: + if self._stop: + break + self.peer_route_heathcheck(context) + time.sleep(self.repeat_time) + + def run(self, context: AuthedServiceContext) -> None: + if self.thread is not None: + logger.info( + f"Peer health check task is already running in thread " + f"{self.thread.name} with ID: {self.thread.ident}." + ) + else: + self.thread = threading.Thread(target=self._run, args=(context,)) + logger.info( + f"Start running peers health check in thread " + f"{self.thread.name} with ID: {self.thread.ident}." 
+ ) + self.thread.start() + + def stop(self) -> None: + if self.thread: + self._stop = True + self.thread.join() + self.thread = None + self.started_time = None + logger.info("Peer health check task stopped.") diff --git a/packages/syft/src/syft/service/notification/email_templates.py b/packages/syft/src/syft/service/notification/email_templates.py index 09de0297bf1..97a414c1fda 100644 --- a/packages/syft/src/syft/service/notification/email_templates.py +++ b/packages/syft/src/syft/service/notification/email_templates.py @@ -1,45 +1,258 @@ # stdlib +from datetime import datetime from typing import TYPE_CHECKING from typing import cast # relative -from ...abstract_node import AbstractNode +from ...serde.serializable import serializable from ...store.linked_obj import LinkedObject from ..context import AuthedServiceContext if TYPE_CHECKING: # relative + from ..request.request import Request from .notifications import Notification class EmailTemplate: @staticmethod def email_title(notification: "Notification", context: AuthedServiceContext) -> str: - return "" + raise NotImplementedError( + "Email Template subclasses must implement the email_title method." + ) + + @staticmethod + def email_body(notification: "Notification", context: AuthedServiceContext) -> str: + raise NotImplementedError( + "Email Template subclasses must implement the email_body method." + ) + + @staticmethod + def batched_email_title( + notifications: list["Notification"], context: AuthedServiceContext + ) -> str: + raise NotImplementedError( + "Email Template subclasses must implement the batched_email_title method." + ) + + @staticmethod + def batched_email_body( + notifications: list["Notification"], context: AuthedServiceContext + ) -> str: + raise NotImplementedError( + "Email Template subclasses must implement the batched_email_body method." + ) + + +@serializable(canonical_name="FailedJobTemplate", version=1) +class FailedJobTemplate(EmailTemplate): + @staticmethod + def email_title(notification: "Notification", context: AuthedServiceContext) -> str: + return "Job Failed Notification" + + @staticmethod + def email_body(notification: "Notification", context: AuthedServiceContext) -> str: + notification.linked_obj = cast(LinkedObject, notification.linked_obj) + queueitem_obj = notification.linked_obj.resolve_with_context( + context=context + ).unwrap() + + worker_pool_obj = queueitem_obj.worker_pool.resolve_with_context( + context=context + ).unwrap() + method = queueitem_obj.method + if queueitem_obj.service == "apiservice": + method = queueitem_obj.kwargs.pop("path", "") + queueitem_obj.kwargs.pop("log_id") + + head = """ + + + + Job Failed Notification + + + """ + body = f""" + +
+            Job Failed Notification
+
+            Hello,
+
+            We regret to inform you that your function job has encountered an
+            unexpected error and could not be completed successfully.
+
+            Job Details
+
+            Job ID: {queueitem_obj.job_id}
+            Worker Pool: {worker_pool_obj.name}
+            Method: {method}
+            Service: {queueitem_obj.service}
+            Arguments (args): {queueitem_obj.args}
+            Keyword Arguments (kwargs): {queueitem_obj.kwargs}
    + + """ + return f"""{head} {body}""" + + +@serializable(canonical_name="PasswordResetTemplate", version=1) +class PasswordResetTemplate(EmailTemplate): + @staticmethod + def email_title(notification: "Notification", context: AuthedServiceContext) -> str: + return "Password Reset Requested" @staticmethod def email_body(notification: "Notification", context: AuthedServiceContext) -> str: - return "" + user_service = context.server.services.user + admin_verify_key = user_service.root_verify_key + user = user_service.stash.get_by_verify_key( + credentials=admin_verify_key, verify_key=notification.to_user_verify_key + ).unwrap() + if not user: + raise Exception("User not found!") + + user.reset_token = user_service.generate_new_password_reset_token( + context.server.settings.pwd_token_config + ) + user.reset_token_date = datetime.now() + + result = user_service.stash.update( + credentials=context.credentials, obj=user, has_permission=True + ) + if result.is_err(): + raise Exception("Couldn't update the user password") + expiry_time = context.server.services.settings.get( + context=context + ).pwd_token_config.token_exp_min + head = """ + + """ + body = f""" +
+            Password Reset
+
+            We received a request to reset your password. Your new temporary token is:
+
+            {user.reset_token}
+
+            Use syft_client.reset_password(token='{user.reset_token}', new_password=*****)
+            to reset your password. This token is valid for {expiry_time} seconds only.
+
+            If you didn't request a password reset, please ignore this email.
    + """ + return f"""{head} {body}""" + + +@serializable(canonical_name="OnboardEmailTemplate", version=1) class OnBoardEmailTemplate(EmailTemplate): @staticmethod def email_title(notification: "Notification", context: AuthedServiceContext) -> str: - context.node = cast(AbstractNode, context.node) - return f"Welcome to {context.node.name} node!" + return f"Welcome to {context.server.name} server!" @staticmethod def email_body(notification: "Notification", context: AuthedServiceContext) -> str: - context.node = cast(AbstractNode, context.node) - user_service = context.node.get_service("userservice") - admin_name = user_service.get_by_verify_key( - user_service.admin_verify_key() - ).name + user_service = context.server.services.user + admin_verify_key = user_service.root_verify_key + admin = user_service.get_by_verify_key(admin_verify_key).unwrap() + admin_name = admin.name head = ( f""" - Welcome to {context.node.name} + Welcome to {context.server.name} """ + """ + + """ + body = f""" + +
+            Batched Requests Notification
+
+            Hello Admin,
+
+            This is to inform you that a batch of requests has been processed.
+            Below are the details of the most recent requests:
+
+            Request ID | User | User Email | Date | Status | Changes
+            {notifications_info}
+            {see_more_info}
    + + """ + return f"""{head} {body}""" + + +@serializable(canonical_name="RequestUpdateEmailTemplate", version=1) class RequestUpdateEmailTemplate(EmailTemplate): @staticmethod def email_title(notification: "Notification", context: AuthedServiceContext) -> str: - context.node = cast(AbstractNode, context.node) - return f"Domain {context.node.name}: {notification.subject}" + return f"Datasite {context.server.name}: {notification.subject}" @staticmethod def email_body(notification: "Notification", context: AuthedServiceContext) -> str: - context.node = cast(AbstractNode, context.node) + # relative + from ..request.request import RequestStatus + notification.linked_obj = cast(LinkedObject, notification.linked_obj) - request_obj = notification.linked_obj.resolve_with_context(context=context).ok() + request_obj: Request = notification.linked_obj.resolve_with_context( + context=context + ).unwrap() badge_color = "red" if request_obj.status.name == "REJECTED" else "green" + + request_id = request_obj.id + request_name = request_obj.requesting_user_name + request_email = request_obj.requesting_user_email + request_time = request_obj.request_time + request_status = request_obj.status + request_status_name = request_status.name # fails in l0 check right now + request_changes = ",".join( + [change.__class__.__name__ for change in request_obj.changes] + ) + + deny_reason_html = "" + if request_status == RequestStatus.REJECTED: + deny_reason_or_err = request_obj.get_deny_reason(context=context) + if deny_reason_or_err.is_err(): + deny_reason = None + else: + deny_reason = deny_reason_or_err.unwrap() + + if not isinstance(deny_reason, str) or not len(deny_reason): + deny_reason = ( + "No reason provided, please contact the admin for more information." + ) + + deny_reason_html = f"

+            Deny Reason: {deny_reason}
+            "
+        head = """
            Access Request Notification
@@ -379,18 +805,33 @@ def email_body(notification: "Notification", context: AuthedServiceContext) -> str:
             Request Details
-            ID: {request_obj.id}
+            ID: {request_id}
             Submitted By:
-            {request_obj.requesting_user_name} {request_obj.requesting_user_email or ""}
+            {request_name} {request_email}
-            Date: {request_obj.request_time}
-            Status:
-            {request_obj.status.name}
+            Date: {request_time}
+            Status:
+            {request_status_name}
+            {deny_reason_html}
             Changes:
-            {",".join([change.__class__.__name__ for change in request_obj.changes])}
+            {request_changes}
+
+            Use:
+            request = client.api.services.request.get_by_uid(uid=sy.UID("{request_id}"))
+            to get this specific request.
+
+            Or you can view all requests with:
+            client.requests

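For illustration only (not part of this patch): the templates above are plain static builders that interpolate request and job fields into an HTML string. Below is a minimal, self-contained sketch of that same pattern, using made-up stand-in values rather than Syft's real Notification and AuthedServiceContext objects.

# Minimal sketch of the email-template pattern shown above.
# All names and values here are stand-ins; the real templates pull them
# from the notification's linked object and the service context.

class RequestUpdateSketch:
    @staticmethod
    def email_title(server_name: str, subject: str) -> str:
        return f"Datasite {server_name}: {subject}"

    @staticmethod
    def email_body(request_id: str, requester: str, status: str) -> str:
        # same red/green badge rule as the template above
        badge_color = "red" if status == "REJECTED" else "green"
        return (
            "<h1>Access Request Notification</h1>"
            f"<p>ID: {request_id}</p>"
            f"<p>Submitted By: {requester}</p>"
            f"<p>Status: <span style='color:{badge_color}'>{status}</span></p>"
        )


print(RequestUpdateSketch.email_title("canada-datasite", "Request approved"))
print(RequestUpdateSketch.email_body("abc123", "Jane Doe", "APPROVED"))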
    diff --git a/packages/syft/src/syft/service/notification/notification_service.py b/packages/syft/src/syft/service/notification/notification_service.py index d4738a0b68c..7d6cb83e2f1 100644 --- a/packages/syft/src/syft/service/notification/notification_service.py +++ b/packages/syft/src/syft/service/notification/notification_service.py @@ -1,16 +1,15 @@ # stdlib -from typing import cast # relative -from ...abstract_node import AbstractNode from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager +from ...store.document_store_errors import StashException +from ...types.errors import SyftException +from ...types.result import as_result from ...types.uid import UID -from ...util.telemetry import instrument from ..action.action_permissions import ActionObjectREAD from ..context import AuthedServiceContext from ..notifier.notifier import NotifierSettings -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES @@ -27,20 +26,17 @@ from .notifications import ReplyNotification -@instrument -@serializable() +@serializable(canonical_name="NotificationService", version=1) class NotificationService(AbstractService): - store: DocumentStore stash: NotificationStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = NotificationStash(store=store) @service_method(path="notifications.send", name="send") def send( self, context: AuthedServiceContext, notification: CreateNotification - ) -> Notification | SyftError: + ) -> Notification: """Send a new notification""" new_notification = notification.to(Notification, context=context) @@ -51,44 +47,31 @@ def send( ) ] - result = self.stash.set( + self.stash.set( context.credentials, new_notification, add_permissions=permissions - ) - context.node = cast(AbstractNode, context.node) - notifier_service = context.node.get_service("notifierservice") - - res = notifier_service.dispatch_notification(context, new_notification) - if isinstance(res, SyftError): - return res + ).unwrap() - if result.is_err(): - return SyftError(message=str(result.err())) - return result.ok() + context.server.services.notifier.dispatch_notification( + context, new_notification + ).unwrap() + return new_notification @service_method(path="notifications.reply", name="reply", roles=GUEST_ROLE_LEVEL) def reply( self, context: AuthedServiceContext, reply: ReplyNotification, - ) -> ReplyNotification | SyftError: + ) -> ReplyNotification: msg = self.stash.get_by_uid( credentials=context.credentials, uid=reply.target_msg + ).unwrap( + public_message=f"The target notification id {reply.target_msg} was not found!" ) - if msg.is_err(): - return SyftError( - message=f"The target notification id {reply.target_msg} was not found!. Error: {msg.err()}" - ) - msg = msg.ok() reply.from_user_verify_key = context.credentials msg.replies.append(reply) - result = self.stash.update(credentials=context.credentials, obj=msg) - - if result.is_err(): - return SyftError( - message=f"Couldn't add a new notification reply in the target notification. 
Error: {result.err()}" - ) - - return result.ok() + return self.stash.update(credentials=context.credentials, obj=msg).unwrap( + public_message="Couldn't add a new notification reply in the target notification" + ) @service_method( path="notifications.user_settings", @@ -97,10 +80,8 @@ def reply( def user_settings( self, context: AuthedServiceContext, - ) -> NotifierSettings | SyftError: - context.node = cast(AbstractNode, context.node) - notifier_service = context.node.get_service("notifierservice") - return notifier_service.user_settings(context) + ) -> NotifierSettings: + return context.server.services.notifier.user_settings(context) @service_method( path="notifications.settings", @@ -110,39 +91,32 @@ def user_settings( def settings( self, context: AuthedServiceContext, - ) -> NotifierSettings | SyftError: - context.node = cast(AbstractNode, context.node) - notifier_service = context.node.get_service("notifierservice") - result = notifier_service.settings(context) - return result + ) -> NotifierSettings: + return context.server.services.notifier.settings(context).unwrap() @service_method( path="notifications.activate", name="activate", roles=DATA_SCIENTIST_ROLE_LEVEL, + unwrap_on_success=False, ) def activate( self, context: AuthedServiceContext, - ) -> Notification | SyftError: - context.node = cast(AbstractNode, context.node) - notifier_service = context.node.get_service("notifierservice") - result = notifier_service.activate(context) - return result + ) -> Notification: + return context.server.services.notifier.activate(context).unwrap() @service_method( path="notifications.deactivate", name="deactivate", roles=DATA_SCIENTIST_ROLE_LEVEL, + unwrap_on_success=False, ) def deactivate( self, context: AuthedServiceContext, - ) -> Notification | SyftError: - context.node = cast(AbstractNode, context.node) - notifier_service = context.node.get_service("notifierservice") - result = notifier_service.deactivate(context) - return result + ) -> SyftSuccess: + return context.server.services.notifier.deactivate(context).unwrap() @service_method( path="notifications.get_all", @@ -152,47 +126,34 @@ def deactivate( def get_all( self, context: AuthedServiceContext, - ) -> list[Notification] | SyftError: - result = self.stash.get_all_inbox_for_verify_key( + ) -> list[Notification]: + return self.stash.get_all_inbox_for_verify_key( context.credentials, verify_key=context.credentials, - ) - if result.err(): - return SyftError(message=str(result.err())) - notifications = result.ok() - return notifications + ).unwrap() @service_method( path="notifications.get_all_sent", name="outbox", roles=DATA_SCIENTIST_ROLE_LEVEL, ) - def get_all_sent( - self, context: AuthedServiceContext - ) -> list[Notification] | SyftError: - result = self.stash.get_all_sent_for_verify_key( + def get_all_sent(self, context: AuthedServiceContext) -> list[Notification]: + return self.stash.get_all_sent_for_verify_key( context.credentials, context.credentials - ) - if result.err(): - return SyftError(message=str(result.err())) - notifications = result.ok() - return notifications + ).unwrap() # get_all_read and unread cover the same functionality currently as # get_all_for_status. However, there may be more statuses added in the future, # so we are keeping the more generic get_all_for_status method. 
+ @as_result(StashException) def get_all_for_status( self, context: AuthedServiceContext, status: NotificationStatus, - ) -> list[Notification] | SyftError: - result = self.stash.get_all_by_verify_key_for_status( + ) -> list[Notification]: + return self.stash.get_all_by_verify_key_for_status( context.credentials, verify_key=context.credentials, status=status - ) - if result.err(): - return SyftError(message=str(result.err())) - notifications = result.ok() - return notifications + ).unwrap() @service_method( path="notifications.get_all_read", @@ -202,11 +163,11 @@ def get_all_for_status( def get_all_read( self, context: AuthedServiceContext, - ) -> list[Notification] | SyftError: + ) -> list[Notification]: return self.get_all_for_status( context=context, status=NotificationStatus.READ, - ) + ).unwrap() @service_method( path="notifications.get_all_unread", @@ -216,71 +177,54 @@ def get_all_read( def get_all_unread( self, context: AuthedServiceContext, - ) -> list[Notification] | SyftError: + ) -> list[Notification]: return self.get_all_for_status( context=context, status=NotificationStatus.UNREAD, - ) + ).unwrap() @service_method(path="notifications.mark_as_read", name="mark_as_read") - def mark_as_read( - self, context: AuthedServiceContext, uid: UID - ) -> Notification | SyftError: - result = self.stash.update_notification_status( + def mark_as_read(self, context: AuthedServiceContext, uid: UID) -> Notification: + return self.stash.update_notification_status( context.credentials, uid=uid, status=NotificationStatus.READ - ) - if result.is_err(): - return SyftError(message=str(result.err())) - return result.ok() + ).unwrap() @service_method(path="notifications.mark_as_unread", name="mark_as_unread") - def mark_as_unread( - self, context: AuthedServiceContext, uid: UID - ) -> Notification | SyftError: - result = self.stash.update_notification_status( + def mark_as_unread(self, context: AuthedServiceContext, uid: UID) -> Notification: + return self.stash.update_notification_status( context.credentials, uid=uid, status=NotificationStatus.UNREAD - ) - if result.is_err(): - return SyftError(message=str(result.err())) - return result.ok() + ).unwrap() @service_method( path="notifications.resolve_object", name="resolve_object", - roles=DATA_SCIENTIST_ROLE_LEVEL, + roles=GUEST_ROLE_LEVEL, ) def resolve_object( self, context: AuthedServiceContext, linked_obj: LinkedObject - ) -> Notification | SyftError: - context.node = cast(AbstractNode, context.node) - service = context.node.get_service(linked_obj.service_type) - result = service.resolve_link(context=context, linked_obj=linked_obj) - if result.is_err(): - return SyftError(message=str(result.err())) - return result.ok() + ) -> Notification: + service = context.server.get_service(linked_obj.service_type) + return service.resolve_link(context=context, linked_obj=linked_obj).unwrap() - @service_method(path="notifications.clear", name="clear") - def clear(self, context: AuthedServiceContext) -> SyftError | SyftSuccess: - result = self.stash.delete_all_for_verify_key( + @service_method(path="notifications.clear", name="clear", unwrap_on_success=False) + def clear(self, context: AuthedServiceContext) -> SyftSuccess: + self.stash.delete_all_for_verify_key( credentials=context.credentials, verify_key=context.credentials - ) - if result.is_ok(): - return SyftSuccess(message="All notifications cleared !!") - return SyftError(message=str(result.err())) + ).unwrap() + return SyftSuccess(message="Cleared all notifications") + @as_result(SyftException) def 
filter_by_obj( self, context: AuthedServiceContext, obj_uid: UID - ) -> Notification | SyftError: - notifications = self.stash.get_all(context.credentials) - if notifications.is_err(): - return SyftError(message="Could not get notifications!!") - for notification in notifications.ok(): + ) -> Notification: + notifications = self.stash.get_all(context.credentials).unwrap() + for notification in notifications: if ( notification.linked_obj and notification.linked_obj.object_uid == obj_uid ): return notification - return SyftError(message="Could not get notifications!!") + raise SyftException(public_message="Could not get notifications!!") TYPE_TO_SERVICE[Notification] = NotificationService diff --git a/packages/syft/src/syft/service/notification/notification_stash.py b/packages/syft/src/syft/service/notification/notification_stash.py index 84aafb33849..029cf1b325a 100644 --- a/packages/syft/src/syft/service/notification/notification_stash.py +++ b/packages/syft/src/syft/service/notification/notification_stash.py @@ -1,138 +1,99 @@ -# stdlib - -# third party -from result import Err -from result import Ok -from result import Result - # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException from ...store.linked_obj import LinkedObject -from ...types.datetime import DateTime +from ...types.result import as_result from ...types.uid import UID -from ...util.telemetry import instrument from .notifications import Notification from .notifications import NotificationStatus -FromUserVerifyKeyPartitionKey = PartitionKey( - key="from_user_verify_key", type_=SyftVerifyKey -) -ToUserVerifyKeyPartitionKey = PartitionKey( - key="to_user_verify_key", type_=SyftVerifyKey -) -StatusPartitionKey = PartitionKey(key="status", type_=NotificationStatus) - -OrderByCreatedAtTimeStampPartitionKey = PartitionKey(key="created_at", type_=DateTime) - -LinkedObjectPartitionKey = PartitionKey(key="linked_obj", type_=LinkedObject) - - -@instrument -@serializable() -class NotificationStash(BaseUIDStoreStash): - object_type = Notification - settings: PartitionSettings = PartitionSettings( - name=Notification.__canonical_name__, - object_type=Notification, - ) +@serializable(canonical_name="NotificationSQLStash", version=1) +class NotificationStash(ObjectStash[Notification]): + @as_result(StashException) def get_all_inbox_for_verify_key( self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey - ) -> Result[list[Notification], str]: - qks = QueryKeys( - qks=[ - ToUserVerifyKeyPartitionKey.with_obj(verify_key), - ] - ) - return self.get_all_for_verify_key( - credentials=credentials, verify_key=verify_key, qks=qks - ) + ) -> list[Notification]: + if not isinstance(verify_key, SyftVerifyKey | str): + raise AttributeError("verify_key must be of type SyftVerifyKey or str") + return self.get_all( + credentials, + filters={"to_user_verify_key": verify_key}, + ).unwrap() + @as_result(StashException) def get_all_sent_for_verify_key( self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey - ) -> Result[list[Notification], str]: - qks = QueryKeys( - qks=[ - 
FromUserVerifyKeyPartitionKey.with_obj(verify_key), - ] - ) - return self.get_all_for_verify_key(credentials, verify_key=verify_key, qks=qks) + ) -> list[Notification]: + if not isinstance(verify_key, SyftVerifyKey | str): + raise AttributeError("verify_key must be of type SyftVerifyKey or str") + return self.get_all( + credentials, + filters={"from_user_verify_key": verify_key}, + ).unwrap() + @as_result(StashException) def get_all_for_verify_key( - self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey, qks: QueryKeys - ) -> Result[list[Notification], str]: - if isinstance(verify_key, str): - verify_key = SyftVerifyKey.from_string(verify_key) - return self.query_all( + self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey + ) -> list[Notification]: + if not isinstance(verify_key, SyftVerifyKey | str): + raise AttributeError("verify_key must be of type SyftVerifyKey or str") + return self.get_all( credentials, - qks=qks, - order_by=OrderByCreatedAtTimeStampPartitionKey, - ) + filters={"from_user_verify_key": verify_key}, + ).unwrap() + @as_result(StashException) def get_all_by_verify_key_for_status( self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey, status: NotificationStatus, - ) -> Result[list[Notification], str]: - qks = QueryKeys( - qks=[ - ToUserVerifyKeyPartitionKey.with_obj(verify_key), - StatusPartitionKey.with_obj(status), - ] - ) - return self.query_all( + ) -> list[Notification]: + if not isinstance(verify_key, SyftVerifyKey | str): + raise AttributeError("verify_key must be of type SyftVerifyKey or str") + return self.get_all( credentials, - qks=qks, - order_by=OrderByCreatedAtTimeStampPartitionKey, - ) + filters={ + "to_user_verify_key": str(verify_key), + "status": status.name, + }, + ).unwrap() + @as_result(StashException, NotFoundException) def get_notification_for_linked_obj( self, credentials: SyftVerifyKey, linked_obj: LinkedObject, - ) -> Result[Notification, str]: - qks = QueryKeys( - qks=[ - LinkedObjectPartitionKey.with_obj(linked_obj), - ] - ) - return self.query_one(credentials=credentials, qks=qks) + ) -> Notification: + return self.get_one( + credentials, + filters={ + "linked_obj.id": linked_obj.id, + }, + ).unwrap() + @as_result(StashException, NotFoundException) def update_notification_status( self, credentials: SyftVerifyKey, uid: UID, status: NotificationStatus - ) -> Result[Notification, str]: - result = self.get_by_uid(credentials, uid=uid) - if result.is_err(): - return result.err() - - notification = result.ok() - if notification is None: - return Err(f"No notification exists for id: {uid}") + ) -> Notification: + notification = self.get_by_uid(credentials, uid=uid).unwrap() notification.status = status - return self.update(credentials, obj=notification) + return self.update(credentials, obj=notification).unwrap() + @as_result(StashException, NotFoundException) def delete_all_for_verify_key( self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey - ) -> Result[bool, str]: - result = self.get_all_inbox_for_verify_key( + ) -> bool: + if not isinstance(verify_key, SyftVerifyKey | str): + raise AttributeError("verify_key must be of type SyftVerifyKey or str") + notifications = self.get_all_inbox_for_verify_key( credentials, verify_key=verify_key, - ) - # If result is an error then return the error - if result.is_err(): - return result - - # get the list of notifications - notifications = result.ok() - + ).unwrap() for notification in notifications: - result = self.delete_by_uid(credentials, uid=notification.id) - if result.is_err(): 
- return result - return Ok(True) + self.delete_by_uid(credentials, uid=notification.id).unwrap() + return True diff --git a/packages/syft/src/syft/service/notification/notifications.py b/packages/syft/src/syft/service/notification/notifications.py index 6df1716ed4a..3fbddf6eb98 100644 --- a/packages/syft/src/syft/service/notification/notifications.py +++ b/packages/syft/src/syft/service/notification/notifications.py @@ -4,33 +4,31 @@ from typing import cast # relative -from ...client.api import APIRegistry -from ...client.api import SyftAPI -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey from ...store.linked_obj import LinkedObject from ...types.datetime import DateTime +from ...types.syft_migration import migrate +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SYFT_OBJECT_VERSION_2 from ...types.syft_object import SyftObject from ...types.transforms import TransformContext from ...types.transforms import add_credentials_for_key -from ...types.transforms import add_node_uid_for_key +from ...types.transforms import add_server_uid_for_key from ...types.transforms import generate_id from ...types.transforms import transform from ...types.uid import UID -from ...util import options -from ...util.colors import SURFACE from ..notifier.notifier_enums import NOTIFIERS from .email_templates import EmailTemplate -@serializable() +@serializable(canonical_name="NotificationStatus", version=1) class NotificationStatus(Enum): UNREAD = 0 READ = 1 -@serializable() +@serializable(canonical_name="NotificationRequestStatus", version=1) class NotificationRequestStatus(Enum): NO_ACTION = 0 @@ -43,7 +41,7 @@ class NotificationExpiryStatus(Enum): @serializable() class ReplyNotification(SyftObject): __canonical_name__ = "ReplyNotification" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 text: str target_msg: UID @@ -51,13 +49,38 @@ class ReplyNotification(SyftObject): from_user_verify_key: SyftVerifyKey | None = None +@serializable() +class NotificationV1(SyftObject): + __canonical_name__ = "Notification" + __version__ = SYFT_OBJECT_VERSION_1 + + subject: str + server_uid: UID + from_user_verify_key: SyftVerifyKey + to_user_verify_key: SyftVerifyKey + created_at: DateTime + status: NotificationStatus = NotificationStatus.UNREAD + linked_obj: LinkedObject | None = None + notifier_types: list[NOTIFIERS] = [] + email_template: type[EmailTemplate] | None = None + replies: list[ReplyNotification] | None = [] + + __attr_searchable__ = [ + "from_user_verify_key", + "to_user_verify_key", + "status", + ] + __repr_attrs__ = ["subject", "status", "created_at", "linked_obj"] + __table_sort_attr__ = "Created at" + + @serializable() class Notification(SyftObject): __canonical_name__ = "Notification" __version__ = SYFT_OBJECT_VERSION_2 subject: str - node_uid: UID + server_uid: UID from_user_verify_key: SyftVerifyKey to_user_verify_key: SyftVerifyKey created_at: DateTime @@ -65,7 +88,7 @@ class Notification(SyftObject): linked_obj: LinkedObject | None = None notifier_types: list[NOTIFIERS] = [] email_template: type[EmailTemplate] | None = None - replies: list[ReplyNotification] | None = [] + replies: list[ReplyNotification] = [] __attr_searchable__ = [ "from_user_verify_key", @@ -73,12 +96,11 @@ class Notification(SyftObject): "status", ] __repr_attrs__ = ["subject", "status", "created_at", "linked_obj"] + __table_sort_attr__ = "Created at" + __order_by__ = 
("created_at", "asc") def _repr_html_(self) -> str: return f""" -

    Notification

    ID: {self.id}

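# A minimal, runnable sketch of the @migrate registration pattern used by the hunks below
# (NotificationV1 -> Notification here, and NotifierSettingsV1/V2 -> NotifierSettings further
# down): a migration function returns a list of field transforms such as make_set_default()
# and drop(). The registry below is a simplified stand-in for syft's ...types.syft_migration
# machinery (assumed semantics), keyed here by plain class names instead of canonical
# name/version pairs.

# stdlib
from collections.abc import Callable

FieldTransform = Callable[[dict], dict]
MIGRATIONS: dict[tuple[str, str], list[FieldTransform]] = {}


def migrate(from_klass: str, to_klass: str) -> Callable:
    # Register the transform list returned by the decorated function under (from, to).
    def decorator(func: Callable[[], list[FieldTransform]]) -> Callable[[], list[FieldTransform]]:
        MIGRATIONS[(from_klass, to_klass)] = func()
        return func

    return decorator


def make_set_default(field: str, value: object) -> FieldTransform:
    # Add a field with a default value if the old object state does not have it yet.
    def transform(state: dict) -> dict:
        state.setdefault(field, value)
        return state

    return transform


def drop(fields: list[str]) -> FieldTransform:
    # Remove fields that the target version no longer carries.
    def transform(state: dict) -> dict:
        for field in fields:
            state.pop(field, None)
        return state

    return transform


@migrate("NotifierSettingsV1", "NotifierSettingsV2")
def settings_v1_to_v2() -> list[FieldTransform]:
    return [
        make_set_default("email_activity", {}),
        make_set_default("email_rate_limit", {}),
    ]


state = {"active": True}
for step in MIGRATIONS[("NotifierSettingsV1", "NotifierSettingsV2")]:
    state = step(state)
# state == {"active": True, "email_activity": {}, "email_rate_limit": {}}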
    @@ -101,27 +123,15 @@ def _coll_repr_(self) -> dict[str, str]: return { "Subject": self.subject, "Status": self.determine_status().name.capitalize(), - "Created At": str(self.created_at), + "Created at": str(self.created_at), "Linked object": f"{self.linked_obj.object_type.__canonical_name__} ({self.linked_obj.object_uid})", } def mark_read(self) -> None: - api: SyftAPI = cast( - SyftAPI, - APIRegistry.api_for( - self.node_uid, user_verify_key=self.syft_client_verify_key - ), - ) - return api.services.notifications.mark_as_read(uid=self.id) + return self.get_api().services.notifications.mark_as_read(uid=self.id) def mark_unread(self) -> None: - api: SyftAPI = cast( - SyftAPI, - APIRegistry.api_for( - self.node_uid, user_verify_key=self.syft_client_verify_key - ), - ) - return api.services.notifications.mark_as_unread(uid=self.id) + return self.get_api().services.notifications.mark_as_unread(uid=self.id) def determine_status(self) -> Enum: # relative @@ -137,7 +147,7 @@ def determine_status(self) -> Enum: @serializable() class CreateNotification(SyftObject): __canonical_name__ = "CreateNotification" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 subject: str from_user_verify_key: SyftVerifyKey | None = None # type: ignore[assignment] @@ -161,5 +171,10 @@ def createnotification_to_notification() -> list[Callable]: generate_id, add_msg_creation_time, add_credentials_for_key("from_user_verify_key"), - add_node_uid_for_key("node_uid"), + add_server_uid_for_key("server_uid"), ] + + +@migrate(NotificationV1, Notification) +def migrate_nofitication_v1_to_v2() -> list[Callable]: + return [] # skip migration, no changes in the class diff --git a/packages/syft/src/syft/service/notifier/notifier.py b/packages/syft/src/syft/service/notifier/notifier.py index d5fd172030d..298e6c5b7b0 100644 --- a/packages/syft/src/syft/service/notifier/notifier.py +++ b/packages/syft/src/syft/service/notifier/notifier.py @@ -1,56 +1,85 @@ -# stdlib +# .api.services.notifications.settings() is how the server itself would dispatch notifications. +# .api.services.notifications.user_settings() sets if a specific user wants or not to receive notifications. +# Class NotifierSettings holds both pieces of info. 
+# Users will get notification x where x in {email, slack, sms, app} if three things are set to True: +# 1) .....settings().active +# 2) .....settings().x_enabled +# 3) .....user_settings().x # stdlib +from collections.abc import Callable +from datetime import datetime +import logging +from typing import Any from typing import TypeVar -from typing import cast # third party -from result import Err -from result import Ok -from result import Result +from pydantic import BaseModel # relative -from ...abstract_node import AbstractNode -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_migration import migrate from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SYFT_OBJECT_VERSION_3 from ...types.syft_object import SyftObject +from ...types.transforms import drop +from ...types.transforms import make_set_default from ..context import AuthedServiceContext from ..notification.notifications import Notification from ..response import SyftError from ..response import SyftSuccess +from .notifier_enums import NOTIFICATION_FREQUENCY from .notifier_enums import NOTIFIERS from .smtp_client import SMTPClient +logger = logging.getLogger(__name__) + -class BaseNotifier: +class BaseNotifier(BaseModel): + @as_result(SyftException) def send( - self, target: SyftVerifyKey, notification: Notification - ) -> SyftSuccess | SyftError: - return SyftError(message="Not implemented") + self, context: AuthedServiceContext, notification: Notification + ) -> SyftSuccess: + raise SyftException(public_message="Not implemented") + + @as_result(SyftException) + def send_batches( + self, context: AuthedServiceContext, notification_queue: list[Notification] + ) -> SyftSuccess: + raise SyftException(public_message="Not implemented") TBaseNotifier = TypeVar("TBaseNotifier", bound=BaseNotifier) +@serializable() +class UserNotificationActivity(SyftObject): + __canonical_name__ = "UserNotificationActivity" + __version__ = SYFT_OBJECT_VERSION_1 + count: int = 1 + date: datetime = datetime.now() + + +@serializable(canonical_name="EmailNotifier", version=1) class EmailNotifier(BaseNotifier): - smtp_client: SMTPClient - sender = "" + smtp_client: SMTPClient | None = None + sender: str = "" def __init__( self, - username: str, - password: str, - sender: str, - server: str, - port: int = 587, + **data: Any, ) -> None: - self.sender = sender + super().__init__(**data) + self.sender = data.get("sender", "") self.smtp_client = SMTPClient( - server=server, - port=port, - username=username, - password=password, + server=data.get("server", ""), + port=int(data.get("port", 587)), + username=data.get("username", ""), + password=data.get("password", ""), ) @classmethod @@ -60,29 +89,79 @@ def check_credentials( password: str, server: str, port: int = 587, - ) -> Result[Ok, Err]: - return SMTPClient.check_credentials( - server=server, - port=port, - username=username, - password=password, - ) - - def send( - self, context: AuthedServiceContext, notification: Notification - ) -> Result[Ok, Err]: + ) -> bool: try: - context.node = cast(AbstractNode, context.node) - - user_service = context.node.get_service("userservice") + SMTPClient.check_credentials( + server=server, + port=port, + username=username, + password=password, + ) + return True + except Exception: + 
logger.exception("Credentials validation failed") + return False - receiver = user_service.get_by_verify_key(notification.to_user_verify_key) + @as_result(SyftException) + def send_batches( + self, context: AuthedServiceContext, notification_queue: list[Notification] + ) -> SyftSuccess | SyftError: + subject = None + receiver_email = None + sender = None + notification_sample = notification_queue[0] + try: + sender = self.sender + receiver = context.server.services.user.get_by_verify_key( + notification_sample.to_user_verify_key + ).unwrap() if not receiver.notifications_enabled[NOTIFIERS.EMAIL]: - return Ok( - "Email notifications are disabled for this user." + return SyftSuccess( + message="Email notifications are disabled for this user." ) # TODO: Should we return an error here? + receiver_email = receiver.email + if notification_sample.email_template: + subject = notification_sample.email_template.batched_email_title( + notifications=notification_queue, context=context + ) + body = notification_sample.email_template.batched_email_body( + notifications=notification_queue, context=context + ) + else: + subject = notification_sample.subject + body = notification_sample._repr_html_() + + if isinstance(receiver_email, str): + receiver_email = [receiver_email] + self.smtp_client.send( # type: ignore + sender=sender, receiver=receiver_email, subject=subject, body=body + ) + message = f"> Sent email: {subject} to {receiver_email}" + logging.info(message) + return SyftSuccess(message="Email sent successfully!") + except Exception as e: + message = f"> Error sending email: {subject} to {receiver_email} from: {sender}. {e}" + logger.error(message) + return SyftError(message="Failed to send an email.") + + @as_result(SyftException) + def send( + self, context: AuthedServiceContext, notification: Notification + ) -> SyftSuccess | SyftError: + subject = None + receiver_email = None + sender = None + try: + sender = self.sender + receiver = context.server.services.user.get_by_verify_key( + notification.to_user_verify_key + ).unwrap() + if not receiver.notifications_enabled[NOTIFIERS.EMAIL]: + return SyftSuccess( + message="Email notifications are disabled for this user." + ) # TODO: Should we return an error here? receiver_email = receiver.email if notification.email_template: @@ -99,14 +178,24 @@ def send( if isinstance(receiver_email, str): receiver_email = [receiver_email] - self.smtp_client.send( - sender=self.sender, receiver=receiver_email, subject=subject, body=body - ) - return Ok("Email sent successfully!") - except Exception: - return Err( - "Some notifications failed to be delivered. Please check the health of the mailing server." + self.smtp_client.send( # type: ignore + sender=sender, receiver=receiver_email, subject=subject, body=body ) + message = f"> Sent email: {subject} to {receiver_email}" + print(message) + logging.info(message) + return SyftSuccess(message="Email sent successfully!") + except Exception as e: + message = f"> Error sending email: {subject} to {receiver_email} from: {sender}. {e}" + logger.error(message) + return SyftError(message="Failed to send an email.") + # raise SyftException.from_exception( + # exc, + # public_message=( + # "Some notifications failed to be delivered." + # " Please check the health of the mailing server." 
+ # ), + # ) @serializable() @@ -127,7 +216,7 @@ class NotificationPreferences(SyftObject): @serializable() -class NotifierSettings(SyftObject): +class NotifierSettingsV1(SyftObject): __canonical_name__ = "NotifierSettings" __version__ = SYFT_OBJECT_VERSION_1 __repr_attrs__ = [ @@ -135,6 +224,78 @@ class NotifierSettings(SyftObject): "email_enabled", ] active: bool = False + + notifiers: dict[NOTIFIERS, type[TBaseNotifier]] = { + NOTIFIERS.EMAIL: EmailNotifier, + } + + notifiers_status: dict[NOTIFIERS, bool] = { + NOTIFIERS.EMAIL: True, + NOTIFIERS.SMS: False, + NOTIFIERS.SLACK: False, + NOTIFIERS.APP: False, + } + + email_sender: str | None = "" + email_server: str | None = "" + email_port: int | None = 587 + email_username: str | None = "" + email_password: str | None = "" + + +@serializable() +class NotifierSettingsV2(SyftObject): + __canonical_name__ = "NotifierSettings" + __version__ = SYFT_OBJECT_VERSION_2 + __repr_attrs__ = [ + "active", + "email_enabled", + ] + active: bool = False + # Flag to identify which notification is enabled + # For now, consider only the email notification + # In future, Admin, must be able to have a better + # control on diff notifications. + + notifiers: dict[NOTIFIERS, type[TBaseNotifier]] = { + NOTIFIERS.EMAIL: EmailNotifier, + } + + notifiers_status: dict[NOTIFIERS, bool] = { + NOTIFIERS.EMAIL: True, + NOTIFIERS.SMS: False, + NOTIFIERS.SLACK: False, + NOTIFIERS.APP: False, + } + + email_sender: str | None = "" + email_server: str | None = "" + email_port: int | None = 587 + email_username: str | None = "" + email_password: str | None = "" + + email_activity: dict[str, dict[SyftVerifyKey, UserNotificationActivity]] = {} + email_rate_limit: dict[str, int] = {} + + +@serializable() +class EmailFrequency(SyftObject): + __canonical_name__ = "EmailFrequency" + __version__ = SYFT_OBJECT_VERSION_1 + + frequency: NOTIFICATION_FREQUENCY + start_time: datetime = datetime.now() + + +@serializable() +class NotifierSettings(SyftObject): + __canonical_name__ = "NotifierSettings" + __version__ = SYFT_OBJECT_VERSION_3 + __repr_attrs__ = [ + "active", + "email_enabled", + ] + active: bool = False # Flag to identify which notification is enabled # For now, consider only the email notification # In future, Admin, must be able to have a better @@ -156,6 +317,10 @@ class NotifierSettings(SyftObject): email_port: int | None = 587 email_username: str | None = "" email_password: str | None = "" + email_frequency: dict[str, EmailFrequency] = {} + email_queue: dict[str, dict[SyftVerifyKey, list[Notification]]] = {} + email_activity: dict[str, dict[SyftVerifyKey, UserNotificationActivity]] = {} + email_rate_limit: dict[str, int] = {} @property def email_enabled(self) -> bool: @@ -179,7 +344,7 @@ def validate_email_credentials( password: str, server: str, port: int, - ) -> Result[Ok, Err]: + ) -> bool: return self.notifiers[NOTIFIERS.EMAIL].check_credentials( server=server, port=port, @@ -187,19 +352,33 @@ def validate_email_credentials( password=password, ) + @as_result(SyftException) + def send_batched_notification( + self, + context: AuthedServiceContext, + notification_queue: list[Notification], + ) -> None: + if len(notification_queue) == 0: + return None + notifier_objs: list[BaseNotifier] = self.select_notifiers(notification_queue[0]) + for notifier in notifier_objs: + notifier.send_batches( + context=context, notification_queue=notification_queue + ).unwrap() + return None + + @as_result(SyftException) def send_notifications( self, context: AuthedServiceContext, 
notification: Notification, - ) -> Result[Ok, Err]: - notifier_objs: list = self.select_notifiers(notification) + ) -> int: + notifier_objs: list[BaseNotifier] = self.select_notifiers(notification) for notifier in notifier_objs: - result = notifier.send(context, notification) - if result.err(): - return result + notifier.send(context=context, notification=notification).unwrap() - return Ok("Notification sent successfully!") + return len(notifier_objs) def select_notifiers(self, notification: Notification) -> list[BaseNotifier]: """ @@ -225,6 +404,7 @@ def select_notifiers(self, notification: Notification) -> list[BaseNotifier]: password=self.email_password, sender=self.email_sender, server=self.email_server, + port=self.email_port, ) ) # If notifier is not email, we just create the notifier object @@ -233,3 +413,31 @@ def select_notifiers(self, notification: Notification) -> list[BaseNotifier]: notifier_objs.append(self.notifiers[notifier_type]()) # type: ignore[misc] return notifier_objs + + +@migrate(NotifierSettingsV1, NotifierSettingsV2) +def migrate_server_settings_v1_to_v2() -> list[Callable]: + return [ + make_set_default("email_activity", {}), + make_set_default("email_rate_limit", {}), + ] + + +@migrate(NotifierSettingsV2, NotifierSettingsV1) +def migrate_server_settings_v2_to_v1() -> list[Callable]: + # Use drop function on the "email_activity" and "email_rate_limit" attributes + return [drop(["email_activity"]), drop(["email_rate_limit"])] + + +@migrate(NotifierSettingsV2, NotifierSettings) +def migrate_server_settings_v2_to_current() -> list[Callable]: + return [ + make_set_default("email_frequency", {}), + make_set_default("email_queue", {}), + ] + + +@migrate(NotifierSettings, NotifierSettingsV2) +def migrate_server_settings_current_to_v2() -> list[Callable]: + # Use drop function on the "email_frequency" and "email_queue" attributes + return [drop(["email_frequency"]), drop(["email_queue"])] diff --git a/packages/syft/src/syft/service/notifier/notifier_enums.py b/packages/syft/src/syft/service/notifier/notifier_enums.py index 0cb438b4fb2..5467e78f11c 100644 --- a/packages/syft/src/syft/service/notifier/notifier_enums.py +++ b/packages/syft/src/syft/service/notifier/notifier_enums.py @@ -6,9 +6,26 @@ from ...serde.serializable import serializable -@serializable() +@serializable(canonical_name="EMAIL_TYPES", version=1) +class EMAIL_TYPES(Enum): + PASSWORD_RESET_EMAIL = "PasswordResetTemplate" # nosec + ONBOARD_EMAIL = "OnBoardEmailTemplate" + REQUEST_EMAIL = "RequestEmailTemplate" + REQUEST_UPDATE_EMAIL = "RequestUpdateEmailTemplate" + + +@serializable(canonical_name="NOTIFIERS", version=1) class NOTIFIERS(Enum): EMAIL = auto() SMS = auto() SLACK = auto() APP = auto() + + +@serializable(canonical_name="NOTIFICATION_FREQUENCY", version=1) +class NOTIFICATION_FREQUENCY(Enum): + INSTANT = auto() + SIX_HOURS = auto() + TWELVE_HOURS = auto() + DAILY = auto() + WEEKLY = auto() diff --git a/packages/syft/src/syft/service/notifier/notifier_service.py b/packages/syft/src/syft/service/notifier/notifier_service.py index 37cb247bea5..53d6ac083b5 100644 --- a/packages/syft/src/syft/service/notifier/notifier_service.py +++ b/packages/syft/src/syft/service/notifier/notifier_service.py @@ -1,62 +1,68 @@ # stdlib - -# stdlib -from typing import cast +from datetime import datetime +from datetime import timedelta +import logging # third party from pydantic import EmailStr -from result import Err -from result import Ok -from result import Result # relative -from ...abstract_node import AbstractNode +from ...abstract_server import 
AbstractServer from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.errors import SyftException +from ...types.result import as_result from ..context import AuthedServiceContext +from ..notification.email_templates import PasswordResetTemplate from ..notification.notifications import Notification -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService +from .notifier import EmailFrequency from .notifier import NotificationPreferences from .notifier import NotifierSettings +from .notifier import UserNotificationActivity +from .notifier_enums import EMAIL_TYPES +from .notifier_enums import NOTIFICATION_FREQUENCY from .notifier_enums import NOTIFIERS from .notifier_stash import NotifierStash +logger = logging.getLogger(__name__) + + +class RateLimitException(SyftException): + public_message = "Rate limit exceeded." -@serializable() + +@serializable(canonical_name="NotifierService", version=1) class NotifierService(AbstractService): - store: DocumentStore - stash: NotifierStash # Which stash should we use? + stash: NotifierStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = NotifierStash(store=store) - def settings( # Maybe just notifier.settings + @as_result(StashException) + def settings( self, context: AuthedServiceContext, - ) -> NotifierSettings | SyftError: + ) -> NotifierSettings: """Get Notifier Settings Args: context: The request context Returns: - Union[NotifierSettings, SyftError]: Notifier Settings or SyftError + NotifierSettings | None: The notifier settings, if it exists; None otherwise. """ - result = self.stash.get(credentials=context.credentials) - if result.is_err(): - return SyftError(message="Error getting notifier settings") - - return result.ok() + return self.stash.get(credentials=context.credentials).unwrap( + public_message="Error getting notifier settings" + ) def user_settings( self, context: AuthedServiceContext, ) -> NotificationPreferences: - context.node = cast(AbstractNode, context.node) - user_service = context.node.get_service("userservice") - user_view = user_service.get_current_user(context) + user_view = context.server.services.user.get_current_user(context) notifications = user_view.notifications_enabled return NotificationPreferences( email=notifications[NOTIFIERS.EMAIL], @@ -65,6 +71,28 @@ def user_settings( app=notifications[NOTIFIERS.APP], ) + def _set_notifier(self, context: AuthedServiceContext, active: bool) -> SyftSuccess: + notifier = self.stash.get(credentials=context.credentials).unwrap( + public_message="Notifier settings not found." + ) + notifier.active = active + self.stash.update(credentials=context.credentials, obj=notifier).unwrap() + + active_s = "active" if active else "inactive" + return SyftSuccess(message=f"Notifier set to {active_s}") + + def set_notifier_active_to_false( + self, context: AuthedServiceContext + ) -> SyftSuccess: + """ + Essentially a duplicate of turn_off method. 
+ """ + notifier = self.stash.get(credentials=context.credentials).unwrap() + notifier.active = False + self.stash.update(credentials=context.credentials, obj=notifier).unwrap() + return SyftSuccess(message="notifier.active set to false.") + + @as_result(SyftException) def turn_on( self, context: AuthedServiceContext, @@ -73,7 +101,7 @@ def turn_on( email_sender: str | None = None, email_server: str | None = None, email_port: int | None = 587, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: """Turn on email notifications. Args: @@ -81,63 +109,53 @@ def turn_on( email_password (Optional[str]): Email email server password. Defaults to None. sender_email (Optional[str]): Email sender email. Defaults to None. Returns: - Union[SyftSuccess, SyftError]: A union type representing the success or error response. + SyftSuccess: success response. Raises: - None - + SyftException: any error that occurs during the process """ - result = self.stash.get(credentials=context.credentials) - # 1 - If something went wrong at db level, return the error - if result.is_err(): - return SyftError(message=result.err()) + notifier = self.stash.get(credentials=context.credentials).unwrap() # 2 - If one of the credentials are set alone, return an error - if ( - email_username - and not email_password - or email_password - and not email_username + if (email_username and not email_password) or ( + not email_username and email_password ): - return SyftError(message="You must provide both username and password") - - notifier = result.ok() + raise SyftException( + public_message="You must provide both username and password" + ) # 3 - If notifier doesn't have a email server / port and the user didn't provide them, return an error if not (email_server and email_port) and not notifier.email_server: - return SyftError( - message="You must provide both server and port to enable notifications." + raise SyftException( + public_message="You must provide both server and port to enable notifications." ) - print("[LOG] Got notifier from db") + logger.debug("Got notifier from db") + skip_auth: bool = False # If no new credentials provided, check for existing ones if not (email_username and email_password): if not (notifier.email_username and notifier.email_password): - return SyftError( - message="No valid token has been added to the domain." - + "You can add a pair of SMTP credentials via " - + ".settings.enable_notifications(email=<>, password=<>)" - ) + skip_auth = True else: - print("[LOG] No new credentials provided. Using existing ones.") + logger.debug("No new credentials provided. Using existing ones.") email_password = notifier.email_password email_username = notifier.email_username - print("[LOG] Validating credentials...") - - validation_result = notifier.validate_email_credentials( - username=email_username, - password=email_password, - server=email_server if email_server else notifier.email_server, - port=email_port if email_port else notifier.email_port, - ) - if validation_result.is_err(): - return SyftError( - message="Invalid SMTP credentials. Please check your username and password." 
+ valid_credentials = True + if not skip_auth: + valid_credentials = notifier.validate_email_credentials( + username=email_username, + password=email_password, + server=email_server or notifier.email_server, + port=email_port or notifier.email_port, ) + if not valid_credentials: + logger.error("Invalid SMTP credentials.") + raise SyftException(public_message=("Invalid SMTP credentials.")) + notifier.email_password = email_password notifier.email_username = email_username @@ -148,106 +166,176 @@ def turn_on( # Email sender verification if not email_sender and not notifier.email_sender: - return SyftError( - message="You must provide a sender email address to enable notifications." + raise SyftException( + public_message="You must provide a sender email address to enable notifications." ) + # If email_rate_limit isn't defined yet. + if not notifier.email_rate_limit: + notifier.email_rate_limit = {PasswordResetTemplate.__name__: 3} + if email_sender: try: EmailStr._validate(email_sender) except ValueError: - return SyftError( - message="Invalid sender email address. Please check your email address." + raise SyftException( + public_message="Invalid sender email address. Please check your email address." ) notifier.email_sender = email_sender notifier.active = True - print( - "[LOG] Email credentials are valid. Updating the notifier settings in the db." + logger.debug( + "Email credentials are valid. Updating the notifier settings in the db." ) - result = self.stash.update(credentials=context.credentials, settings=notifier) - if result.is_err(): - return SyftError(message=result.err()) + self.stash.update(credentials=context.credentials, obj=notifier).unwrap() + context.server.services.settings.update(context, notifications_enabled=True) return SyftSuccess(message="Notifications enabled successfully.") + @as_result(SyftException) + def set_email_batch( + self, + context: AuthedServiceContext, + email_type: EMAIL_TYPES, + frequency: NOTIFICATION_FREQUENCY, + start_time: str = "", + ) -> SyftSuccess: + if start_time == "" and frequency is not NOTIFICATION_FREQUENCY.INSTANT: + raise SyftException( + "If frequency isn't INSTANT, you must set a start time for the notifications to be dispatched." + ) + + if frequency is not NOTIFICATION_FREQUENCY.INSTANT: + start_time = start_time.lower() + try: + if "pm" in start_time or "am" in start_time: + time_obj = datetime.strptime(start_time, "%I:%M %p") + else: + time_obj = datetime.strptime(start_time, "%H:%M") + except ValueError: + raise SyftException( + "Invalid time format. " + + "Please enter the start time in one of the following format examples: " + + "'14:00' or '2:00 PM'." + ) + else: + time_obj = datetime.now() + + notifier = self.stash.get(credentials=context.credentials).unwrap() + notifier.email_frequency[email_type.value] = EmailFrequency( + frequency=frequency, start_time=time_obj + ) + self.stash.update(credentials=context.credentials, obj=notifier).unwrap() + return SyftSuccess(message="Configuration set successfully.") + + @as_result(StashException) def turn_off( self, context: AuthedServiceContext, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: """ Turn off email notifications service. PySyft notifications will still work. 
""" + notifier = self.stash.get(credentials=context.credentials).unwrap() - result = self.stash.get(credentials=context.credentials) - - if result.is_err(): - return SyftError(message=result.err()) - - notifier = result.ok() notifier.active = False - result = self.stash.update(credentials=context.credentials, settings=notifier) - if result.is_err(): - return SyftError(message=result.err()) + self.stash.update(credentials=context.credentials, obj=notifier).unwrap() + context.server.services.settings.update(context, notifications_enabled=False) return SyftSuccess(message="Notifications disabled succesfullly") + @as_result(SyftException) def activate( self, context: AuthedServiceContext, notifier_type: NOTIFIERS = NOTIFIERS.EMAIL - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: """ Activate email notifications for the authenticated user. - This will only work if the domain owner has enabled notifications. + This will only work if the datasite owner has enabled notifications. """ - context.node = cast(AbstractNode, context.node) - user_service = context.node.get_service("userservice") - return user_service.enable_notifications(context, notifier_type=notifier_type) + return context.server.services.user.enable_notifications( + context, notifier_type=notifier_type + ).unwrap() + @as_result(SyftException) def deactivate( self, context: AuthedServiceContext, notifier_type: NOTIFIERS = NOTIFIERS.EMAIL - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: """Deactivate email notifications for the authenticated user - This will only work if the domain owner has enabled notifications. + This will only work if the datasite owner has enabled notifications. """ - context.node = cast(AbstractNode, context.node) - user_service = context.node.get_service("userservice") - return user_service.disable_notifications(context, notifier_type=notifier_type) + result = context.server.services.user.disable_notifications( + context, notifier_type=notifier_type + ) + return result + + def is_time_to_dispatch( + self, notification_frequency: EmailFrequency, current_time: datetime + ) -> bool: + frequency = notification_frequency.frequency + start_time = notification_frequency.start_time + + # Define period_timedelta based on frequency + if frequency == NOTIFICATION_FREQUENCY.INSTANT: + return True + if frequency == NOTIFICATION_FREQUENCY.SIX_HOURS: + period = timedelta(hours=6) + elif frequency == NOTIFICATION_FREQUENCY.TWELVE_HOURS: + period = timedelta(hours=12) + elif frequency == NOTIFICATION_FREQUENCY.DAILY: + period = timedelta(days=1) + elif frequency == NOTIFICATION_FREQUENCY.WEEKLY: + period = timedelta(weeks=1) + + # Calculate how many full periods have passed since start_time + elapsed_time = current_time - start_time + if elapsed_time < timedelta(0): + return False # Current time is before the start time + + periods_elapsed = int(elapsed_time // period) + next_dispatch_time = start_time + periods_elapsed * period + + # Allow a small margin of error (e.g., 1 minute) to account for processing delays + margin = timedelta(minutes=1) + time_difference = current_time - next_dispatch_time + + # Check if current_time is within the margin of the scheduled dispatch time + return timedelta(0) <= time_difference <= margin @staticmethod + @as_result(SyftException) def init_notifier( - node: AbstractNode, + server: AbstractServer, email_username: str | None = None, email_password: str | None = None, email_sender: str | None = None, smtp_port: int | None = None, smtp_host: str | None = None, - ) -> Result[Ok, Err]: - 
"""Initialize Notifier settings for a Node. + ) -> SyftSuccess | None: + """Initialize Notifier settings for a Server. If settings already exist, it will use the existing one. If not, it will create a new one. Args: - node: Node to initialize the notifier + server: Server to initialize the notifier active: If notifier should be active email_username: Email username to send notifications email_password: Email password to send notifications Raises: Exception: If something went wrong Returns: - Union: SyftSuccess or SyftError + SyftSuccess """ try: # Create a new NotifierStash since its a static method. - notifier_stash = NotifierStash(store=node.document_store) - result = notifier_stash.get(node.signing_key.verify_key) - if result.is_err(): - raise Exception(f"Could not create notifier: {result}") + notifier_stash = NotifierStash(store=server.db) + should_update = False # Get the notifier - notifier = result.ok() # If notifier doesn't exist, create a new one - - if not notifier: + try: + notifier = notifier_stash.get(server.signing_key.verify_key).unwrap() + should_update = True + except NotFoundException: notifier = NotifierSettings() notifier.active = False # Default to False @@ -261,48 +349,140 @@ def init_notifier( ) sender_not_set = not email_sender and not notifier.email_sender - if validation_result.is_err() or sender_not_set: - print( - "Ops something went wrong while trying to setup your notification system.", - "Please check your credentials and configuration.", - ) + + if not validation_result or sender_not_set: + logger.error("Notifier validation error") notifier.active = False else: notifier.email_password = email_password notifier.email_username = email_username notifier.email_sender = email_sender notifier.email_server = smtp_host - notifier.email_port = int(smtp_port) + notifier.email_port = smtp_port + # Default daily email rate limit per user + notifier.email_rate_limit = {PasswordResetTemplate.__name__: 3} notifier.active = True - notifier_stash.set(node.signing_key.verify_key, notifier) - return Ok("Notifier initialized successfully") - + if should_update: + notifier_stash.update( + credentials=server.signing_key.verify_key, obj=notifier + ).unwrap() + else: + notifier_stash.set(server.signing_key.verify_key, notifier).unwrap() + return SyftSuccess( + message="Notifier initialized successfully", value=notifier + ) except Exception as e: - raise Exception(f"Error initializing notifier. \n {e}") + raise SyftException.from_exception( + e, public_message=f"Error initializing notifier. {e}" + ) + + def set_email_rate_limit( + self, context: AuthedServiceContext, email_type: EMAIL_TYPES, daily_limit: int + ) -> SyftSuccess: + notifier = self.stash.get(context.credentials).unwrap( + public_message="Couldn't set the email rate limit." + ) + notifier.email_rate_limit[email_type.value] = daily_limit + self.stash.update(credentials=context.credentials, obj=notifier) + + return SyftSuccess(message="Email rate limit updated!") # This is not a public API. 
# This method is used by other services to dispatch notifications internally + @as_result(SyftException, RateLimitException) def dispatch_notification( self, context: AuthedServiceContext, notification: Notification - ) -> SyftError: - context.node = cast(AbstractNode, context.node) - admin_key = context.node.get_service("userservice").admin_verify_key() - notifier = self.stash.get(admin_key) - if notifier.is_err(): - return SyftError( - message="The mail service ran out of quota or some notifications failed to be delivered.\n" + ) -> SyftSuccess: + admin_key = context.server.services.user.root_verify_key + + # Silently fail on notification not delivered + try: + notifier = self.stash.get(admin_key).unwrap( + public_message="The mail service ran out of quota or some notifications failed to be delivered.\n" + "Please check the health of the mailing server." ) + except NotFoundException: + logger.debug("There is no notifier service to ship the notification") + raise SyftException( + public_message="No notifier service to ship the notification." + ) + except StashException as exc: + logger.error(f"Error getting notifier settings: {exc}") + raise SyftException( + public_message="Failed to get notifier settings. Please check the logs." + ) - notifier = notifier.ok() # If notifier is active - if notifier.active: - resp = notifier.send_notifications( - context=context, notification=notification + if notifier.active and notification.email_template is not None: + logger.debug("Checking user email activity") + + if notifier.email_activity.get(notification.email_template.__name__, None): + user_activity = notifier.email_activity[ + notification.email_template.__name__ + ].get(notification.to_user_verify_key, None) + + # If there's no user activity + if user_activity is None: + notifier.email_activity[notification.email_template.__name__][ + notification.to_user_verify_key + ] = UserNotificationActivity(count=1, date=datetime.now()) + else: # If there's a previous user activity + current_state: UserNotificationActivity = notifier.email_activity[ + notification.email_template.__name__ + ][notification.to_user_verify_key] + date_refresh = abs(datetime.now() - current_state.date).days > 1 + + limit = notifier.email_rate_limit.get( + notification.email_template.__name__, 0 + ) + still_in_limit = current_state.count < limit + # Time interval reseted. + if date_refresh: + current_state.count = 1 + current_state.date = datetime.now() + # Time interval didn't reset yet. + elif still_in_limit or not limit: + current_state.count += 1 + current_state.date = datetime.now() + else: + raise RateLimitException( + public_message="Couldn't send the email. You have surpassed the" + + " email threshold limit. Please try again later." 
+ ) + else: + notifier.email_activity[notification.email_template.__name__] = { + notification.to_user_verify_key: UserNotificationActivity( + count=1, date=datetime.now() + ) + } + + email_frequency = notifier.email_frequency.get( + notification.email_template.__name__, + EmailFrequency(frequency=NOTIFICATION_FREQUENCY.INSTANT), ) - if resp.is_err(): - return SyftError(message=resp.err()) + + if email_frequency.frequency == NOTIFICATION_FREQUENCY.INSTANT: + notifier.send_notifications( + context=context, notification=notification + ).unwrap() + else: + queue_dict = notifier.email_queue.get( + notification.email_template.__name__, {} + ) + if len(queue_dict) == 0: + notifier.email_queue[notification.email_template.__name__] = ( + queue_dict + ) + + user_queue = queue_dict.get(notification.to_user_verify_key, []) + + if len(user_queue) == 0: + queue_dict[notification.to_user_verify_key] = user_queue + + user_queue.append(notification) + + self.stash.update(credentials=admin_key, obj=notifier).unwrap() # If notifier isn't active, return None - return SyftSuccess(message="Notifications dispatched successfully") + return SyftSuccess(message="Notification dispatched successfully") diff --git a/packages/syft/src/syft/service/notifier/notifier_stash.py b/packages/syft/src/syft/service/notifier/notifier_stash.py index e29fd3e007d..b3583173e50 100644 --- a/packages/syft/src/syft/service/notifier/notifier_stash.py +++ b/packages/syft/src/syft/service/notifier/notifier_stash.py @@ -1,83 +1,28 @@ # stdlib # third party -from result import Err -from result import Ok -from result import Result # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...types.uid import UID +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.result import as_result from ...util.telemetry import instrument -from ..action.action_permissions import ActionObjectPermission from .notifier import NotifierSettings -NamePartitionKey = PartitionKey(key="name", type_=str) -ActionIDsPartitionKey = PartitionKey(key="action_ids", type_=list[UID]) - @instrument -@serializable() -class NotifierStash(BaseStash): - object_type = NotifierSettings - settings: PartitionSettings = PartitionSettings( - name=NotifierSettings.__canonical_name__, object_type=NotifierSettings - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) - - def admin_verify_key(self) -> SyftVerifyKey: - return self.partition.root_verify_key - - # TODO: should this method behave like a singleton? - def get(self, credentials: SyftVerifyKey) -> Result[NotifierSettings, Err]: +@serializable(canonical_name="NotifierSQLStash", version=1) +class NotifierStash(ObjectStash[NotifierSettings]): + @as_result(StashException, NotFoundException) + def get(self, credentials: SyftVerifyKey) -> NotifierSettings: """Get Settings""" - result = self.get_all(credentials) - if result.is_ok(): - settings = result.ok() - if len(settings) == 0: - return Ok( - None - ) # TODO: Stash shouldn't be empty after init. Return Err instead? - result = settings[ - 0 - ] # TODO: Should we check if theres more than one? 
=> Report corruption - return Ok(result) - else: - return Err(message=result.err()) - - def set( - self, - credentials: SyftVerifyKey, - settings: NotifierSettings, - add_permissions: list[ActionObjectPermission] | None = None, - add_storage_permission: bool = True, - ignore_duplicates: bool = False, - ) -> Result[NotifierSettings, Err]: - result = self.check_type(settings, self.object_type) - # we dont use and_then logic here as it is hard because of the order of the arguments - if result.is_err(): - return Err(message=result.err()) - return super().set( - credentials=credentials, obj=result.ok() - ) # TODO check if result isInstance(Ok) - - def update( - self, - credentials: SyftVerifyKey, - settings: NotifierSettings, - has_permission: bool = False, - ) -> Result[NotifierSettings, Err]: - result = self.check_type(settings, self.object_type) - # we dont use and_then logic here as it is hard because of the order of the arguments - if result.is_err(): - return Err(message=result.err()) - return super().update( - credentials=credentials, obj=result.ok() - ) # TODO check if result isInstance(Ok) + # actually get latest settings + result = self.get_all(credentials, limit=1, sort_order="desc").unwrap() + if len(result) > 0: + return result[0] + raise NotFoundException( + public_message="No settings found for the current user." + ) diff --git a/packages/syft/src/syft/service/notifier/smtp_client.py b/packages/syft/src/syft/service/notifier/smtp_client.py index 1f4df6531e5..eef25440af8 100644 --- a/packages/syft/src/syft/service/notifier/smtp_client.py +++ b/packages/syft/src/syft/service/notifier/smtp_client.py @@ -1,31 +1,26 @@ # stdlib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText +import logging import smtplib # third party -from result import Err -from result import Ok -from result import Result +from pydantic import BaseModel +# relative +from ...types.errors import SyftException +from ...types.server_url import ServerURL -class SMTPClient: - SOCKET_TIMEOUT = 5 # seconds +SOCKET_TIMEOUT = 5 # seconds - def __init__( - self, - server: str, - port: int, - username: str, - password: str, - ) -> None: - if not (username and password): - raise ValueError("Both username and password must be provided") +logger = logging.getLogger(__name__) - self.username = username - self.password = password - self.server = server - self.port = port + +class SMTPClient(BaseModel): + server: str + port: int + password: str | None = None + username: str | None = None def send(self, sender: str, receiver: list[str], subject: str, body: str) -> None: if not (subject and body and receiver): @@ -37,34 +32,52 @@ def send(self, sender: str, receiver: list[str], subject: str, body: str) -> Non msg["Subject"] = subject msg.attach(MIMEText(body, "html")) - with smtplib.SMTP( - self.server, self.port, timeout=self.SOCKET_TIMEOUT - ) as server: - server.ehlo() - if server.has_extn("STARTTLS"): - server.starttls() + mail_url = ServerURL.from_url(f"smtp://{self.server}:{self.port}") + mail_url = mail_url.as_container_host() + try: + with smtplib.SMTP( + mail_url.host_or_ip, mail_url.port, timeout=SOCKET_TIMEOUT + ) as server: server.ehlo() - server.login(self.username, self.password) - text = msg.as_string() - server.sendmail(sender, ", ".join(receiver), text) - # TODO: Add error handling + if server.has_extn("STARTTLS"): + server.starttls() + server.ehlo() + if self.username and self.password: + server.login(self.username, self.password) + text = msg.as_string() + server.sendmail(sender, 
", ".join(receiver), text) + return None + except Exception as e: + logger.error(f"Unable to send email. {e}") + raise SyftException( + public_message="Oops! Something went wrong while trying to send an email." + ) @classmethod def check_credentials( cls, server: str, port: int, username: str, password: str - ) -> Result[Ok, Err]: + ) -> bool: """Check if the credentials are valid. Returns: bool: True if the credentials are valid, False otherwise. """ try: - with smtplib.SMTP(server, port, timeout=cls.SOCKET_TIMEOUT) as smtp_server: + mail_url = ServerURL.from_url(f"smtp://{server}:{port}") + mail_url = mail_url.as_container_host() + + print(f"> Validating SMTP settings: {mail_url}") + with smtplib.SMTP( + mail_url.host_or_ip, mail_url.port, timeout=SOCKET_TIMEOUT + ) as smtp_server: smtp_server.ehlo() if smtp_server.has_extn("STARTTLS"): smtp_server.starttls() smtp_server.ehlo() smtp_server.login(username, password) - return Ok("Credentials are valid.") + return True except Exception as e: - return Err(e) + message = f"SMTP check_credentials failed. {e}" + print(message) + logger.error(message) + raise SyftException(public_message=str(e)) diff --git a/packages/syft/src/syft/service/object_search/migration_state_service.py b/packages/syft/src/syft/service/object_search/migration_state_service.py deleted file mode 100644 index ae415584d3c..00000000000 --- a/packages/syft/src/syft/service/object_search/migration_state_service.py +++ /dev/null @@ -1,73 +0,0 @@ -# stdlib - -# relative -from ...serde.serializable import serializable -from ...store.document_store import DocumentStore -from ..context import AuthedServiceContext -from ..response import SyftError -from ..service import AbstractService -from ..service import service_method -from .object_migration_state import SyftMigrationStateStash -from .object_migration_state import SyftObjectMigrationState - - -@serializable() -class MigrateStateService(AbstractService): - store: DocumentStore - stash: SyftMigrationStateStash - - def __init__(self, store: DocumentStore) -> None: - self.store = store - self.stash = SyftMigrationStateStash(store=store) - - @service_method(path="migration", name="get_version") - def get_version( - self, context: AuthedServiceContext, canonical_name: str - ) -> int | SyftError: - """Search for the metadata for an object.""" - - result = self.stash.get_by_name( - canonical_name=canonical_name, credentials=context.credentials - ) - - if result.is_err(): - return SyftError(message=f"{result.err()}") - - migration_state = result.ok() - - if migration_state is None: - return SyftError( - message=f"No migration state exists for canonical name: {canonical_name}" - ) - - return migration_state.current_version - - @service_method(path="migration", name="get_state") - def get_state( - self, context: AuthedServiceContext, canonical_name: str - ) -> bool | SyftError: - result = self.stash.get_by_name( - canonical_name=canonical_name, credentials=context.credentials - ) - - if result.is_err(): - return SyftError(message=f"{result.err()}") - - return result.ok() - - @service_method(path="migration", name="register_migration_state") - def register_migration_state( - self, - context: AuthedServiceContext, - current_version: int, - canonical_name: str, - ) -> SyftObjectMigrationState | SyftError: - obj = SyftObjectMigrationState( - current_version=current_version, canonical_name=canonical_name - ) - result = self.stash.set(migration_state=obj, credentials=context.credentials) - - if result.is_err(): - return 
SyftError(message=f"{result.err()}") - - return result.ok() diff --git a/packages/syft/src/syft/service/object_search/object_migration_state.py b/packages/syft/src/syft/service/object_search/object_migration_state.py deleted file mode 100644 index f5b3a043ea1..00000000000 --- a/packages/syft/src/syft/service/object_search/object_migration_state.py +++ /dev/null @@ -1,82 +0,0 @@ -# stdlib - -# third party -from result import Result - -# relative -from ...node.credentials import SyftVerifyKey -from ...serde.serializable import serializable -from ...store.document_store import BaseStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SyftMigrationRegistry -from ...types.syft_object import SyftObject -from ..action.action_permissions import ActionObjectPermission - - -@serializable() -class SyftObjectMigrationState(SyftObject): - __canonical_name__ = "SyftObjectMigrationState" - __version__ = SYFT_OBJECT_VERSION_2 - - __attr_unique__ = ["canonical_name"] - - canonical_name: str - current_version: int - - @property - def latest_version(self) -> int | None: - available_versions = SyftMigrationRegistry.get_versions( - canonical_name=self.canonical_name, - ) - if not available_versions: - return None - - return sorted(available_versions, reverse=True)[0] - - @property - def supported_versions(self) -> list: - return SyftMigrationRegistry.get_versions(self.canonical_name) - - -KlassNamePartitionKey = PartitionKey(key="canonical_name", type_=str) - - -@serializable() -class SyftMigrationStateStash(BaseStash): - object_type = SyftObjectMigrationState - settings: PartitionSettings = PartitionSettings( - name=SyftObjectMigrationState.__canonical_name__, - object_type=SyftObjectMigrationState, - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) - - def set( - self, - credentials: SyftVerifyKey, - migration_state: SyftObjectMigrationState, - add_permissions: list[ActionObjectPermission] | None = None, - add_storage_permission: bool = True, - ignore_duplicates: bool = False, - ) -> Result[SyftObjectMigrationState, str]: - res = self.check_type(migration_state, self.object_type) - # we dont use and_then logic here as it is hard because of the order of the arguments - if res.is_err(): - return res - return super().set( - credentials=credentials, - obj=res.ok(), - add_permissions=add_permissions, - add_storage_permission=add_storage_permission, - ignore_duplicates=ignore_duplicates, - ) - - def get_by_name( - self, canonical_name: str, credentials: SyftVerifyKey - ) -> Result[SyftObjectMigrationState, str]: - qks = KlassNamePartitionKey.with_obj(canonical_name) - return self.query_one(credentials=credentials, qks=qks) diff --git a/packages/syft/src/syft/service/output/output_service.py b/packages/syft/src/syft/service/output/output_service.py index 3e8ed9e8ffd..cdf567a5955 100644 --- a/packages/syft/src/syft/service/output/output_service.py +++ b/packages/syft/src/syft/service/output/output_service.py @@ -1,38 +1,30 @@ # stdlib -from typing import Any from typing import ClassVar # third party from pydantic import model_validator -from result import Result # relative -from ...client.api import APIRegistry -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from 
...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys +from ...server.credentials import SyftVerifyKey +from ...store.db.db import DBManager +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import StashException from ...store.linked_obj import LinkedObject from ...types.datetime import DateTime +from ...types.result import as_result from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syncable_object import SyncableSyftObject from ...types.uid import UID -from ...util.telemetry import instrument from ..action.action_object import ActionObject +from ..action.action_permissions import ActionObjectREAD from ..context import AuthedServiceContext -from ..response import SyftError from ..service import AbstractService from ..service import TYPE_TO_SERVICE from ..service import service_method +from ..user.user_roles import ADMIN_ROLE_LEVEL from ..user.user_roles import GUEST_ROLE_LEVEL -CreatedAtPartitionKey = PartitionKey(key="created_at", type_=DateTime) -UserCodeIdPartitionKey = PartitionKey(key="user_code_id", type_=UID) -OutputPolicyIdPartitionKey = PartitionKey(key="output_policy_id", type_=UID) - @serializable() class ExecutionOutput(SyncableSyftObject): @@ -48,6 +40,7 @@ class ExecutionOutput(SyncableSyftObject): # Required for __attr_searchable__, set by model_validator user_code_id: UID + job_id: UID | None = None # Output policy is not a linked object because its saved on the usercode output_policy_id: UID | None = None @@ -56,6 +49,7 @@ class ExecutionOutput(SyncableSyftObject): "user_code_id", "created_at", "output_policy_id", + "job_id", ] __repr_attrs__: ClassVar[list[str]] = [ "created_at", @@ -66,9 +60,11 @@ class ExecutionOutput(SyncableSyftObject): @model_validator(mode="before") @classmethod - def add_user_code_id(cls, values: dict) -> dict: + def add_searchable_link_ids(cls, values: dict) -> dict: if "user_code_link" in values: values["user_code_id"] = values["user_code_link"].object_uid + if values.get("job_link"): + values["job_id"] = values["job_link"].object_uid return values @classmethod @@ -77,7 +73,7 @@ def from_ids( output_ids: UID | list[UID] | dict[str, UID], user_code_id: UID, executing_user_verify_key: SyftVerifyKey, - node_uid: UID, + server_uid: UID, job_id: UID | None = None, output_policy_id: UID | None = None, input_ids: dict[str, UID] | None = None, @@ -95,7 +91,7 @@ def from_ids( object_uid=user_code_id, object_type=UserCode, service_type=UserCodeService, - node_uid=node_uid, + server_uid=server_uid, ) if job_id: @@ -103,10 +99,13 @@ def from_ids( object_uid=job_id, object_type=Job, service_type=JobService, - node_uid=node_uid, + server_uid=server_uid, ) else: job_link = None + + if input_ids is not None: + input_ids = {k: v for k, v in input_ids.items() if isinstance(v, UID)} return cls( output_ids=output_ids, user_code_link=user_code_link, @@ -118,14 +117,7 @@ def from_ids( @property def outputs(self) -> list[ActionObject] | dict[str, ActionObject] | None: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - raise ValueError( - f"Can't access the api. 
Please log in to {self.syft_node_location}" - ) + api = self.get_api() action_service = api.services.action # TODO: error handling for action_service.get @@ -169,11 +161,7 @@ def check_input_ids(self, kwargs: dict[str, UID]) -> bool: return False return True - @property - def job_id(self) -> UID | None: - return self.job_link.object_uid if self.job_link else None - - def get_sync_dependencies(self, api: Any = None) -> list[UID]: + def get_sync_dependencies(self, context: AuthedServiceContext) -> list[UID]: # Output ids, user code id, job id res = [] @@ -185,49 +173,41 @@ def get_sync_dependencies(self, api: Any = None) -> list[UID]: return res -@instrument -@serializable() -class OutputStash(BaseUIDStoreStash): - object_type = ExecutionOutput - settings: PartitionSettings = PartitionSettings( - name=ExecutionOutput.__canonical_name__, object_type=ExecutionOutput - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store) - self.store = store - self.settings = self.settings - self._object_type = self.object_type - +@serializable(canonical_name="OutputStashSQL", version=1) +class OutputStash(ObjectStash[ExecutionOutput]): + @as_result(StashException) def get_by_user_code_id( self, credentials: SyftVerifyKey, user_code_id: UID - ) -> Result[list[ExecutionOutput], str]: - qks = QueryKeys( - qks=[UserCodeIdPartitionKey.with_obj(user_code_id)], - ) - return self.query_all( - credentials=credentials, qks=qks, order_by=CreatedAtPartitionKey - ) - + ) -> list[ExecutionOutput]: + return self.get_all( + credentials=credentials, + filters={"user_code_id": user_code_id}, + ).unwrap() + + @as_result(StashException) + def get_by_job_id( + self, credentials: SyftVerifyKey, job_id: UID + ) -> ExecutionOutput | None: + return self.get_one( + credentials=credentials, + filters={"job_id": job_id}, + ).unwrap() + + @as_result(StashException) def get_by_output_policy_id( self, credentials: SyftVerifyKey, output_policy_id: UID - ) -> Result[list[ExecutionOutput], str]: - qks = QueryKeys( - qks=[OutputPolicyIdPartitionKey.with_obj(output_policy_id)], - ) - return self.query_all( - credentials=credentials, qks=qks, order_by=CreatedAtPartitionKey - ) + ) -> list[ExecutionOutput]: + return self.get_all( + credentials=credentials, + filters={"output_policy_id": output_policy_id}, + ).unwrap() -@instrument -@serializable() +@serializable(canonical_name="OutputService", version=1) class OutputService(AbstractService): - store: DocumentStore stash: OutputStash - def __init__(self, store: DocumentStore): - self.store = store + def __init__(self, store: DBManager): self.stash = OutputStash(store=store) @service_method( @@ -244,19 +224,18 @@ def create( job_id: UID | None = None, output_policy_id: UID | None = None, input_ids: dict[str, UID] | None = None, - ) -> ExecutionOutput | SyftError: + ) -> ExecutionOutput: output = ExecutionOutput.from_ids( output_ids=output_ids, user_code_id=user_code_id, executing_user_verify_key=executing_user_verify_key, - node_uid=context.node.id, # type: ignore + server_uid=context.server.id, # type: ignore job_id=job_id, output_policy_id=output_policy_id, input_ids=input_ids, ) - res = self.stash.set(context.credentials, output) - return res + return self.stash.set(context.credentials, output).unwrap() @service_method( path="output.get_by_user_code_id", @@ -265,14 +244,55 @@ def create( ) def get_by_user_code_id( self, context: AuthedServiceContext, user_code_id: UID - ) -> list[ExecutionOutput] | SyftError: - result = self.stash.get_by_user_code_id( - 
credentials=context.node.verify_key, # type: ignore + ) -> list[ExecutionOutput]: + return self.stash.get_by_user_code_id( + credentials=context.server.verify_key, # type: ignore user_code_id=user_code_id, - ) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + ).unwrap() + + @service_method( + path="output.has_output_read_permissions", + name="has_output_read_permissions", + roles=GUEST_ROLE_LEVEL, + ) + def has_output_read_permissions( + self, + context: AuthedServiceContext, + user_code_id: UID, + user_verify_key: SyftVerifyKey, + ) -> bool: + all_outputs = self.get_by_user_code_id(context, user_code_id) + for output in all_outputs: + # TODO tech debt: unclear why code owner can see outputhistory without permissions. + # It is not a security issue (output history has no data) it is confusing for user + # if not self.stash.has_permission( + # ActionObjectREAD(uid=output.id, credentials=user_verify_key) + # ): + # continue + + # Check if all output ActionObjects have permissions + result_ids = output.output_id_list + permissions = [ + ActionObjectREAD(uid=_id.id, credentials=user_verify_key) + for _id in result_ids + ] + if context.server.services.action.stash.has_permissions(permissions): + return True + + return False + + @service_method( + path="output.get_by_job_id", + name="get_by_job_id", + roles=ADMIN_ROLE_LEVEL, + ) + def get_by_job_id( + self, context: AuthedServiceContext, job_id: UID + ) -> ExecutionOutput: + return self.stash.get_by_job_id( + credentials=context.server.verify_key, # type: ignore + job_id=job_id, + ).unwrap() @service_method( path="output.get_by_output_policy_id", @@ -281,23 +301,23 @@ def get_by_user_code_id( ) def get_by_output_policy_id( self, context: AuthedServiceContext, output_policy_id: UID - ) -> list[ExecutionOutput] | SyftError: - result = self.stash.get_by_output_policy_id( - credentials=context.node.verify_key, # type: ignore + ) -> list[ExecutionOutput]: + return self.stash.get_by_output_policy_id( + credentials=context.server.verify_key, # type: ignore output_policy_id=output_policy_id, # type: ignore - ) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + ).unwrap() + + @service_method( + path="output.get", + name="get", + roles=GUEST_ROLE_LEVEL, + ) + def get(self, context: AuthedServiceContext, id: UID) -> ExecutionOutput: + return self.stash.get_by_uid(context.credentials, id).unwrap() @service_method(path="output.get_all", name="get_all", roles=GUEST_ROLE_LEVEL) - def get_all( - self, context: AuthedServiceContext - ) -> list[ExecutionOutput] | SyftError: - result = self.stash.get_all(context.credentials) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + def get_all(self, context: AuthedServiceContext) -> list[ExecutionOutput]: + return self.stash.get_all(context.credentials).unwrap() TYPE_TO_SERVICE[ExecutionOutput] = OutputService diff --git a/packages/syft/src/syft/service/policy/policy.py b/packages/syft/src/syft/service/policy/policy.py index 95dc78241eb..4d3f80de699 100644 --- a/packages/syft/src/syft/service/policy/policy.py +++ b/packages/syft/src/syft/service/policy/policy.py @@ -12,47 +12,77 @@ from inspect import Signature from io import StringIO import sys -import types from typing import Any -from typing import cast +from typing import ClassVar +from typing import TYPE_CHECKING # third party from RestrictedPython import compile_restricted -from result import Err -from result import Ok -from result import Result +from pydantic 
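Editor's note: throughout these services the old result.Ok/Err plumbing is replaced by the as_result decorator plus .unwrap() on the caller side: the stash method declares which exceptions it converts into a failed result, and unwrap() either returns the value or re-raises the captured error. A toy, dependency-free re-implementation of that control flow, purely illustrative and not the actual syft.types.result code:

from collections.abc import Callable
from typing import Any


class ToyResult:
    def __init__(self, value: Any = None, error: Exception | None = None) -> None:
        self.value = value
        self.error = error

    def unwrap(self) -> Any:
        # On success return the value, on failure re-raise the captured error.
        if self.error is not None:
            raise self.error
        return self.value


def as_result(*handled: type[Exception]) -> Callable:
    def decorator(func: Callable) -> Callable:
        def wrapper(*args: Any, **kwargs: Any) -> ToyResult:
            try:
                return ToyResult(value=func(*args, **kwargs))
            except handled as exc:
                return ToyResult(error=exc)
        return wrapper
    return decorator


@as_result(KeyError)
def get_output(db: dict, job_id: str) -> str:
    return db[job_id]


print(get_output({"job-1": "done"}, "job-1").unwrap())  # -> "done"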
import field_validator +from pydantic import model_validator +import requests # relative -from ...abstract_node import AbstractNode -from ...abstract_node import NodeType +from ...abstract_server import ServerType from ...client.api import APIRegistry -from ...client.api import NodeIdentity -from ...node.credentials import SyftVerifyKey +from ...client.api import RemoteFunction +from ...client.api import ServerIdentity from ...serde.recursive_primitives import recursive_serde_register_type from ...serde.serializable import serializable -from ...store.document_store import PartitionKey +from ...server.credentials import SyftVerifyKey +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException from ...types.datetime import DateTime -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject +from ...types.syft_object_registry import SyftObjectRegistry from ...types.transforms import TransformContext from ...types.transforms import generate_id from ...types.transforms import transform from ...types.twin_object import TwinObject from ...types.uid import UID from ...util.util import is_interpreter_jupyter +from ..action.action_endpoint import CustomEndpointActionObject from ..action.action_object import ActionObject +from ..action.action_permissions import ActionObjectPermission +from ..action.action_permissions import ActionPermission from ..code.code_parse import GlobalsVisitor from ..code.unparse import unparse from ..context import AuthedServiceContext from ..context import ChangeContext -from ..context import NodeServiceContext +from ..context import ServerServiceContext from ..dataset.dataset import Asset -from ..response import SyftError -from ..response import SyftSuccess -PolicyUserVerifyKeyPartitionKey = PartitionKey( - key="user_verify_key", type_=SyftVerifyKey -) +# Use this for return type enums: +# class MyEnum(Enum): +# MEMBER1 = ("Value1", True) +# MEMBER2 = ("Value2", False) +# MEMBER3 = ("Value3", True) + +# def __init__(self, value: str, flag: bool): +# self._value_ = value +# self.flag = flag + +# # Example usage: +# for member in MyEnum: +# print(f"Name: {member.name}, Value: {member.value}, Flag: {member.flag}") + + +class InputPolicyValidEnum(Enum): + VALID = "valid" + INVALID = "invalid" + + +class OutputPolicyValidEnum(Enum): + VALID = "valid" + INVALID = "invalid" + NOT_APPROVED = "not_approved" + + +DEFAULT_USER_POLICY_VERSION = 1 + PyCodeObject = Any @@ -69,13 +99,15 @@ def extract_uid(v: Any) -> UID: def filter_only_uids(results: Any) -> list[UID] | dict[str, UID] | UID: + # Prevent checking for __len__ on ActionObject (creates an Action) + if isinstance(results, ActionObject): + return extract_uid(results) + if not hasattr(results, "__len__"): results = [results] if isinstance(results, list): - output_list = [] - for v in results: - output_list.append(extract_uid(v)) + output_list = [extract_uid(v) for v in results] return output_list elif isinstance(results, dict): output_dict = {} @@ -88,7 +120,8 @@ def filter_only_uids(results: Any) -> list[UID] | dict[str, UID] | UID: class Policy(SyftObject): # version __canonical_name__: str = "Policy" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 + has_safe_serde: ClassVar[bool] = True id: UID init_kwargs: dict[Any, Any] = {} @@ -113,29 +146,29 @@ def 
policy_code(self) -> str: op_code += "\n" return op_code - def is_valid(self, *args: list, **kwargs: dict) -> SyftSuccess | SyftError: # type: ignore - return SyftSuccess(message="Policy is valid.") + def is_valid(self, *args: list, **kwargs: dict) -> bool: # type: ignore + return True def public_state(self) -> Any: raise NotImplementedError -@serializable() +@serializable(canonical_name="UserPolicyStatus", version=1) class UserPolicyStatus(Enum): SUBMITTED = "submitted" DENIED = "denied" APPROVED = "approved" -def partition_by_node(kwargs: dict[str, Any]) -> dict[NodeIdentity, dict[str, UID]]: +def partition_by_server(kwargs: dict[str, Any]) -> dict[ServerIdentity, dict[str, UID]]: # relative from ...client.api import APIRegistry - from ...client.api import NodeIdentity + from ...client.api import RemoteFunction + from ...client.api import ServerIdentity from ...types.twin_object import TwinObject from ..action.action_object import ActionObject # fetches the all the current api's connected - api_list = APIRegistry.get_all_api() output_kwargs = {} for k, v in kwargs.items(): uid = v @@ -143,32 +176,211 @@ def partition_by_node(kwargs: dict[str, Any]) -> dict[NodeIdentity, dict[str, UI uid = v.id if isinstance(v, TwinObject): uid = v.id + if isinstance(v, RemoteFunction): + uid = v.custom_function_actionobject_id().unwrap() if isinstance(v, Asset): uid = v.action_id if not isinstance(uid, UID): raise Exception(f"Input {k} must have a UID not {type(v)}") _obj_exists = False - for api in api_list: - if api.services.action.exists(uid): - node_identity = NodeIdentity.from_api(api) - if node_identity not in output_kwargs: - output_kwargs[node_identity] = {k: uid} - else: - output_kwargs[node_identity].update({k: uid}) - - _obj_exists = True - break + for identity, api in APIRegistry.__api_registry__.items(): + try: + if api.services.action.exists(uid): + server_identity = ServerIdentity.from_api(api) + if server_identity not in output_kwargs: + output_kwargs[server_identity] = {k: uid} + else: + output_kwargs[server_identity].update({k: uid}) + + _obj_exists = True + break + except (requests.exceptions.ConnectionError, SyftException): + # To handle the cases , where there an old api objects in + # in APIRegistry + continue + except Exception as e: + print(f"Error in partition_by_server with identity {identity}", e) + raise e if not _obj_exists: - raise Exception(f"Input data {k}:{uid} does not belong to any Domain") + raise Exception(f"Input data {k}:{uid} does not belong to any Datasite") return output_kwargs +@serializable() +class PolicyRule(SyftObject): + __canonical_name__ = "PolicyRule" + __version__ = SYFT_OBJECT_VERSION_1 + + kw: str + requires_input: bool = True + + def is_met( + self, context: AuthedServiceContext, action_object: ActionObject + ) -> bool: + return False + + +@serializable() +class CreatePolicyRule(SyftObject): + __canonical_name__ = "CreatePolicyRule" + __version__ = SYFT_OBJECT_VERSION_1 + + val: Any + + +@serializable() +class CreatePolicyRuleConstant(CreatePolicyRule): + __canonical_name__ = "CreatePolicyRuleConstant" + __version__ = SYFT_OBJECT_VERSION_1 + + val: Any + klass: None | type = None + + @model_validator(mode="before") + @classmethod + def set_klass(cls, data: Any) -> Any: + val = data["val"] + if isinstance(val, RemoteFunction): + klass = CustomEndpointActionObject + else: + klass = type(val) + data["klass"] = klass + return data + + @field_validator("val", mode="after") + @classmethod + def idify_endpoints(cls, value: str) -> str: + if 
isinstance(value, RemoteFunction): + return value.custom_function_actionobject_id().unwrap() + return value + + def to_policy_rule(self, kw: Any) -> PolicyRule: + return Constant(kw=kw, val=self.val, klass=self.klass) + + +@serializable() +class Matches(PolicyRule): + __canonical_name__ = "Matches" + __version__ = SYFT_OBJECT_VERSION_1 + + val: UID + + def is_met( + self, context: AuthedServiceContext, action_object: ActionObject + ) -> bool: + return action_object.id == self.val + + +@serializable() +class Constant(PolicyRule): + __canonical_name__ = "PreFill" + __version__ = SYFT_OBJECT_VERSION_1 + + val: Any + klass: type + requires_input: bool = False + + @property + def value(self) -> Any: + return self.val + + def is_met(self, context: AuthedServiceContext, *args: Any, **kwargs: Any) -> bool: + return True + + @as_result(SyftException) + def transform_kwarg(self, context: AuthedServiceContext, val: Any) -> Any: + if isinstance(self.val, UID): + if issubclass(self.klass, CustomEndpointActionObject): + obj = context.server.services.action.get( + context.as_root_context(), self.val + ) + return obj.syft_action_data + return self.val + + def _get_dict_for_user_code_repr(self) -> dict[str, Any]: + return self._coll_repr_() + + def _coll_repr_(self) -> dict[str, Any]: + return { + "klass": self.klass.__qualname__, + "val": str(self.val), + } + + +@serializable() +class UserOwned(PolicyRule): + __canonical_name__ = "UserOwned" + __version__ = SYFT_OBJECT_VERSION_1 + + # str, float, int, bool, dict, list, set, tuple + + type: ( + type[str] + | type[float] + | type[int] + | type[bool] + | type[dict] + | type[list] + | type[set] + | type[tuple] + | None + ) + + def is_owned( + self, context: AuthedServiceContext, action_object: ActionObject + ) -> bool: + action_store = context.server.services.action.stash + return action_store.has_permission( + ActionObjectPermission( + action_object.id, ActionPermission.OWNER, context.credentials + ) + ) + + def is_met( + self, context: AuthedServiceContext, action_object: ActionObject + ) -> bool: + return type(action_object.syft_action_data) == self.type and self.is_owned( + context, action_object + ) + + +def user_code_arg2id(arg: Any) -> UID: + if isinstance(arg, ActionObject): + uid = arg.id + elif isinstance(arg, TwinObject): + uid = arg.id + elif isinstance(arg, Asset): + uid = arg.action_id + elif isinstance(arg, RemoteFunction): + # TODO: Beach Fix + # why do we need another call to the server to get the UID? 
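Editor's note: the PolicyRule classes above give every keyword argument of a user function its own check: Matches pins an argument to one approved UID, Constant pre-fills a server-side value and needs no user input, and UserOwned accepts any argument of a given type that the requesting user owns. A simplified, dependency-free sketch of how such per-argument rules are evaluated; the names mirror the diff, but this is not the syft implementation and it omits the server context:

from dataclasses import dataclass
from uuid import UUID, uuid4


@dataclass
class ToyMatches:
    kw: str
    val: UUID
    requires_input: bool = True

    def is_met(self, submitted: UUID) -> bool:
        return submitted == self.val


@dataclass
class ToyConstant:
    kw: str
    val: object
    requires_input: bool = False

    def is_met(self) -> bool:
        return True  # value is injected server side, nothing to verify


approved_id = uuid4()
rules = {
    "data": ToyMatches(kw="data", val=approved_id),
    "threshold": ToyConstant(kw="threshold", val=10),
}
submitted_kwargs = {"data": approved_id}

ok = all(
    rule.is_met(submitted_kwargs[kw]) if rule.requires_input else rule.is_met()
    for kw, rule in rules.items()
)
print(ok)  # True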
+ uid = arg.custom_function_actionobject_id().unwrap() + else: + uid = arg + return uid + + +def retrieve_item_from_db(id: UID, context: AuthedServiceContext) -> ActionObject: + # relative + from ...service.action.action_object import TwinMode + + root_context = AuthedServiceContext( + server=context.server, credentials=context.server.verify_key + ) + return context.server.services.action._get( + context=root_context, + uid=id, + twin_mode=TwinMode.NONE, + has_permission=True, + ).unwrap() + + class InputPolicy(Policy): __canonical_name__ = "InputPolicy" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 def __init__(self, *args: Any, **kwargs: Any) -> None: if "init_kwargs" in kwargs: @@ -176,44 +388,38 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: del kwargs["init_kwargs"] else: # TODO: remove this tech debt, dont remove the id mapping functionality - init_kwargs = partition_by_node(kwargs) + init_kwargs = partition_by_server(kwargs) super().__init__(*args, init_kwargs=init_kwargs, **kwargs) - def _is_valid( + def is_valid( # type: ignore self, context: AuthedServiceContext, usr_input_kwargs: dict, - code_item_id: UID, - ) -> Result[bool, str]: + ) -> bool: raise NotImplementedError def filter_kwargs( self, kwargs: dict[Any, Any], context: AuthedServiceContext, - code_item_id: UID, ) -> dict[Any, Any]: raise NotImplementedError @property - def inputs(self) -> dict[NodeIdentity, Any]: + def inputs(self) -> dict[ServerIdentity, Any]: return self.init_kwargs - def _inputs_for_context(self, context: ChangeContext) -> dict | SyftError: - user_node_view = NodeIdentity.from_change_context(context) - inputs = self.inputs[user_node_view] - if context.node is None: - return SyftError(f"context {context}'s node is None") + def _inputs_for_context(self, context: ChangeContext) -> dict: + user_server_view = ServerIdentity.from_change_context(context) + inputs = self.inputs[user_server_view] root_context = AuthedServiceContext( - node=context.node, credentials=context.approving_user_credentials + server=context.server, credentials=context.approving_user_credentials ).as_root_context() - action_service = context.node.get_service("actionservice") for var_name, uid in inputs.items(): - action_object = action_service.get(uid=uid, context=root_context) - if action_object.is_err(): - return SyftError(message=action_object.err()) - action_object_value = action_object.ok() + action_object_value = context.server.services.action.get( + uid=uid, context=root_context + ).unwrap() # resolve syft action data from blob store if isinstance(action_object_value, TwinObject): action_object_value.private_obj.syft_action_data # noqa: B018 @@ -224,86 +430,237 @@ def _inputs_for_context(self, context: ChangeContext) -> dict | SyftError: return inputs +@serializable() +class MixedInputPolicy(InputPolicy): + # version + __canonical_name__ = "MixedInputPolicy" + __version__ = SYFT_OBJECT_VERSION_1 + + kwarg_rules: dict[ServerIdentity, dict[str, PolicyRule]] + + def __init__( + self, init_kwargs: Any = None, client: Any = None, *args: Any, **kwargs: Any + ) -> None: + if init_kwargs is not None: + kwarg_rules = init_kwargs + kwargs = {} + else: + server_identity = self.find_server_identity(kwargs, client) + kwarg_rules_current_server = {} + for kw, arg in kwargs.items(): + if isinstance( + arg, UID | Asset | ActionObject | TwinObject | RemoteFunction + ): + kwarg_rules_current_server[kw] = Matches( + kw=kw, val=user_code_arg2id(arg) + ) + elif arg in [str, float, int, bool, dict, list, set, 
tuple]: # type: ignore[unreachable] + kwarg_rules_current_server[kw] = UserOwned(kw=kw, type=arg) + elif isinstance(arg, CreatePolicyRule): + kwarg_rules_current_server[kw] = arg.to_policy_rule(kw) + else: + raise ValueError("Incorrect argument") + kwarg_rules = {server_identity: kwarg_rules_current_server} + + super().__init__( + *args, kwarg_rules=kwarg_rules, init_kwargs=kwarg_rules, **kwargs + ) + + @as_result(SyftException) + def transform_kwargs( + self, context: AuthedServiceContext, kwargs: dict[str, Any] + ) -> dict[str, Any]: + for _, rules in self.kwarg_rules.items(): + for kw, rule in rules.items(): + if hasattr(rule, "transform_kwarg"): + kwargs[kw] = rule.transform_kwarg( + context, kwargs.get(kw, None) + ).unwrap() + return kwargs + + def find_server_identity( + self, kwargs: dict[str, Any], client: Any = None + ) -> ServerIdentity: + if client is not None: + return ServerIdentity.from_api(client.api) + + apis = APIRegistry.get_all_api() + matches = set() + has_ids = False + for val in kwargs.values(): + # we mostly get the UID here because we don't want to store all those + # other objects, so we need to create a global UID obj lookup service + if isinstance( + val, UID | Asset | ActionObject | TwinObject | RemoteFunction + ): + has_ids = True + id = user_code_arg2id(val) + for api in apis: + # TODO: Beach Fix + # here be dragons, we need to refactor this since the existance + # depends on the type and service + # also the whole ServerIdentity needs to be removed + check_endpoints = [ + api.services.action.exists, + api.services.api.exists, + ] + for check_endpoint in check_endpoints: + result = check_endpoint(id) + if result: + break # stop looking + if result: + server_identity = ServerIdentity.from_api(api) + matches.add(server_identity) + + if len(matches) == 0: + if not has_ids: + if len(apis) == 1: + return ServerIdentity.from_api(api) + else: + raise ValueError( + "Multiple Server Identities, please only login to one client (for this policy) and try again" + ) + else: + raise ValueError("No Server Identities") + if len(matches) > 1: + # TODO: Beach Fix + raise ValueError("Multiple Server Identities") + # we need to fix this as its possible we could + # grab the wrong API and call a different user context in jupyter testing + pass # just grab the first one + return matches.pop() + + def filter_kwargs( # type: ignore[override] + self, + kwargs: dict[str, UID], + context: AuthedServiceContext, + ) -> dict[Any, Any]: + try: + res = {} + for _, rules in self.kwarg_rules.items(): + for kw, rule in rules.items(): + if rule.requires_input: + passed_id = kwargs[kw] + actionobject: ActionObject = retrieve_item_from_db( + passed_id, context + ) + rule_check_args = (actionobject,) + else: + rule_check_args = () # type: ignore + # TODO + actionobject = rule.value + if not rule.is_met(context, *rule_check_args): + raise SyftException(public_message=f"{rule} is not met") + else: + res[kw] = actionobject + except Exception as e: + raise SyftException.from_exception( + e, public_message="failed to filter kwargs" + ) + return res + + def is_valid( # type: ignore[override] + self, + context: AuthedServiceContext, + usr_input_kwargs: dict, + ) -> bool: + filtered_input_kwargs = self.filter_kwargs( + kwargs=usr_input_kwargs, + context=context, + ) + expected_input_kwargs = set() + + for _inp_kwargs in self.inputs.values(): + for k in _inp_kwargs.keys(): + if k not in usr_input_kwargs and k not in filtered_input_kwargs: + raise SyftException( + public_message=f"Function missing required 
keyword argument: '{k}'" + ) + expected_input_kwargs.update(_inp_kwargs.keys()) + + permitted_input_kwargs = list(filtered_input_kwargs.keys()) + not_approved_kwargs = set(expected_input_kwargs) - set(permitted_input_kwargs) + + if len(not_approved_kwargs) > 0: + raise SyftException( + public_message=f"Input arguments: {not_approved_kwargs} to the function are not approved yet." + ) + + return True + + +@as_result(SyftException, NotFoundException, StashException) def retrieve_from_db( - code_item_id: UID, allowed_inputs: dict[str, UID], context: AuthedServiceContext -) -> Result[dict[str, Any], str]: + allowed_inputs: dict[str, UID], context: AuthedServiceContext +) -> dict[str, Any]: # relative from ...service.action.action_object import TwinMode - context.node = cast(AbstractNode, context.node) + if TYPE_CHECKING: + # relative + pass - action_service = context.node.get_service("actionservice") code_inputs = {} - # When we are retrieving the code from the database, we need to use the node's + # When we are retrieving the code from the database, we need to use the server's # verify key as the credentials. This is because when we approve the code, we # we allow the private data to be used only for this specific code. # but we are not modifying the permissions of the private data - root_context = AuthedServiceContext( - node=context.node, credentials=context.node.verify_key + server=context.server, credentials=context.server.verify_key ) - if context.node.node_type == NodeType.DOMAIN: - for var_name, arg_id in allowed_inputs.items(): - kwarg_value = action_service._get( - context=root_context, - uid=arg_id, - twin_mode=TwinMode.NONE, - has_permission=True, - ) - if kwarg_value.is_err(): - return Err(kwarg_value.err()) - code_inputs[var_name] = kwarg_value.ok() - elif context.node.node_type == NodeType.ENCLAVE: - dict_object = action_service.get(context=root_context, uid=code_item_id) - if dict_object.is_err(): - return Err(dict_object.err()) - for value in dict_object.ok().syft_action_data.values(): - code_inputs.update(value) - - else: - raise Exception( - f"Invalid Node Type for Code Submission:{context.node.node_type}" + if context.server.server_type != ServerType.DATASITE: + raise SyftException( + public_message=f"Invalid server type for code submission: {context.server.server_type}" ) - return Ok(code_inputs) + for var_name, arg_id in allowed_inputs.items(): + code_inputs[var_name] = context.server.services.action._get( + context=root_context, + uid=arg_id, + twin_mode=TwinMode.NONE, + has_permission=True, + ).unwrap() + return code_inputs + + +@as_result(SyftException) def allowed_ids_only( - allowed_inputs: dict[NodeIdentity, Any], + allowed_inputs: dict[ServerIdentity, Any], kwargs: dict[str, Any], context: AuthedServiceContext, -) -> dict[str, UID]: - context.node = cast(AbstractNode, context.node) - if context.node.node_type == NodeType.DOMAIN: - node_identity = NodeIdentity( - node_name=context.node.name, - node_id=context.node.id, - verify_key=context.node.signing_key.verify_key, - ) - allowed_inputs = allowed_inputs.get(node_identity, {}) - elif context.node.node_type == NodeType.ENCLAVE: - base_dict = {} - for key in allowed_inputs.values(): - base_dict.update(key) - allowed_inputs = base_dict - else: - raise Exception( - f"Invalid Node Type for Code Submission:{context.node.node_type}" +) -> dict[ServerIdentity, UID]: + if context.server.server_type != ServerType.DATASITE: + raise SyftException( + public_message=f"Invalid server type for code submission: 
{context.server.server_type}" ) + + allowed_inputs_for_server = None + for identity, inputs in allowed_inputs.items(): + if identity.server_id == context.server.id: + allowed_inputs_for_server = inputs + break + if allowed_inputs_for_server is None: + allowed_inputs_for_server = {} + filtered_kwargs = {} - for key in allowed_inputs.keys(): + for key in allowed_inputs_for_server.keys(): if key in kwargs: value = kwargs[key] uid = value + if not isinstance(uid, UID): uid = getattr(value, "id", None) - if uid != allowed_inputs[key]: - raise Exception( - f"Input with uid: {uid} for `{key}` not in allowed inputs: {allowed_inputs}" + if uid != allowed_inputs_for_server[key]: + raise SyftException( + public_message=f"Input with uid: {uid} for `{key}` not in allowed inputs: {allowed_inputs}" ) + filtered_kwargs[key] = value + return filtered_kwargs @@ -311,66 +668,58 @@ def allowed_ids_only( class ExactMatch(InputPolicy): # version __canonical_name__ = "ExactMatch" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 - def filter_kwargs( + # TODO: Improve exception handling here + def filter_kwargs( # type: ignore self, kwargs: dict[Any, Any], context: AuthedServiceContext, - code_item_id: UID, - ) -> Result[dict[Any, Any], str]: - try: - allowed_inputs = allowed_ids_only( - allowed_inputs=self.inputs, kwargs=kwargs, context=context - ) + ) -> dict[Any, Any]: + allowed_inputs = allowed_ids_only( + allowed_inputs=self.inputs, kwargs=kwargs, context=context + ).unwrap() - results = retrieve_from_db( - code_item_id=code_item_id, - allowed_inputs=allowed_inputs, - context=context, - ) - except Exception as e: - return Err(str(e)) - return results + return retrieve_from_db( + allowed_inputs=allowed_inputs, + context=context, + ).unwrap() - def _is_valid( + def is_valid( # type: ignore self, context: AuthedServiceContext, usr_input_kwargs: dict, - code_item_id: UID, - ) -> Result[bool, str]: + ) -> bool: filtered_input_kwargs = self.filter_kwargs( kwargs=usr_input_kwargs, context=context, - code_item_id=code_item_id, ) - if filtered_input_kwargs.is_err(): - return filtered_input_kwargs - - filtered_input_kwargs = filtered_input_kwargs.ok() - expected_input_kwargs = set() for _inp_kwargs in self.inputs.values(): for k in _inp_kwargs.keys(): if k not in usr_input_kwargs: - return Err(f"Function missing required keyword argument: '{k}'") + raise SyftException( + public_message=f"Function missing required keyword argument: '{k}'" + ) expected_input_kwargs.update(_inp_kwargs.keys()) permitted_input_kwargs = list(filtered_input_kwargs.keys()) + not_approved_kwargs = set(expected_input_kwargs) - set(permitted_input_kwargs) if len(not_approved_kwargs) > 0: - return Err( - f"Input arguments: {not_approved_kwargs} to the function are not approved yet." + raise SyftException( + public_message=f"Function arguments: {not_approved_kwargs} are not approved yet." 
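Editor's note: both ExactMatch.is_valid and MixedInputPolicy.is_valid above follow the same shape: resolve the approved inputs via filter_kwargs, then fail if any expected keyword argument is missing or not yet approved. On the client side a MixedInputPolicy lets each argument carry a different rule kind. A hedged usage sketch; whether these classes are re-exported from the top-level syft package, and the login and dataset details, are assumptions rather than something this diff shows:

import syft as sy

client = sy.login(port=8080, email="ds@openmined.org", password="***")
asset = client.datasets[0].assets[0]

@sy.syft_function(
    input_policy=sy.MixedInputPolicy(
        client=client,
        data=asset,        # pinned to this asset's action id (a Matches rule)
        multiplier=int,    # any int owned by the caller (a UserOwned rule)
    ),
    output_policy=sy.SingleExecutionExactOutput(),
)
def scale(data, multiplier):
    return data * multiplier

client.api.services.code.request_code_execution(code=scale)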
) - return Ok(True) + + return True @serializable() class OutputHistory(SyftObject): # version __canonical_name__ = "OutputHistory" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 output_time: DateTime outputs: list[UID] | dict[str, UID] | None = None @@ -380,16 +729,17 @@ class OutputHistory(SyftObject): class OutputPolicy(Policy): # version __canonical_name__ = "OutputPolicy" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 output_kwargs: list[str] = [] - node_uid: UID | None = None + server_uid: UID | None = None output_readers: list[SyftVerifyKey] = [] - def apply_output( + def apply_to_output( self, - context: NodeServiceContext, + context: ServerServiceContext, outputs: Any, + update_policy: bool = True, ) -> Any: # output_uids: Union[Dict[str, Any], list] = filter_only_uids(outputs) # if isinstance(output_uids, UID): @@ -403,67 +753,51 @@ def apply_output( return outputs - def is_valid(self, context: AuthedServiceContext) -> SyftSuccess | SyftError: # type: ignore + def is_valid(self, context: AuthedServiceContext | None) -> bool: # type: ignore raise NotImplementedError() @serializable() class OutputPolicyExecuteCount(OutputPolicy): __canonical_name__ = "OutputPolicyExecuteCount" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 limit: int - @property - def count(self) -> SyftError | int: - api = APIRegistry.api_for(self.syft_node_location, self.syft_client_verify_key) - if api is None: - raise ValueError( - f"api is None. You must login to {self.syft_node_location}" - ) - output_history = api.services.output.get_by_output_policy_id(self.id) + # def is_valid(self, context: AuthedServiceContext) -> bool: + # return self.count().unwrap() < self.limit - if isinstance(output_history, SyftError): - return output_history - return len(output_history) + # @as_result(SyftException) + # def count(self) -> int: + # api = self.get_api() + # output_history = api.services.output.get_by_output_policy_id(self.id) + # return len(output_history) - @property - def is_valid(self) -> SyftSuccess | SyftError: # type: ignore - execution_count = self.count - is_valid = execution_count < self.limit - if is_valid: - return SyftSuccess( - message=f"Policy is still valid. count: {execution_count} < limit: {self.limit}" - ) - return SyftError( - message=f"Policy is no longer valid. count: {execution_count} >= limit: {self.limit}" - ) + def count(self, context: AuthedServiceContext | None = None) -> int: + # client side + if context is None: + output_service = self.get_api().services.output + output_history = output_service.get_by_output_policy_id(self.id) + else: + # server side + output_history = context.server.services.output.get_by_output_policy_id( + context, self.id + ) # raises - def _is_valid(self, context: AuthedServiceContext) -> SyftSuccess | SyftError: - context.node = cast(AbstractNode, context.node) - output_service = context.node.get_service("outputservice") - output_history = output_service.get_by_output_policy_id(context, self.id) - if isinstance(output_history, SyftError): - return output_history - execution_count = len(output_history) - - is_valid = execution_count < self.limit - if is_valid: - return SyftSuccess( - message=f"Policy is still valid. count: {execution_count} < limit: {self.limit}" - ) - return SyftError( - message=f"Policy is no longer valid. 
count: {execution_count} >= limit: {self.limit}" - ) + return len(output_history) + + def is_valid(self, context: AuthedServiceContext | None = None) -> bool: # type: ignore + return self.count(context) < self.limit def public_state(self) -> dict[str, int]: - return {"limit": self.limit, "count": self.count} + # TODO: this count is not great, fix it. + return {"limit": self.limit, "count": self.count().unwrap()} @serializable() class OutputPolicyExecuteOnce(OutputPolicyExecuteCount): __canonical_name__ = "OutputPolicyExecuteOnce" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 limit: int = 1 @@ -471,7 +805,7 @@ class OutputPolicyExecuteOnce(OutputPolicyExecuteCount): SingleExecutionExactOutput = OutputPolicyExecuteOnce -@serializable() +@serializable(canonical_name="CustomPolicy", version=1) class CustomPolicy(type): # capture the init_kwargs transparently def __call__(cls, *args: Any, **kwargs: Any) -> None: @@ -480,15 +814,16 @@ def __call__(cls, *args: Any, **kwargs: Any) -> None: return obj -recursive_serde_register_type(CustomPolicy) +recursive_serde_register_type(CustomPolicy, canonical_name="CustomPolicy", version=1) -@serializable() +@serializable(canonical_name="CustomOutputPolicy", version=1) class CustomOutputPolicy(metaclass=CustomPolicy): - def apply_output( + def apply_to_output( self, - context: NodeServiceContext, + context: ServerServiceContext, outputs: Any, + update_policy: bool = True, ) -> Any | None: return outputs @@ -508,6 +843,7 @@ class UserInputPolicy(InputPolicy): pass +@serializable() class EmpyInputPolicy(InputPolicy): __canonical_name__ = "EmptyInputPolicy" pass @@ -520,10 +856,11 @@ class CustomInputPolicy(metaclass=CustomPolicy): @serializable() class UserPolicy(Policy): __canonical_name__: str = "UserPolicy" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 + has_safe_serde: ClassVar[bool] = False id: UID - node_uid: UID | None = None + server_uid: UID | None = None user_verify_key: SyftVerifyKey raw_code: str parsed_code: str @@ -542,10 +879,11 @@ def byte_code(self) -> PyCodeObject | None: def policy_code(self) -> str: return self.raw_code - def apply_output( + def apply_to_output( self, - context: NodeServiceContext, + context: ServerServiceContext, outputs: Any, + update_policy: bool = True, ) -> Any | None: return outputs @@ -590,7 +928,7 @@ def get_code_from_class(policy: type[CustomPolicy]) -> str: @serializable() class SubmitUserPolicy(Policy): __canonical_name__ = "SubmitUserPolicy" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID | None = None # type: ignore[assignment] code: str @@ -649,17 +987,19 @@ def process_class_code(raw_code: str, class_name: str) -> str: v = GlobalsVisitor() v.visit(tree) if len(tree.body) != 1 or not isinstance(tree.body[0], ast.ClassDef): - raise Exception( - "Class code should only contain the Class definition for your policy" + raise SyftException( + public_message="Class code should only contain the Class definition for your policy" ) old_class = tree.body[0] if len(old_class.bases) != 1 or old_class.bases[0].attr not in [ CustomInputPolicy.__name__, CustomOutputPolicy.__name__, ]: - raise Exception( - f"Class code should either implement {CustomInputPolicy.__name__} " - f"or {CustomOutputPolicy.__name__}" + raise SyftException( + public_message=( + f"Class code should either implement {CustomInputPolicy.__name__}" + f" or {CustomOutputPolicy.__name__}" + ) ) # TODO: changes the bases @@ -675,8 +1015,9 @@ def 
process_class_code(raw_code: str, class_name: str) -> str: new_class = tree.body[0] # TODO add this manually for stmt in new_class.body: - if isinstance(stmt, ast.FunctionDef) and stmt.name == "__init__": - stmt.name = "__user_init__" + if isinstance(stmt, ast.FunctionDef): + if stmt.name == "__init__": + stmt.name = "__user_init__" # change the module that the code will reference # this is required for the @serializable to mount it in the right path for serde @@ -708,14 +1049,16 @@ def process_class_code(raw_code: str, class_name: str) -> str: "Tuple", "Type", ] - for typing_type in typing_types: - new_body.append( - ast.ImportFrom( - module="typing", - names=[ast.alias(name=typing_type, asname=typing_type)], - level=0, - ) + new_body.append( + ast.ImportFrom( + module="typing", + names=[ + ast.alias(name=typing_type, asname=typing_type) + for typing_type in typing_types + ], + level=0, ) + ) new_body.append(new_class) module = ast.Module(new_body, type_ignores=[]) try: @@ -728,7 +1071,7 @@ def process_class_code(raw_code: str, class_name: str) -> str: def check_class_code(context: TransformContext) -> TransformContext: # TODO: define the proper checking for this case based on the ideas from UserCode # check for no globals - # check for Policy template -> __init__, apply_output, public_state + # check for Policy template -> __init__, apply_to_output, public_state # parse init signature # check dangerous libraries, maybe compile_restricted already does that if context.output is None: @@ -797,19 +1140,34 @@ def submit_policy_code_to_user_code() -> list[Callable]: ] -def add_class_to_user_module(klass: type, unique_name: str) -> type: - klass.__module__ = "syft.user" - klass.__name__ = unique_name - # syft absolute - import syft as sy +def register_policy_class(klass: type, unique_name: str) -> None: + nonrecursive = False + _serialize = None + _deserialize = None + attributes = list(klass.model_fields.keys()) + exclude_attrs: list = [] + serde_overrides: dict = {} + hash_exclude_attrs: list = [] + cls = klass + attribute_types: list = [] + version = 1 + + serde_attributes = ( + nonrecursive, + _serialize, + _deserialize, + attributes, + exclude_attrs, + serde_overrides, + hash_exclude_attrs, + cls, + attribute_types, + version, + ) - if not hasattr(sy, "user"): - user_module = types.ModuleType("user") - sys.modules["syft"].user = user_module - user_module = sy.user - setattr(user_module, unique_name, klass) - sys.modules["syft"].user = user_module - return klass + SyftObjectRegistry.register_cls( + canonical_name=unique_name, version=version, serde_attributes=serde_attributes + ) def execute_policy_code(user_policy: UserPolicy) -> Any: @@ -823,14 +1181,18 @@ def execute_policy_code(user_policy: UserPolicy) -> Any: sys.stdout = stdout sys.stderr = stderr - class_name = f"{user_policy.unique_name}" - if class_name in user_policy.__object_version_registry__.keys(): - policy_class = user_policy.__object_version_registry__[class_name] - else: + class_name = user_policy.unique_name + + try: + policy_class = SyftObjectRegistry.get_serde_class( + class_name, version=DEFAULT_USER_POLICY_VERSION + ) + except Exception: exec(user_policy.byte_code) # nosec policy_class = eval(user_policy.unique_name) # nosec - policy_class = add_class_to_user_module(policy_class, user_policy.unique_name) + policy_class.model_rebuild() + register_policy_class(policy_class, user_policy.unique_name) sys.stdout = stdout_ sys.stderr = stderr_ @@ -838,7 +1200,10 @@ def execute_policy_code(user_policy: UserPolicy) -> Any: 
return policy_class except Exception as e: - print("execute_byte_code failed", e, file=stderr_) + print( + f"execute_byte_code failed because of {e}, with the following code\n\n{user_policy.parsed_code}", + file=stderr_, + ) finally: sys.stdout = stdout_ @@ -849,13 +1214,37 @@ def load_policy_code(user_policy: UserPolicy) -> Any: try: policy_class = execute_policy_code(user_policy) return policy_class - except Exception as e: - raise Exception(f"Exception loading code. {user_policy}. {e}") + except SyftException as exc: + raise SyftException.from_exception( + exc, public_message=f"Exception loading code. {user_policy}." + ) def init_policy(user_policy: UserPolicy, init_args: dict[str, Any]) -> Any: policy_class = load_policy_code(user_policy) policy_object = policy_class() + + # Unwrap {ServerIdentity: {x: y}} -> {x: y} + # Tech debt: for input policies, we require ServerIdentity args beforehand, + # therefore at this stage we have to convert back to the plain args. + # Maybe there's a better way to do it. + if len(init_args) and isinstance(list(init_args.keys())[0], ServerIdentity): + unwrapped_init_kwargs = init_args + if len(init_args) > 1: + raise SyftException( + public_message="You shouldn't have more than one Server Identity." + ) + # Otherwise, unwrap it + init_args = init_args[list(init_args.keys())[0]] + init_args = {k: v for k, v in init_args.items() if k != "id"} + + # For input policies, this initializer wouldn't work properly: + # 1 - Passing {ServerIdentity: {kwargs: UIDs}} as keyword args doesn't work since keys must be strings + # 2 - Passing {kwargs: UIDs} in this initializer would not trigger the partition servers from the + # InputPolicy initializer. + # The cleanest way to solve it is by checking if it's an InputPolicy and then setting it manually.
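Editor's note: one small change inside process_class_code earlier in this file is worth calling out: instead of appending one ast.ImportFrom node per typing name, the new code builds a single ImportFrom whose names list holds every alias. A standalone demonstration of that construction using only the standard library; the list of names is abbreviated here:

import ast

typing_types = ["Any", "Tuple", "Type"]  # abbreviated; the real list is longer

import_node = ast.ImportFrom(
    module="typing",
    names=[ast.alias(name=t, asname=t) for t in typing_types],
    level=0,
)
module = ast.Module(body=[import_node], type_ignores=[])
ast.fix_missing_locations(module)
print(ast.unparse(module))
# from typing import Any as Any, Tuple as Tuple, Type as Type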
policy_object.__user_init__(**init_args) + if isinstance(policy_object, InputPolicy): + policy_object.init_kwargs = unwrapped_init_kwargs return policy_object diff --git a/packages/syft/src/syft/service/policy/policy_service.py b/packages/syft/src/syft/service/policy/policy_service.py index 23b89dd478d..1e5f430d109 100644 --- a/packages/syft/src/syft/service/policy/policy_service.py +++ b/packages/syft/src/syft/service/policy/policy_service.py @@ -2,10 +2,9 @@ # relative from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager from ...types.uid import UID from ..context import AuthedServiceContext -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import TYPE_TO_SERVICE @@ -15,45 +14,31 @@ from .user_policy_stash import UserPolicyStash -@serializable() +@serializable(canonical_name="PolicyService", version=1) class PolicyService(AbstractService): - store: DocumentStore stash: UserPolicyStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = UserPolicyStash(store=store) @service_method(path="policy.get_all", name="get_all") - def get_all_user_policy( - self, context: AuthedServiceContext - ) -> list[UserPolicy] | SyftError: - result = self.stash.get_all(context.credentials) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) - - @service_method(path="policy.add", name="add") + def get_all_user_policy(self, context: AuthedServiceContext) -> list[UserPolicy]: + return self.stash.get_all(context.credentials).unwrap() + + @service_method(path="policy.add", name="add", unwrap_on_success=False) def add_user_policy( self, context: AuthedServiceContext, policy_code: SubmitUserPolicy | UserPolicy, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: if isinstance(policy_code, SubmitUserPolicy): policy_code = policy_code.to(UserPolicy, context=context) - result = self.stash.set(context.credentials, policy_code) - if result.is_err(): - return SyftError(message=str(result.err())) - return SyftSuccess(message="Policy Code Submitted") + result = self.stash.set(context.credentials, policy_code).unwrap() + return SyftSuccess(message="Policy Code Submitted", value=result) @service_method(path="policy.get_by_uid", name="get_by_uid") - def get_policy_by_uid( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: - result = self.stash.get_by_uid(context.credentials, uid=uid) - if result.is_ok(): - return result.ok() - return SyftError(message=result.err()) + def get_policy_by_uid(self, context: AuthedServiceContext, uid: UID) -> UserPolicy: + return self.stash.get_by_uid(context.credentials, uid=uid).unwrap() TYPE_TO_SERVICE[UserPolicy] = UserPolicy diff --git a/packages/syft/src/syft/service/policy/user_policy_stash.py b/packages/syft/src/syft/service/policy/user_policy_stash.py index fdb568e41e9..9e3a103280b 100644 --- a/packages/syft/src/syft/service/policy/user_policy_stash.py +++ b/packages/syft/src/syft/service/policy/user_policy_stash.py @@ -1,31 +1,22 @@ # stdlib -# third party -from result import Result - # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from 
.policy import PolicyUserVerifyKeyPartitionKey +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.result import as_result from .policy import UserPolicy -@serializable() -class UserPolicyStash(BaseUIDStoreStash): - object_type = UserPolicy - settings: PartitionSettings = PartitionSettings( - name=UserPolicy.__canonical_name__, object_type=UserPolicy - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) - +@serializable(canonical_name="UserPolicySQLStash", version=1) +class UserPolicyStash(ObjectStash[UserPolicy]): + @as_result(StashException, NotFoundException) def get_all_by_user_verify_key( self, credentials: SyftVerifyKey, user_verify_key: SyftVerifyKey - ) -> Result[list[UserPolicy], str]: - qks = QueryKeys(qks=[PolicyUserVerifyKeyPartitionKey.with_obj(user_verify_key)]) - return self.query_one(credentials=credentials, qks=qks) + ) -> list[UserPolicy]: + return self.get_all( + credentials=credentials, + filters={"user_verify_key": user_verify_key}, + ).unwrap() diff --git a/packages/syft/src/syft/service/project/project.py b/packages/syft/src/syft/service/project/project.py index aa8048f788e..5ed21f5007d 100644 --- a/packages/syft/src/syft/service/project/project.py +++ b/packages/syft/src/syft/service/project/project.py @@ -17,56 +17,55 @@ from typing_extensions import Self # relative -from ...client.api import NodeIdentity +from ...client.api import ServerIdentity from ...client.client import SyftClient from ...client.client import SyftClientSessionCache -from ...node.credentials import SyftSigningKey -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable from ...serde.serialize import _serialize -from ...service.metadata.node_metadata import NodeMetadataV3 +from ...server.credentials import SyftSigningKey +from ...server.credentials import SyftVerifyKey +from ...service.metadata.server_metadata import ServerMetadata from ...store.linked_obj import LinkedObject from ...types.datetime import DateTime +from ...types.errors import SyftException from ...types.identity import Identity from ...types.identity import UserIdentity -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject from ...types.syft_object import short_qual_name from ...types.transforms import TransformContext from ...types.transforms import rename from ...types.transforms import transform from ...types.uid import UID -from ...util import options -from ...util.colors import SURFACE +from ...util.decorators import deprecated from ...util.markdown import markdown_as_class_with_fields from ...util.util import full_name_with_qualname from ..code.user_code import SubmitUserCode -from ..network.network_service import NodePeer -from ..network.routes import NodeRoute +from ..network.network_service import ServerPeer +from ..network.routes import ServerRoute from ..network.routes import connection_to_route from ..request.request import Request from ..request.request import RequestStatus -from ..response import SyftError -from ..response import SyftException from ..response import SyftInfo from ..response import SyftNotReady from ..response import SyftSuccess from ..user.user import UserView -@serializable() +@serializable(canonical_name="EventAlreadyAddedException", version=1) class 
EventAlreadyAddedException(SyftException): pass -@transform(NodeMetadataV3, NodeIdentity) -def metadata_to_node_identity() -> list[Callable]: - return [rename("id", "node_id"), rename("name", "node_name")] +@transform(ServerMetadata, ServerIdentity) +def metadata_to_server_identity() -> list[Callable]: + return [rename("id", "server_id"), rename("name", "server_name")] +@serializable() class ProjectEvent(SyftObject): __canonical_name__ = "ProjectEvent" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __hash_exclude_attrs__ = ["event_hash", "signature"] @@ -109,26 +108,28 @@ def rebase(self, project: Project) -> Self: return self @property - def valid(self) -> SyftSuccess | SyftError: + def valid(self) -> SyftSuccess: if self.signature is None: - return SyftError(message="Sign event first") + raise SyftException(public_message="Sign event first") try: # Recompute hash event_hash_bytes, current_hash = create_project_event_hash(self) if current_hash != self.event_hash: - raise Exception( - f"Event hash {current_hash} does not match {self.event_hash}" + raise SyftException( + public_message=f"Event hash {current_hash} does not match {self.event_hash}" ) if self.creator_verify_key is None: - return SyftError(message=f"{self}'s creator_verify_key is None") + raise SyftException( + public_message=f"{self}'s creator_verify_key is None" + ) self.creator_verify_key.verify_key.verify(event_hash_bytes, self.signature) return SyftSuccess(message="Event signature is valid") except Exception as e: - return SyftError(message=f"Failed to validate message. {e}") + raise SyftException(public_message=f"Failed to validate message. {e}") def valid_descendant( self, project: Project, prev_event: Self | None - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: valid = self.valid if not valid: return valid @@ -143,14 +144,14 @@ def valid_descendant( prev_seq_no = 0 if self.prev_event_uid != prev_event_id: - return SyftError( - message=f"{self} prev_event_uid: {self.prev_event_uid} " + raise SyftException( + public_message=f"{self} prev_event_uid: {self.prev_event_uid} " "does not match {prev_event_id}" ) if self.prev_event_hash != prev_event_hash: - return SyftError( - message=f"{self} prev_event_hash: {self.prev_event_hash} " + raise SyftException( + public_message=f"{self} prev_event_hash: {self.prev_event_hash} " "does not match {prev_event_hash}" ) @@ -159,14 +160,14 @@ def valid_descendant( and (self.seq_no is not None) and (self.seq_no != prev_seq_no + 1) ): - return SyftError( - message=f"{self} seq_no: {self.seq_no} " + raise SyftException( + public_message=f"{self} seq_no: {self.seq_no} " "is not subsequent to {prev_seq_no}" ) if self.project_id != project.id: - return SyftError( - message=f"{self} project_id: {self.project_id} " + raise SyftException( + public_message=f"{self} project_id: {self.project_id} " "does not match {project.id}" ) @@ -176,8 +177,9 @@ def valid_descendant( parent_event.allowed_sub_types is not None and type(self) not in parent_event.allowed_sub_types ): - return SyftError( - message=f"{self} is not a valid subevent" f"for {parent_event}" + raise SyftException( + public_message=f"{self} is not a valid subevent" + f"for {parent_event}" ) return SyftSuccess(message=f"{self} is valid descendant of {prev_event}") @@ -195,7 +197,7 @@ def sign(self, signing_key: SyftSigningKey) -> None: signed_obj = signing_key.signing_key.sign(event_hash_bytes) self.signature = signed_obj._signature - def publish(self, project: Project) -> SyftSuccess | SyftError: + def 
publish(self, project: Project) -> SyftSuccess: try: result = project.add_event(self) return result @@ -205,12 +207,12 @@ def publish(self, project: Project) -> SyftSuccess | SyftError: class ProjectEventAddObject(ProjectEvent): __canonical_name__ = "ProjectEventAddObject" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 class ProjectEventAddLink(ProjectEvent): __canonical_name__ = "ProjectEventAddLink" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 # Project Sub Event are the events which tend to describe the main events @@ -224,7 +226,7 @@ class ProjectEventAddLink(ProjectEvent): # such that only allowed events could be the sub type of the main event class ProjectSubEvent(ProjectEvent): __canonical_name__ = "ProjectSubEvent" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 parent_event_id: UID @@ -232,7 +234,7 @@ class ProjectSubEvent(ProjectEvent): @serializable() class ProjectThreadMessage(ProjectSubEvent): __canonical_name__ = "ProjectThreadMessage" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 message: str @@ -240,7 +242,7 @@ class ProjectThreadMessage(ProjectSubEvent): @serializable() class ProjectMessage(ProjectEventAddObject): __canonical_name__ = "ProjectMessage" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 message: str allowed_sub_types: list[type] = [ProjectThreadMessage] @@ -252,7 +254,7 @@ def reply(self, message: str) -> ProjectMessage: @serializable() class ProjectRequestResponse(ProjectSubEvent): __canonical_name__ = "ProjectRequestResponse" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 response: bool @@ -260,7 +262,7 @@ class ProjectRequestResponse(ProjectSubEvent): @serializable() class ProjectRequest(ProjectEventAddObject): __canonical_name__ = "ProjectRequest" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 linked_request: LinkedObject allowed_sub_types: list[type] = [ProjectRequestResponse] @@ -269,7 +271,8 @@ class ProjectRequest(ProjectEventAddObject): @classmethod def _validate_linked_request(cls, v: Any) -> LinkedObject: if isinstance(v, Request): - linked_request = LinkedObject.from_obj(v, node_uid=v.node_uid) + linked_request = LinkedObject.from_obj(v, server_uid=v.server_uid) + linked_request.syft_server_location = v.syft_server_location return linked_request elif isinstance(v, LinkedObject): return v @@ -298,19 +301,17 @@ def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: return markdown_as_class_with_fields(self, repr_dict) def approve(self) -> ProjectRequestResponse: - result = self.request.approve() - if isinstance(result, SyftError): - return result + self.request.approve().unwrap() return ProjectRequestResponse(response=True, parent_event_id=self.id) def accept_by_depositing_result( self, result: Any, force: bool = False - ) -> SyftError | SyftSuccess: + ) -> SyftSuccess: return self.request.accept_by_depositing_result(result=result, force=force) # TODO: To add deny requests, when deny functionality is added - def status(self, project: Project) -> SyftInfo | SyftError | None: + def status(self, project: Project) -> SyftInfo | None: """Returns the status of the request. Args: @@ -327,8 +328,8 @@ def status(self, project: Project) -> SyftInfo | SyftError | None: "No one has responded to the request yet. 
Kindly recheck later 🙂" ) elif len(responses) > 1: - return SyftError( - message="The Request Contains more than one Response" + raise SyftException( + public_message="The Request Contains more than one Response" "which is currently not possible" "The request should contain only one response" "Kindly re-submit a new request" @@ -336,8 +337,8 @@ def status(self, project: Project) -> SyftInfo | SyftError | None: ) response = responses[0] if not isinstance(response, ProjectRequestResponse): - return SyftError( # type: ignore[unreachable] - message=f"Response : {type(response)} is not of type ProjectRequestResponse" + raise SyftException( # type: ignore[unreachable] + public_message=f"Response : {type(response)} is not of type ProjectRequestResponse" ) print("Request Status : ", "Approved" if response.response else "Denied") @@ -531,7 +532,7 @@ def poll_answer_wizard(poll: ProjectMultipleChoicePoll) -> int: @serializable() class AnswerProjectPoll(ProjectSubEvent): __canonical_name__ = "AnswerProjectPoll" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 answer: int @@ -539,7 +540,7 @@ class AnswerProjectPoll(ProjectSubEvent): @serializable() class ProjectMultipleChoicePoll(ProjectEventAddObject): __canonical_name__ = "ProjectPoll" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 question: str choices: list[str] @@ -557,7 +558,7 @@ def answer(self, answer: int) -> ProjectMessage: def status( self, project: Project, pretty_print: bool = True - ) -> dict | SyftError | SyftInfo | None: + ) -> dict | SyftInfo | None: """Returns the status of the poll Args: @@ -576,8 +577,8 @@ def status( respondents = {} for poll_answer in poll_answers[::-1]: if not isinstance(poll_answer, AnswerProjectPoll): - return SyftError( # type: ignore[unreachable] - message=f"Poll answer: {type(poll_answer)} is not of type AnswerProjectPoll" + raise SyftException( # type: ignore[unreachable] + public_message=f"Poll answer: {type(poll_answer)} is not of type AnswerProjectPoll" ) creator_verify_key = poll_answer.creator_verify_key @@ -600,7 +601,7 @@ class ConsensusModel: pass -@serializable() +@serializable(canonical_name="DemocraticConsensusModel", version=1) class DemocraticConsensusModel(ConsensusModel): threshold: float = 50 @@ -618,15 +619,15 @@ def add_code_request_to_project( code: SubmitUserCode, client: SyftClient | Any, reason: str | None = None, -) -> SyftError | SyftSuccess: +) -> SyftSuccess: # TODO: fix the mypy issue if not isinstance(code, SubmitUserCode): - return SyftError( # type: ignore[unreachable] - message=f"Currently we are only support creating requests for SubmitUserCode: {type(code)}" + raise SyftException( # type: ignore[unreachable] + public_message=f"Currently we are only support creating requests for SubmitUserCode: {type(code)}" ) if not isinstance(client, SyftClient): - return SyftError(message="Client should be a valid SyftClient") + raise SyftException(public_message="Client should be a valid SyftClient") if reason is None: reason = f"Code Request for Project: {project.name} has been submitted by {project.created_by}" @@ -634,18 +635,16 @@ def add_code_request_to_project( submitted_req = client.api.services.code.request_code_execution( code=code, reason=reason ) - if isinstance(submitted_req, SyftError): - return submitted_req - - request_event = ProjectRequest(linked_request=submitted_req) + request_event = ProjectRequest( + linked_request=submitted_req, + syft_client_verify_key=client.verify_key, + syft_server_location=client.id, + ) 
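        # Illustrative usage note (not part of this change): this helper is normally reached
        # via Project.create_code_request / ProjectSubmit.create_code_request, e.g.
        #   project.create_code_request(obj=my_submit_user_code, client=ds_client)
        # where `my_submit_user_code` and `ds_client` are placeholder names for a
        # SubmitUserCode object and a logged-in SyftClient.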
if isinstance(project, ProjectSubmit) and project.bootstrap_events is not None: project.bootstrap_events.append(request_event) else: - result = project.add_event(request_event) - if isinstance(result, SyftError): - return result - + project.add_event(request_event) return SyftSuccess( message=f"Code request for '{code.func_name}' successfully added to '{project.name}' Project. " f"To see code requests by a client, run `[your_client].code`" @@ -655,25 +654,25 @@ def add_code_request_to_project( @serializable() class Project(SyftObject): __canonical_name__ = "Project" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __repr_attrs__ = ["name", "description", "created_by"] __attr_unique__ = ["name"] - # TODO: re-add users, members, leader_node_peer + # TODO: re-add users, members, leader_server_peer __hash_exclude_attrs__ = [ "user_signing_key", "start_hash", "users", "members", - "leader_node_peer", + "leader_server_peer", "event_id_hashmap", ] id: UID | None = None # type: ignore[assignment] name: str description: str | None = None - members: list[NodeIdentity] + members: list[ServerIdentity] users: list[UserIdentity] = [] username: str | None = None created_by: str @@ -686,8 +685,8 @@ class Project(SyftObject): event_id_hashmap: dict[UID, ProjectEvent] = {} # Project sync - state_sync_leader: NodeIdentity - leader_node_peer: NodePeer | None = None + state_sync_leader: ServerIdentity + leader_server_peer: ServerPeer | None = None # Unused consensus_model: ConsensusModel @@ -705,12 +704,7 @@ def _coll_repr_(self) -> dict: def _repr_html_(self) -> Any: return ( - f""" - - """ - + "
    " + "
    " + f"

{self.name} " + f" {self.description} " + f" Created by: {self.username} ({self.created_by})
    " @@ -719,7 +713,7 @@ def _repr_html_(self) -> Any: + "
    " ) - def _broadcast_event(self, project_event: ProjectEvent) -> SyftSuccess | SyftError: + def _broadcast_event(self, project_event: ProjectEvent) -> SyftSuccess: leader_client = self.get_leader_client(self.user_signing_key) return leader_client.api.services.project.broadcast_event(project_event) @@ -736,35 +730,40 @@ def key_in_project(self, verify_key: SyftVerifyKey) -> bool: def get_identity_from_key( self, verify_key: SyftVerifyKey - ) -> list[NodeIdentity | UserIdentity]: + ) -> list[ServerIdentity | UserIdentity]: identities: list[Identity] = self.get_all_identities() for identity in identities: if identity.verify_key == verify_key: return identity - return SyftError(message=f"Member with verify key: {verify_key} not found") + raise SyftException( + public_message=f"Member with verify key: {verify_key} not found" + ) def get_leader_client(self, signing_key: SyftSigningKey) -> SyftClient: - if self.leader_node_peer is None: - raise Exception("Leader node peer is not set") + if self.leader_server_peer is None: + raise Exception("Leader server peer is not set") if signing_key is None: raise Exception("Signing key is required to create leader client") verify_key = signing_key.verify_key - leader_client = SyftClientSessionCache.get_client_by_uid_and_verify_key( - verify_key=verify_key, node_uid=self.leader_node_peer.id + cached_leader_client = SyftClientSessionCache.get_client_by_uid_and_verify_key( + verify_key=verify_key, server_uid=self.leader_server_peer.id ) - if leader_client is None: - leader_client = self.leader_node_peer.client_with_key(signing_key) + if cached_leader_client is None: + leader_client: SyftClient = self.leader_server_peer.client_with_key( + signing_key + ).unwrap() SyftClientSessionCache.add_client_by_uid_and_verify_key( verify_key=verify_key, - node_uid=leader_client.id, + server_uid=leader_client.id, syft_client=leader_client, ) + return leader_client - return leader_client + return cached_leader_client def has_permission(self, verify_key: SyftVerifyKey) -> bool: # Currently the permission function, initially checks only if the @@ -775,15 +774,13 @@ def has_permission(self, verify_key: SyftVerifyKey) -> bool: def _append_event( self, event: ProjectEvent, credentials: SyftSigningKey - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: prev_event = self.events[-1] if self.events else None valid = event.valid_descendant(self, prev_event) if not valid: return valid result = self._broadcast_event(event) - if isinstance(result, SyftError): - return result if isinstance(result, SyftNotReady): # If the client if out of sync, sync project updates from leader self.sync() @@ -792,7 +789,7 @@ def _append_event( # Retrying broadcasting the event to leader # recursively call _append_event as due to network latency the event could reach late # and other events would be being streamed to the leader - # This scenario could lead to starvation of node trying to sync with the leader + # This scenario could lead to starvation of server trying to sync with the leader # This would be solved in our future leaderless approach return self._append_event(event=event, credentials=credentials) @@ -809,7 +806,7 @@ def add_event( self, event: ProjectEvent, credentials: SyftSigningKey | SyftClient | None = None, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: if event.id in self.event_ids: raise EventAlreadyAddedException(f"Event already added. 
{event}") @@ -819,7 +816,9 @@ def add_event( credentials = credentials.credentials if not isinstance(credentials, SyftSigningKey): - raise Exception(f"Adding an event requires a signing key. {credentials}") + raise SyftException( + public=f"Adding an event requires a signing key. {credentials}" + ) event.creator_verify_key = credentials.verify_key event._pre_add_update(self) @@ -829,7 +828,7 @@ def add_event( result = self._append_event(event, credentials=credentials) return result - def validate_events(self, debug: bool = False) -> SyftSuccess | SyftError: + def validate_events(self, debug: bool = False) -> SyftSuccess: current_hash = self.start_hash def valid_str(current_hash: int) -> str: @@ -850,9 +849,6 @@ def valid_str(current_hash: int) -> str: f"{icon} {type(event).__name__}: {event.id} " f"after {type(prev_event).__name__}: {prev_event.id}" ) - - if not result: - return result last_event = event return SyftSuccess(message=valid_str(current_hash)) @@ -921,16 +917,9 @@ def create_code_request( obj: SubmitUserCode, client: SyftClient | None = None, reason: str | None = None, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: if client is None: - leader_client = self.get_leader_client(self.user_signing_key) - res = add_code_request_to_project( - project=self, - code=obj, - client=leader_client, - reason=reason, - ) - return res + client = self.get_leader_client(self.user_signing_key) return add_code_request_to_project( project=self, code=obj, @@ -939,11 +928,11 @@ def create_code_request( ) def get_messages(self) -> list[ProjectMessage | ProjectThreadMessage]: - messages = [] - for event in self.events: - if isinstance(event, ProjectMessage | ProjectThreadMessage): - messages.append(event) - return messages + return [ + event + for event in self.events + if isinstance(event, (ProjectMessage | ProjectThreadMessage)) + ] @property def messages(self) -> str: @@ -965,21 +954,19 @@ def messages(self) -> str: def get_last_seq_no(self) -> int: return len(self.events) - def send_message(self, message: str) -> SyftSuccess | SyftError: + def send_message(self, message: str) -> SyftSuccess: message_event = ProjectMessage(message=message) - result = self.add_event(message_event) - if isinstance(result, SyftSuccess): - return SyftSuccess(message="Message sent successfully") - return result + self.add_event(message_event) + return SyftSuccess(message="Message sent successfully") def reply_message( self, reply: str, message: UID | ProjectMessage | ProjectThreadMessage, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: if isinstance(message, UID): if message not in self.event_ids: - return SyftError(message=f"Message id: {message} not found") + raise SyftException(public_message=f"Message id: {message} not found") message = self.event_id_hashmap[message] reply_event: ProjectMessage | ProjectThreadMessage @@ -990,21 +977,19 @@ def reply_message( message=reply, parent_event_id=message.parent_event_id ) else: - return SyftError( - message=f"You can only reply to a message: {type(message)}" + raise SyftException( + public_message=f"You can only reply to a message: {type(message)}" "Kindly re-check the msg" ) - result = self.add_event(reply_event) - if isinstance(result, SyftSuccess): - return SyftSuccess(message="Reply sent successfully") - return result + self.add_event(reply_event) + return SyftSuccess(message="Reply sent successfully") def create_poll( self, question: str | None = None, choices: list[str] | None = None, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: if ( question is None or 
choices is None @@ -1014,76 +999,63 @@ def create_poll( question, choices = poll_creation_wizard() poll_event = ProjectMultipleChoicePoll(question=question, choices=choices) - result = self.add_event(poll_event) - if isinstance(result, SyftSuccess): - return SyftSuccess(message="Poll created successfully") - return result + self.add_event(poll_event) + return SyftSuccess(message="Poll created successfully") def answer_poll( self, poll: UID | ProjectMultipleChoicePoll, answer: int | None = None, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: if isinstance(poll, UID): if poll not in self.event_ids: - return SyftError(message=f"Poll id: {poll} not found") + raise SyftException(public_message=f"Poll id: {poll} not found") poll = self.event_id_hashmap[poll] if not isinstance(poll, ProjectMultipleChoicePoll): - return SyftError( # type: ignore[unreachable] - message=f"You can only reply to a poll: {type(poll)}" + raise SyftException( # type: ignore[unreachable] + public_message=f"You can only reply to a poll: {type(poll)}" "Kindly re-check the poll" ) if not isinstance(answer, int) or answer <= 0 or answer > len(poll.choices): answer = poll_answer_wizard(poll) - answer_event = poll.answer(answer) - - result = self.add_event(answer_event) - if isinstance(result, SyftSuccess): - return SyftSuccess(message="Poll answered successfully") - return result + self.add_event(answer_event) + return SyftSuccess(message="Poll answered successfully") def add_request( self, request: Request, - ) -> SyftSuccess | SyftError: - linked_request = LinkedObject.from_obj(request, node_uid=request.node_uid) + ) -> SyftSuccess: + linked_request = LinkedObject.from_obj(request, server_uid=request.server_uid) request_event = ProjectRequest(linked_request=linked_request) - result = self.add_event(request_event) - - if isinstance(result, SyftSuccess): - return SyftSuccess(message="Request created successfully") - return result + self.add_event(request_event) + return SyftSuccess(message="Request created successfully") # Since currently we do not have the notion of denying a request # Adding only approve request, which would later be used to approve or deny a request def approve_request( self, request: UID | ProjectRequest, - ) -> SyftError | SyftSuccess: + ) -> SyftSuccess: if isinstance(request, UID): if request not in self.event_ids: - return SyftError(message=f"Request id: {request} not found") + raise SyftException(public_message=f"Request id: {request} not found") request = self.event_id_hashmap[request] request_event: ProjectRequestResponse if isinstance(request, ProjectRequest): request_event = request.approve() - if isinstance(request_event, SyftError): - return request_event else: - return SyftError( # type: ignore[unreachable] - message=f"You can only approve a request: {type(request)}" + raise SyftException( # type: ignore[unreachable] + public_message=f"You can only approve a request: {type(request)}" "Kindly re-check the request" ) - result = self.add_event(request_event) - if isinstance(result, SyftSuccess): - return SyftSuccess(message="Request approved successfully") - return result + self.add_event(request_event) + return SyftSuccess(message="Request approved successfully") - def sync(self, verbose: bool | None = True) -> SyftSuccess | SyftError: + def sync(self, verbose: bool | None = True) -> SyftSuccess: """Sync the latest project with the state sync leader""" leader_client = self.get_leader_client(self.user_signing_key) @@ -1091,9 +1063,6 @@ def sync(self, verbose: bool | None = True) -> 
SyftSuccess | SyftError: unsynced_events = leader_client.api.services.project.sync( project_id=self.id, seq_no=self.get_last_seq_no() ) - if isinstance(unsynced_events, SyftError): - return unsynced_events - # UI progress bar for syncing if verbose and unsynced_events: with Progress() as progress: @@ -1140,14 +1109,14 @@ def pending_requests(self) -> int: @serializable(without=["bootstrap_events", "clients"]) class ProjectSubmit(SyftObject): __canonical_name__ = "ProjectSubmit" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __hash_exclude_attrs__ = [ "start_hash", "users", "members", "clients", - "leader_node_route", + "leader_server_route", "bootstrap_events", ] @@ -1160,7 +1129,7 @@ class ProjectSubmit(SyftObject): # Init args name: str description: str | None = None - members: list[SyftClient] | list[NodeIdentity] + members: list[SyftClient] | list[ServerIdentity] # These will be automatically populated users: list[UserIdentity] = [] @@ -1170,8 +1139,8 @@ class ProjectSubmit(SyftObject): start_hash: str = "" # Project sync args - leader_node_route: NodeRoute | None = None - state_sync_leader: NodeIdentity | None = None + leader_server_route: ServerRoute | None = None + state_sync_leader: ServerIdentity | None = None bootstrap_events: list[ProjectEvent] | None = [] # Unused at the moment @@ -1182,10 +1151,10 @@ def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) # Preserve member SyftClients in a private variable clients - # self.members will be List[NodeIdentity] on the node i.e. self.clients = [] + # self.members will be List[ServerIdentity] on the server i.e. self.clients = [] self.clients = self.get_syft_clients(self.members) - # If ProjectSubmit is being re-created at node side + # If ProjectSubmit is being re-created at server side if len(self.clients) == 0: return @@ -1196,22 +1165,17 @@ def __init__(self, *args: Any, **kwargs: Any): self.users = [UserIdentity.from_client(client) for client in self.clients] # Assign logged in user name as project creator - if isinstance(self.clients[0].me, UserView): - self.username = self.clients[0].me.name + if isinstance(self.clients[0].account, UserView): + self.username = self.clients[0].account.name else: self.username = "" - # Convert SyftClients to NodeIdentities - self.members = list(map(self.to_node_identity, self.members)) + # Convert SyftClients to ServerIdentities + self.members = list(map(self.to_server_identity, self.members)) def _repr_html_(self) -> Any: return ( - f""" - - """ - + "
    " + "
    " + f"

{self.name} " + f" {self.description} " + f" Created by: {self.username} ({self.created_by})
    " @@ -1221,8 +1185,8 @@ def _repr_html_(self) -> Any: @field_validator("members", mode="before") @classmethod def verify_members( - cls, val: list[SyftClient] | list[NodeIdentity] - ) -> list[SyftClient] | list[NodeIdentity]: + cls, val: list[SyftClient] | list[ServerIdentity] + ) -> list[SyftClient] | list[ServerIdentity]: # SyftClients must be logged in by the same emails clients = cls.get_syft_clients(val) if len(clients) > 0: @@ -1235,25 +1199,25 @@ def verify_members( @staticmethod def get_syft_clients( - vals: list[SyftClient] | list[NodeIdentity], + vals: list[SyftClient] | list[ServerIdentity], ) -> list[SyftClient]: return [client for client in vals if isinstance(client, SyftClient)] @staticmethod - def to_node_identity(val: SyftClient | NodeIdentity) -> NodeIdentity: - if isinstance(val, NodeIdentity): + def to_server_identity(val: SyftClient | ServerIdentity) -> ServerIdentity: + if isinstance(val, ServerIdentity): return val elif isinstance(val, SyftClient) and val.metadata is not None: - metadata = val.metadata.to(NodeMetadataV3) - return metadata.to(NodeIdentity) + metadata = val.metadata.to(ServerMetadata) + return metadata.to(ServerIdentity) else: raise SyftException( - f"members must be SyftClient or NodeIdentity. Received: {type(val)}" + f"members must be SyftClient or ServerIdentity. Received: {type(val)}" ) def create_code_request( self, obj: SubmitUserCode, client: SyftClient, reason: str | None = None - ) -> SyftError | SyftSuccess: + ) -> SyftSuccess: return add_code_request_to_project( project=self, code=obj, @@ -1261,41 +1225,43 @@ def create_code_request( reason=reason, ) + @deprecated( + reason="Project.start has been renamed to Project.send", return_syfterror=True + ) def start(self, return_all_projects: bool = False) -> Project | list[Project]: + return self.send(return_all_projects=return_all_projects) + + def send(self, return_all_projects: bool = False) -> Project | list[Project]: # Currently we are assuming that the first member is the leader # This would be changed in our future leaderless approach leader = self.clients[0] followers = self.clients[1:] - try: - # Check for DS role across all members - self._pre_submit_checks(self.clients) + # Check for DS role across all members + self._pre_submit_checks(self.clients) - # Exchange route between leaders and followers - self._exchange_routes(leader, followers) + # Exchange route between leaders and followers + self._exchange_routes(leader, followers) - # create project for each node - projects_map = self._create_projects(self.clients) + # create project for each server + projects_map = self._create_projects(self.clients) - # bootstrap project with pending events on leader node's project - self._bootstrap_events(projects_map[leader]) + # bootstrap project with pending events on leader server's project + self._bootstrap_events(projects_map[leader.id]) # type: ignore - if return_all_projects: - return list(projects_map.values()) + if return_all_projects: + return list(projects_map.values()) - return projects_map[leader] - except SyftException as exp: - return SyftError(message=str(exp)) + return projects_map[leader.id] # type: ignore def _pre_submit_checks(self, clients: list[SyftClient]) -> bool: try: - # Check if the user can create projects for client in clients: - result = client.api.services.project.can_create_project() - if isinstance(result, SyftError): - raise SyftException(result.message) + client.api.services.project.can_create_project() except Exception: - raise SyftException("Only Data Scientists 
can create projects") + raise SyftException( + public_message="Only Data Scientists can create projects" + ) return True @@ -1303,24 +1269,20 @@ def _exchange_routes(self, leader: SyftClient, followers: list[SyftClient]) -> N # Since we are implementing a leader based system # To be able to optimize exchanging routes. # We require only the leader to exchange routes with all the members - # Meaning if we could guarantee, that the leader node is able to reach the members + # Meaning if we could guarantee, that the leader server is able to reach the members # the project events could be broadcasted to all the members for follower in followers: - result = leader.exchange_route(follower) - if isinstance(result, SyftError): - raise SyftException(result.message) + leader.exchange_route(follower) - self.leader_node_route = connection_to_route(leader.connection) + self.leader_server_route = connection_to_route(leader.connection) - def _create_projects(self, clients: list[SyftClient]) -> dict[SyftClient, Project]: - projects: dict[SyftClient, Project] = {} + def _create_projects(self, clients: list[SyftClient]) -> dict[UID, Project]: + projects: dict[UID, Project] = {} for client in clients: - result = client.api.services.project.create_project(project=self) - if isinstance(result, SyftError): - raise SyftException(result.message) - projects[client] = result + result = client.api.services.project.create_project(project=self).value + projects[client.id] = result # type: ignore return projects @@ -1330,9 +1292,7 @@ def _bootstrap_events(self, leader_project: Project) -> None: while len(self.bootstrap_events) > 0: event = self.bootstrap_events.pop(0) - result = leader_project.add_event(event) - if isinstance(result, SyftError): - raise SyftException(result.message) + leader_project.add_event(event) def add_members_as_owners(members: list[SyftVerifyKey]) -> set[str]: @@ -1357,7 +1317,7 @@ def check_permissions(context: TransformContext) -> TransformContext: return context if len(context.output["members"]) > 1: - # more than 1 node + # more than 1 server pass # check at least one owner if len(context.output["project_permissions"]) == 0: diff --git a/packages/syft/src/syft/service/project/project_service.py b/packages/syft/src/syft/service/project/project_service.py index bced9e64f2b..2df2fd42e1c 100644 --- a/packages/syft/src/syft/service/project/project_service.py +++ b/packages/syft/src/syft/service/project/project_service.py @@ -1,25 +1,26 @@ # stdlib -from typing import cast + +# stdlib # relative -from ...abstract_node import AbstractNode from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException from ...store.linked_obj import LinkedObject +from ...types.errors import SyftException +from ...types.result import as_result from ...types.uid import UID -from ...util.telemetry import instrument from ..context import AuthedServiceContext -from ..notification.notification_service import NotificationService from ..notification.notifications import CreateNotification from ..response import SyftError -from ..response import SyftNotReady from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES from ..service import TYPE_TO_SERVICE from ..service import service_method +from ..user.user_roles import DATA_SCIENTIST_ROLE_LEVEL from ..user.user_roles import 
GUEST_ROLE_LEVEL -from ..user.user_roles import ONLY_DATA_SCIENTIST_ROLE_LEVEL from ..user.user_roles import ServiceRole from .project import Project from .project import ProjectEvent @@ -29,231 +30,234 @@ from .project_stash import ProjectStash -@instrument -@serializable() +@serializable(canonical_name="ProjectService", version=1) class ProjectService(AbstractService): - store: DocumentStore stash: ProjectStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = ProjectStash(store=store) + @as_result(SyftException) + def validate_project_leader( + self, context: AuthedServiceContext, project: Project + ) -> None: + if project.state_sync_leader.verify_key != context.server.verify_key: + error_msg = "Only the project leader can do this operation" + raise SyftException(public_message=error_msg) + + @as_result(SyftException) + def validate_user_permission_for_project( + self, context: AuthedServiceContext, project: Project + ) -> None: + if not project.has_permission(context.credentials): + error_msg = "User does not have permission to sync events" + raise SyftException(public_message=error_msg) + + @as_result(StashException) + def project_exists( + self, context: AuthedServiceContext, project: ProjectSubmit + ) -> bool: + credentials = context.server.verify_key + try: + self.stash.get_by_uid(credentials=credentials, uid=project.id).unwrap() + return True + except NotFoundException: + return False + + @as_result(SyftException) + def validate_project_event_seq( + self, project_event: ProjectEvent, project: Project + ) -> None: + if project_event.seq_no is None: + raise SyftException(public_message=f"{project_event}.seq_no is None") + if project_event.seq_no <= len(project.events) and len(project.events) > 0: + # TODO: We need a way to handle alert returns... + # e.g. here used to be: + # SyftNotReady(message="Project out of sync event") + raise SyftException(public_message="Project events are out of sync") + if project_event.seq_no > len(project.events) + 1: + raise SyftException(public_message="Project events are out of order") + + def is_project_leader( + self, context: AuthedServiceContext, project: Project + ) -> bool: + return context.credentials == project.state_sync_leader.verify_key + @service_method( path="project.can_create_project", name="can_create_project", - roles=ONLY_DATA_SCIENTIST_ROLE_LEVEL, + roles=DATA_SCIENTIST_ROLE_LEVEL, ) - def can_create_project(self, context: AuthedServiceContext) -> bool | SyftError: - context.node = cast(AbstractNode, context.node) - user_service = context.node.get_service("userservice") - role = user_service.get_role_for_credentials(credentials=context.credentials) - if role == ServiceRole.DATA_SCIENTIST: + def can_create_project(self, context: AuthedServiceContext) -> bool: + role = context.server.services.user.get_role_for_credentials( + credentials=context.credentials + ).unwrap() + + if role >= ServiceRole.DATA_SCIENTIST: return True - return SyftError(message="User cannot create projects") + + raise SyftException( + public_message="You do not have permission to create projects. Contact your admin." 
+ ) @service_method( path="project.create_project", name="create_project", - roles=ONLY_DATA_SCIENTIST_ROLE_LEVEL, + roles=DATA_SCIENTIST_ROLE_LEVEL, + unwrap_on_success=False, ) def create_project( self, context: AuthedServiceContext, project: ProjectSubmit - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: """Start a Project""" - - check_role = self.can_create_project(context) - if isinstance(check_role, SyftError): - return check_role - - context.node = cast(AbstractNode, context.node) - - try: - # Check if the project with given id already exists - project_id_check = self.stash.get_by_uid( - credentials=context.node.verify_key, uid=project.id - ) - - if project_id_check.is_err(): - return SyftError(message=f"{project_id_check.err()}") - - if project_id_check.ok() is not None: - return SyftError( - message=f"Project with id: {project.id} already exists." - ) - - project_obj: Project = project.to(Project, context=context) - - # Updating the leader node route of the project object - # In case the current node, is the leader, they would input their node route - # For the followers, they would check if the leader is their node peer - # using the leader's verify_key - # If the follower do not have the leader as its peer in its routes - # They would raise as error - leader_node = project_obj.state_sync_leader - - # If the current node is a follower - # For followers the leader node route is retrieved from its peer - if leader_node.verify_key != context.node.verify_key: - network_service = context.node.get_service("networkservice") - peer = network_service.stash.get_for_verify_key( - credentials=context.node.verify_key, - verify_key=leader_node.verify_key, - ) - if peer.is_err(): - this_node_id = context.node.id.short() if context.node.id else "" - return SyftError( - message=( - f"Leader Node(id={leader_node.id.short()}) is not a " - f"peer of this Node(id={this_node_id})" - ) + self.can_create_project(context) + + project_exists = self.project_exists(context, project).unwrap() + if project_exists: + raise SyftException(public_message=f"Project {project.id} already exists") + + _project: Project = project.to(Project, context=context) + + # Updating the leader server route of the project object + # In case the current server, is the leader, they would input their server route + # For the followers, they would check if the leader is their server peer + # using the leader's verify_key + # If the follower do not have the leader as its peer in its routes + # They would raise as error + leader_server = _project.state_sync_leader + + # If the current server is a follower + # For followers the leader server route is retrieved from its peer + if leader_server.verify_key != context.server.verify_key: + # FIX: networkservice stash to new BaseStash + peer_id = context.server.id.short() if context.server.id else "" + leader_server_peer = ( + context.server.services.network.stash.get_by_verify_key( + credentials=context.server.verify_key, + verify_key=leader_server.verify_key, + ).unwrap( + public_message=( + f"Leader Server(id={leader_server.id.short()}) is not a " + f"peer of this Server(id={peer_id})" ) - leader_node_peer = peer.ok() + ) + ) + else: + # for the leader server, as it does not have route information to itself + # we rely on the data scientist to provide the route + # the route is then validated by the leader + if project.leader_server_route is not None: + leader_server_peer = project.leader_server_route.validate_with_context( + context=context + ).unwrap() else: - # for the leader node, 
as it does not have route information to itself - # we rely on the data scientist to provide the route - # the route is then validated by the leader - if project.leader_node_route is not None: - leader_node_peer = project.leader_node_route.validate_with_context( - context=context - ) - else: - return SyftError( - message=f"project {project}'s leader_node_route is None" - ) - - project_obj.leader_node_peer = leader_node_peer - - # This should always be the last call before flushing to DB - project_obj.start_hash = create_project_hash(project_obj)[1] - - result = self.stash.set(context.credentials, project_obj) - if result.is_err(): - return SyftError(message=str(result.err())) + raise SyftException( + public_message=f"project {project}'s leader_server_route is None" + ) - project_obj_store = result.ok() - project_obj_store = self.add_signing_key_to_project( - context, project_obj_store - ) + _project.leader_server_peer = leader_server_peer + # This should always be the last call before flushing to DB + _project.start_hash = create_project_hash(_project)[1] - return project_obj_store + stored_project: Project = self.stash.set(context.credentials, _project).unwrap() + stored_project = self.add_signing_key_to_project(context, stored_project) - except Exception as e: - print("Failed to submit Project", e) - raise e + return SyftSuccess(message="Project successfully created", value=stored_project) @service_method( path="project.add_event", name="add_event", roles=GUEST_ROLE_LEVEL, + unwrap_on_success=False, ) def add_event( self, context: AuthedServiceContext, project_event: ProjectEvent - ) -> SyftSuccess | SyftError: - """To add events to a projects""" - context.node = cast(AbstractNode, context.node) + ) -> SyftSuccess: + """Add events to a project""" # Event object should be received from the leader of the project + if not isinstance(project_event, ProjectEvent): + raise SyftException( + public_message="project_event should be a ProjectEvent object" + ) + + project = self.stash.get_by_uid( + context.server.verify_key, uid=project_event.project_id + ).unwrap() - # retrieve the project object by node verify key - project_obj = self.stash.get_by_uid( - context.node.verify_key, uid=project_event.project_id + # FIX: MERGE: Rename function below + self.validate_project_leader(context, project).unwrap( + public_message="Project Events should be passed to leader by broadcast endpoint" ) - if project_obj.is_err(): - return SyftError(message=str(project_obj.err())) - project: Project = project_obj.ok() - if project.state_sync_leader.verify_key == context.node.verify_key: - return SyftError( - message="Project Events should be passed to leader by broadcast endpoint" + if not self.is_project_leader(context, project): + raise SyftException( + public_message="Only the leader of the project can add events" ) - if context.credentials != project.state_sync_leader.verify_key: - return SyftError(message="Only the leader of the project can add events") project.events.append(project_event) project.event_id_hashmap[project_event.id] = project_event - message_result = self.check_for_project_request(project, project_event, context) - if isinstance(message_result, SyftError): - return message_result + # TODO: better name for the function should be check_and_notify or something? 
+ self.check_for_project_request(project, project_event, context) - # updating the project object using root verify key of node - result = self.stash.update(context.node.verify_key, project) + updated_project = self.stash.update(context.server.verify_key, project).unwrap() - if result.is_err(): - return SyftError(message=str(result.err())) return SyftSuccess( - message=f"Project event {project_event.id} added successfully " + message=f"Project event {project_event.id} added successfully", + value=updated_project, ) @service_method( path="project.broadcast_event", name="broadcast_event", roles=GUEST_ROLE_LEVEL, + unwrap_on_success=False, ) def broadcast_event( self, context: AuthedServiceContext, project_event: ProjectEvent - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: """To add events to a projects""" # Only the leader of the project could add events to the projects # Any Event to be added to the project should be sent to the leader of the project # The leader broadcasts the event to all the members of the project - context.node = cast(AbstractNode, context.node) - project_obj = self.stash.get_by_uid( - context.node.verify_key, uid=project_event.project_id - ) - - if project_obj.is_err(): - return SyftError(message=str(project_obj.err())) - - project = project_obj.ok() - if not project.has_permission(context.credentials): - return SyftError(message="User does not have permission to add events") - if project.state_sync_leader.verify_key != context.node.verify_key: - return SyftError( - message="Only the leader of the project can broadcast events" - ) + project = self.stash.get_by_uid( + context.server.verify_key, uid=project_event.project_id + ).unwrap() - if project_event.seq_no is None: - return SyftError(message=f"{project_event}.seq_no is None") - if project_event.seq_no <= len(project.events) and len(project.events) > 0: - return SyftNotReady(message="Project out of sync event") - if project_event.seq_no > len(project.events) + 1: - return SyftError(message="Project event out of order!") + self.validate_project_leader(context, project).unwrap( + public_message="Only the leader of the project can broadcast events" + ) + self.validate_user_permission_for_project(context, project) + self.validate_project_event_seq(project_event, project).unwrap() project.events.append(project_event) project.event_id_hashmap[project_event.id] = project_event - message_result = self.check_for_project_request(project, project_event, context) - if isinstance(message_result, SyftError): - return message_result + self.check_for_project_request(project, project_event, context) # Broadcast the event to all the members of the project - network_service = context.node.get_service("networkservice") for member in project.members: - if member.verify_key != context.node.verify_key: - # Retrieving the NodePeer Object to communicate with the node - peer = network_service.stash.get_for_verify_key( - credentials=context.node.verify_key, + if member.verify_key != context.server.verify_key: + # Retrieving the ServerPeer Object to communicate with the server + peer = context.server.services.network.stash.get_by_verify_key( + credentials=context.server.verify_key, verify_key=member.verify_key, + ).unwrap( + public_message=f"Leader server does not have peer {member.name}-{member.id.short()}" + + ". Please exchange routes with the peer." ) + remote_client = peer.client_with_context(context=context).unwrap( + public_message=f"Failed to create remote client for peer: {peer.id}." 
+ ) + remote_client.api.services.project.add_event(project_event) - if peer.is_err(): - return SyftError( - message=f"Leader node does not have peer {member.name}-{member.id.short()}" - + " Kindly exchange routes with the peer" - ) - peer = peer.ok() - client = peer.client_with_context(context) - event_result = client.api.services.project.add_event(project_event) - if isinstance(event_result, SyftError): - return event_result - - result = self.stash.update(context.node.verify_key, project) + updated_project = self.stash.update(context.server.verify_key, project).unwrap() - if result.is_err(): - return SyftError(message=str(result.err())) - return SyftSuccess(message="Successfully Broadcasted Event") + return SyftSuccess( + message=f"Event #{project_event.seq_no} of {project.name} broadcasted successfully", + value=updated_project, + ) @service_method( path="project.sync", @@ -262,46 +266,29 @@ def broadcast_event( ) def sync( self, context: AuthedServiceContext, project_id: UID, seq_no: int - ) -> list[ProjectEvent] | SyftError: - """To fetch unsynced events from the project""" - context.node = cast(AbstractNode, context.node) - # Event object should be received from the leader of the project - - # retrieve the project object by node verify key - project_obj = self.stash.get_by_uid(context.node.verify_key, uid=project_id) - if project_obj.is_err(): - return SyftError(message=str(project_obj.err())) - - project: Project = project_obj.ok() - if project.state_sync_leader.verify_key != context.node.verify_key: - return SyftError( - message="Project Events should be synced only with the leader" + ) -> list[ProjectEvent]: + """Given a starting event seq_no, gets all following events from a project""" + if seq_no < 0: + raise SyftException( + public_message="Input seq_no should be a non negative integer" ) - if not project.has_permission(context.credentials): - return SyftError(message="User does not have permission to sync events") + # Event object should be received from the leader of the project + project = self.stash.get_by_uid( + context.server.verify_key, uid=project_id + ).unwrap() - if seq_no < 0: - return SyftError(message="Input seq_no should be a non negative integer") + self.validate_project_leader(context, project) + self.validate_user_permission_for_project(context, project) - # retrieving unsycned events based on seq_no return project.events[seq_no:] @service_method(path="project.get_all", name="get_all", roles=GUEST_ROLE_LEVEL) - def get_all(self, context: AuthedServiceContext) -> list[Project] | SyftError: - result = self.stash.get_all( - context.credentials, - ) - if result.is_err(): - return SyftError(message=str(result.err())) - - projects = result.ok() + def get_all(self, context: AuthedServiceContext) -> list[Project]: + projects: list[Project] = self.stash.get_all(context.credentials).unwrap() for idx, project in enumerate(projects): - result = self.add_signing_key_to_project(context, project) - if isinstance(result, SyftError): - return result - projects[idx] = result + projects[idx] = self.add_signing_key_to_project(context, project) return projects @@ -310,90 +297,93 @@ def get_all(self, context: AuthedServiceContext) -> list[Project] | SyftError: name="get_by_name", roles=GUEST_ROLE_LEVEL, ) - def get_by_name( - self, context: AuthedServiceContext, name: str - ) -> Project | SyftError: - result = self.stash.get_by_name(context.credentials, project_name=name) - if result.is_err(): - return SyftError(message=str(result.err())) - elif result.ok(): - project = result.ok() - 
return self.add_signing_key_to_project(context, project) - return SyftError(message=f'Project(name="{name}") does not exist') + def get_by_name(self, context: AuthedServiceContext, name: str) -> Project: + try: + project = self.stash.get_by_name( + context.credentials, project_name=name + ).unwrap() + except NotFoundException as exc: + raise NotFoundException.from_exception( + exc, public_message="Project '{name}' does not exist" + ) + + return self.add_signing_key_to_project(context, project) @service_method( path="project.get_by_uid", name="get_by_uid", roles=GUEST_ROLE_LEVEL, ) - def get_by_uid( - self, context: AuthedServiceContext, uid: UID - ) -> Project | SyftError: - context.node = cast(AbstractNode, context.node) - result = self.stash.get_by_uid( - credentials=context.node.verify_key, - uid=uid, - ) - if result.is_err(): - return SyftError(message=str(result.err())) - elif result.ok(): - return result.ok() - return SyftError(message=f'Project(id="{uid}") does not exist') + def get_by_uid(self, context: AuthedServiceContext, uid: UID) -> Project: + try: + credentials = context.server.verify_key + return self.stash.get_by_uid(credentials=credentials, uid=uid).unwrap() + except NotFoundException as exc: + raise NotFoundException.from_exception( + exc, public_message=f"Project {uid} not found" + ) + + as_result(StashException, NotFoundException) def add_signing_key_to_project( self, context: AuthedServiceContext, project: Project - ) -> Project | SyftError: + ) -> Project: + try: + user = context.server.services.user.stash.get_by_verify_key( + credentials=context.credentials, verify_key=context.credentials + ).unwrap() + except NotFoundException as exc: + raise NotFoundException.from_exception( + exc, public_message="User not found! Please register the user first" + ) # Automatically infuse signing key of user - # requesting get_all() or creating the project object - context.node = cast(AbstractNode, context.node) - user_service = context.node.get_service("userservice") - user = user_service.stash.get_by_verify_key( - credentials=context.credentials, verify_key=context.credentials - ) - if user.is_err(): - return SyftError(message=str(user.err())) - - user = user.ok() - if not user: - return SyftError(message="User not found! Kindly register user first") - project.user_signing_key = user.signing_key return project + # TODO: Glob Notification error here + @as_result(SyftException) def check_for_project_request( self, project: Project, project_event: ProjectEvent, context: AuthedServiceContext, - ) -> SyftSuccess | SyftError: - """To check for project request event and create a message for the root user + ) -> None: + # TODO: Should we really raise an exception if notification fails to be sent? + # Maybe logging and moving on is better? + """ + Checks if there are any ProjectEvent requests and messages the admin + in case there is one. + + This method raises an exception if the notification fails to send. 
Args: project (Project): Project object project_event (ProjectEvent): Project event object - context (AuthedServiceContext): Context of the node + context (AuthedServiceContext): Context of the server Returns: - Union[SyftSuccess, SyftError]: SyftSuccess if message is created else SyftError + SyftSuccess: SyftSuccess if message is created else SyftError """ - context.node = cast(AbstractNode, context.node) if ( isinstance(project_event, ProjectRequest) - and project_event.linked_request.node_uid == context.node.id + and project_event.linked_request.server_uid == context.server.id ): link = LinkedObject.with_context(project, context=context) + message = CreateNotification( - subject=f"A new request has been added to the Project: {project.name}.", + subject=f"A new request has been added to the project {project.name}", from_user_verify_key=context.credentials, - to_user_verify_key=context.node.verify_key, + to_user_verify_key=context.server.verify_key, linked_obj=link, ) - method = context.node.get_service_method(NotificationService.send) - result = method(context=context, notification=message) + + # TODO: Update noteificationservice result + result = context.server.services.notification.send( + context=context, notification=message + ) if isinstance(result, SyftError): - return result - return SyftSuccess(message="Successfully Validated Project Request") + raise SyftException(public_message=result) TYPE_TO_SERVICE[Project] = ProjectService diff --git a/packages/syft/src/syft/service/project/project_stash.py b/packages/syft/src/syft/service/project/project_stash.py index 0866db4b252..13dab37bdea 100644 --- a/packages/syft/src/syft/service/project/project_stash.py +++ b/packages/syft/src/syft/service/project/project_stash.py @@ -1,53 +1,31 @@ # stdlib # third party -from result import Result # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ...store.document_store import UIDPartitionKey -from ...types.uid import UID -from ...util.telemetry import instrument -from ..request.request import Request -from ..response import SyftError +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.result import as_result from .project import Project -VerifyKeyPartitionKey = PartitionKey(key="user_verify_key", type_=SyftVerifyKey) -NamePartitionKey = PartitionKey(key="name", type_=str) - - -@instrument -@serializable() -class ProjectStash(BaseUIDStoreStash): - object_type = Project - settings: PartitionSettings = PartitionSettings( - name=Project.__canonical_name__, object_type=Project - ) +@serializable(canonical_name="ProjectSQLStash", version=1) +class ProjectStash(ObjectStash[Project]): + @as_result(StashException) def get_all_for_verify_key( - self, credentials: SyftVerifyKey, verify_key: VerifyKeyPartitionKey - ) -> Result[list[Request], SyftError]: - if isinstance(verify_key, str): - verify_key = SyftVerifyKey.from_string(verify_key) - qks = QueryKeys(qks=[VerifyKeyPartitionKey.with_obj(verify_key)]) - return self.query_all( + self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey + ) -> list[Project]: + return self.get_all( credentials=credentials, - qks=qks, - ) 
+ filters={"user_verify_key": verify_key}, + ).unwrap() - def get_by_uid( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[Project | None, str]: - qks = QueryKeys(qks=[UIDPartitionKey.with_obj(uid)]) - return self.query_one(credentials=credentials, qks=qks) - - def get_by_name( - self, credentials: SyftVerifyKey, project_name: str - ) -> Result[Project | None, str]: - qks = QueryKeys(qks=[NamePartitionKey.with_obj(project_name)]) - return self.query_one(credentials=credentials, qks=qks) + @as_result(StashException, NotFoundException) + def get_by_name(self, credentials: SyftVerifyKey, project_name: str) -> Project: + return self.get_one( + credentials=credentials, + filters={"name": project_name}, + ).unwrap() diff --git a/packages/syft/src/syft/service/queue/base_queue.py b/packages/syft/src/syft/service/queue/base_queue.py index 415c1b110d5..edfc1eea461 100644 --- a/packages/syft/src/syft/service/queue/base_queue.py +++ b/packages/syft/src/syft/service/queue/base_queue.py @@ -1,23 +1,26 @@ # stdlib from typing import Any from typing import ClassVar +from typing import TYPE_CHECKING # relative from ...serde.serializable import serializable from ...service.context import AuthedServiceContext -from ...store.document_store import BaseStash from ...types.uid import UID -from ..response import SyftError from ..response import SyftSuccess from ..worker.worker_stash import WorkerStash +if TYPE_CHECKING: + # relative + from .queue_stash import QueueStash -@serializable() + +@serializable(canonical_name="QueueClientConfig", version=1) class QueueClientConfig: pass -@serializable() +@serializable(canonical_name="AbstractMessageHandler", version=1) class AbstractMessageHandler: queue_name: ClassVar[str] @@ -26,7 +29,11 @@ def handle_message(message: bytes, syft_worker_id: UID) -> None: raise NotImplementedError -@serializable(attrs=["message_handler", "queue_name", "address"]) +@serializable( + attrs=["message_handler", "queue_name", "address"], + canonical_name="QueueConsumer", + version=1, +) class QueueConsumer: message_handler: AbstractMessageHandler queue_name: str @@ -42,7 +49,7 @@ def close(self) -> None: raise NotImplementedError -@serializable() +@serializable(canonical_name="QueueProducer", version=1) class QueueProducer: queue_name: str @@ -61,13 +68,13 @@ def close(self) -> None: raise NotImplementedError -@serializable() +@serializable(canonical_name="QueueClient", version=1) class QueueClient: def __init__(self, config: QueueClientConfig) -> None: raise NotImplementedError -@serializable() +@serializable(canonical_name="QueueConfig", version=1) class QueueConfig: """Base Queue configuration""" @@ -75,7 +82,7 @@ class QueueConfig: client_config: QueueClientConfig -@serializable() +@serializable(canonical_name="BaseQueueManager", version=1) class BaseQueueManager: config: QueueConfig @@ -86,7 +93,7 @@ def __init__(self, config: QueueConfig): def post_init(self) -> None: pass - def close(self) -> SyftError | SyftSuccess: + def close(self) -> SyftSuccess: raise NotImplementedError def create_consumer( @@ -102,13 +109,13 @@ def create_consumer( def create_producer( self, queue_name: str, - queue_stash: type[BaseStash], + queue_stash: "QueueStash", context: AuthedServiceContext, worker_stash: WorkerStash, ) -> QueueProducer: raise NotImplementedError - def send(self, message: bytes, queue_name: str) -> SyftSuccess | SyftError: + def send(self, message: bytes, queue_name: str) -> SyftSuccess: raise NotImplementedError @property diff --git 
a/packages/syft/src/syft/service/queue/queue.py b/packages/syft/src/syft/service/queue/queue.py index 8cccc3cb579..31af7d11060 100644 --- a/packages/syft/src/syft/service/queue/queue.py +++ b/packages/syft/src/syft/service/queue/queue.py @@ -1,27 +1,33 @@ # stdlib +from enum import Enum +import logging +from multiprocessing import Process import threading +from threading import Thread import time from typing import Any +from typing import TYPE_CHECKING from typing import cast # third party import psutil -from result import Err -from result import Ok -from result import Result # relative -from ...node.credentials import SyftVerifyKey -from ...node.worker_settings import WorkerSettings from ...serde.deserialize import _deserialize as deserialize from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey +from ...server.worker_settings import WorkerSettings from ...service.context import AuthedServiceContext -from ...store.document_store import BaseStash +from ...store.linked_obj import LinkedObject from ...types.datetime import DateTime +from ...types.errors import SyftException from ...types.uid import UID from ..job.job_stash import Job -from ..job.job_stash import JobStash from ..job.job_stash import JobStatus +from ..notification.email_templates import FailedJobTemplate +from ..notification.notification_service import CreateNotification +from ..notifier.notifier_enums import NOTIFIERS +from ..queue.queue_service import QueueService from ..response import SyftError from ..response import SyftSuccess from ..worker.worker_stash import WorkerStash @@ -33,12 +39,25 @@ from .queue_stash import QueueItem from .queue_stash import Status +if TYPE_CHECKING: + # relative + from .queue_stash import QueueStash + +logger = logging.getLogger(__name__) + + +@serializable(canonical_name="WorkerType", version=1) +class ConsumerType(str, Enum): + Thread = "thread" + Process = "process" + Synchronous = "synchronous" + class MonitorThread(threading.Thread): def __init__( self, queue_item: QueueItem, - worker: Any, # should be of type Worker(Node), but get circular import error + worker: Any, # should be of type Worker(Server), but get circular import error credentials: SyftVerifyKey, interval: int = 5, ) -> None: @@ -58,23 +77,31 @@ def monitor(self) -> None: # Implement the monitoring logic here job = self.worker.job_stash.get_by_uid( self.credentials, self.queue_item.job_id - ).ok() - if job is None or job.status != JobStatus.INTERRUPTED: - return - else: - job.resolved = True + ).unwrap() + if job and job.status == JobStatus.TERMINATING: + self.terminate(job) + for subjob in job.subjobs: + self.terminate(subjob) + self.queue_item.status = Status.INTERRUPTED self.queue_item.resolved = True self.worker.queue_stash.set_result(self.credentials, self.queue_item) - self.worker.job_stash.set_result(self.credentials, job) - process = psutil.Process(job.job_pid) - process.terminate() + # How about subjobs of subjobs? 
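    # Illustrative sketch (not part of this change), one possible answer to the question
    # above: walk the job tree recursively so that subjobs of subjobs are terminated too.
    # It reuses the `terminate` helper defined below and the `subjobs` attribute already
    # used in monitor(); the method name `terminate_tree` is hypothetical.
    def terminate_tree(self, job: Job) -> None:
        self.terminate(job)
        for subjob in job.subjobs:
            self.terminate_tree(subjob)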
def stop(self) -> None: self.stop_requested.set() + def terminate(self, job: Job) -> None: + job.resolved = True + job.status = JobStatus.INTERRUPTED + self.worker.job_stash.set_result(self.credentials, job) + try: + psutil.Process(job.job_pid).terminate() + except psutil.Error as e: + logger.warning(f"Failed to terminate job {job.id}: {e}") -@serializable() + +@serializable(canonical_name="QueueManager", version=1) class QueueManager(BaseQueueManager): config: QueueConfig @@ -82,7 +109,7 @@ def post_init(self) -> None: self.client_config = self.config.client_config self._client = self.config.client_type(self.client_config) - def close(self) -> SyftError | SyftSuccess: + def close(self) -> SyftSuccess: return self._client.close() def create_consumer( @@ -106,7 +133,7 @@ def create_consumer( def create_producer( self, queue_name: str, - queue_stash: type[BaseStash], + queue_stash: "QueueStash", context: AuthedServiceContext, worker_stash: WorkerStash, ) -> QueueProducer: @@ -121,7 +148,7 @@ def send( self, message: bytes, queue_name: str, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: return self._client.send_message( message=message, queue_name=queue_name, @@ -150,21 +177,24 @@ def handle_message_multiprocessing( queue_config.client_config.n_consumers = 0 # relative - from ...node.node import Node + from ...server.server import Server - worker = Node( + worker = Server( id=worker_settings.id, name=worker_settings.name, signing_key=worker_settings.signing_key, - document_store_config=worker_settings.document_store_config, - action_store_config=worker_settings.action_store_config, + db_config=worker_settings.db_config, blob_storage_config=worker_settings.blob_store_config, + server_side_type=worker_settings.server_side_type, queue_config=queue_config, is_subprocess=True, migrate=False, + deployment_type=worker_settings.deployment_type, ) - job_item = worker.job_stash.get_by_uid(credentials, queue_item.job_id).ok() + # otherwise it reads it from env, resulting in the wrong credentials + worker.id = worker_settings.id + worker.signing_key = worker_settings.signing_key # Set monitor thread for this job. 
monitor_thread = MonitorThread(queue_item, worker, credentials) @@ -173,13 +203,14 @@ def handle_message_multiprocessing( if queue_item.service == "user": queue_item.service = "userservice" - try: - call_method = getattr(worker.get_service(queue_item.service), queue_item.method) + # in case of error + result = None + try: role = worker.get_role_for_credentials(credentials=credentials) context = AuthedServiceContext( - node=worker, + server=worker, credentials=credentials, role=role, job_id=queue_item.job_id, @@ -187,104 +218,94 @@ def handle_message_multiprocessing( ) # relative - from ...node.node import AuthNodeContextRegistry + from ...server.server import AuthServerContextRegistry - AuthNodeContextRegistry.set_node_context( - node_uid=worker.id, + AuthServerContextRegistry.set_server_context( + server_uid=worker.id, context=context, user_verify_key=credentials, ) - result: Any = call_method(context, *queue_item.args, **queue_item.kwargs) + call_method = getattr(worker.get_service(queue_item.service), queue_item.method) + result = call_method(context, *queue_item.args, **queue_item.kwargs) + status = Status.COMPLETED + job_status = JobStatus.COMPLETED + except Exception as e: + root_context = AuthedServiceContext( + server=context.server, + credentials=worker.signing_key.verify_key, # type: ignore + ) + link = LinkedObject.with_context( + queue_item, context=root_context, service_type=QueueService + ) + message = CreateNotification( + subject=f"Job {queue_item.job_id} failed!", + from_user_verify_key=worker.signing_key.verify_key, # type: ignore + to_user_verify_key=credentials, + linked_obj=link, + notifier_types=[NOTIFIERS.EMAIL], + email_template=FailedJobTemplate, + ) + method = worker.services.notification.send + result = method(context=root_context, notification=message) - if isinstance(result, Ok): - status = Status.COMPLETED - job_status = JobStatus.COMPLETED - result = result.ok() - elif isinstance(result, SyftError) or isinstance(result, Err): - status = Status.ERRORED - job_status = JobStatus.ERRORED - except Exception as e: # nosec status = Status.ERRORED job_status = JobStatus.ERRORED - # stdlib - - raise e - # result = SyftError( - # message=f"Failed with exception: {e}, {traceback.format_exc()}" - # ) - # print("HAD AN ERROR WHILE HANDLING MESSAGE", result.message) + logger.exception("Unhandled error in handle_message_multiprocessing") + error_msg = e.public_message if isinstance(e, SyftException) else str(e) + result = SyftError(message=error_msg) queue_item.result = result queue_item.resolved = True queue_item.status = status # get new job item to get latest iter status - job_item = worker.job_stash.get_by_uid(credentials, job_item.id).ok() - - # if result.is_ok(): + job_item = worker.job_stash.get_by_uid(credentials, queue_item.job_id).unwrap( + public_message=f"Job {queue_item.job_id} not found!" 
+ ) - job_item.node_uid = worker.id + job_item.server_uid = worker.id # type: ignore[assignment] job_item.result = result job_item.resolved = True job_item.status = job_status - worker.queue_stash.set_result(credentials, queue_item) - worker.job_stash.set_result(credentials, job_item) + worker.queue_stash.set_result(credentials, queue_item).unwrap( + public_message="Failed to set result into QueueItem after running" + ) + worker.job_stash.set_result(credentials, job_item).unwrap( + public_message="Failed to set job after running" + ) # Finish monitor thread monitor_thread.stop() -def evaluate_can_run_job( - job_id: UID, job_stash: JobStash, credentials: SyftVerifyKey -) -> Result[Job, str]: - """Evaluate if a Job can be executed by the user. - - A Job cannot be executed if any of the following are met: - - User doesn't have permission to the job. - - Job is either marked Completed or result is available. - - Job is Cancelled or Interrupted. - """ - res = job_stash.get_by_uid(credentials, job_id) - - # User doesn't have access to job - if res.is_err(): - return res - - job_item = res.ok() - - if job_item.status == JobStatus.COMPLETED or job_item.resolved: - return Err(f"Job: {job_id} already Completed.") - elif job_item.status == JobStatus.INTERRUPTED: - return Err(f"Job interrupted. Job Id: {job_id}") - - return Ok(job_item) - - -@serializable() +@serializable(canonical_name="APICallMessageHandler", version=1) class APICallMessageHandler(AbstractMessageHandler): queue_name = "api_call" @staticmethod def handle_message(message: bytes, syft_worker_id: UID) -> None: # relative - from ...node.node import Node + from ...server.server import Server queue_item = deserialize(message, from_bytes=True) + queue_item = cast(QueueItem, queue_item) worker_settings = queue_item.worker_settings + if worker_settings is None: + raise ValueError("Worker settings are missing in the queue item.") queue_config = worker_settings.queue_config queue_config.client_config.create_producer = False queue_config.client_config.n_consumers = 0 - worker = Node( + worker = Server( id=worker_settings.id, name=worker_settings.name, signing_key=worker_settings.signing_key, - document_store_config=worker_settings.document_store_config, - action_store_config=worker_settings.action_store_config, - blob_storage_config=worker_settings.blob_store_config, + db_config=worker_settings.db_config, + server_side_type=worker_settings.server_side_type, + deployment_type=worker_settings.deployment_type, queue_config=queue_config, is_subprocess=True, migrate=False, @@ -295,57 +316,50 @@ def handle_message(message: bytes, syft_worker_id: UID) -> None: worker.signing_key = worker_settings.signing_key credentials = queue_item.syft_client_verify_key - - res = evaluate_can_run_job(queue_item.job_id, worker.job_stash, credentials) - if res.is_err(): - raise Exception(res.value) - job_item: Job = res.ok() + try: + job_item: Job = worker.job_stash.get_by_uid( + credentials, queue_item.job_id + ).unwrap() # type: ignore + except SyftException as exc: + logger.warning(exc._private_message or exc.public_message) + raise queue_item.status = Status.PROCESSING - queue_item.node_uid = worker.id + queue_item.server_uid = worker.id job_item.status = JobStatus.PROCESSING - job_item.node_uid = cast(UID, worker.id) + job_item.server_uid = worker.id # type: ignore[assignment] job_item.updated_at = DateTime.now() - # try: - # worker_name = os.getenv("DOCKER_WORKER_NAME", None) - # docker_worker = worker.worker_stash.get_worker_by_name( - # credentials, worker_name 
- # ).ok() - # job_item.job_worker_id = str(docker_worker.container_id) - # except Exception: - # job_item.job_worker_id = str(worker.id) if syft_worker_id is not None: job_item.job_worker_id = syft_worker_id - queue_result = worker.queue_stash.set_result(credentials, queue_item) - if isinstance(queue_result, SyftError): - raise Exception(f"{queue_result.err()}") + worker.queue_stash.set_result(credentials, queue_item).unwrap() + worker.job_stash.set_result(credentials, job_item).unwrap() - job_result = worker.job_stash.set_result(credentials, job_item) - if isinstance(job_result, SyftError): - raise Exception(f"{job_result.err()}") - - if queue_config.thread_workers: - # stdlib - from threading import Thread + logger.info( + f"Handling queue item: id={queue_item.id}, method={queue_item.method} " + f"args={queue_item.args}, kwargs={queue_item.kwargs} " + f"service={queue_item.service}, as={queue_config.consumer_type}" + ) + if queue_config.consumer_type == ConsumerType.Thread: thread = Thread( target=handle_message_multiprocessing, args=(worker_settings, queue_item, credentials), ) thread.start() thread.join() - else: - # stdlib - from multiprocessing import Process - + elif queue_config.consumer_type == ConsumerType.Process: + # if psutil.pid_exists(job_item.job_pid): + # psutil.Process(job_item.job_pid).terminate() process = Process( target=handle_message_multiprocessing, args=(worker_settings, queue_item, credentials), ) process.start() job_item.job_pid = process.pid - worker.job_stash.set_result(credentials, job_item) + worker.job_stash.set_result(credentials, job_item).unwrap() process.join() + elif queue_config.consumer_type == ConsumerType.Synchronous: + handle_message_multiprocessing(worker_settings, queue_item, credentials) diff --git a/packages/syft/src/syft/service/queue/queue_service.py b/packages/syft/src/syft/service/queue/queue_service.py index d1cf119076a..b98f344745d 100644 --- a/packages/syft/src/syft/service/queue/queue_service.py +++ b/packages/syft/src/syft/service/queue/queue_service.py @@ -2,38 +2,14 @@ # relative from ...serde.serializable import serializable -from ...store.document_store import DocumentStore -from ...types.uid import UID -from ...util.telemetry import instrument -from ..context import AuthedServiceContext -from ..response import SyftError +from ...store.db.db import DBManager from ..service import AbstractService -from ..service import service_method -from ..user.user_roles import DATA_SCIENTIST_ROLE_LEVEL -from .queue_stash import QueueItem from .queue_stash import QueueStash -@instrument -@serializable() +@serializable(canonical_name="QueueService", version=1) class QueueService(AbstractService): - store: DocumentStore stash: QueueStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = QueueStash(store=store) - - @service_method( - path="queue.get_subjobs", - name="get_subjobs", - roles=DATA_SCIENTIST_ROLE_LEVEL, - ) - def get_subjobs( - self, context: AuthedServiceContext, uid: UID - ) -> list[QueueItem] | SyftError: - res = self.stash.get_by_parent_id(context.credentials, uid=uid) - if res.is_err(): - return SyftError(message=res.err()) - else: - return res.ok() diff --git a/packages/syft/src/syft/service/queue/queue_stash.py b/packages/syft/src/syft/service/queue/queue_stash.py index 969c064c8bc..c43caa302be 100644 --- a/packages/syft/src/syft/service/queue/queue_stash.py +++ b/packages/syft/src/syft/service/queue/queue_stash.py @@ -1,33 +1,31 @@ # stdlib +from 
collections.abc import Callable from enum import Enum from typing import Any -# third party -from result import Ok -from result import Result - # relative -from ...node.credentials import SyftVerifyKey -from ...node.worker_settings import WorkerSettings from ...serde.serializable import serializable -from ...store.document_store import BaseStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ...store.document_store import UIDPartitionKey +from ...server.credentials import SyftVerifyKey +from ...server.worker_settings import WorkerSettings +from ...server.worker_settings import WorkerSettingsV1 +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException from ...store.linked_obj import LinkedObject -from ...types.syft_object import SYFT_OBJECT_VERSION_3 -from ...types.syft_object import SYFT_OBJECT_VERSION_4 +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_migration import migrate +from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ...types.syft_object import SYFT_OBJECT_VERSION_2 from ...types.syft_object import SyftObject +from ...types.transforms import TransformContext from ...types.uid import UID -from ...util.telemetry import instrument from ..action.action_permissions import ActionObjectPermission -from ..response import SyftError -from ..response import SyftSuccess +__all__ = ["QueueItem"] -@serializable() + +@serializable(canonical_name="Status", version=1) class Status(str, Enum): CREATED = "created" PROCESSING = "processing" @@ -36,18 +34,38 @@ class Status(str, Enum): INTERRUPTED = "interrupted" -StatusPartitionKey = PartitionKey(key="status", type_=Status) +@serializable() +class QueueItemV1(SyftObject): + __canonical_name__ = "QueueItem" + __version__ = SYFT_OBJECT_VERSION_1 + + __attr_searchable__ = ["status", "worker_pool"] + + id: UID + server_uid: UID + result: Any | None = None + resolved: bool = False + status: Status = Status.CREATED + + method: str + service: str + args: list + kwargs: dict[str, Any] + job_id: UID | None = None + worker_settings: WorkerSettingsV1 | None = None + has_execute_permissions: bool = False + worker_pool: LinkedObject @serializable() class QueueItem(SyftObject): __canonical_name__ = "QueueItem" - __version__ = SYFT_OBJECT_VERSION_4 + __version__ = SYFT_OBJECT_VERSION_2 - __attr_searchable__ = ["status"] + __attr_searchable__ = ["status", "worker_pool_id"] id: UID - node_uid: UID + server_uid: UID result: Any | None = None resolved: bool = False status: Status = Status.CREATED @@ -67,102 +85,152 @@ def __repr__(self) -> str: def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: return f": {self.status}" + @property + def worker_pool_id(self) -> UID: + return self.worker_pool.object_uid + @property def is_action(self) -> bool: return self.service_path == "Action" and self.method_name == "execute" @property - def action(self) -> Any | SyftError: + def action(self) -> Any: if self.is_action: return self.kwargs["action"] - return SyftError(message="QueueItem not an Action") + raise SyftException(public_message="QueueItem not an Action") @serializable() class ActionQueueItem(QueueItem): __canonical_name__ = "ActionQueueItem" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_2 
method: str = "execute" service: str = "actionservice" -@instrument @serializable() -class QueueStash(BaseStash): - object_type = QueueItem - settings: PartitionSettings = PartitionSettings( - name=QueueItem.__canonical_name__, object_type=QueueItem - ) +class APIEndpointQueueItem(QueueItem): + __canonical_name__ = "APIEndpointQueueItem" + __version__ = SYFT_OBJECT_VERSION_2 + + method: str + service: str = "apiservice" - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +@serializable() +class ActionQueueItemV1(QueueItemV1): + __canonical_name__ = "ActionQueueItem" + __version__ = SYFT_OBJECT_VERSION_1 + + method: str = "execute" + service: str = "actionservice" + + +@serializable() +class APIEndpointQueueItemV1(QueueItemV1): + __canonical_name__ = "APIEndpointQueueItem" + __version__ = SYFT_OBJECT_VERSION_1 + + method: str + service: str = "apiservice" + + +@serializable(canonical_name="QueueSQLStash", version=1) +class QueueStash(ObjectStash[QueueItem]): + # FIX: Check the return value for None. set_result is used extensively + @as_result(StashException) def set_result( self, credentials: SyftVerifyKey, item: QueueItem, add_permissions: list[ActionObjectPermission] | None = None, - ) -> Result[QueueItem | None, str]: + ) -> QueueItem | None: if item.resolved: - valid = self.check_type(item, self.object_type) - if valid.is_err(): - return SyftError(message=valid.err()) - return super().update(credentials, item, add_permissions) + self.check_type(item, self.object_type).unwrap() + return super().update(credentials, item, add_permissions).unwrap() + # TODO: should we log this? return None + @as_result(SyftException) def set_placeholder( self, credentials: SyftVerifyKey, item: QueueItem, add_permissions: list[ActionObjectPermission] | None = None, - ) -> Result[QueueItem, str]: + ) -> QueueItem: # 🟡 TODO 36: Needs distributed lock if not item.resolved: - exists = self.get_by_uid(credentials, item.id) - if exists.is_ok() and exists.ok() is None: - valid = self.check_type(item, self.object_type) - if valid.is_err(): - return SyftError(message=valid.err()) - return super().set(credentials, item, add_permissions) - return item - - def get_by_uid( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[QueueItem | None, str]: - qks = QueryKeys(qks=[UIDPartitionKey.with_obj(uid)]) - item = self.query_one(credentials=credentials, qks=qks) + try: + self.get_by_uid(credentials, item.id).unwrap() + except NotFoundException: + self.check_type(item, self.object_type) + return super().set(credentials, item, add_permissions).unwrap() return item - def pop( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[QueueItem | None, str]: - item = self.get_by_uid(credentials=credentials, uid=uid) - self.delete_by_uid(credentials=credentials, uid=uid) - return item + @as_result(StashException) + def pop(self, credentials: SyftVerifyKey, uid: UID) -> QueueItem | None: + try: + item = self.get_by_uid(credentials=credentials, uid=uid).unwrap() + except NotFoundException: + # TODO: Handle NotfoundException in code? 
+ return None - def pop_on_complete( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[QueueItem | None, str]: - item = self.get_by_uid(credentials=credentials, uid=uid) - if item.is_ok(): - queue_item = item.ok() - if queue_item.status == Status.COMPLETED: - self.delete_by_uid(credentials=credentials, uid=uid) + self.delete_by_uid(credentials=credentials, uid=uid).unwrap() return item - def delete_by_uid( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[SyftSuccess, str]: - qk = UIDPartitionKey.with_obj(uid) - result = super().delete(credentials=credentials, qk=qk) - if result.is_ok(): - return Ok(SyftSuccess(message=f"ID: {uid} deleted")) - return result + @as_result(StashException) + def pop_on_complete(self, credentials: SyftVerifyKey, uid: UID) -> QueueItem: + queue_item = self.get_by_uid(credentials=credentials, uid=uid).unwrap() + if queue_item.status == Status.COMPLETED: + self.delete_by_uid(credentials=credentials, uid=uid) + return queue_item + @as_result(StashException) def get_by_status( self, credentials: SyftVerifyKey, status: Status - ) -> Result[list[QueueItem], str]: - qks = QueryKeys(qks=StatusPartitionKey.with_obj(status)) + ) -> list[QueueItem]: + return self.get_all( + credentials=credentials, + filters={"status": status}, + ).unwrap() + + @as_result(StashException) + def _get_by_worker_pool( + self, credentials: SyftVerifyKey, worker_pool: LinkedObject + ) -> list[QueueItem]: + worker_pool_id = worker_pool.object_uid + + return self.get_all( + credentials=credentials, + filters={"worker_pool_id": worker_pool_id}, + ).unwrap() + + +def upgrade_worker_settings_for_queue(context: TransformContext) -> TransformContext: + if context.output and context.output["worker_settings"] is not None: + worker_settings_old: WorkerSettingsV1 | None = context.output["worker_settings"] + if worker_settings_old is None: + return context + + worker_settings = worker_settings_old.migrate_to( + WorkerSettings.__version__, context=context.to_server_context() + ) + context.output["worker_settings"] = worker_settings + + return context + + +@migrate(QueueItemV1, QueueItem) +def migrate_queue_item_from_v1_to_v2() -> list[Callable]: + return [upgrade_worker_settings_for_queue] + + +@migrate(ActionQueueItemV1, ActionQueueItem) +def migrate_action_queue_item_v1_to_v2() -> list[Callable]: + return [upgrade_worker_settings_for_queue] + - return self.query_all(credentials=credentials, qks=qks) +@migrate(APIEndpointQueueItemV1, APIEndpointQueueItem) +def migrate_api_endpoint_queue_item_v1_to_v2() -> list[Callable]: + return [upgrade_worker_settings_for_queue] diff --git a/packages/syft/src/syft/service/queue/zmq_client.py b/packages/syft/src/syft/service/queue/zmq_client.py new file mode 100644 index 00000000000..1c68ded7537 --- /dev/null +++ b/packages/syft/src/syft/service/queue/zmq_client.py @@ -0,0 +1,197 @@ +# stdlib +from collections import defaultdict +import logging +import socketserver + +# relative +from ...serde.serializable import serializable +from ...service.context import AuthedServiceContext +from ...types.errors import SyftException +from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ...types.syft_object import SyftObject +from ...types.uid import UID +from ...util.util import get_queue_address +from ..response import SyftSuccess +from ..worker.worker_stash import WorkerStash +from .base_queue import AbstractMessageHandler +from .base_queue import QueueClient +from .base_queue import QueueClientConfig +from .base_queue import QueueConfig +from .queue import 
ConsumerType +from .queue_stash import QueueStash +from .zmq_consumer import ZMQConsumer +from .zmq_producer import ZMQProducer + +logger = logging.getLogger(__name__) + + +@serializable() +class ZMQClientConfig(SyftObject, QueueClientConfig): + __canonical_name__ = "ZMQClientConfig" + __version__ = SYFT_OBJECT_VERSION_1 + + id: UID | None = None # type: ignore[assignment] + hostname: str = "127.0.0.1" + queue_port: int | None = None + # TODO: setting this to false until we can fix the ZMQ + # port issue causing tests to randomly fail + create_producer: bool = False + n_consumers: int = 0 + consumer_service: str | None = None + + +@serializable(attrs=["host"], canonical_name="ZMQClient", version=1) +class ZMQClient(QueueClient): + """ZMQ Client for creating producers and consumers.""" + + producers: dict[str, ZMQProducer] + consumers: defaultdict[str, list[ZMQConsumer]] + + def __init__(self, config: ZMQClientConfig) -> None: + self.host = config.hostname + self.producers = {} + self.consumers = defaultdict(list) + self.config = config + + @staticmethod + def _get_free_tcp_port(host: str) -> int: + with socketserver.TCPServer((host, 0), None) as s: + free_port = s.server_address[1] + + return free_port + + def add_producer( + self, + queue_name: str, + port: int | None = None, + queue_stash: QueueStash | None = None, + worker_stash: WorkerStash | None = None, + context: AuthedServiceContext | None = None, + ) -> ZMQProducer: + """Add a producer of a queue. + + A queue can have at most one producer attached to it. + """ + + if port is None: + if self.config.queue_port is None: + self.config.queue_port = self._get_free_tcp_port(self.host) + port = self.config.queue_port + else: + port = self.config.queue_port + + logger.info( + f"Adding producer for queue: {queue_name} on: {get_queue_address(port)}" + ) + producer = ZMQProducer( + queue_name=queue_name, + queue_stash=queue_stash, + port=port, + context=context, + worker_stash=worker_stash, + ) + self.producers[queue_name] = producer + return producer + + def add_consumer( + self, + queue_name: str, + message_handler: AbstractMessageHandler, + service_name: str, + address: str | None = None, + worker_stash: WorkerStash | None = None, + syft_worker_id: UID | None = None, + ) -> ZMQConsumer: + """Add a consumer to a queue + + A queue should have at least one producer attached to the group. + + """ + + if address is None: + address = get_queue_address(self.config.queue_port) + + consumer = ZMQConsumer( + queue_name=queue_name, + message_handler=message_handler, + address=address, + service_name=service_name, + syft_worker_id=syft_worker_id, + worker_stash=worker_stash, + ) + self.consumers[queue_name].append(consumer) + + return consumer + + def send_message( + self, + message: bytes, + queue_name: str, + worker: bytes | None = None, + ) -> SyftSuccess: + producer = self.producers.get(queue_name) + if producer is None: + raise SyftException( + public_message=f"No producer attached for queue: {queue_name}. Please add a producer for it." 
+ ) + try: + producer.send(message=message, worker=worker) + except Exception as e: + # stdlib + raise SyftException( + public_message=f"Failed to send message to: {queue_name} with error: {e}" + ) + return SyftSuccess( + message=f"Successfully queued message to : {queue_name}", + ) + + def close(self) -> SyftSuccess: + try: + for consumers in self.consumers.values(): + for consumer in consumers: + # make sure loop is stopped + consumer.close() + + for producer in self.producers.values(): + # make sure loop is stopped + producer.close() + # close existing connection. + except Exception as e: + raise SyftException(public_message=f"Failed to close connection: {e}") + + return SyftSuccess(message="All connections closed.") + + def purge_queue(self, queue_name: str) -> SyftSuccess: + if queue_name not in self.producers: + raise SyftException( + public_message=f"No producer running for : {queue_name}" + ) + + producer = self.producers[queue_name] + + # close existing connection. + producer.close() + + # add a new connection + self.add_producer(queue_name=queue_name, address=producer.address) # type: ignore + + return SyftSuccess(message=f"Queue: {queue_name} successfully purged") + + def purge_all(self) -> SyftSuccess: + for queue_name in self.producers: + self.purge_queue(queue_name=queue_name) + + return SyftSuccess(message="Successfully purged all queues.") + + +@serializable(canonical_name="ZMQQueueConfig", version=1) +class ZMQQueueConfig(QueueConfig): + def __init__( + self, + client_type: type[ZMQClient] | None = None, + client_config: ZMQClientConfig | None = None, + consumer_type: ConsumerType = ConsumerType.Process, + ): + self.client_type = client_type or ZMQClient + self.client_config: ZMQClientConfig = client_config or ZMQClientConfig() + self.consumer_type = consumer_type diff --git a/packages/syft/src/syft/service/queue/zmq_common.py b/packages/syft/src/syft/service/queue/zmq_common.py new file mode 100644 index 00000000000..35331541c86 --- /dev/null +++ b/packages/syft/src/syft/service/queue/zmq_common.py @@ -0,0 +1,120 @@ +# stdlib +import threading +import time +from typing import Any + +# third party +from pydantic import field_validator + +# relative +from ...server.credentials import SyftVerifyKey +from ...types.base import SyftBaseModel +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.uid import UID +from ..worker.worker_pool import SyftWorker +from ..worker.worker_stash import WorkerStash + +# Producer/Consumer heartbeat interval (in seconds) +HEARTBEAT_INTERVAL_SEC = 2 + +# Thread join timeout (in seconds) +THREAD_TIMEOUT_SEC = 5 + +# Max duration (in ms) to wait for ZMQ poller to return +ZMQ_POLLER_TIMEOUT_MSEC = 1000 + +# Duration (in seconds) after which a worker without a heartbeat will be marked as expired +WORKER_TIMEOUT_SEC = 60 + +# Duration (in seconds) after which producer without a heartbeat will be marked as expired +PRODUCER_TIMEOUT_SEC = 60 + +# Lock for working on ZMQ socket +ZMQ_SOCKET_LOCK = threading.Lock() + +MAX_RECURSION_NESTED_ACTIONOBJECTS = 5 + + +class ZMQHeader: + """Enum for ZMQ headers""" + + W_WORKER = b"MDPW01" + + +class ZMQCommand: + """Enum for ZMQ commands""" + + W_READY = b"0x01" + W_REQUEST = b"0x02" + W_REPLY = b"0x03" + W_HEARTBEAT = b"0x04" + W_DISCONNECT = b"0x05" + + +class Timeout: + def __init__(self, offset_sec: float): + self.__offset = float(offset_sec) + self.__next_ts: float = 0.0 + + self.reset() + + @property + def next_ts(self) -> float: + return self.__next_ts + + def 
reset(self) -> None: + self.__next_ts = self.now() + self.__offset + + def has_expired(self) -> bool: + return self.now() >= self.__next_ts + + @staticmethod + def now() -> float: + return time.time() + + +class Service: + def __init__(self, name: str) -> None: + self.name = name + self.requests: list[bytes] = [] + self.waiting: list[Worker] = [] # List of waiting workers + + +class Worker(SyftBaseModel): + address: bytes + identity: bytes + service: Service | None = None + syft_worker_id: UID | None = None + expiry_t: Timeout = Timeout(WORKER_TIMEOUT_SEC) + + @field_validator("syft_worker_id", mode="before") + @classmethod + def set_syft_worker_id(cls, v: Any) -> Any: + if isinstance(v, str): + return UID(v) + return v + + def has_expired(self) -> bool: + return self.expiry_t.has_expired() + + def get_expiry(self) -> float: + return self.expiry_t.next_ts + + def reset_expiry(self) -> None: + self.expiry_t.reset() + + @as_result(SyftException) + def _syft_worker( + self, stash: WorkerStash, credentials: SyftVerifyKey + ) -> SyftWorker | None: + return stash.get_by_uid( + credentials=credentials, uid=self.syft_worker_id + ).unwrap() + + def __str__(self) -> str: + svc = self.service.name if self.service else None + return ( + f"Worker(addr={self.address!r}, id={self.identity!r}, service={svc}, " + f"syft_worker_id={self.syft_worker_id!r})" + ) diff --git a/packages/syft/src/syft/service/queue/zmq_consumer.py b/packages/syft/src/syft/service/queue/zmq_consumer.py new file mode 100644 index 00000000000..b2f6d4b9a6e --- /dev/null +++ b/packages/syft/src/syft/service/queue/zmq_consumer.py @@ -0,0 +1,302 @@ +# stdlib +import logging +import subprocess # nosec +import threading +from threading import Event + +# third party +import zmq +from zmq import Frame +from zmq.error import ContextTerminated + +# relative +from ...serde.deserialize import _deserialize +from ...serde.serializable import serializable +from ...types.uid import UID +from ..worker.worker_pool import ConsumerState +from ..worker.worker_stash import WorkerStash +from .base_queue import AbstractMessageHandler +from .base_queue import QueueConsumer +from .zmq_common import HEARTBEAT_INTERVAL_SEC +from .zmq_common import PRODUCER_TIMEOUT_SEC +from .zmq_common import THREAD_TIMEOUT_SEC +from .zmq_common import Timeout +from .zmq_common import ZMQCommand +from .zmq_common import ZMQHeader +from .zmq_common import ZMQ_POLLER_TIMEOUT_MSEC +from .zmq_common import ZMQ_SOCKET_LOCK + +logger = logging.getLogger(__name__) + + +def last_created_port() -> int: + command = ( + "lsof -i -P -n | grep '*:[0-9]* (LISTEN)' | grep python | awk '{print $9, $1, $2}' | " + "sort -k2,2 -k3,3n | tail -n 1 | awk '{print $1}' | cut -d':' -f2" + ) + # 1. Lists open files (including network connections) with lsof -i -P -n + # 2. Filters for listening ports with grep '*:[0-9]* (LISTEN)' + # 3. Further filters for Python processes with grep python + # 4. Prints the address:port, command name, and PID with awk '{print $9, $1, $2}' + # 5. Sorts by command name and then numerically by PID with sort -k2,2 -k3,3n + # 6. Takes the last entry (highest PID) with tail -n 1 and keeps the address:port field with awk '{print $1}' + # 7. 
Extracts only the port number with cut -d':' -f2 + + process = subprocess.Popen( # nosec + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True + ) + out, err = process.communicate() + return int(out.decode("utf-8").strip()) + + +@serializable(attrs=["_subscriber"], canonical_name="ZMQConsumer", version=1) +class ZMQConsumer(QueueConsumer): + def __init__( + self, + message_handler: AbstractMessageHandler, + address: str, + queue_name: str, + service_name: str, + syft_worker_id: UID | None = None, + worker_stash: WorkerStash | None = None, + verbose: bool = False, + ) -> None: + self.address = address + self.message_handler = message_handler + self.service_name = service_name + self.queue_name = queue_name + self.context = zmq.Context() + self.poller = zmq.Poller() + self.socket = None + self.verbose = verbose + self.id = UID().short() + self._stop = Event() + self.syft_worker_id = syft_worker_id + self.worker_stash = worker_stash + self.post_init() + + @classmethod + def default(cls, address: str | None = None, **kwargs: dict) -> "ZMQConsumer": + # relative + from ...types.uid import UID + from ..worker.utils import DEFAULT_WORKER_POOL_NAME + from .queue import APICallMessageHandler + + if address is None: + try: + address = f"tcp://localhost:{last_created_port()}" + except Exception: + raise Exception( + "Could not auto-assign ZMQConsumer address. Please provide one." + ) + print(f"Auto-assigning ZMQConsumer address: {address}. Please verify.") + default_kwargs = { + "message_handler": APICallMessageHandler, + "queue_name": APICallMessageHandler.queue_name, + "service_name": DEFAULT_WORKER_POOL_NAME, + "syft_worker_id": UID(), + "verbose": True, + "address": address, + } + + for key, value in kwargs.items(): + if key in default_kwargs: + default_kwargs[key] = value + + return cls(**default_kwargs) + + def reconnect_to_producer(self) -> None: + """Connect or reconnect to producer""" + if self.socket: + self.poller.unregister(self.socket) # type: ignore[unreachable] + self.socket.close() + self.socket = self.context.socket(zmq.DEALER) + self.socket.linger = 0 + self.socket.setsockopt_string(zmq.IDENTITY, self.id) + self.socket.connect(self.address) + self.poller.register(self.socket, zmq.POLLIN) + + logger.info(f"Connecting Worker id={self.id} to broker addr={self.address}") + + # Register queue with the producer + self.send_to_producer( + ZMQCommand.W_READY, + [self.service_name.encode(), str(self.syft_worker_id).encode()], + ) + + def post_init(self) -> None: + self.thread: threading.Thread | None = None + self.heartbeat_t = Timeout(HEARTBEAT_INTERVAL_SEC) + self.producer_ping_t = Timeout(PRODUCER_TIMEOUT_SEC) + self.reconnect_to_producer() + + def disconnect_from_producer(self) -> None: + self.send_to_producer(ZMQCommand.W_DISCONNECT) + + def close(self) -> None: + self.disconnect_from_producer() + self._stop.set() + try: + if self.thread is not None: + self.thread.join(timeout=THREAD_TIMEOUT_SEC) + if self.thread is not None and self.thread.is_alive(): + logger.error( + f"ZMQConsumer thread join timed out during closing. " + f"SyftWorker id {self.syft_worker_id}, " + f"service name {self.service_name}." + ) + self.thread = None + self.poller.unregister(self.socket) + except Exception as e: + logger.error("Failed to unregister worker.", exc_info=e) + finally: + self.socket.close() + self.context.destroy() + # self._stop.clear() + + def send_to_producer( + self, + command: bytes, + msg: bytes | list | None = None, + ) -> None: + """Send message to producer. 
+ + If no msg is provided, creates one internally + """ + if self.socket.closed: + logger.warning("Socket is closed. Cannot send message.") + return + + if msg is None: + msg = [] + elif not isinstance(msg, list): + msg = [msg] + + # ZMQConsumer send frames: [empty, header, command, ...data] + core = [b"", ZMQHeader.W_WORKER, command] + msg = core + msg + + if command != ZMQCommand.W_HEARTBEAT: + logger.info(f"ZMQ Consumer send: {core}") + + with ZMQ_SOCKET_LOCK: + try: + self.socket.send_multipart(msg) + except zmq.ZMQError as e: + logger.error("ZMQConsumer send error", exc_info=e) + + def _run(self) -> None: + """Send reply, if any, to producer and wait for next request.""" + try: + while True: + if self._stop.is_set(): + logger.info("ZMQConsumer thread stopped") + return + + try: + items = self.poller.poll(ZMQ_POLLER_TIMEOUT_MSEC) + except ContextTerminated: + logger.info("Context terminated") + return + except Exception as e: + logger.error("ZMQ poll error", exc_info=e) + continue + + if items: + msg = self.socket.recv_multipart() + + # mark as alive + self.set_producer_alive() + + if len(msg) < 3: + logger.error(f"ZMQConsumer invalid recv: {msg}") + continue + + # Message frames received by consumer: + # [empty, header, command, ...data] + (_, _, command, *data) = msg + + if command != ZMQCommand.W_HEARTBEAT: + # log everything except the last frame which contains serialized data + logger.info(f"ZMQConsumer recv: {msg[:-4]}") + + if command == ZMQCommand.W_REQUEST: + # Call Message Handler + try: + message = data.pop() + self.associate_job(message) + self.message_handler.handle_message( + message=message, + syft_worker_id=self.syft_worker_id, + ) + except Exception as e: + logger.exception("Couldn't handle message", exc_info=e) + finally: + self.clear_job() + elif command == ZMQCommand.W_HEARTBEAT: + self.set_producer_alive() + elif command == ZMQCommand.W_DISCONNECT: + self.reconnect_to_producer() + else: + logger.error(f"ZMQConsumer invalid command: {command}") + else: + if not self.is_producer_alive(): + logger.info("Producer check-alive timed out. 
Reconnecting.") + self.reconnect_to_producer() + self.set_producer_alive() + + if not self._stop.is_set(): + self.send_heartbeat() + + except zmq.ZMQError as e: + if e.errno == zmq.ETERM: + logger.info("zmq.ETERM") + else: + logger.exception("zmq.ZMQError", exc_info=e) + except Exception as e: + logger.exception("ZMQConsumer thread exception", exc_info=e) + + def set_producer_alive(self) -> None: + self.producer_ping_t.reset() + + def is_producer_alive(self) -> bool: + # producer timer is within timeout + return not self.producer_ping_t.has_expired() + + def send_heartbeat(self) -> None: + if self.heartbeat_t.has_expired() and self.is_producer_alive(): + self.send_to_producer(ZMQCommand.W_HEARTBEAT) + self.heartbeat_t.reset() + + def run(self) -> None: + self.thread = threading.Thread(target=self._run) + self.thread.start() + + def associate_job(self, message: Frame) -> None: + try: + queue_item = _deserialize(message, from_bytes=True) + self._set_worker_job(queue_item.job_id) + except Exception as e: + logger.exception("Could not associate job", exc_info=e) + + def clear_job(self) -> None: + self._set_worker_job(None) + + def _set_worker_job(self, job_id: UID | None) -> None: + if self.worker_stash is not None: + consumer_state = ( + ConsumerState.IDLE if job_id is None else ConsumerState.CONSUMING + ) + res = self.worker_stash.update_consumer_state( + credentials=self.worker_stash.root_verify_key, + worker_uid=self.syft_worker_id, + consumer_state=consumer_state, + ) + if res.is_err(): + logger.error( + f"Failed to update consumer state for {self.service_name}-{self.id}, error={res.err()}" + ) + + @property + def alive(self) -> bool: + return not self.socket.closed and self.is_producer_alive() diff --git a/packages/syft/src/syft/service/queue/zmq_producer.py b/packages/syft/src/syft/service/queue/zmq_producer.py new file mode 100644 index 00000000000..197f87ab283 --- /dev/null +++ b/packages/syft/src/syft/service/queue/zmq_producer.py @@ -0,0 +1,469 @@ +# stdlib +from binascii import hexlify +import itertools +import logging +import sys +import threading +from threading import Event +from time import sleep +from typing import Any + +# third party +import zmq +from zmq import LINGER + +# relative +from ...serde.serializable import serializable +from ...serde.serialize import _serialize as serialize +from ...service.action.action_object import ActionObject +from ...service.context import AuthedServiceContext +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.uid import UID +from ...util.util import get_queue_address +from ..service import AbstractService +from ..worker.worker_pool import ConsumerState +from ..worker.worker_stash import WorkerStash +from .base_queue import QueueProducer +from .queue_stash import ActionQueueItem +from .queue_stash import QueueStash +from .queue_stash import Status +from .zmq_common import HEARTBEAT_INTERVAL_SEC +from .zmq_common import Service +from .zmq_common import THREAD_TIMEOUT_SEC +from .zmq_common import Timeout +from .zmq_common import Worker +from .zmq_common import ZMQCommand +from .zmq_common import ZMQHeader +from .zmq_common import ZMQ_POLLER_TIMEOUT_MSEC +from .zmq_common import ZMQ_SOCKET_LOCK + +logger = logging.getLogger(__name__) + + +@serializable(canonical_name="ZMQProducer", version=1) +class ZMQProducer(QueueProducer): + INTERNAL_SERVICE_PREFIX = b"mmi." 
+ + def __init__( + self, + queue_name: str, + queue_stash: QueueStash, + worker_stash: WorkerStash, + port: int, + context: AuthedServiceContext, + ) -> None: + self.id = UID().short() + self.port = port + self.queue_stash = queue_stash + self.worker_stash = worker_stash + self.queue_name = queue_name + self.auth_context = context + self._stop = Event() + self.post_init() + + @property + def address(self) -> str: + return get_queue_address(self.port) + + def post_init(self) -> None: + """Initialize producer state.""" + + self.services: dict[str, Service] = {} + self.workers: dict[bytes, Worker] = {} + self.waiting: list[Worker] = [] + self.heartbeat_t = Timeout(HEARTBEAT_INTERVAL_SEC) + self.context = zmq.Context(1) + self.socket = self.context.socket(zmq.ROUTER) + self.socket.setsockopt(LINGER, 1) + self.socket.setsockopt_string(zmq.IDENTITY, self.id) + self.poll_workers = zmq.Poller() + self.poll_workers.register(self.socket, zmq.POLLIN) + self.bind(f"tcp://*:{self.port}") + self.thread: threading.Thread | None = None + self.producer_thread: threading.Thread | None = None + + def close(self) -> None: + self._stop.set() + try: + if self.thread: + self.thread.join(THREAD_TIMEOUT_SEC) + if self.thread.is_alive(): + logger.error( + f"ZMQProducer message sending thread join timed out during closing. " + f"Queue name {self.queue_name}, " + ) + self.thread = None + + if self.producer_thread: + self.producer_thread.join(THREAD_TIMEOUT_SEC) + if self.producer_thread.is_alive(): + logger.error( + f"ZMQProducer queue thread join timed out during closing. " + f"Queue name {self.queue_name}, " + ) + self.producer_thread = None + + self.poll_workers.unregister(self.socket) + except Exception as e: + logger.exception("Failed to unregister poller.", exc_info=e) + finally: + self.socket.close() + self.context.destroy() + + @property + def action_service(self) -> AbstractService: + if self.auth_context.server is not None: + return self.auth_context.server.services.action + else: + raise Exception(f"{self.auth_context} does not have a server.") + + @as_result(SyftException) + def contains_unresolved_action_objects(self, arg: Any, recursion: int = 0) -> bool: + """recursively check collections for unresolved action objects""" + if isinstance(arg, UID): + arg = self.action_service.get(self.auth_context, arg) + return self.contains_unresolved_action_objects( + arg, recursion=recursion + 1 + ).unwrap() + if isinstance(arg, ActionObject): + if not arg.syft_resolved: + arg = self.action_service.get(self.auth_context, arg) + if not arg.syft_resolved: + return True + arg = arg.syft_action_data + + value = False + if isinstance(arg, list): + for elem in arg: + value = self.contains_unresolved_action_objects( + elem, recursion=recursion + 1 + ).unwrap() + if value: + return True + if isinstance(arg, dict): + for elem in arg.values(): + value = self.contains_unresolved_action_objects( + elem, recursion=recursion + 1 + ).unwrap() + if value: + return True + return value + + def read_items(self) -> None: + while True: + if self._stop.is_set(): + break + try: + sleep(1) + + # Items to be queued + items_to_queue = self.queue_stash.get_by_status( + self.queue_stash.root_verify_key, + status=Status.CREATED, + ).unwrap() + + items_to_queue = [] if items_to_queue is None else items_to_queue + + # Queue Items that are in the processing state + items_processing = self.queue_stash.get_by_status( + self.queue_stash.root_verify_key, + status=Status.PROCESSING, + ).unwrap() + + items_processing = [] if items_processing is None 
else items_processing + + for item in itertools.chain(items_to_queue, items_processing): + # TODO: if resolving fails, set queueitem to errored, and jobitem as well + if item.status == Status.CREATED: + if isinstance(item, ActionQueueItem): + action = item.kwargs["action"] + if ( + self.contains_unresolved_action_objects( + action.args + ).unwrap() + or self.contains_unresolved_action_objects( + action.kwargs + ).unwrap() + ): + continue + + msg_bytes = serialize(item, to_bytes=True) + worker_pool = item.worker_pool.resolve_with_context( + self.auth_context + ).unwrap() + service_name = worker_pool.name + service: Service | None = self.services.get(service_name) + + # Skip adding message if corresponding service/pool + # is not registered. + if service is None: + continue + + # append request message to the corresponding service + # This list is processed in dispatch method. + + # TODO: Logic to evaluate the CAN RUN Condition + item.status = Status.PROCESSING + self.queue_stash.update( + item.syft_client_verify_key, item + ).unwrap(public_message=f"failed to update queue item {item}") + service.requests.append(msg_bytes) + elif item.status == Status.PROCESSING: + # Evaluate Retry condition here + # If job running and timeout or job status is KILL + # or heartbeat fails + # or container id doesn't exists, kill process or container + # else decrease retry count and mark status as CREATED. + pass + except Exception as e: + # stdlib + import traceback + + print(e, traceback.format_exc(), file=sys.stderr) + item.status = Status.ERRORED + self.queue_stash.update(item.syft_client_verify_key, item).unwrap() + + def run(self) -> None: + self.thread = threading.Thread(target=self._run) + self.thread.start() + + self.producer_thread = threading.Thread(target=self.read_items) + self.producer_thread.start() + + def send(self, worker: bytes, message: bytes | list[bytes]) -> None: + worker_obj = self.require_worker(worker) + self.send_to_worker(worker_obj, ZMQCommand.W_REQUEST, message) + + def bind(self, endpoint: str) -> None: + """Bind producer to endpoint.""" + self.socket.bind(endpoint) + logger.info(f"ZMQProducer endpoint: {endpoint}") + + def send_heartbeats(self) -> None: + """Send heartbeats to idle workers if it's time""" + if self.heartbeat_t.has_expired(): + for worker in self.waiting: + self.send_to_worker(worker, ZMQCommand.W_HEARTBEAT) + self.heartbeat_t.reset() + + def purge_workers(self) -> None: + """Look for & kill expired workers. + + Workers are oldest to most recent, so we stop at the first alive worker. 
+ """ + # work on a copy of the iterator + for worker in self.waiting: + res = worker._syft_worker(self.worker_stash, self.auth_context.credentials) + if res.is_err() or (syft_worker := res.ok()) is None: + logger.info(f"Failed to retrieve SyftWorker {worker.syft_worker_id}") + continue + + if worker.has_expired() or syft_worker.to_be_deleted: + logger.info(f"Deleting expired worker id={worker}") + self.delete_worker(worker, syft_worker.to_be_deleted) + + # relative + + self.auth_context.server.services.worker._delete( + self.auth_context, syft_worker + ) + + def update_consumer_state_for_worker( + self, syft_worker_id: UID, consumer_state: ConsumerState + ) -> None: + if self.worker_stash is None: + logger.error( # type: ignore[unreachable] + f"ZMQProducer worker stash not defined for {self.queue_name} - {self.id}" + ) + return + + try: + try: + self.worker_stash.get_by_uid( + credentials=self.worker_stash.root_verify_key, + uid=syft_worker_id, + ).unwrap() + except Exception: + return None + + self.worker_stash.update_consumer_state( + credentials=self.worker_stash.root_verify_key, + worker_uid=syft_worker_id, + consumer_state=consumer_state, + ).unwrap() + except Exception: + logger.exception( + f"Failed to update consumer state for worker id: {syft_worker_id} to state {consumer_state}", + ) + + def worker_waiting(self, worker: Worker) -> None: + """This worker is now waiting for work.""" + # Queue to broker and service waiting lists + if worker not in self.waiting: + self.waiting.append(worker) + if worker.service is not None and worker not in worker.service.waiting: + worker.service.waiting.append(worker) + worker.reset_expiry() + self.update_consumer_state_for_worker(worker.syft_worker_id, ConsumerState.IDLE) + self.dispatch(worker.service, None) + + def dispatch(self, service: Service, msg: bytes) -> None: + """Dispatch requests to waiting workers as possible""" + if msg is not None: # Queue message if any + service.requests.append(msg) + + self.purge_workers() + while service.waiting and service.requests: + # One worker consuming only one message at a time. + msg = service.requests.pop(0) + worker = service.waiting.pop(0) + self.waiting.remove(worker) + self.send_to_worker(worker, ZMQCommand.W_REQUEST, msg) + + def send_to_worker( + self, + worker: Worker, + command: bytes, + msg: bytes | list | None = None, + ) -> None: + """Send message to worker. + + If message is provided, sends that message. + """ + + if self.socket.closed: + logger.warning("Socket is closed. 
Cannot send message.") + return + + if msg is None: + msg = [] + elif not isinstance(msg, list): + msg = [msg] + + # ZMQProducer send frames: [address, empty, header, command, ...data] + core = [worker.address, b"", ZMQHeader.W_WORKER, command] + msg = core + msg + + if command != ZMQCommand.W_HEARTBEAT: + # log everything except the last frame which contains serialized data + logger.info(f"ZMQProducer send: {core}") + + with ZMQ_SOCKET_LOCK: + try: + self.socket.send_multipart(msg) + except zmq.ZMQError: + logger.exception("ZMQProducer send error") + + def _run(self) -> None: + try: + while True: + if self._stop.is_set(): + logger.info("ZMQProducer thread stopped") + return + + for service in self.services.values(): + self.dispatch(service, None) + + items = None + + try: + items = self.poll_workers.poll(ZMQ_POLLER_TIMEOUT_MSEC) + except Exception as e: + logger.exception("ZMQProducer poll error", exc_info=e) + + if items: + msg = self.socket.recv_multipart() + + if len(msg) < 3: + logger.error(f"ZMQProducer invalid recv: {msg}") + continue + + # ZMQProducer recv frames: [address, empty, header, command, ...data] + (address, _, header, command, *data) = msg + + if command != ZMQCommand.W_HEARTBEAT: + # log everything except the last frame which contains serialized data + logger.info(f"ZMQProducer recv: {msg[:4]}") + + if header == ZMQHeader.W_WORKER: + self.process_worker(address, command, data) + else: + logger.error(f"Invalid message header: {header}") + + self.send_heartbeats() + self.purge_workers() + except Exception as e: + logger.exception("ZMQProducer thread exception", exc_info=e) + + def require_worker(self, address: bytes) -> Worker: + """Finds the worker (creates if necessary).""" + identity = hexlify(address) + worker = self.workers.get(identity) + if worker is None: + worker = Worker(identity=identity, address=address) + self.workers[identity] = worker + return worker + + def process_worker(self, address: bytes, command: bytes, data: list[bytes]) -> None: + worker_ready = hexlify(address) in self.workers + worker = self.require_worker(address) + + if ZMQCommand.W_READY == command: + service_name = data.pop(0).decode() + syft_worker_id = data.pop(0).decode() + if worker_ready: + # Not first command in session or Reserved service name + # If worker was already present, then we disconnect it first + # and wait for it to re-register itself to the producer. This ensures that + # we always have a healthy worker in place that can talk to the producer. + self.delete_worker(worker, True) + else: + # Attach worker to service and mark as idle + if service_name in self.services: + service: Service | None = self.services.get(service_name) + else: + service = Service(service_name) + self.services[service_name] = service + if service is not None: + worker.service = service + logger.info(f"New worker: {worker}") + worker.syft_worker_id = UID(syft_worker_id) + self.worker_waiting(worker) + + elif ZMQCommand.W_HEARTBEAT == command: + if worker_ready: + # If worker is ready then reset expiry + # and add it to worker waiting list + # if not already present + self.worker_waiting(worker) + else: + logger.info(f"Got heartbeat, but worker not ready. 
{worker}") + self.delete_worker(worker, True) + elif ZMQCommand.W_DISCONNECT == command: + logger.info(f"Removing disconnected worker: {worker}") + self.delete_worker(worker, False) + else: + logger.error(f"Invalid command: {command!r}") + + def delete_worker(self, worker: Worker, disconnect: bool) -> None: + """Deletes worker from all data structures, and deletes worker.""" + if disconnect: + self.send_to_worker(worker, ZMQCommand.W_DISCONNECT) + + if worker.service and worker in worker.service.waiting: + worker.service.waiting.remove(worker) + + if worker in self.waiting: + self.waiting.remove(worker) + + self.workers.pop(worker.identity, None) + + if worker.syft_worker_id is not None: + self.update_consumer_state_for_worker( + worker.syft_worker_id, ConsumerState.DETACHED + ) + + @property + def alive(self) -> bool: + return not self.socket.closed diff --git a/packages/syft/src/syft/service/queue/zmq_queue.py b/packages/syft/src/syft/service/queue/zmq_queue.py deleted file mode 100644 index 0f42904356a..00000000000 --- a/packages/syft/src/syft/service/queue/zmq_queue.py +++ /dev/null @@ -1,962 +0,0 @@ -# stdlib -from binascii import hexlify -from collections import defaultdict -import itertools -import socketserver -import threading -import time -from time import sleep -from typing import Any - -# third party -from loguru import logger -from pydantic import field_validator -from zmq import Frame -from zmq import LINGER -from zmq.error import ContextTerminated -import zmq.green as zmq - -# relative -from ...serde.deserialize import _deserialize -from ...serde.serializable import serializable -from ...serde.serialize import _serialize as serialize -from ...service.action.action_object import ActionObject -from ...service.context import AuthedServiceContext -from ...types.base import SyftBaseModel -from ...types.syft_object import SYFT_OBJECT_VERSION_4 -from ...types.syft_object import SyftObject -from ...types.uid import UID -from ...util.util import get_queue_address -from ..response import SyftError -from ..response import SyftSuccess -from ..service import AbstractService -from ..worker.worker_pool import ConsumerState -from ..worker.worker_stash import WorkerStash -from .base_queue import AbstractMessageHandler -from .base_queue import QueueClient -from .base_queue import QueueClientConfig -from .base_queue import QueueConfig -from .base_queue import QueueConsumer -from .base_queue import QueueProducer -from .queue_stash import ActionQueueItem -from .queue_stash import QueueStash -from .queue_stash import Status - -# Producer/Consumer heartbeat interval (in seconds) -HEARTBEAT_INTERVAL_SEC = 2 - -# Thread join timeout (in seconds) -THREAD_TIMEOUT_SEC = 5 - -# Max duration (in ms) to wait for ZMQ poller to return -ZMQ_POLLER_TIMEOUT_MSEC = 1000 - -# Duration (in seconds) after which a worker without a heartbeat will be marked as expired -WORKER_TIMEOUT_SEC = 60 - -# Duration (in seconds) after which producer without a heartbeat will be marked as expired -PRODUCER_TIMEOUT_SEC = 60 - -# Lock for working on ZMQ socket -ZMQ_SOCKET_LOCK = threading.Lock() - - -class QueueMsgProtocol: - W_WORKER = b"MDPW01" - W_READY = b"0x01" - W_REQUEST = b"0x02" - W_REPLY = b"0x03" - W_HEARTBEAT = b"0x04" - W_DISCONNECT = b"0x05" - - -MAX_RECURSION_NESTED_ACTIONOBJECTS = 5 - - -class Timeout: - def __init__(self, offset_sec: float): - self.__offset = float(offset_sec) - self.__next_ts: float = 0.0 - - self.reset() - - @property - def next_ts(self) -> float: - return self.__next_ts - - def reset(self) -> 
None: - self.__next_ts = self.now() + self.__offset - - def has_expired(self) -> bool: - return self.now() >= self.__next_ts - - @staticmethod - def now() -> float: - return time.time() - - -class Service: - def __init__(self, name: str) -> None: - self.name = name - self.requests: list[bytes] = [] - self.waiting: list[Worker] = [] # List of waiting workers - - -class Worker(SyftBaseModel): - address: bytes - identity: bytes - service: Service | None = None - syft_worker_id: UID | None = None - expiry_t: Timeout = Timeout(WORKER_TIMEOUT_SEC) - - # TODO[pydantic]: We couldn't refactor the `validator`, please replace it by `field_validator` manually. - # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. - @field_validator("syft_worker_id", mode="before") - @classmethod - def set_syft_worker_id(cls, v: Any) -> Any: - if isinstance(v, str): - return UID(v) - return v - - def has_expired(self) -> bool: - return self.expiry_t.has_expired() - - def get_expiry(self) -> float: - return self.expiry_t.next_ts - - def reset_expiry(self) -> None: - self.expiry_t.reset() - - -@serializable() -class ZMQProducer(QueueProducer): - INTERNAL_SERVICE_PREFIX = b"mmi." - - def __init__( - self, - queue_name: str, - queue_stash: QueueStash, - worker_stash: WorkerStash, - port: int, - context: AuthedServiceContext, - ) -> None: - self.id = UID().short() - self.port = port - self.queue_stash = queue_stash - self.worker_stash = worker_stash - self.queue_name = queue_name - self.auth_context = context - self._stop = threading.Event() - self.post_init() - - @property - def address(self) -> str: - return get_queue_address(self.port) - - def post_init(self) -> None: - """Initialize producer state.""" - - self.services: dict[str, Service] = {} - self.workers: dict[bytes, Worker] = {} - self.waiting: list[Worker] = [] - self.heartbeat_t = Timeout(HEARTBEAT_INTERVAL_SEC) - self.context = zmq.Context(1) - self.socket = self.context.socket(zmq.ROUTER) - self.socket.setsockopt(LINGER, 1) - self.socket.setsockopt_string(zmq.IDENTITY, self.id) - self.poll_workers = zmq.Poller() - self.poll_workers.register(self.socket, zmq.POLLIN) - self.bind(f"tcp://*:{self.port}") - self.thread: threading.Thread | None = None - self.producer_thread: threading.Thread | None = None - - def close(self) -> None: - self._stop.set() - - try: - self.poll_workers.unregister(self.socket) - except Exception as e: - logger.exception("Failed to unregister poller. 
{}", e) - finally: - if self.thread: - self.thread.join(THREAD_TIMEOUT_SEC) - self.thread = None - - if self.producer_thread: - self.producer_thread.join(THREAD_TIMEOUT_SEC) - self.producer_thread = None - - self.socket.close() - self.context.destroy() - - self._stop.clear() - - @property - def action_service(self) -> AbstractService: - if self.auth_context.node is not None: - return self.auth_context.node.get_service("ActionService") - else: - raise Exception(f"{self.auth_context} does not have a node.") - - def contains_unresolved_action_objects(self, arg: Any, recursion: int = 0) -> bool: - """recursively check collections for unresolved action objects""" - if isinstance(arg, UID): - arg = self.action_service.get(self.auth_context, arg).ok() - return self.contains_unresolved_action_objects(arg, recursion=recursion + 1) - if isinstance(arg, ActionObject): - if not arg.syft_resolved: - res = self.action_service.get(self.auth_context, arg) - if res.is_err(): - return True - arg = res.ok() - if not arg.syft_resolved: - return True - arg = arg.syft_action_data - - try: - value = False - if isinstance(arg, list): - for elem in arg: - value = self.contains_unresolved_action_objects( - elem, recursion=recursion + 1 - ) - if value: - return True - if isinstance(arg, dict): - for elem in arg.values(): - value = self.contains_unresolved_action_objects( - elem, recursion=recursion + 1 - ) - if value: - return True - return value - except Exception as e: - logger.exception("Failed to resolve action objects. {}", e) - return True - - def unwrap_nested_actionobjects(self, data: Any) -> Any: - """recursively unwraps nested action objects""" - - if isinstance(data, list): - return [self.unwrap_nested_actionobjects(obj) for obj in data] - if isinstance(data, dict): - return { - key: self.unwrap_nested_actionobjects(obj) for key, obj in data.items() - } - if isinstance(data, ActionObject): - res = self.action_service.get(self.auth_context, data.id) - res = res.ok() if res.is_ok() else res.err() - if not isinstance(res, ActionObject): - return SyftError(message=f"{res}") - else: - nested_res = res.syft_action_data - if isinstance(nested_res, ActionObject): - nested_res.syft_node_location = res.syft_node_location - nested_res.syft_client_verify_key = res.syft_client_verify_key - return nested_res - return data - - def preprocess_action_arg(self, arg: Any) -> None: - res = self.action_service.get(context=self.auth_context, uid=arg) - if res.is_err(): - return arg - action_object = res.ok() - data = action_object.syft_action_data - new_data = self.unwrap_nested_actionobjects(data) - new_action_object = ActionObject.from_obj(new_data, id=action_object.id) - res = self.action_service.set( - context=self.auth_context, action_object=new_action_object - ) - - def read_items(self) -> None: - while True: - if self._stop.is_set(): - break - sleep(1) - - # Items to be queued - items_to_queue = self.queue_stash.get_by_status( - self.queue_stash.partition.root_verify_key, - status=Status.CREATED, - ).ok() - - items_to_queue = [] if items_to_queue is None else items_to_queue - - # Queue Items that are in the processing state - items_processing = self.queue_stash.get_by_status( - self.queue_stash.partition.root_verify_key, - status=Status.PROCESSING, - ).ok() - - items_processing = [] if items_processing is None else items_processing - - for item in itertools.chain(items_to_queue, items_processing): - if item.status == Status.CREATED: - if isinstance(item, ActionQueueItem): - action = item.kwargs["action"] - if 
self.contains_unresolved_action_objects( - action.args - ) or self.contains_unresolved_action_objects(action.kwargs): - continue - for arg in action.args: - self.preprocess_action_arg(arg) - for _, arg in action.kwargs.items(): - self.preprocess_action_arg(arg) - - msg_bytes = serialize(item, to_bytes=True) - worker_pool = item.worker_pool.resolve_with_context( - self.auth_context - ) - worker_pool = worker_pool.ok() - service_name = worker_pool.name - service: Service | None = self.services.get(service_name) - - # Skip adding message if corresponding service/pool - # is not registered. - if service is None: - continue - - # append request message to the corresponding service - # This list is processed in dispatch method. - - # TODO: Logic to evaluate the CAN RUN Condition - service.requests.append(msg_bytes) - item.status = Status.PROCESSING - res = self.queue_stash.update(item.syft_client_verify_key, item) - if res.is_err(): - logger.error( - "Failed to update queue item={} error={}", - item, - res.err(), - ) - elif item.status == Status.PROCESSING: - # Evaluate Retry condition here - # If job running and timeout or job status is KILL - # or heartbeat fails - # or container id doesn't exists, kill process or container - # else decrease retry count and mark status as CREATED. - pass - - def run(self) -> None: - self.thread = threading.Thread(target=self._run) - self.thread.start() - - self.producer_thread = threading.Thread(target=self.read_items) - self.producer_thread.start() - - def send(self, worker: bytes, message: bytes | list[bytes]) -> None: - worker_obj = self.require_worker(worker) - self.send_to_worker(worker=worker_obj, msg=message) - - def bind(self, endpoint: str) -> None: - """Bind producer to endpoint.""" - self.socket.bind(endpoint) - logger.info("Producer endpoint: {}", endpoint) - - def send_heartbeats(self) -> None: - """Send heartbeats to idle workers if it's time""" - if self.heartbeat_t.has_expired(): - for worker in self.waiting: - self.send_to_worker(worker, QueueMsgProtocol.W_HEARTBEAT, None, None) - self.heartbeat_t.reset() - - def purge_workers(self) -> None: - """Look for & kill expired workers. - - Workers are oldest to most recent, so we stop at the first alive worker. - """ - # work on a copy of the iterator - for worker in list(self.waiting): - if worker.has_expired(): - logger.info( - "Deleting expired Worker id={} uid={} expiry={} now={}", - worker.identity, - worker.syft_worker_id, - worker.get_expiry(), - Timeout.now(), - ) - self.delete_worker(worker, False) - - def update_consumer_state_for_worker( - self, syft_worker_id: UID, consumer_state: ConsumerState - ) -> None: - if self.worker_stash is None: - # TODO: fix the mypy issue - logger.error( # type: ignore[unreachable] - f"Worker stash is not defined for ZMQProducer : {self.queue_name} - {self.id}" - ) - return - - try: - res = self.worker_stash.update_consumer_state( - credentials=self.worker_stash.partition.root_verify_key, - worker_uid=syft_worker_id, - consumer_state=consumer_state, - ) - if res.is_err(): - logger.error( - "Failed to update consumer state for worker id={} error={}", - syft_worker_id, - res.err(), - ) - except Exception as e: - logger.error( - f"Failed to update consumer state for worker id: {syft_worker_id}. 
Error: {e}" - ) - - def worker_waiting(self, worker: Worker) -> None: - """This worker is now waiting for work.""" - # Queue to broker and service waiting lists - if worker not in self.waiting: - self.waiting.append(worker) - if worker.service is not None and worker not in worker.service.waiting: - worker.service.waiting.append(worker) - worker.reset_expiry() - self.update_consumer_state_for_worker(worker.syft_worker_id, ConsumerState.IDLE) - self.dispatch(worker.service, None) - - def dispatch(self, service: Service, msg: bytes) -> None: - """Dispatch requests to waiting workers as possible""" - if msg is not None: # Queue message if any - service.requests.append(msg) - - self.purge_workers() - while service.waiting and service.requests: - # One worker consuming only one message at a time. - msg = service.requests.pop(0) - worker = service.waiting.pop(0) - self.waiting.remove(worker) - self.send_to_worker(worker, QueueMsgProtocol.W_REQUEST, None, msg) - - def send_to_worker( - self, - worker: Worker, - command: bytes = QueueMsgProtocol.W_REQUEST, - option: bytes | None = None, - msg: bytes | list | None = None, - ) -> None: - """Send message to worker. - - If message is provided, sends that message. - """ - - if self.socket.closed: - logger.warning("Socket is closed. Cannot send message.") - return - - if msg is None: - msg = [] - elif not isinstance(msg, list): - msg = [msg] - - # Stack routing and protocol envelopes to start of message - # and routing envelope - if option is not None: - msg = [option] + msg - msg = [worker.address, b"", QueueMsgProtocol.W_WORKER, command] + msg - - logger.debug("Send: {}", msg) - with ZMQ_SOCKET_LOCK: - try: - self.socket.send_multipart(msg) - except zmq.ZMQError as e: - logger.error("Failed to send message to producer. {}", e) - - def _run(self) -> None: - while True: - if self._stop.is_set(): - return - - for _, service in self.services.items(): - self.dispatch(service, None) - - items = None - - try: - items = self.poll_workers.poll(ZMQ_POLLER_TIMEOUT_MSEC) - except Exception as e: - logger.exception("Failed to poll items: {}", e) - - if items: - msg = self.socket.recv_multipart() - - logger.debug("Recieve: {}", msg) - - address = msg.pop(0) - empty = msg.pop(0) # noqa: F841 - header = msg.pop(0) - - if header == QueueMsgProtocol.W_WORKER: - self.process_worker(address, msg) - else: - logger.error("Invalid message header: {}", header) - - self.send_heartbeats() - self.purge_workers() - - def require_worker(self, address: bytes) -> Worker: - """Finds the worker (creates if necessary).""" - identity = hexlify(address) - worker = self.workers.get(identity) - if worker is None: - worker = Worker(identity=identity, address=address) - self.workers[identity] = worker - return worker - - def process_worker(self, address: bytes, msg: list[bytes]) -> None: - command = msg.pop(0) - - worker_ready = hexlify(address) in self.workers - - worker = self.require_worker(address) - - if QueueMsgProtocol.W_READY == command: - service_name = msg.pop(0).decode() - syft_worker_id = msg.pop(0).decode() - if worker_ready: - # Not first command in session or Reserved service name - # If worker was already present, then we disconnect it first - # and wait for it to re-register itself to the producer. This ensures that - # we always have a healthy worker in place that can talk to the producer. 
- self.delete_worker(worker, True) - else: - # Attach worker to service and mark as idle - if service_name in self.services: - service: Service | None = self.services.get(service_name) - else: - service = Service(service_name) - self.services[service_name] = service - if service is not None: - worker.service = service - logger.info( - "New Worker service={}, id={}, uid={}", - service.name, - worker.identity, - worker.syft_worker_id, - ) - else: - logger.info( - "New Worker service=None, id={}, uid={}", - worker.identity, - worker.syft_worker_id, - ) - worker.syft_worker_id = UID(syft_worker_id) - self.worker_waiting(worker) - - elif QueueMsgProtocol.W_HEARTBEAT == command: - if worker_ready: - # If worker is ready then reset expiry - # and add it to worker waiting list - # if not already present - self.worker_waiting(worker) - else: - # extract the syft worker id and worker pool name from the message - # Get the corresponding worker pool and worker - # update the status to be unhealthy - self.delete_worker(worker, True) - elif QueueMsgProtocol.W_DISCONNECT == command: - self.delete_worker(worker, False) - else: - logger.error("Invalid command: {}", command) - - def delete_worker(self, worker: Worker, disconnect: bool) -> None: - """Deletes worker from all data structures, and deletes worker.""" - if disconnect: - self.send_to_worker(worker, QueueMsgProtocol.W_DISCONNECT, None, None) - - if worker.service and worker in worker.service.waiting: - worker.service.waiting.remove(worker) - - if worker in self.waiting: - self.waiting.remove(worker) - - self.workers.pop(worker.identity, None) - - self.update_consumer_state_for_worker( - worker.syft_worker_id, ConsumerState.DETACHED - ) - - @property - def alive(self) -> bool: - return not self.socket.closed - - -@serializable(attrs=["_subscriber"]) -class ZMQConsumer(QueueConsumer): - def __init__( - self, - message_handler: AbstractMessageHandler, - address: str, - queue_name: str, - service_name: str, - syft_worker_id: UID | None = None, - worker_stash: WorkerStash | None = None, - verbose: bool = False, - ) -> None: - self.address = address - self.message_handler = message_handler - self.service_name = service_name - self.queue_name = queue_name - self.context = zmq.Context() - self.poller = zmq.Poller() - self.socket = None - self.verbose = verbose - self.id = UID().short() - self._stop = threading.Event() - self.syft_worker_id = syft_worker_id - self.worker_stash = worker_stash - self.post_init() - - def reconnect_to_producer(self) -> None: - """Connect or reconnect to producer""" - if self.socket: - self.poller.unregister(self.socket) # type: ignore[unreachable] - self.socket.close() - self.socket = self.context.socket(zmq.DEALER) - self.socket.linger = 0 - self.socket.setsockopt_string(zmq.IDENTITY, self.id) - self.socket.connect(self.address) - self.poller.register(self.socket, zmq.POLLIN) - - logger.info("Connecting Worker id={} to broker addr={}", self.id, self.address) - - # Register queue with the producer - self.send_to_producer( - QueueMsgProtocol.W_READY, - self.service_name.encode(), - [str(self.syft_worker_id).encode()], - ) - - def post_init(self) -> None: - self.thread: threading.Thread | None = None - self.heartbeat_t = Timeout(HEARTBEAT_INTERVAL_SEC) - self.producer_ping_t = Timeout(PRODUCER_TIMEOUT_SEC) - self.reconnect_to_producer() - - def close(self) -> None: - self._stop.set() - try: - self.poller.unregister(self.socket) - except Exception as e: - logger.exception("Failed to unregister worker. 
{}", e) - finally: - if self.thread is not None: - self.thread.join(timeout=THREAD_TIMEOUT_SEC) - self.thread = None - self.socket.close() - self.context.destroy() - self._stop.clear() - - def send_to_producer( - self, - command: str, - option: bytes | None = None, - msg: bytes | list | None = None, - ) -> None: - """Send message to producer. - - If no msg is provided, creates one internally - """ - if self.socket.closed: - logger.warning("Socket is closed. Cannot send message.") - return - - if msg is None: - msg = [] - elif not isinstance(msg, list): - msg = [msg] - - if option: - msg = [option] + msg - - msg = [b"", QueueMsgProtocol.W_WORKER, command] + msg - logger.debug("Send: msg={}", msg) - - with ZMQ_SOCKET_LOCK: - try: - self.socket.send_multipart(msg) - except zmq.ZMQError as e: - logger.error("Failed to send message to producer. {}", e) - - def _run(self) -> None: - """Send reply, if any, to producer and wait for next request.""" - try: - while True: - if self._stop.is_set(): - return - - try: - items = self.poller.poll(ZMQ_POLLER_TIMEOUT_MSEC) - except ContextTerminated: - logger.info("Context terminated") - return - except Exception as e: - logger.error("Poll error={}", e) - continue - - if items: - # Message format: - # [b"", "
    ", "", "", ""] - msg = self.socket.recv_multipart() - - logger.debug("Recieve: {}", msg) - - # mark as alive - self.set_producer_alive() - - if len(msg) < 3: - logger.error("Invalid message: {}", msg) - continue - - empty = msg.pop(0) # noqa: F841 - header = msg.pop(0) # noqa: F841 - - command = msg.pop(0) - - if command == QueueMsgProtocol.W_REQUEST: - # Call Message Handler - try: - message = msg.pop() - self.associate_job(message) - self.message_handler.handle_message( - message=message, - syft_worker_id=self.syft_worker_id, - ) - except Exception as e: - logger.exception("Error while handling message. {}", e) - finally: - self.clear_job() - elif command == QueueMsgProtocol.W_HEARTBEAT: - self.set_producer_alive() - elif command == QueueMsgProtocol.W_DISCONNECT: - self.reconnect_to_producer() - else: - logger.error("Invalid command: {}", command) - else: - if not self.is_producer_alive(): - logger.info("Producer check-alive timed out. Reconnecting.") - self.reconnect_to_producer() - self.set_producer_alive() - - self.send_heartbeat() - - except zmq.ZMQError as e: - if e.errno == zmq.ETERM: - logger.info("Consumer connection terminated") - else: - logger.exception("Consumer error. {}", e) - raise e - - logger.info("Worker finished") - - def set_producer_alive(self) -> None: - self.producer_ping_t.reset() - - def is_producer_alive(self) -> bool: - # producer timer is within timeout - return not self.producer_ping_t.has_expired() - - def send_heartbeat(self) -> None: - if self.heartbeat_t.has_expired() and self.is_producer_alive(): - self.send_to_producer(QueueMsgProtocol.W_HEARTBEAT) - self.heartbeat_t.reset() - - def run(self) -> None: - self.thread = threading.Thread(target=self._run) - self.thread.start() - - def associate_job(self, message: Frame) -> None: - try: - queue_item = _deserialize(message, from_bytes=True) - self._set_worker_job(queue_item.job_id) - except Exception as e: - logger.exception("Could not associate job. 
{}", e) - - def clear_job(self) -> None: - self._set_worker_job(None) - - def _set_worker_job(self, job_id: UID | None) -> None: - if self.worker_stash is not None: - consumer_state = ( - ConsumerState.IDLE if job_id is None else ConsumerState.CONSUMING - ) - res = self.worker_stash.update_consumer_state( - credentials=self.worker_stash.partition.root_verify_key, - worker_uid=self.syft_worker_id, - consumer_state=consumer_state, - ) - if res.is_err(): - logger.error( - f"Failed to update consumer state for {self.service_name}-{self.id}, error={res.err()}" - ) - - @property - def alive(self) -> bool: - return not self.socket.closed and self.is_producer_alive() - - -@serializable() -class ZMQClientConfig(SyftObject, QueueClientConfig): - __canonical_name__ = "ZMQClientConfig" - __version__ = SYFT_OBJECT_VERSION_4 - - id: UID | None = None # type: ignore[assignment] - hostname: str = "127.0.0.1" - queue_port: int | None = None - # TODO: setting this to false until we can fix the ZMQ - # port issue causing tests to randomly fail - create_producer: bool = False - n_consumers: int = 0 - consumer_service: str | None = None - - -@serializable(attrs=["host"]) -class ZMQClient(QueueClient): - """ZMQ Client for creating producers and consumers.""" - - producers: dict[str, ZMQProducer] - consumers: defaultdict[str, list[ZMQConsumer]] - - def __init__(self, config: ZMQClientConfig) -> None: - self.host = config.hostname - self.producers = {} - self.consumers = defaultdict(list) - self.config = config - - @staticmethod - def _get_free_tcp_port(host: str) -> int: - with socketserver.TCPServer((host, 0), None) as s: - free_port = s.server_address[1] - - return free_port - - def add_producer( - self, - queue_name: str, - port: int | None = None, - queue_stash: QueueStash | None = None, - worker_stash: WorkerStash | None = None, - context: AuthedServiceContext | None = None, - ) -> ZMQProducer: - """Add a producer of a queue. - - A queue can have at most one producer attached to it. - """ - - if port is None: - if self.config.queue_port is None: - self.config.queue_port = self._get_free_tcp_port(self.host) - port = self.config.queue_port - else: - port = self.config.queue_port - - producer = ZMQProducer( - queue_name=queue_name, - queue_stash=queue_stash, - port=port, - context=context, - worker_stash=worker_stash, - ) - self.producers[queue_name] = producer - return producer - - def add_consumer( - self, - queue_name: str, - message_handler: AbstractMessageHandler, - service_name: str, - address: str | None = None, - worker_stash: WorkerStash | None = None, - syft_worker_id: UID | None = None, - ) -> ZMQConsumer: - """Add a consumer to a queue - - A queue should have at least one producer attached to the group. - - """ - - if address is None: - address = get_queue_address(self.config.queue_port) - - consumer = ZMQConsumer( - queue_name=queue_name, - message_handler=message_handler, - address=address, - service_name=service_name, - syft_worker_id=syft_worker_id, - worker_stash=worker_stash, - ) - self.consumers[queue_name].append(consumer) - - return consumer - - def send_message( - self, - message: bytes, - queue_name: str, - worker: bytes | None = None, - ) -> SyftSuccess | SyftError: - producer = self.producers.get(queue_name) - if producer is None: - return SyftError( - message=f"No producer attached for queue: {queue_name}. Please add a producer for it." 
- ) - try: - producer.send(message=message, worker=worker) - except Exception as e: - # stdlib - return SyftError( - message=f"Failed to send message to: {queue_name} with error: {e}" - ) - return SyftSuccess( - message=f"Successfully queued message to : {queue_name}", - ) - - def close(self) -> SyftError | SyftSuccess: - try: - for _, consumers in self.consumers.items(): - for consumer in consumers: - # make sure look is stopped - consumer.close() - - for _, producer in self.producers.items(): - # make sure loop is stopped - producer.close() - # close existing connection. - except Exception as e: - return SyftError(message=f"Failed to close connection: {e}") - - return SyftSuccess(message="All connections closed.") - - def purge_queue(self, queue_name: str) -> SyftError | SyftSuccess: - if queue_name not in self.producers: - return SyftError(message=f"No producer running for : {queue_name}") - - producer = self.producers[queue_name] - - # close existing connection. - producer.close() - - # add a new connection - self.add_producer(queue_name=queue_name, address=producer.address) # type: ignore - - return SyftSuccess(message=f"Queue: {queue_name} successfully purged") - - def purge_all(self) -> SyftError | SyftSuccess: - for queue_name in self.producers: - self.purge_queue(queue_name=queue_name) - - return SyftSuccess(message="Successfully purged all queues.") - - -@serializable() -class ZMQQueueConfig(QueueConfig): - def __init__( - self, - client_type: type[ZMQClient] | None = None, - client_config: ZMQClientConfig | None = None, - thread_workers: bool = False, - ): - self.client_type = client_type or ZMQClient - self.client_config: ZMQClientConfig = client_config or ZMQClientConfig() - self.thread_workers = thread_workers diff --git a/packages/syft/src/syft/service/request/request.py b/packages/syft/src/syft/service/request/request.py index fea214c4904..ab07c7380f1 100644 --- a/packages/syft/src/syft/service/request/request.py +++ b/packages/syft/src/syft/service/request/request.py @@ -3,48 +3,46 @@ from enum import Enum import hashlib import inspect +import logging from typing import Any -from typing import cast # third party -from result import Err -from result import Ok -from result import Result +from pydantic import model_validator from typing_extensions import Self # relative -from ...abstract_node import AbstractNode -from ...abstract_node import NodeSideType +from ...abstract_server import ServerSideType from ...client.api import APIRegistry from ...client.client import SyftClient +from ...custom_worker.config import DockerWorkerConfig from ...custom_worker.config import WorkerConfig from ...custom_worker.k8s import IN_KUBERNETES -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable from ...serde.serialize import _serialize +from ...server.credentials import SyftVerifyKey from ...store.linked_obj import LinkedObject from ...types.datetime import DateTime -from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SYFT_OBJECT_VERSION_3 +from ...types.errors import SyftException +from ...types.result import Err +from ...types.result import as_result +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject from ...types.syncable_object import SyncableSyftObject from ...types.transforms import TransformContext -from ...types.transforms import add_node_uid_for_key +from ...types.transforms import add_server_uid_for_key from ...types.transforms import generate_id from 
...types.transforms import transform from ...types.twin_object import TwinObject from ...types.uid import LineageID from ...types.uid import UID -from ...util import options -from ...util.colors import SURFACE +from ...util.decorators import deprecated from ...util.markdown import markdown_as_class_with_fields -from ...util.notebook_ui.notebook_addons import REQUEST_ICON +from ...util.notebook_ui.icons import Icon from ...util.util import prompt_warning_message from ..action.action_object import ActionObject -from ..action.action_service import ActionService -from ..action.action_store import ActionObjectPermission -from ..action.action_store import ActionPermission -from ..blob_storage.service import BlobStorageService +from ..action.action_permissions import ActionObjectPermission +from ..action.action_permissions import ActionPermission +from ..code.user_code import ApprovalDecision from ..code.user_code import UserCode from ..code.user_code import UserCodeStatus from ..code.user_code import UserCodeStatusCollection @@ -54,22 +52,36 @@ from ..job.job_stash import JobInfo from ..job.job_stash import JobStatus from ..notification.notifications import Notification +from ..policy.policy import UserPolicy from ..response import SyftError from ..response import SyftSuccess from ..user.user import UserView +logger = logging.getLogger(__name__) -@serializable() + +@serializable(canonical_name="RequestStatus", version=1) class RequestStatus(Enum): PENDING = 0 REJECTED = 1 APPROVED = 2 + @classmethod + def from_usercode_status( + cls, status: UserCodeStatusCollection, context: AuthedServiceContext + ) -> "RequestStatus": + if status.get_is_approved(context): + return RequestStatus.APPROVED + elif status.denied: + return RequestStatus.REJECTED + else: + return RequestStatus.PENDING + @serializable() class Change(SyftObject): __canonical_name__ = "Change" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 linked_obj: LinkedObject | None = None @@ -80,7 +92,7 @@ def change_object_is_type(self, type_: type) -> bool: @serializable() class ChangeStatus(SyftObject): __canonical_name__ = "ChangeStatus" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID | None = None # type: ignore[assignment] change_id: UID @@ -94,177 +106,170 @@ def from_change(cls, change: Change, applied: bool) -> Self: @serializable() class ActionStoreChange(Change): __canonical_name__ = "ActionStoreChange" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 linked_obj: LinkedObject apply_permission_type: ActionPermission __repr_attrs__ = ["linked_obj", "apply_permission_type"] - def _run( - self, context: ChangeContext, apply: bool - ) -> Result[SyftSuccess, SyftError]: - try: - if context.node is None: - return Err(SyftError(message=f"context {context}'s node is None")) - action_service: ActionService = context.node.get_service(ActionService) # type: ignore[assignment] - blob_storage_service = context.node.get_service(BlobStorageService) - action_store = action_service.store - - # can we ever have a lineage ID in the store? 
- obj_uid = self.linked_obj.object_uid - obj_uid = obj_uid.id if isinstance(obj_uid, LineageID) else obj_uid - - action_obj = action_store.get( - uid=obj_uid, - credentials=context.approving_user_credentials, - ) + @as_result(SyftException) + def _run(self, context: ChangeContext, apply: bool) -> SyftSuccess: + action_store = context.server.services.action.stash - if action_obj.is_err(): - return Err(SyftError(message=f"{action_obj.err()}")) + # can we ever have a lineage ID in the store? + obj_uid = self.linked_obj.object_uid + obj_uid = obj_uid.id if isinstance(obj_uid, LineageID) else obj_uid - action_obj = action_obj.ok() + action_obj = action_store.get( + uid=obj_uid, + credentials=context.approving_user_credentials, + ).unwrap() - owner_permission = ActionObjectPermission( - uid=obj_uid, - credentials=context.approving_user_credentials, + owner_permission = ActionObjectPermission( + uid=obj_uid, + credentials=context.approving_user_credentials, + permission=self.apply_permission_type, + ) + + if action_store.has_permission(permission=owner_permission): + id_action = ( + action_obj.id + if not isinstance(action_obj.id, LineageID) + else action_obj.id.id + ) + + requesting_permission_action_obj = ActionObjectPermission( + uid=id_action, + credentials=context.requesting_user_credentials, permission=self.apply_permission_type, ) - if action_store.has_permission(permission=owner_permission): - id_action = ( - action_obj.id - if not isinstance(action_obj.id, LineageID) - else action_obj.id.id - ) - requesting_permission_action_obj = ActionObjectPermission( - uid=id_action, - credentials=context.requesting_user_credentials, - permission=self.apply_permission_type, - ) - if isinstance(action_obj, TwinObject): - uid_blob = action_obj.private.syft_blob_storage_entry_id - else: - uid_blob = action_obj.syft_blob_storage_entry_id - requesting_permission_blob_obj = ActionObjectPermission( + + if isinstance(action_obj, TwinObject): + uid_blob = action_obj.private.syft_blob_storage_entry_id + else: + uid_blob = action_obj.syft_blob_storage_entry_id # type: ignore[unreachable] + + requesting_permission_blob_obj = ( + ActionObjectPermission( uid=uid_blob, credentials=context.requesting_user_credentials, permission=self.apply_permission_type, ) - if apply: - print( - "ADDING PERMISSION", requesting_permission_action_obj, id_action + if uid_blob + else None + ) + + if apply: + logger.debug( + "ADDING PERMISSION", requesting_permission_action_obj, id_action + ) + action_store.add_permission(requesting_permission_action_obj) + ( + context.server.services.blob_storage.stash.add_permission( + requesting_permission_blob_obj ) - action_store.add_permission(requesting_permission_action_obj) - blob_storage_service.stash.add_permission( + if requesting_permission_blob_obj + else None + ) + else: + if action_store.has_permission(requesting_permission_action_obj): + action_store.remove_permission(requesting_permission_action_obj) + if ( + requesting_permission_blob_obj + and context.server.services.blob_storage.stash.has_permission( requesting_permission_blob_obj ) - else: - if action_store.has_permission(requesting_permission_action_obj): - action_store.remove_permission(requesting_permission_action_obj) - if blob_storage_service.stash.has_permission( + ): + context.server.services.blob_storage.stash.remove_permission( requesting_permission_blob_obj - ): - blob_storage_service.stash.remove_permission( - requesting_permission_blob_obj - ) - else: - return Err( - SyftError( - message=f"No permission for 
approving_user_credentials {context.approving_user_credentials}" ) - ) - return Ok(SyftSuccess(message=f"{type(self)} Success")) - except Exception as e: - print(f"failed to apply {type(self)}", e) - return Err(SyftError(message=str(e))) + else: + raise SyftException( + public_message=f"No permission for approving_user_credentials {context.approving_user_credentials}" + ) + + return SyftSuccess(message=f"{type(self)} Success") - def apply(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=True) + @as_result(SyftException) + def apply(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=True).unwrap() - def undo(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=False) + @as_result(SyftException) + def undo(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=False).unwrap() def __repr_syft_nested__(self) -> str: return f"Apply {self.apply_permission_type} to \ - {self.linked_obj.object_type.__canonical_name__}:{self.linked_obj.object_uid.short()}" +{self.linked_obj.object_type.__canonical_name__}:{self.linked_obj.object_uid.short()}." @serializable() class CreateCustomImageChange(Change): __canonical_name__ = "CreateCustomImageChange" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 config: WorkerConfig - tag: str + tag: str | None = None registry_uid: UID | None = None pull_image: bool = True __repr_attrs__ = ["config", "tag"] - def _run( - self, context: ChangeContext, apply: bool - ) -> Result[SyftSuccess, SyftError]: - try: - if context.node is None: - return Err(SyftError(message=f"context {context}'s node is None")) - - worker_image_service = context.node.get_service("SyftWorkerImageService") - - service_context = context.to_service_ctx() - result = worker_image_service.submit_dockerfile( - service_context, docker_config=self.config - ) - - if isinstance(result, SyftError): - return Err(result) + @model_validator(mode="after") + def _tag_required_for_dockerworkerconfig(self) -> Self: + if isinstance(self.config, DockerWorkerConfig) and self.tag is None: + raise ValueError("`tag` is required for `DockerWorkerConfig`.") + return self + + @as_result(SyftException) + def _run(self, context: ChangeContext, apply: bool) -> SyftSuccess: + service_context = context.to_service_ctx() + context.server.services.syft_worker_image.submit( + service_context, worker_config=self.config + ) - result = worker_image_service.stash.get_by_docker_config( + worker_image = ( + context.server.services.syft_worker_image.stash.get_by_worker_config( service_context.credentials, config=self.config - ) - - if result.is_err(): - return Err(SyftError(message=f"{result.err()}")) + ).unwrap() + ) + if worker_image is None: + raise SyftException(public_message="The worker image does not exist.") - worker_image = result.ok() + build_success_message = "Image was pre-built." 
- build_result = worker_image_service.build( + if not worker_image.is_prebuilt: + build_result = context.server.services.syft_worker_image.build( service_context, image_uid=worker_image.id, tag=self.tag, registry_uid=self.registry_uid, - pull=self.pull_image, + pull_image=self.pull_image, ) + build_success_message = build_result.message - if isinstance(build_result, SyftError): - return Err(build_result) - - if IN_KUBERNETES: - push_result = worker_image_service.push( - service_context, - image=worker_image.id, - username=context.extra_kwargs.get("reg_username", None), - password=context.extra_kwargs.get("reg_password", None), - ) - - if isinstance(push_result, SyftError): - return Err(push_result) - - return Ok( - SyftSuccess( - message=f"Build Result: {build_result.message} \n Push Result: {push_result.message}" - ) - ) - - return Ok(build_result) + build_success = f"Build result: {build_success_message}" + if IN_KUBERNETES and not worker_image.is_prebuilt: + push_result = context.server.services.syft_worker_image.push( + service_context, + image_uid=worker_image.id, + username=context.extra_kwargs.get("registry_username", None), + password=context.extra_kwargs.get("registry_password", None), + ) + return SyftSuccess( + message=f"{build_success}\nPush result: {push_result.message}" + ) - except Exception as e: - return Err(SyftError(message=f"Failed to create/build image: {e}")) + return SyftSuccess(message=build_success) - def apply(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=True) + @as_result(SyftException) + def apply(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=True).unwrap() - def undo(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=False) + @as_result(SyftException) + def undo(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=False).unwrap() def __repr_syft_nested__(self) -> str: return f"Create Image for Config: {self.config} with tag: {self.tag}" @@ -273,64 +278,56 @@ def __repr_syft_nested__(self) -> str: @serializable() class CreateCustomWorkerPoolChange(Change): __canonical_name__ = "CreateCustomWorkerPoolChange" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 pool_name: str num_workers: int image_uid: UID | None = None config: WorkerConfig | None = None + pod_annotations: dict[str, str] | None = None + pod_labels: dict[str, str] | None = None __repr_attrs__ = ["pool_name", "num_workers", "image_uid"] - def _run( - self, context: ChangeContext, apply: bool - ) -> Result[SyftSuccess, SyftError]: + @as_result(SyftException) + def _run(self, context: ChangeContext, apply: bool) -> SyftSuccess: """ This function is run when the DO approves (apply=True) or deny (apply=False) the request. 
""" - # TODO: refactor the returned Err(SyftError) or Ok(SyftSuccess) to just - # SyftError or SyftSuccess if apply: # get the worker pool service and try to launch a pool - if context.node is None: - return Err(SyftError(message=f"context {context}'s node is None")) - worker_pool_service = context.node.get_service("SyftWorkerPoolService") service_context: AuthedServiceContext = context.to_service_ctx() if self.config is not None: - result = worker_pool_service.image_stash.get_by_docker_config( + worker_image = context.server.services.syft_worker_pool.image_stash.get_by_worker_config( service_context.credentials, self.config - ) - if result.is_err(): - return Err(SyftError(message=f"{result.err()}")) - worker_image = result.ok() + ).unwrap() self.image_uid = worker_image.id - result = worker_pool_service.launch( + result = context.server.services.syft_worker_pool.launch( context=service_context, - name=self.pool_name, + pool_name=self.pool_name, image_uid=self.image_uid, num_workers=self.num_workers, - reg_username=context.extra_kwargs.get("reg_username", None), - reg_password=context.extra_kwargs.get("reg_password", None), + registry_username=context.extra_kwargs.get("registry_username", None), + registry_password=context.extra_kwargs.get("registry_password", None), + pod_annotations=self.pod_annotations, + pod_labels=self.pod_labels, ) - if isinstance(result, SyftError): - return Err(result) - else: - return Ok(result) + return SyftSuccess(message="Worker successfully launched", value=result) else: - return Err( - SyftError( - message=f"Request to create a worker pool with name {self.name} denied" - ) + raise SyftException( + public_message=f"Request to create a worker pool with name {self.name} denied" ) - def apply(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=True) + @as_result(SyftException) + def apply(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=True).unwrap() - def undo(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=False) + @as_result(SyftException) + def undo(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=False).unwrap() def __repr_syft_nested__(self) -> str: return ( @@ -341,7 +338,7 @@ def __repr_syft_nested__(self) -> str: @serializable() class Request(SyncableSyftObject): __canonical_name__ = "Request" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 requesting_user_verify_key: SyftVerifyKey requesting_user_name: str = "" @@ -350,14 +347,25 @@ class Request(SyncableSyftObject): approving_user_verify_key: SyftVerifyKey | None = None request_time: DateTime updated_at: DateTime | None = None - node_uid: UID + server_uid: UID request_hash: str changes: list[Change] history: list[ChangeStatus] = [] + tags: list[str] = [] + + __table_coll_widths__ = [ + "min-content", + "auto", + "auto", + "auto", + "auto", + "auto", + ] __attr_searchable__ = [ "requesting_user_verify_key", "approving_user_verify_key", + "code_id", ] __attr_unique__ = ["request_hash"] __repr_attrs__ = [ @@ -367,6 +375,8 @@ class Request(SyncableSyftObject): "changes", "requesting_user_verify_key", ] + __exclude_sync_diff_attrs__ = ["server_uid", "changes", "history"] + __table_sort_attr__ = "Request time" def _repr_html_(self) -> Any: # add changes @@ -385,25 +395,28 @@ def _repr_html_(self) -> Any: str_change = f"{str_change}. 
" str_changes_.append(str_change) str_changes = "\n".join(str_changes_) - api = APIRegistry.api_for( - self.node_uid, - self.syft_client_verify_key, - ) + + api = self.get_api_wrapped() shared_with_line = "" if self.code and len(self.code.output_readers) > 0: # owner_names = ["canada", "US"] owners_string = " and ".join( - [f"{x}" for x in self.code.output_reader_names] + [f"{x}" for x in self.code.output_reader_names] # type: ignore ) shared_with_line += ( f"

    Custom Policy: " f"outputs are shared with the owners of {owners_string} once computed" ) - if api is not None: + server_info = "" + if api.is_ok(): + api = api.unwrap() metadata = api.services.metadata.get_metadata() - node_name = api.node_name.capitalize() if api.node_name is not None else "" - node_type = metadata.node_type.value.capitalize() + server_name = ( + api.server_name.capitalize() if api.server_name is not None else "" + ) + server_type = metadata.server_type.value.capitalize() + server_info = f"

    Requested on: {server_name} of type {server_type}

    " email_str = ( f"({self.requesting_user_email})" if self.requesting_user_email else "" @@ -415,9 +428,6 @@ def _repr_html_(self) -> Any: ) return f""" -

    Request

    Id: {self.id}

    @@ -425,15 +435,48 @@ def _repr_html_(self) -> Any: {updated_at_line} {shared_with_line}

    Status: {self.status}

    -

    Requested on: {node_name} of type \ - {node_type}

    + {server_info}

    Requested by: {self.requesting_user_name} {email_str} {institution_str}

    Changes: {str_changes}

    """ + @property + def html_description(self) -> str: + desc = " ".join([x.__repr_syft_nested__() for x in self.changes]) + # desc = desc.replace('\n', '') + # desc = desc.replace('
    ', '\n') + desc = desc.replace(". ", ".\n\n") + desc = desc.replace("", "") + desc = desc.replace("", "") + desc = desc.replace("", "") + desc = desc.replace("", "") + + return desc + + @property + def deny_reason(self) -> str | SyftError: + code = self.code + if isinstance(code, SyftError): + return code + + code_status: UserCodeStatusCollection = code.status_link.resolve + return code_status.first_denial_reason + + @as_result(SyftException) + def get_deny_reason(self, context: AuthedServiceContext) -> str | None: + code = self.get_user_code(context).unwrap() + if code is None: + return None + + code_status = code.get_status(context).unwrap() + return code_status.first_denial_reason + def _coll_repr_(self) -> dict[str, str | dict[str, str]]: + # relative + from ...util.notebook_ui.components.sync import Badge + if self.status == RequestStatus.APPROVED: badge_color = "badge-green" elif self.status == RequestStatus.PENDING: @@ -441,7 +484,18 @@ def _coll_repr_(self) -> dict[str, str | dict[str, str]]: else: badge_color = "badge-red" - status_badge = {"value": self.status.name.capitalize(), "type": badge_color} + status_badge = Badge( + value=self.status.name.capitalize(), + badge_class=badge_color, + ).to_html() + + if self.status == RequestStatus.REJECTED: + deny_reason = self.deny_reason + if isinstance(deny_reason, str) and len(deny_reason) > 0: + status_badge += ( + "
    " + f"Deny Reason: {deny_reason}" + ) user_data = [ self.requesting_user_name, @@ -450,8 +504,9 @@ def _coll_repr_(self) -> dict[str, str | dict[str, str]]: ] return { - "Description": " ".join([x.__repr_syft_nested__() for x in self.changes]), + "Description": self.html_description, "Requested By": "\n".join(user_data), + "Creation Time": str(self.request_time), "Status": status_badge, } @@ -460,8 +515,17 @@ def code_id(self) -> UID: for change in self.changes: if isinstance(change, UserCodeStatusChange): return change.linked_user_code.object_uid - return SyftError( - message="This type of request does not have code associated with it." + raise SyftException( + public_message="This type of request does not have code associated with it." + ) + + @property + def status_id(self) -> UID: + for change in self.changes: + if isinstance(change, UserCodeStatusChange): + return change.linked_obj.object_uid # type: ignore + raise SyftException( + public_message="This type of request does not have code associated with it." ) @property @@ -473,8 +537,15 @@ def codes(self) -> Any: message="This type of request does not have code associated with it." ) + @as_result(SyftException) + def get_user_code(self, context: AuthedServiceContext) -> UserCode | None: + for change in self.changes: + if isinstance(change, UserCodeStatusChange): + return change.get_user_code(context).unwrap() + return None + @property - def code(self) -> Any: + def code(self) -> UserCode | SyftError: for change in self.changes: if isinstance(change, UserCodeStatusChange): return change.code @@ -482,9 +553,6 @@ def code(self) -> Any: message="This type of request does not have code associated with it." ) - def get_results(self) -> Any: - return self.code.get_results() - @property def current_change_state(self) -> dict[UID, bool]: change_applied_map = {} @@ -496,10 +564,28 @@ def current_change_state(self) -> dict[UID, bool]: @property def icon(self) -> str: - return REQUEST_ICON + return Icon.REQUEST.svg - @property - def status(self) -> RequestStatus: + def get_status(self, context: AuthedServiceContext | None = None) -> RequestStatus: + # TODO fix + try: + # this is breaking in l2 coming from sending a request email to admin + is_l0_deployment = ( + self.get_is_l0_deployment(context) if context else self.is_l0_deployment + ) + if is_l0_deployment: + code_status = ( + self.code.get_status(context).unwrap() + if context + else self.code.status + ) + return RequestStatus.from_usercode_status(code_status, context) + except Exception: # nosec + # this breaks when coming from a user submitting a request + # which tries to send an email to the admin and ends up here + pass # lets keep going + + self.refresh() if len(self.history) == 0: return RequestStatus.PENDING @@ -513,95 +599,123 @@ def status(self) -> RequestStatus: return request_status + @property + def status(self) -> RequestStatus: + return self.get_status() + def approve( self, disable_warnings: bool = False, approve_nested: bool = False, **kwargs: dict, - ) -> Result[SyftSuccess, SyftError]: - api = APIRegistry.api_for( - self.node_uid, - self.syft_client_verify_key, - ) - if api is None: - return SyftError(message=f"api is None. You must login to {self.node_uid}") + ) -> SyftSuccess: + api = self._get_api() + + if self.is_l0_deployment: + raise SyftException( + public_message="This request is a low-side request. Please sync your results to approve." 
+ ) # TODO: Refactor so that object can also be passed to generate warnings if api.connection: - metadata = api.connection.get_node_metadata(api.signing_key) + metadata = api.connection.get_server_metadata(api.signing_key) else: metadata = None - message, is_enclave = None, False + message = None is_code_request = not isinstance(self.codes, SyftError) if is_code_request and len(self.codes) > 1 and not approve_nested: - return SyftError( - message="Multiple codes detected, please use approve_nested=True" + raise SyftException( + public_message="Multiple codes detected, please use approve_nested=True" ) - if self.code and not isinstance(self.code, SyftError): - is_enclave = getattr(self.code, "enclave_metadata", None) is not None - - if is_enclave: - message = "On approval, the result will be released to the enclave." - elif metadata and metadata.node_side_type == NodeSideType.HIGH_SIDE.value: + if metadata and metadata.server_side_type == ServerSideType.HIGH_SIDE.value: message = ( "You're approving a request on " - f"{metadata.node_side_type} side {metadata.node_type} " + f"{metadata.server_side_type} side {metadata.server_type} " "which may host datasets with private information." ) if message and metadata and metadata.show_warnings and not disable_warnings: prompt_warning_message(message=message, confirm=True) + msg = ( + "Approving request ", + f"on change {self.code.service_func_name} " if is_code_request else "", + f"for datasite {api.server_name}", + ) - print(f"Approving request for domain {api.node_name}") + print("".join(msg)) res = api.services.request.apply(self.id, **kwargs) - # if isinstance(res, SyftSuccess): - return res - def deny(self, reason: str) -> SyftSuccess | SyftError: + def deny(self, reason: str) -> SyftSuccess: """Denies the particular request. Args: reason (str): Reason for which the request has been denied. """ - api = APIRegistry.api_for( - self.node_uid, - self.syft_client_verify_key, - ) - if api is None: - return SyftError(message=f"api is None. You must login to {self.node_uid}") + api = self._get_api() + + if self.is_l0_deployment: + if self.status == RequestStatus.APPROVED: + prompt_warning_message( + "This request already has results published to the data scientist. " + "They will still be able to access those results." + ) + api.code_status.update( + id=self.code.status_link.object_uid, + decision=ApprovalDecision(status=UserCodeStatus.DENIED, reason=reason), + ) + + return SyftSuccess(message=f"Request denied with reason: {reason}") + return api.services.request.undo(uid=self.id, reason=reason) - def approve_with_client(self, client: SyftClient) -> Result[SyftSuccess, SyftError]: - print(f"Approving request for domain {client.name}") + @property + def is_l0_deployment(self) -> bool: + return bool(self.code) and self.code.is_l0_deployment + + def get_is_l0_deployment(self, context: AuthedServiceContext) -> bool: + code = self.get_user_code(context).unwrap() + if code: + return code.is_l0_deployment + else: + return False + + def approve_with_client(self, client: SyftClient) -> SyftSuccess: + if self.is_l0_deployment: + raise SyftException( + public_message="This request is a low-side request. Please sync your results to approve." 
+ ) + + print(f"Approving request for datasite {client.name}") return client.api.services.request.apply(self.id) - def apply(self, context: AuthedServiceContext) -> Result[SyftSuccess, SyftError]: + @as_result(SyftException) + def apply(self, context: AuthedServiceContext) -> SyftSuccess: change_context: ChangeContext = ChangeContext.from_service(context) change_context.requesting_user_credentials = self.requesting_user_verify_key + for change in self.changes: # by default change status is not applied change_status = ChangeStatus(change_id=change.id, applied=False) - result = change.apply(context=change_context) - if isinstance(result, SyftError): - return result - if result.is_err(): + + # FIX: Change change.apply + try: + change.apply(context=change_context).unwrap() + change_status.applied = True + self.history.append(change_status) + except: # add to history and save history to request self.history.append(change_status) self.save(context=context) - return result - - # If no error, then change successfully applied. - change_status.applied = True - self.history.append(change_status) + raise self.updated_at = DateTime.now() self.save(context=context) - return Ok(SyftSuccess(message=f"Request {self.id} changes applied")) + return SyftSuccess(message=f"Request {self.id} changes applied") - def undo(self, context: AuthedServiceContext) -> Result[SyftSuccess, SyftError]: + def undo(self, context: AuthedServiceContext) -> SyftSuccess: change_context: ChangeContext = ChangeContext.from_service(context) change_context.requesting_user_credentials = self.requesting_user_verify_key @@ -613,6 +727,7 @@ def undo(self, context: AuthedServiceContext) -> Result[SyftSuccess, SyftError]: change_id=change.id, applied=is_change_applied, ) + # undo here may be deny for certain Changes (UserCodeChange) result = change.undo(context=change_context) if result.is_err(): @@ -627,25 +742,158 @@ def undo(self, context: AuthedServiceContext) -> Result[SyftSuccess, SyftError]: self.updated_at = DateTime.now() result = self.save(context=context) - if isinstance(result, SyftError): - return Err(result) + # override object with latest changes. 
self = result - return Ok(SyftSuccess(message=f"Request {self.id} changes undone.")) + return SyftSuccess(message=f"Request {self.id} changes undone.", value=self.id) - def save(self, context: AuthedServiceContext) -> Result[SyftSuccess, SyftError]: + def save(self, context: AuthedServiceContext) -> SyftSuccess: # relative - from .request_service import RequestService - context.node = cast(AbstractNode, context.node) - save_method = context.node.get_service_method(RequestService.save) - return save_method(context=context, request=self) + return context.server.services.request.save(context=context, request=self) + + def _create_action_object_for_deposited_result( + self, + result: Any, + ) -> ActionObject: + api = self._get_api() + + # Ensure result is an ActionObject + if isinstance(result, ActionObject): + try: + existing_job = api.services.job.get_by_result_id(result.id.id) + except SyftException: + existing_job = None + if existing_job is not None: + raise SyftException( + public_message=f"This ActionObject is already the result of Job {existing_job.id}" + ) + action_object = result + else: + action_object = ActionObject.from_obj( + result, + syft_client_verify_key=self.syft_client_verify_key, + syft_server_location=self.syft_server_location, + ) + + # Ensure ActionObject exists on this server + action_object_is_from_this_server = isinstance( + api.services.action.exists(action_object.id.id), SyftSuccess + ) + if ( + action_object.syft_blob_storage_entry_id is None + or not action_object_is_from_this_server + ): + action_object.reload_cache() + action_object._send(self.server_uid, self.syft_client_verify_key) + return action_object + + def _create_output_history_for_deposited_result( + self, job: Job, result: Any + ) -> SyftSuccess: + code = self.code + api = self._get_api() + input_ids = {} + input_policy = code.input_policy + if input_policy is not None: + for input_ in input_policy.inputs.values(): + input_ids.update(input_) + + input_ids = {k: v for k, v in input_ids.items() if isinstance(v, UID)} + + return api.services.code.store_execution_output( + user_code_id=code.id, + outputs=result, + job_id=job.id, + input_ids=input_ids, + ) + + def deposit_result( + self, + result: Any, + log_stdout: str = "", + log_stderr: str = "", + approve: bool | None = None, + **kwargs: dict[str, Any], + ) -> Job: + """ + Adds a result to this Request: + - Create an ActionObject from the result (if not already an ActionObject) + - Ensure ActionObject exists on this server + - Create Job with new result and logs + - Update the output history + + If this is a L2 request, the old accept_by_deposit_result will be used. + + Args: + result (Any): ActionObject or any object to be saved as an ActionObject. + log_stdout (str): stdout logs. + log_stderr (str): stderr logs. + approve (bool, optional): Only supported for L2 requests. If True, the request will be approved. + Defaults to None. + + + Returns: + Job: Job object if successful, else raise SyftException. + """ + + # L2 request + # TODO specify behavior and rewrite old flow + if not self.is_l0_deployment: + if approve is None: + approve = prompt_warning_message( + "Depositing a result on this request will approve it.", + confirm=True, + ) + if approve is False: + raise SyftException( + public_message="Cannot deposit result without approving the request." 
+ ) + else: + return self._deposit_result_l2(result, **kwargs) + + # L0 request + if approve: + return SyftError( + message="This is a request from the low side, it can only be approved by syncing the results." + ) + + api = self._get_api() + if isinstance(api, SyftError): + return api + code = self.code + if isinstance(code, SyftError): + return code + + # Create ActionObject + action_object = self._create_action_object_for_deposited_result(result) + # Create Job + # NOTE code owner read permissions are added when syncing this Job + job = api.services.job.create_job_for_user_code_id( + code.id, + result=action_object, + log_stdout=log_stdout, + log_stderr=log_stderr, + status=JobStatus.COMPLETED, + add_code_owner_read_permissions=False, + ) + # Add to output history + self._create_output_history_for_deposited_result(job, action_object) + return job + + def _get_job_from_action_object(self, action_object: ActionObject) -> Job | None: + api = self._get_api() + if isinstance(api, SyftError): + return None + + job = api.services.job.get_by_result_id(action_object.id.id) + return job def _get_latest_or_create_job(self) -> Job | SyftError: """Get the latest job for this requests user_code, or creates one if no jobs exist""" - api = APIRegistry.api_for(self.node_uid, self.syft_client_verify_key) - if api is None: - return SyftError(message=f"api is None. You must login to {self.node_uid}") + api = self._get_api() + if isinstance(api, SyftError): + return api job_service = api.services.job existing_jobs = job_service.get_by_user_code_id(self.code.id) @@ -653,14 +901,13 @@ def _get_latest_or_create_job(self) -> Job | SyftError: return existing_jobs if len(existing_jobs) == 0: - print("Creating job for existing user code") - job = job_service.create_job_for_user_code_id(self.code.id) + job = job_service.create_job_for_user_code_id( + user_code_id=self.code.id, + add_code_owner_read_permissions=True, + ) else: - print("returning existing job") - print("setting permission") job = existing_jobs[-1] res = job_service.add_read_permission_job_for_code_owner(job, self.code) - print(res) res = job_service.add_read_permission_log_for_code_owner( job.log_id, self.code ) @@ -668,19 +915,11 @@ def _get_latest_or_create_job(self) -> Job | SyftError: return job - def _is_action_object_from_job(self, action_object: ActionObject) -> Job | None: # type: ignore - api = APIRegistry.api_for(self.node_uid, self.syft_client_verify_key) - if api is None: - raise ValueError(f"Can't access the api. 
You must login to {self.node_uid}") - job_service = api.services.job - existing_jobs = job_service.get_by_user_code_id(self.code.id) - for job in existing_jobs: - if job.result and job.result.id == action_object.id: - return job - - def accept_by_depositing_result( - self, result: Any, force: bool = False - ) -> SyftError | SyftSuccess: + def _deposit_result_l2( + self, + result: Any, + force: bool = False, + ) -> Job | SyftError: # this code is extremely brittle because its a work around that relies on # the type of request being very specifically tied to code which needs approving @@ -695,13 +934,19 @@ def accept_by_depositing_result( elif isinstance(result, ActionObject): # Do not allow accepting a result produced by a Job, # This can cause an inconsistent Job state - if self._is_action_object_from_job(result): - action_object_job = self._is_action_object_from_job(result) - if action_object_job is not None: - return SyftError( - message=f"This ActionObject is the result of Job {action_object_job.id}, " - f"please use the `Job.info` instead." - ) + action_object_job = self._get_job_from_action_object(result) + if action_object_job is not None: + return SyftError( + message=f"This ActionObject is the result of Job {action_object_job.id}, " + f"please use the `Job.info` instead." + ) + else: + job_info = JobInfo( + includes_metadata=True, + includes_result=True, + status=JobStatus.COMPLETED, + resolved=True, + ) else: # NOTE result is added at the end of function (once ActionObject is created) job_info = JobInfo( @@ -712,6 +957,18 @@ def accept_by_depositing_result( ) user_code_status_change: UserCodeStatusChange = self.changes[0] + code = user_code_status_change.code + output_history = code.output_history + if isinstance(output_history, SyftError): + return output_history + output_policy = code.output_policy + if isinstance(output_policy, SyftError): + return output_policy + if isinstance(user_code_status_change.code.output_policy_type, UserPolicy): + return SyftError( + message="UserCode uses an user-submitted custom policy. Please use .approve()" + ) + if not user_code_status_change.change_object_is_type(UserCodeStatusCollection): raise TypeError( f"accept_by_depositing_result can only be run on {UserCodeStatusCollection} not " @@ -723,10 +980,10 @@ def accept_by_depositing_result( f"{type(user_code_status_change)}" ) - api = APIRegistry.api_for(self.node_uid, self.syft_client_verify_key) + api = APIRegistry.api_for(self.server_uid, self.syft_client_verify_key).unwrap() if not api: raise Exception( - f"No access to Syft API. Please login to {self.node_uid} first." + f"No access to Syft API. Please login to {self.server_uid} first." 
) if api.signing_key is None: raise ValueError(f"{api}'s signing key is None") @@ -736,13 +993,6 @@ def accept_by_depositing_result( if isinstance(permission_request, SyftError): return permission_request - code = user_code_status_change.code - output_history = code.output_history - if isinstance(output_history, SyftError): - return output_history - output_policy = code.output_policy - if isinstance(output_policy, SyftError): - return output_policy job = self._get_latest_or_create_job() if isinstance(job, SyftError): return job @@ -762,19 +1012,19 @@ def accept_by_depositing_result( result, id=action_obj_id, syft_client_verify_key=api.signing_key.verify_key, - syft_node_location=api.node_uid, + syft_server_location=api.server_uid, ) else: action_object = result action_object_is_from_this_node = ( - self.syft_node_location == action_object.syft_node_location + self.syft_server_location == action_object.syft_server_location ) if ( action_object.syft_blob_storage_entry_id is None or not action_object_is_from_this_node ): action_object.reload_cache() - action_object.syft_node_location = self.syft_node_location + action_object.syft_server_location = self.syft_server_location action_object.syft_client_verify_key = self.syft_client_verify_key blob_store_result = action_object._save_to_blob_storage() if isinstance(blob_store_result, SyftError): @@ -787,7 +1037,7 @@ def accept_by_depositing_result( action_object = ActionObject.from_obj( result, syft_client_verify_key=api.signing_key.verify_key, - syft_node_location=api.node_uid, + syft_server_location=api.server_uid, ) else: action_object = result @@ -795,14 +1045,14 @@ def accept_by_depositing_result( # TODO: proper check for if actionobject is already uploaded # we also need this for manualy syncing action_object_is_from_this_node = ( - self.syft_node_location == action_object.syft_node_location + self.syft_server_location == action_object.syft_server_location ) if ( action_object.syft_blob_storage_entry_id is None or not action_object_is_from_this_node ): action_object.reload_cache() - action_object.syft_node_location = self.syft_node_location + action_object.syft_server_location = self.syft_server_location action_object.syft_client_verify_key = self.syft_client_verify_key blob_store_result = action_object._save_to_blob_storage() if isinstance(blob_store_result, SyftError): @@ -811,15 +1061,9 @@ def accept_by_depositing_result( if isinstance(result, SyftError): return result - # Do we still need this? 
- # policy_state_mutation = ObjectMutation( - # linked_obj=user_code_status_change.linked_obj, - # attr_name="output_policy", - # match_type=True, - # value=output_policy, - # ) - - action_object_link = LinkedObject.from_obj(result, node_uid=self.node_uid) + action_object_link = LinkedObject.from_obj( + result, server_uid=self.server_uid + ) permission_change = ActionStoreChange( linked_obj=action_object_link, apply_permission_type=ActionPermission.READ, @@ -842,7 +1086,8 @@ def accept_by_depositing_result( for inps in code.input_policy.inputs.values(): input_ids.update(inps) - res = api.services.code.apply_output( + input_ids = {k: v for k, v in input_ids.items() if isinstance(v, UID)} + res = api.services.code.store_execution_output( user_code_id=code.id, outputs=result, job_id=job.id, @@ -852,8 +1097,19 @@ def accept_by_depositing_result( return res job_info.result = action_object + job_info.status = ( + JobStatus.ERRORED + if isinstance(action_object.syft_action_data, Err) + else JobStatus.COMPLETED + ) - existing_result = job.result.id if job.result is not None else None + existing_result = None + if isinstance(job.result, ActionObject): + existing_result = job.result.id + elif isinstance(job.result, Err): + existing_result = job.result # type: ignore [assignment] + else: + existing_result = job.result print( f"Job({job.id}) Setting new result {existing_result} -> {job_info.result.id}" ) @@ -864,44 +1120,25 @@ def accept_by_depositing_result( if isinstance(res, SyftError): return res - return SyftSuccess(message="Request submitted for updating result.") - - def sync_job( - self, job_info: JobInfo, **kwargs: Any - ) -> Result[SyftSuccess, SyftError]: - if job_info.includes_result: - return SyftError( - message="This JobInfo includes a Result. Please use Request.accept_by_depositing_result instead." - ) - - api = APIRegistry.api_for( - node_uid=self.node_uid, user_verify_key=self.syft_client_verify_key - ) - if api is None: - return SyftError(message=f"api is None. You must login to {self.node_uid}") - job_service = api.services.job - - job = self._get_latest_or_create_job() - job.apply_info(job_info) - return job_service.update(job) - - def get_sync_dependencies(self, api: Any = None) -> list[UID] | SyftError: - dependencies = [] - - code_id = self.code_id - if isinstance(code_id, SyftError): - return code_id + return job - dependencies.append(code_id) + @deprecated( + return_syfterror=True, + reason="accept_by_depositing_result has been removed. 
Use approve instead to " + "approve this request, or deposit_result to deposit a new result.", + ) + def accept_by_depositing_result(self, result: Any, force: bool = False) -> Any: + pass - return dependencies + def get_sync_dependencies(self, context: AuthedServiceContext) -> list[UID]: + return [self.code_id, self.status_id] @serializable() class RequestInfo(SyftObject): # version __canonical_name__ = "RequestInfo" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 user: UserView request: Request @@ -912,7 +1149,7 @@ class RequestInfo(SyftObject): class RequestInfoFilter(SyftObject): # version __canonical_name__ = "RequestInfoFilter" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 name: str | None = None @@ -920,7 +1157,7 @@ class RequestInfoFilter(SyftObject): @serializable() class SubmitRequest(SyftObject): __canonical_name__ = "SubmitRequest" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 changes: list[Change] requesting_user_verify_key: SyftVerifyKey | None = None @@ -953,8 +1190,8 @@ def add_request_time(context: TransformContext) -> TransformContext: def check_requesting_user_verify_key(context: TransformContext) -> TransformContext: - if context.output and context.node and context.obj: - if context.obj.requesting_user_verify_key and context.node.is_root( + if context.output and context.server and context.obj: + if context.obj.requesting_user_verify_key and context.server.is_root( context.credentials ): context.output["requesting_user_verify_key"] = ( @@ -967,11 +1204,10 @@ def check_requesting_user_verify_key(context: TransformContext) -> TransformCont def add_requesting_user_info(context: TransformContext) -> TransformContext: - if context.output is not None and context.node is not None: + if context.output is not None and context.server is not None: try: user_key = context.output["requesting_user_verify_key"] - user_service = context.node.get_service("UserService") - user = user_service.get_by_verify_key(user_key) + user = context.server.services.user.get_by_verify_key(user_key).unwrap() context.output["requesting_user_name"] = user.name context.output["requesting_user_email"] = user.email context.output["requesting_user_institution"] = ( @@ -987,7 +1223,7 @@ def add_requesting_user_info(context: TransformContext) -> TransformContext: def submit_request_to_request() -> list[Callable]: return [ generate_id, - add_node_uid_for_key("node_uid"), + add_server_uid_for_key("server_uid"), add_request_time, check_requesting_user_verify_key, add_requesting_user_info, @@ -998,7 +1234,7 @@ def submit_request_to_request() -> list[Callable]: @serializable() class ObjectMutation(Change): __canonical_name__ = "ObjectMutation" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 linked_obj: LinkedObject | None = None attr_name: str @@ -1024,34 +1260,30 @@ def mutate(self, obj: Any, value: Any | None = None) -> Any: def __repr_syft_nested__(self) -> str: return f"Mutate {self.attr_name} to {self.value}" - def _run( - self, context: ChangeContext, apply: bool - ) -> Result[SyftSuccess, SyftError]: + @as_result(SyftException) + def _run(self, context: ChangeContext, apply: bool) -> SyftSuccess: if self.linked_obj is None: - return Err(SyftError(message=f"{self}'s linked object is None")) - try: - obj = self.linked_obj.resolve_with_context(context) - if obj.is_err(): - return Err(SyftError(message=obj.err())) - obj = obj.ok() - if apply: - obj = self.mutate(obj, value=self.value) - 
self.linked_obj.update_with_context(context, obj) - else: - # unset the set value - obj = self.mutate(obj, value=self.previous_value) - self.linked_obj.update_with_context(context, obj) + raise SyftException(public_message=f"{self}'s linked object is None") + + obj = self.linked_obj.resolve_with_context(context).unwrap() + + if apply: + obj = self.mutate(obj, value=self.value) + self.linked_obj.update_with_context(context, obj) + else: + # unset the set value + obj = self.mutate(obj, value=self.previous_value) + self.linked_obj.update_with_context(context, obj) - return Ok(SyftSuccess(message=f"{type(self)} Success")) - except Exception as e: - print(f"failed to apply {type(self)}. {e}") - return Err(SyftError(message=str(e))) + return SyftSuccess(message=f"{type(self)} Success") - def apply(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=True) + @as_result(SyftException) + def apply(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=True).unwrap() - def undo(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=False) + @as_result(SyftException) + def undo(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=False).unwrap() def type_for_field(object_type: type, attr_name: str) -> type | None: @@ -1069,7 +1301,7 @@ def type_for_field(object_type: type, attr_name: str) -> type | None: @serializable() class EnumMutation(ObjectMutation): __canonical_name__ = "EnumMutation" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 enum_type: type[Enum] value: Enum | None = None @@ -1078,10 +1310,10 @@ class EnumMutation(ObjectMutation): __repr_attrs__ = ["linked_obj", "attr_name", "value"] @property - def valid(self) -> SyftSuccess | SyftError: + def valid(self) -> SyftSuccess: if self.match_type and not isinstance(self.value, self.enum_type): - return SyftError( - message=f"{type(self.value)} must be of type: {self.enum_type}" + raise SyftException( + public_message=f"{type(self.value)} must be of type: {self.enum_type}" ) return SyftSuccess(message=f"{type(self)} valid") @@ -1098,35 +1330,30 @@ def from_obj( match_type=True, ) - def _run( - self, context: ChangeContext, apply: bool - ) -> Result[SyftSuccess, SyftError]: - try: - valid = self.valid - if not valid: - return Err(valid) - if self.linked_obj is None: - return Err(SyftError(message=f"{self}'s linked object is None")) - obj = self.linked_obj.resolve_with_context(context) - if obj.is_err(): - return Err(SyftError(message=obj.err())) - obj = obj.ok() - if apply: - obj = self.mutate(obj=obj) + @as_result(SyftException) + def _run(self, context: ChangeContext, apply: bool) -> SyftSuccess: + valid = self.valid + if not valid: + raise SyftException(public_message=valid.message) + if self.linked_obj is None: + raise SyftException(public_message=f"{self}'s linked object is None") + obj = self.linked_obj.resolve_with_context(context).unwrap() - self.linked_obj.update_with_context(context, obj) - else: - raise NotImplementedError - return Ok(SyftSuccess(message=f"{type(self)} Success")) - except Exception as e: - print(f"failed to apply {type(self)}. 
{e}") - return Err(SyftError(message=e)) + if apply: + obj = self.mutate(obj=obj) + self.linked_obj.update_with_context(context, obj) + else: + raise SyftException(public_message="undo not implemented") + + return SyftSuccess(message=f"{type(self)} Success") - def apply(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=True) + @as_result(SyftException) + def apply(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=True).unwrap() - def undo(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=False) + @as_result(SyftException) + def undo(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=False).unwrap() def __repr_syft_nested__(self) -> str: return f"Mutate {self.enum_type} to {self.value}" @@ -1141,7 +1368,7 @@ def link(self) -> SyftObject | None: @serializable() class UserCodeStatusChange(Change): __canonical_name__ = "UserCodeStatusChange" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 value: UserCodeStatus linked_obj: LinkedObject @@ -1158,45 +1385,55 @@ class UserCodeStatusChange(Change): @property def code(self) -> UserCode: + if self.linked_user_code._resolve_cache: + return self.linked_user_code._resolve_cache return self.linked_user_code.resolve + @as_result(SyftException) + def get_user_code(self, context: AuthedServiceContext) -> UserCode: + return self.linked_user_code.resolve_with_context(context).unwrap() + @property def codes(self) -> list[UserCode]: - def recursive_code(node: Any) -> list: + def recursive_code(server: Any) -> list: codes = [] - for _, (obj, new_node) in node.items(): + for obj, new_server in server.values(): + # TODO: this fixes problems with getting the api for object + # we should fix this more properly though + obj.syft_server_location = obj.server_uid codes.append(obj.resolve) - codes.extend(recursive_code(new_node)) + codes.extend(recursive_code(new_server)) return codes codes = [self.code] codes.extend(recursive_code(self.code.nested_codes)) return codes - def nested_repr(self, node: Any | None = None, level: int = 0) -> str: + def nested_repr(self, server: Any | None = None, level: int = 0) -> str: msg = "" - if node is None: - node = self.code.nested_codes + if server is None: + server = self.code.nested_codes - for service_func_name, (_, new_node) in node.items(): # type: ignore + for service_func_name, (_, new_server) in server.items(): # type: ignore msg = "├──" + "──" * level + f"{service_func_name}
    " - msg += self.nested_repr(node=new_node, level=level + 1) + msg += self.nested_repr(server=new_server, level=level + 1) return msg def __repr_syft_nested__(self) -> str: msg = ( - f"Request to change {self.code.service_func_name} " - f"(Pool Id: {self.code.worker_pool_name}) " + f"Request to change {self.code.service_func_name} " + f"(Pool Id: {self.code.worker_pool_name}) " ) - msg += "to permission RequestStatus.APPROVED" - if self.nested_solved: - if self.link.nested_codes == {}: # type: ignore - msg += ". No nested requests" - else: - msg += ".

    This change requests the following nested function calls:
    " - msg += self.nested_repr() + msg += "to permission RequestStatus.APPROVED." + if self.code.nested_codes is None or self.code.nested_codes == {}: # type: ignore + msg += " No nested requests" else: - msg += ". Nested Requests not resolved" + if self.nested_solved: + # else: + msg += "

    This change requests the following nested function calls:
    " + msg += self.nested_repr() + else: + msg += " Nested Requests not resolved" return msg def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: @@ -1224,115 +1461,90 @@ def approved(self) -> bool: return self.linked_obj.resolve.approved @property - def valid(self) -> SyftSuccess | SyftError: + def valid(self) -> SyftSuccess: if self.match_type and not isinstance(self.value, UserCodeStatus): # TODO: fix the mypy issue - return SyftError( # type: ignore[unreachable] - message=f"{type(self.value)} must be of type: {UserCodeStatus}" + raise SyftException( # type: ignore[unreachable] + public_message=f"{type(self.value)} must be of type: {UserCodeStatus}" ) return SyftSuccess(message=f"{type(self)} valid") - # def get_nested_requests(self, context, code_tree: Dict[str: Tuple[LinkedObject, Dict]]): - # approved_nested_codes = {} - # for key, (linked_obj, new_code_tree) in code_tree.items(): - # code_obj = linked_obj.resolve_with_context(context).ok() - # approved_nested_codes[key] = code_obj.id - - # res = self.get_nested_requests(context, new_code_tree) - # if isinstance(res, SyftError): - # return res - # code_obj.nested_codes = res - # linked_obj.update_with_context(context, code_obj) - - # return approved_nested_codes - + @as_result(SyftException) def mutate( self, status: UserCodeStatusCollection, context: ChangeContext, undo: bool, - ) -> UserCodeStatusCollection | SyftError: - if context.node is None: - return SyftError(message=f"context {context}'s node is None") + ) -> UserCodeStatusCollection: reason: str = context.extra_kwargs.get("reason", "") + return status.mutate( + value=ApprovalDecision( + status=UserCodeStatus.DENIED if undo else self.value, reason=reason + ), + server_name=context.server.name, + server_id=context.server.id, + verify_key=context.server.signing_key.verify_key, + ).unwrap() - if not undo: - res = status.mutate( - value=(self.value, reason), - node_name=context.node.name, - node_id=context.node.id, - verify_key=context.node.signing_key.verify_key, - ) - if isinstance(res, SyftError): - return res - else: - res = status.mutate( - value=(UserCodeStatus.DENIED, reason), - node_name=context.node.name, - node_id=context.node.id, - verify_key=context.node.signing_key.verify_key, - ) - return res - - def is_enclave_request(self, user_code: UserCode) -> bool: - return ( - user_code.is_enclave_code is not None - and self.value == UserCodeStatus.APPROVED - ) - - def _run( - self, context: ChangeContext, apply: bool - ) -> Result[SyftSuccess, SyftError]: - try: - valid = self.valid - if not valid: - return Err(valid) - user_code = self.linked_user_code.resolve_with_context(context) - if user_code.is_err(): - return Err(SyftError(message=user_code.err())) - user_code = user_code.ok() - user_code_status = self.linked_obj.resolve_with_context(context) - if user_code_status.is_err(): - return Err(SyftError(message=user_code_status.err())) - user_code_status = user_code_status.ok() - - if apply: - # Only mutate, does not write to stash - updated_status = self.mutate(user_code_status, context, undo=False) - - if isinstance(updated_status, SyftError): - return Err(updated_status.message) + @as_result(SyftException) + def _run(self, context: ChangeContext, apply: bool) -> SyftSuccess: + valid = self.valid - # relative - from ..enclave.enclave_service import propagate_inputs_to_enclave + if not valid: + raise SyftException(public_message=valid.message) - self.linked_obj.update_with_context(context, updated_status) - if self.is_enclave_request(user_code): 
- enclave_res = propagate_inputs_to_enclave( - user_code=user_code, context=context - ) - if isinstance(enclave_res, SyftError): - return enclave_res - else: - updated_status = self.mutate(user_code_status, context, undo=True) - if isinstance(updated_status, SyftError): - return Err(updated_status.message) + self.linked_user_code.resolve_with_context(context).unwrap() + user_code_status = self.linked_obj.resolve_with_context(context).unwrap() - # TODO: Handle Enclave approval. - self.linked_obj.update_with_context(context, updated_status) - return Ok(SyftSuccess(message=f"{type(self)} Success")) - except Exception as e: - print(f"failed to apply {type(self)}. {e}") - return Err(SyftError(message=str(e))) + if apply: + # Only mutate, does not write to stash + updated_status = self.mutate(user_code_status, context, undo=False).unwrap() + self.linked_obj.update_with_context(context, updated_status) + else: + updated_status = self.mutate(user_code_status, context, undo=True).unwrap() + self.linked_obj.update_with_context(context, updated_status) + return SyftSuccess(message=f"{type(self)} Success") - def apply(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=True) + @as_result(SyftException) + def apply(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=True).unwrap() - def undo(self, context: ChangeContext) -> Result[SyftSuccess, SyftError]: - return self._run(context=context, apply=False) + @as_result(SyftException) + def undo(self, context: ChangeContext) -> SyftSuccess: + return self._run(context=context, apply=False).unwrap() @property def link(self) -> SyftObject | None: if self.linked_obj: return self.linked_obj.resolve return None + + +@serializable() +class SyncedUserCodeStatusChange(UserCodeStatusChange): + __canonical_name__ = "SyncedUserCodeStatusChange" + __version__ = SYFT_OBJECT_VERSION_1 + linked_obj: LinkedObject | None = None # type: ignore + + @property + def approved(self) -> bool: + return self.code.status.approved + + def mutate( + self, + status: UserCodeStatusCollection, + context: ChangeContext, + undo: bool, + ) -> UserCodeStatusCollection: + raise SyftException( + public_message="Synced UserCodes status is computed, and cannot be updated manually." + ) + + @as_result(SyftException) + def _run(self, context: ChangeContext, apply: bool) -> SyftSuccess: + raise SyftException( + public_message="Synced UserCodes status is computed, and cannot be updated manually." 
+ ) + + def link(self) -> Any: # type: ignore + return self.code.status diff --git a/packages/syft/src/syft/service/request/request_service.py b/packages/syft/src/syft/service/request/request_service.py index 3ef97c83b7e..ed94c185689 100644 --- a/packages/syft/src/syft/service/request/request_service.py +++ b/packages/syft/src/syft/service/request/request_service.py @@ -1,36 +1,29 @@ # stdlib -from typing import cast - -# third party -from result import Err -from result import Ok +import logging # relative -from ...abstract_node import AbstractNode from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...server.credentials import SyftVerifyKey +from ...store.db.db import DBManager from ...store.linked_obj import LinkedObject +from ...types.errors import SyftException +from ...types.result import as_result from ...types.uid import UID -from ...util.telemetry import instrument -from ..action.action_permissions import ActionObjectPermission -from ..action.action_permissions import ActionPermission from ..context import AuthedServiceContext +from ..notification.email_templates import EmailTemplate from ..notification.email_templates import RequestEmailTemplate from ..notification.email_templates import RequestUpdateEmailTemplate from ..notification.notification_service import CreateNotification -from ..notification.notification_service import NotificationService -from ..notification.notifications import Notification from ..notifier.notifier_enums import NOTIFIERS -from ..response import SyftError +from ..notifier.notifier_service import RateLimitException from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES from ..service import TYPE_TO_SERVICE from ..service import service_method -from ..user.user import UserView +from ..user.user_roles import ADMIN_ROLE_LEVEL from ..user.user_roles import DATA_SCIENTIST_ROLE_LEVEL from ..user.user_roles import GUEST_ROLE_LEVEL -from ..user.user_service import UserService from .request import Change from .request import Request from .request import RequestInfo @@ -39,15 +32,14 @@ from .request import SubmitRequest from .request_stash import RequestStash +logger = logging.getLogger(__name__) + -@instrument -@serializable() +@serializable(canonical_name="RequestService", version=1) class RequestService(AbstractService): - store: DocumentStore stash: RequestStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = RequestStash(store=store) @service_method(path="request.submit", name="submit", roles=GUEST_ROLE_LEVEL) @@ -57,64 +49,96 @@ def submit( request: SubmitRequest, send_message: bool = True, reason: str | None = "", - ) -> Request | SyftError: + ) -> Request: """Submit a Request""" - try: - req = request.to(Request, context=context) - result = self.stash.set( - context.credentials, - req, - add_permissions=[ - ActionObjectPermission( - uid=req.id, permission=ActionPermission.ALL_READ - ), - ], + request = request.to(Request, context=context) + request = self.stash.set( + context.credentials, + request, + ).unwrap() + + root_verify_key = context.server.services.user.root_verify_key + + if send_message: + message_subject = f"Result to request {str(request.id)[:4]}...{str(request.id)[-3:]}\ + has been successfully deposited." 
+ self._send_email_notification( + context=context, + message_subject=message_subject if not reason else reason, + request=request, + to_user_verify_key=root_verify_key, + email_template=RequestEmailTemplate, ) - if result.is_ok(): - request = result.ok() - link = LinkedObject.with_context(request, context=context) - context.node = cast(AbstractNode, context.node) - admin_verify_key = context.node.get_service_method( - UserService.admin_verify_key - ) + return request - root_verify_key = admin_verify_key() - if send_message: - subject_msg = f"Result to request {str(request.id)[:4]}...{str(request.id)[-3:]}\ - has been successfully deposited." - message = CreateNotification( - subject=subject_msg if not reason else reason, - from_user_verify_key=context.credentials, - to_user_verify_key=root_verify_key, - linked_obj=link, - notifier_types=[NOTIFIERS.EMAIL], - email_template=RequestEmailTemplate, - ) - method = context.node.get_service_method(NotificationService.send) - result = method(context=context, notification=message) - if isinstance(result, Notification): - return Ok(request) - else: - return Err(result) - - return Ok(request) - - if result.is_err(): - return SyftError(message=str(result.err())) - return result.ok() - except Exception as e: - print("Failed to submit Request", e) - raise e + @service_method( + path="request.get_by_uid", name="get_by_uid", roles=DATA_SCIENTIST_ROLE_LEVEL + ) + def get_by_uid(self, context: AuthedServiceContext, uid: UID) -> Request: + return self.stash.get_by_uid(context.credentials, uid).unwrap() @service_method( path="request.get_all", name="get_all", roles=DATA_SCIENTIST_ROLE_LEVEL ) - def get_all(self, context: AuthedServiceContext) -> list[Request] | SyftError: - result = self.stash.get_all(context.credentials) - if result.is_err(): - return SyftError(message=str(result.err())) - requests = result.ok() - # return [self.resolve_nested_requests(context, request) for request in requests] + def get_all(self, context: AuthedServiceContext) -> list[Request]: + requests = self.stash.get_all(context.credentials).unwrap() + # TODO remove once sorting is handled by the stash + requests.sort(key=lambda x: (x.request_time, x.id), reverse=True) + + return requests + + # DIRTY METHOD: DELETE AFTER DATABASE UPGRADE + @service_method( + path="request.get_all_approved", + name="get_all_approved", + roles=DATA_SCIENTIST_ROLE_LEVEL, + ) + def get_all_approved(self, context: AuthedServiceContext) -> list[Request]: + requests = self.stash.get_all(context.credentials).unwrap() + # TODO remove once sorting is handled by the stash + requests = [ + request + for request in requests + if request.get_status(context) == RequestStatus.APPROVED + ] + requests.sort(key=lambda x: (x.request_time, x.id), reverse=True) + + return requests + + # DIRTY METHOD: DELETE AFTER DATABASE UPGRADE + @service_method( + path="request.get_all_rejected", + name="get_all_rejected", + roles=DATA_SCIENTIST_ROLE_LEVEL, + ) + def get_all_rejected(self, context: AuthedServiceContext) -> list[Request]: + requests = self.stash.get_all(context.credentials).unwrap() + # TODO remove once sorting is handled by the stash + requests = [ + request + for request in requests + if request.get_status(context) == RequestStatus.REJECTED + ] + requests.sort(key=lambda x: (x.request_time, x.id), reverse=True) + + return requests + + # DIRTY METHOD: DELETE AFTER DATABASE UPGRADE + @service_method( + path="request.get_all_pending", + name="get_all_pending", + roles=DATA_SCIENTIST_ROLE_LEVEL, + ) + def 
get_all_pending(self, context: AuthedServiceContext) -> list[Request]: + requests = self.stash.get_all(context.credentials).unwrap() + # TODO remove once sorting is handled by the stash + requests = [ + request + for request in requests + if request.get_status(context) == RequestStatus.PENDING + ] + requests.sort(key=lambda x: (x.request_time, x.id), reverse=True) + return requests @service_method(path="request.get_all_info", name="get_all_info") @@ -123,20 +147,17 @@ def get_all_info( context: AuthedServiceContext, page_index: int | None = 0, page_size: int | None = 0, - ) -> list[list[RequestInfo]] | list[RequestInfo] | SyftError: + ) -> list[list[RequestInfo]] | list[RequestInfo]: """Get the information of all requests""" - context.node = cast(AbstractNode, context.node) - result = self.stash.get_all(context.credentials) - if result.is_err(): - return SyftError(message=result.err()) - - method = context.node.get_service_method(UserService.get_by_verify_key) - get_message = context.node.get_service_method(NotificationService.filter_by_obj) - + result = self.stash.get_all(context.credentials).unwrap() requests: list[RequestInfo] = [] - for req in result.ok(): - user = method(req.requesting_user_verify_key).to(UserView) - message = get_message(context=context, obj_uid=req.id) + for req in result: + user = context.server.services.user.get_by_verify_key( + req.requesting_user_verify_key + ).unwrap() + message = context.server.services.notification.filter_by_obj( + context=context, obj_uid=req.id + ).unwrap() requests.append(RequestInfo(user=user, request=req, notification=message)) if not page_size: return requests @@ -153,15 +174,10 @@ def get_all_info( @service_method(path="request.add_changes", name="add_changes") def add_changes( self, context: AuthedServiceContext, uid: UID, changes: list[Change] - ) -> Request | SyftError: - result = self.stash.get_by_uid(credentials=context.credentials, uid=uid) - - if result.is_err(): - return SyftError( - message=f"Failed to retrieve request with uid: {uid}. Error: {result.err()}" - ) - - request = result.ok() + ) -> Request: + request = self.stash.get_by_uid( + credentials=context.credentials, uid=uid + ).unwrap() request.changes.extend(changes) return self.save(context=context, request=request) @@ -172,13 +188,15 @@ def filter_all_info( request_filter: RequestInfoFilter, page_index: int | None = 0, page_size: int | None = 0, - ) -> list[RequestInfo] | SyftError: - """Get a Dataset""" + ) -> list[RequestInfo]: + """Filter Request""" result = self.get_all_info(context) + requests = list( filter(lambda res: (request_filter.name in res.user.name), result) ) + # TODO: Move chunking to a function? 
# If chunk size is defined, then split list into evenly sized chunks if page_size: requests = [ @@ -190,106 +208,109 @@ def filter_all_info( return requests - @service_method( - path="request.apply", - name="apply", - ) + @service_method(path="request.apply", name="apply", unwrap_on_success=False) def apply( self, context: AuthedServiceContext, uid: UID, **kwargs: dict, - ) -> SyftSuccess | SyftError: - context.node = cast(AbstractNode, context.node) - request = self.stash.get_by_uid(context.credentials, uid) - if request.is_ok(): - request = request.ok() + ) -> SyftSuccess: + request: Request = self.stash.get_by_uid(context.credentials, uid).unwrap() + + context.extra_kwargs = kwargs + result = request.apply(context=context).unwrap() + request_notification = context.server.services.notification.filter_by_obj( + context=context, obj_uid=uid + ).unwrap() + + if not request.get_status(context) == RequestStatus.PENDING: + if request_notification is not None: + context.server.services.notification.mark_as_read( + context=context, uid=request_notification.id + ) - context.extra_kwargs = kwargs - result = request.apply(context=context) + self._send_email_notification( + context=context, + message_subject=f"Your request ({str(uid)[:4]}) has been approved. ", + request=request, + to_user_verify_key=request.requesting_user_verify_key, + email_template=RequestUpdateEmailTemplate, + ) + return result - filter_by_obj = context.node.get_service_method( - NotificationService.filter_by_obj - ) - request_notification = filter_by_obj(context=context, obj_uid=uid) - - link = LinkedObject.with_context(request, context=context) - if not request.status == RequestStatus.PENDING: - if request_notification is not None and not isinstance( - request_notification, SyftError - ): - mark_as_read = context.node.get_service_method( - NotificationService.mark_as_read - ) - mark_as_read(context=context, uid=request_notification.id) - - notification = CreateNotification( - subject=f"Your request ({str(uid)[:4]}) has been approved!", - from_user_verify_key=context.credentials, - to_user_verify_key=request.requesting_user_verify_key, - linked_obj=link, - notifier_types=[NOTIFIERS.EMAIL], - email_template=RequestUpdateEmailTemplate, - ) - send_notification = context.node.get_service_method( - NotificationService.send - ) - send_notification(context=context, notification=notification) - - # TODO: check whereever we're return SyftError encapsulate it in Result. 
- if hasattr(result, "value"): - return result.value - return result - return request.value - - @service_method(path="request.undo", name="undo") - def undo( - self, context: AuthedServiceContext, uid: UID, reason: str - ) -> SyftSuccess | SyftError: - result = self.stash.get_by_uid(credentials=context.credentials, uid=uid) - if result.is_err(): - return SyftError( - message=f"Failed to update request: {uid} with error: {result.err()}" - ) + @as_result(SyftException, RateLimitException) + def _send_email_notification( + self, + *, + context: AuthedServiceContext, + request: Request, + message_subject: str, + to_user_verify_key: SyftVerifyKey, + email_template: type[EmailTemplate], + ) -> None: + linked_obj = LinkedObject.with_context(request, context=context) + notification = CreateNotification( + subject=message_subject, + from_user_verify_key=context.credentials, + to_user_verify_key=to_user_verify_key, + linked_obj=linked_obj, + notifier_types=[NOTIFIERS.EMAIL], + email_template=email_template, + ) + context.server.services.notification.send( + context=context, notification=notification + ) - request = result.ok() - if request is None: - return SyftError(message=f"Request with uid: {uid} does not exists.") + @service_method(path="request.undo", name="undo", unwrap_on_success=False) + def undo(self, context: AuthedServiceContext, uid: UID, reason: str) -> SyftSuccess: + request: Request = self.stash.get_by_uid( + credentials=context.credentials, uid=uid + ).unwrap() context.extra_kwargs["reason"] = reason - result = request.undo(context=context) - - if result.is_err(): - return SyftError( - message=f"Failed to undo Request: <{uid}> with error: {result.err()}" - ) - - link = LinkedObject.with_context(request, context=context) - message_subject = f"Your request ({str(uid)[:4]}) has been denied. " + request.undo(context=context) - notification = CreateNotification( - subject=message_subject, - from_user_verify_key=context.credentials, + self._send_email_notification( + context=context, + message_subject=f"Your request ({str(uid)[:4]}) has been denied. ", + request=request, to_user_verify_key=request.requesting_user_verify_key, - linked_obj=link, - notifier_types=[NOTIFIERS.EMAIL], email_template=RequestUpdateEmailTemplate, ) - context.node = cast(AbstractNode, context.node) - send_notification = context.node.get_service_method(NotificationService.send) - send_notification(context=context, notification=notification) - - return SyftSuccess(message=f"Request {uid} successfully denied !") - - def save( - self, context: AuthedServiceContext, request: Request - ) -> Request | SyftError: - result = self.stash.update(context.credentials, request) - if result.is_ok(): - return result.ok() - return SyftError( - message=f"Failed to update Request: <{request.id}>. 
Error: {result.err()}" - ) + + return SyftSuccess(message=f"Request {uid} successfully denied!") + + def save(self, context: AuthedServiceContext, request: Request) -> Request: + return self.stash.update(context.credentials, request).unwrap() + + @service_method( + path="request.delete_by_uid", name="delete_by_uid", unwrap_on_success=False + ) + def delete_by_uid(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: + """Delete the request with the given uid.""" + self.stash.delete_by_uid(context.credentials, uid).unwrap() + return SyftSuccess(message=f"Request with id {uid} deleted.", value=uid) + + @service_method( + path="request.set_tags", + name="set_tags", + roles=ADMIN_ROLE_LEVEL, + ) + def set_tags( + self, + context: AuthedServiceContext, + request: Request, + tags: list[str], + ) -> Request: + request = self.stash.get_by_uid(context.credentials, request.id).unwrap() + request.tags = tags + return self.save(context, request) + + @service_method(path="request.get_by_usercode_id", name="get_by_usercode_id") + def get_by_usercode_id( + self, context: AuthedServiceContext, usercode_id: UID + ) -> list[Request]: + return self.stash.get_by_usercode_id(context.credentials, usercode_id).unwrap() TYPE_TO_SERVICE[Request] = RequestService diff --git a/packages/syft/src/syft/service/request/request_stash.py b/packages/syft/src/syft/service/request/request_stash.py index 5b8fe3e08c5..a28fd5842e1 100644 --- a/packages/syft/src/syft/service/request/request_stash.py +++ b/packages/syft/src/syft/service/request/request_stash.py @@ -1,44 +1,31 @@ -# stdlib - -# third party -from result import Result - # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ...types.datetime import DateTime -from ...util.telemetry import instrument +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.uid import UID from .request import Request -RequestingUserVerifyKeyPartitionKey = PartitionKey( - key="requesting_user_verify_key", type_=SyftVerifyKey -) - -OrderByRequestTimeStampPartitionKey = PartitionKey(key="request_time", type_=DateTime) - - -@instrument -@serializable() -class RequestStash(BaseUIDStoreStash): - object_type = Request - settings: PartitionSettings = PartitionSettings( - name=Request.__canonical_name__, object_type=Request - ) +@serializable(canonical_name="RequestStashSQL", version=1) +class RequestStash(ObjectStash[Request]): + @as_result(SyftException) def get_all_for_verify_key( self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey, - ) -> Result[list[Request], str]: - if isinstance(verify_key, str): - verify_key = SyftVerifyKey.from_string(verify_key) - qks = QueryKeys(qks=[RequestingUserVerifyKeyPartitionKey.with_obj(verify_key)]) - return self.query_all( + ) -> list[Request]: + return self.get_all( + credentials=credentials, + filters={"requesting_user_verify_key": verify_key}, + ).unwrap() + + @as_result(SyftException) + def get_by_usercode_id( + self, credentials: SyftVerifyKey, user_code_id: UID + ) -> list[Request]: + return self.get_all( credentials=credentials, - qks=qks, - order_by=OrderByRequestTimeStampPartitionKey, - ) + filters={"code_id": user_code_id}, + ).unwrap() diff 
--git a/packages/syft/src/syft/service/response.py b/packages/syft/src/syft/service/response.py index d30c1dbac2b..acb6f27fdc9 100644 --- a/packages/syft/src/syft/service/response.py +++ b/packages/syft/src/syft/service/response.py @@ -1,20 +1,58 @@ # stdlib -import sys +from copy import deepcopy import traceback from typing import Any +from typing import TYPE_CHECKING # third party -from result import Err +from typing_extensions import Self # relative from ..serde.serializable import serializable from ..types.base import SyftBaseModel +from ..util.util import sanitize_html + +if TYPE_CHECKING: + # relative + from .context import AuthedServiceContext class SyftResponseMessage(SyftBaseModel): message: str _bool: bool = True require_api_update: bool = False + client_warnings: list[str] = [] + + def add_warnings_from_context(self, context: "AuthedServiceContext") -> None: + self.client_warnings = deepcopy(context.client_warnings) + + def is_err(self) -> bool: + return False + + def is_ok(self) -> bool: + return True + + def __getattr__(self, name: str) -> Any: + if name in [ + "_bool", + # "_repr_html_", + # "message", + # 'require_api_update', + # '__bool__', + # '__eq__', + # '__repr__', + # '__str__', + # '_repr_html_class_', + # '_repr_html_', + "_ipython_canary_method_should_not_exist_", + "_ipython_display_", + "__canonical_name__", + "__version__", + ] or name.startswith("_repr"): + return super().__getattr__(name) + raise AttributeError( + f"You have tried accessing `{name}` on a {type(self).__name__} with message: {self.message}" + ) def __bool__(self) -> bool: return self._bool @@ -41,32 +79,92 @@ def _repr_html_class_(self) -> str: def _repr_html_(self) -> str: return ( - f'
    ' - + f"{type(self).__name__}: {self.message}

    " + f'
    ' + f"{type(self).__name__}: " + f'
    '
    +            f"{sanitize_html(self.message)}

    " ) -@serializable() +@serializable(canonical_name="SyftError", version=1) class SyftError(SyftResponseMessage): _bool: bool = False + tb: str | None = None @property def _repr_html_class_(self) -> str: return "alert-danger" - def to_result(self) -> Err: - return Err(value=self.message) + def __bool__(self) -> bool: + return False + + def is_err(self) -> bool: + return True + + def is_ok(self) -> bool: + return False + + @classmethod + def from_public_exception( + cls, + exc: Exception, + ) -> Self: + return cls(message=exc.public_message) + + @classmethod + def from_exception( + cls, + context: "AuthedServiceContext", + exc: Exception, + include_traceback: bool = False, + ) -> Self: + # traceback may contain private information + # relative + from ..types.errors import SyftException as NewSyftException + + tb = None + + if include_traceback: + if isinstance(exc, NewSyftException): + error_msg = exc.get_message(context) + tb = exc.get_tb(context) + else: + # other exceptions + lines = traceback.format_exception(exc) + tb = "".join(lines) + error_msg = lines[-1] + print(f"Error: {tb}") + else: + if isinstance(exc, NewSyftException): + error_msg = exc.get_message(context) + else: + # by default only type + error_msg = f"Something unexpected happened server side {type(exc)}" + print(f"Error: {exc}") + print(traceback.format_exc()) + return cls(message=error_msg, tb=tb) + + +@serializable(canonical_name="SyftSuccess", version=1) +class SyftSuccess(SyftResponseMessage): + value: Any | None = None + def is_err(self) -> bool: + return False + + def is_ok(self) -> bool: + return True -@serializable() -class SyftSuccess(SyftResponseMessage): @property def _repr_html_class_(self) -> str: return "alert-success" + def unwrap_value(self) -> Any: + return self.value + -@serializable() -class SyftNotReady(SyftResponseMessage): +@serializable(canonical_name="SyftNotReady", version=1) +class SyftNotReady(SyftError): _bool: bool = False @property @@ -74,74 +172,17 @@ def _repr_html_class_(self) -> str: return "alert-info" -@serializable() +@serializable(canonical_name="SyftWarning", version=1) class SyftWarning(SyftResponseMessage): @property def _repr_html_class_(self) -> str: return "alert-warning" -@serializable() +@serializable(canonical_name="SyftInfo", version=1) class SyftInfo(SyftResponseMessage): _bool: bool = False @property def _repr_html_class_(self) -> str: return "alert-info" - - -@serializable() -class SyftException(Exception): - traceback: bool = False - traceback_limit: int = 10 - - @property - def _repr_html_class_(self) -> str: - return "alert-danger" - - def _repr_html_(self) -> str: - return ( - f'
    ' - + f"{type(self).__name__}: {self.args}

    " - ) - - @staticmethod - def format_traceback(etype: Any, evalue: Any, tb: Any, tb_offset: Any) -> str: - line = "---------------------------------------------------------------------------\n" - template = "" - template += line - template += f"{type(evalue).__name__}\n" - template += line - template += f"Exception: {evalue}\n" - - if evalue.traceback: - template += line - template += "Traceback:\n" - tb_lines = "".join(traceback.format_tb(tb, evalue.traceback_limit)) + "\n" - template += tb_lines - template += line - - return template - - -def syft_exception_handler( - shell: Any, etype: Any, evalue: Any, tb: Any, tb_offset: Any = None -) -> None: - template = evalue.format_traceback( - etype=etype, evalue=evalue, tb=tb, tb_offset=tb_offset - ) - sys.stderr.write(template) - - -try: - # third party - from IPython import get_ipython - - get_ipython().set_custom_exc((SyftException,), syft_exception_handler) # noqa: F821 -except Exception: - pass # nosec - - -@serializable() -class SyftAttributeError(AttributeError, SyftException): - pass diff --git a/packages/syft/src/syft/service/service.py b/packages/syft/src/syft/service/service.py index 8a98eca633b..d990e297054 100644 --- a/packages/syft/src/syft/service/service.py +++ b/packages/syft/src/syft/service/service.py @@ -1,22 +1,30 @@ +# future +from __future__ import annotations + # stdlib +from collections import OrderedDict from collections import defaultdict from collections.abc import Callable +from collections.abc import Iterable from copy import deepcopy +import functools from functools import partial +from functools import reduce import inspect from inspect import Parameter +import logging +import operator +import types +import typing from typing import Any from typing import TYPE_CHECKING -from typing import Union # third party -from result import Ok -from result import OkErr +from pydantic import ValidationError from typing_extensions import Self # relative -from ..abstract_node import AbstractNode -from ..node.credentials import SyftVerifyKey +from ..abstract_server import AbstractServer from ..protocol.data_protocol import migrate_args_and_kwargs from ..serde.lib_permissions import CMPCRUDPermission from ..serde.lib_permissions import CMPPermission @@ -28,20 +36,29 @@ from ..serde.signature import Signature from ..serde.signature import signature_remove_context from ..serde.signature import signature_remove_self +from ..server.credentials import SyftVerifyKey +from ..store.db.stash import ObjectStash from ..store.linked_obj import LinkedObject +from ..types.errors import SyftException +from ..types.result import as_result +from ..types.syft_metaclass import Empty +from ..types.syft_metaclass import EmptyType +from ..types.syft_object import EXCLUDED_FROM_SIGNATURE +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from ..types.syft_object import SYFT_OBJECT_VERSION_2 from ..types.syft_object import SyftBaseObject from ..types.syft_object import SyftObject from ..types.syft_object import attach_attribute_to_syft_object from ..types.uid import UID +from ..util.telemetry import instrument from .context import AuthedServiceContext from .context import ChangeContext -from .response import SyftError from .user.user_roles import DATA_OWNER_ROLE_LEVEL from .user.user_roles import ServiceRole -from .veilid import VEILID_ENABLED from .warnings import APIEndpointWarning +logger = logging.getLogger(__name__) + if TYPE_CHECKING: # relative from ..client.api import APIModule @@ -51,36 +68,55 @@ class AbstractService: - node: 
AbstractNode - node_uid: UID + server: AbstractServer + server_uid: UID + stash: ObjectStash + @as_result(SyftException) def resolve_link( self, context: AuthedServiceContext | ChangeContext | Any, linked_obj: LinkedObject, - ) -> Any | SyftError: + ) -> Any: if isinstance(context, AuthedServiceContext): credentials = context.credentials elif isinstance(context, ChangeContext): credentials = context.approving_user_credentials else: - return SyftError(message="wrong context passed") - - obj = self.stash.get_by_uid(credentials, uid=linked_obj.object_uid) - if isinstance(obj, OkErr) and obj.is_ok(): - obj = obj.ok() - if hasattr(obj, "node_uid"): - if context.node is None: - return SyftError(message=f"context {context}'s node is None") - obj.node_uid = context.node.id - if not isinstance(obj, OkErr): - obj = Ok(obj) + raise SyftException(public_message="Wrong context passed") + + # TODO: Add stash to AbstractService? + obj = self.stash.get_by_uid(credentials, uid=linked_obj.object_uid).unwrap() # type: ignore + + if hasattr(obj, "server_uid"): + if context.server is None: + raise SyftException( + public_message=f"The context '{context}' server is None" + ) + obj.server_uid = context.server.id + return obj + # TODO: Delete? def get_all(*arg: Any, **kwargs: Any) -> Any: pass +@serializable() +class BaseConfigV1(SyftBaseObject): + __canonical_name__ = "BaseConfig" + __version__ = SYFT_OBJECT_VERSION_1 + + public_path: str + private_path: str + public_name: str + method_name: str + doc_string: str | None = None + signature: Signature | None = None + is_from_lib: bool = False + warning: APIEndpointWarning | None = None + + @serializable() class BaseConfig(SyftBaseObject): __canonical_name__ = "BaseConfig" @@ -94,11 +130,23 @@ class BaseConfig(SyftBaseObject): signature: Signature | None = None is_from_lib: bool = False warning: APIEndpointWarning | None = None + unwrap_on_success: bool = True + + +@serializable() +class ServiceConfigV1(BaseConfigV1): + __canonical_name__ = "ServiceConfig" + __version__ = SYFT_OBJECT_VERSION_1 + + permissions: list + roles: list[ServiceRole] @serializable() class ServiceConfig(BaseConfig): __canonical_name__ = "ServiceConfig" + __version__ = SYFT_OBJECT_VERSION_2 + permissions: list roles: list[ServiceRole] @@ -106,9 +154,17 @@ def has_permission(self, user_service_role: ServiceRole) -> bool: return user_service_role in self.roles +@serializable() +class LibConfigV1(BaseConfigV1): + __canonical_name__ = "LibConfig" + __version__ = SYFT_OBJECT_VERSION_1 + permissions: set[CMPPermission] + + @serializable() class LibConfig(BaseConfig): __canonical_name__ = "LibConfig" + __version__ = SYFT_OBJECT_VERSION_2 permissions: set[CMPPermission] def has_permission(self, credentials: SyftVerifyKey) -> bool: @@ -228,19 +284,14 @@ def register_lib_obj(lib_obj: CMPBase) -> None: LibConfigRegistry.register(lib_config) -# NOTE: Currently we disable adding library enpoints like numpy, torch when veilid is enabled -# This is because the /api endpoint which return SyftAPI along with the lib enpoints exceeds -# 2 MB . But veilid has a limit of 32 KB for sending and receiving message. -# This would be fixed, when chunking is implemented at veilid core. 
-if not VEILID_ENABLED: - # hacky, prevent circular imports - for lib_obj in action_execute_registry_libs.flatten(): - # # for functions - # func_name = func.__name__ - # # for classes - # func_name = path.split(".")[-1] - if isinstance(lib_obj, CMPFunction) or isinstance(lib_obj, CMPClass): - register_lib_obj(lib_obj) +# hacky, prevent circular imports +for lib_obj in action_execute_registry_libs.flatten(): + # # for functions + # func_name = func.__name__ + # # for classes + # func_name = path.split(".")[-1] + if isinstance(lib_obj, CMPFunction) or isinstance(lib_obj, CMPClass): + register_lib_obj(lib_obj) def deconstruct_param(param: inspect.Parameter) -> dict[str, Any]: @@ -258,16 +309,57 @@ def deconstruct_param(param: inspect.Parameter) -> dict[str, Any]: def types_for_autosplat(signature: Signature, autosplat: list[str]) -> dict[str, type]: - autosplat_types = {} - for k, v in signature.parameters.items(): - if k in autosplat: - autosplat_types[k] = v.annotation - return autosplat_types + return {k: v.annotation for k, v in signature.parameters.items() if k in autosplat} + + +def _check_empty_union(x: Any) -> bool: + return isinstance( + x, typing._UnionGenericAlias | types.UnionType + ) and EmptyType in typing.get_args(x) + + +def _check_empty_parameter(p: Parameter) -> bool: + return _check_empty_union(p.annotation) and p.default is Empty + + +def _make_union_type(args: Iterable) -> types.UnionType: + return reduce(operator.or_, args) + + +def _replace_empty_parameter(p: Parameter) -> Parameter: + return Parameter( + name=p.name, + default="optional", + annotation=_make_union_type( + t for t in typing.get_args(p.annotation) if t is not EmptyType + ), + kind=p.kind, + ) + + +def _format_signature(s: inspect.Signature) -> inspect.Signature: + params = ( + (_replace_empty_parameter(p) if _check_empty_parameter(p) else p) + for p in s.parameters.values() + ) + + return inspect.Signature( + parameters=params, + return_annotation=inspect.Signature.empty, + ) + + +_SIGNATURE_ERROR_MESSAGE = "Please provide the correct arguments to the method according to the following signature:" + + +def _signature_error_message(s: inspect.Signature) -> str: + return f"{_SIGNATURE_ERROR_MESSAGE}\n{s}" def reconstruct_args_kwargs( signature: Signature, autosplat: list[str], + expanded_signature: Signature, args: tuple[Any, ...], kwargs: dict[Any, str], ) -> tuple[tuple[Any, ...], dict[str, Any]]: @@ -280,10 +372,26 @@ def reconstruct_args_kwargs( for key in keys: if key in kwargs: init_kwargs[key] = kwargs.pop(key) - autosplat_objs[autosplat_key] = autosplat_type(**init_kwargs) + try: + autosplat_objs[autosplat_key] = autosplat_type(**init_kwargs) + except ValidationError: + raise TypeError( + f"Invalid argument(s) provided. 
" + f"{_signature_error_message(_format_signature(expanded_signature))}" + ) + + autosplat_parameters = OrderedDict( + (param_key, param) + for param_key, param in signature.parameters.items() + if param_key in autosplat_objs + ) final_kwargs = {} - for param_key, param in signature.parameters.items(): + for key in kwargs: + if key not in autosplat_parameters: + final_kwargs[key] = kwargs[key] + + for param_key, param in autosplat_parameters.items(): if param_key in kwargs: final_kwargs[param_key] = kwargs[param_key] elif param_key in autosplat_objs: @@ -291,7 +399,14 @@ def reconstruct_args_kwargs( elif not isinstance(param.default, type(Parameter.empty)): final_kwargs[param_key] = param.default else: - raise Exception(f"Missing {param_key} not in kwargs.") + raise TypeError( + f"Missing argument {param_key}. " + f"{_signature_error_message(_format_signature(expanded_signature))}" + ) + + if "context" in kwargs: + final_kwargs["context"] = kwargs["context"] + return (args, final_kwargs) @@ -314,7 +429,7 @@ def expand_signature(signature: Signature, autosplat: list[str]) -> Signature: # Reorder the parameter based on if they have default value or not new_params = sorted( - new_mapping.values(), + (v for k, v in new_mapping.items() if k not in EXCLUDED_FROM_SIGNATURE), key=lambda param: param.default is param.empty, reverse=True, ) @@ -333,6 +448,7 @@ def service_method( roles: list[ServiceRole] | None = None, autosplat: list[str] | None = None, warning: APIEndpointWarning | None = None, + unwrap_on_success: bool = True, ) -> Callable: if roles is None or len(roles) == 0: # TODO: this is dangerous, we probably want to be more conservative @@ -348,6 +464,14 @@ def wrapper(func: Any) -> Callable: input_signature = deepcopy(signature) + if autosplat is not None and len(autosplat) > 0: + signature = expand_signature(signature=input_signature, autosplat=autosplat) + + @instrument( # type: ignore + span_name=f"service_method::{_path}", + attributes={"service.name": name, "service.path": path}, + ) + @functools.wraps(func) def _decorator(self: Any, *args: Any, **kwargs: Any) -> Callable: communication_protocol = kwargs.pop("communication_protocol", None) @@ -359,6 +483,7 @@ def _decorator(self: Any, *args: Any, **kwargs: Any) -> Callable: args, kwargs = reconstruct_args_kwargs( signature=input_signature, autosplat=autosplat, + expanded_signature=signature, args=args, kwargs=kwargs, ) @@ -373,15 +498,11 @@ def _decorator(self: Any, *args: Any, **kwargs: Any) -> Callable: context = kwargs.get("context", None) context = args[0] if context is None else context attrs_to_attach = { - "syft_node_location": context.node.id, + "syft_server_location": context.server.id, "syft_client_verify_key": context.credentials, } - return attach_attribute_to_syft_object( - result=result, attr_dict=attrs_to_attach - ) - - if autosplat is not None and len(autosplat) > 0: - signature = expand_signature(signature=input_signature, autosplat=autosplat) + attach_attribute_to_syft_object(result=result, attr_dict=attrs_to_attach) + return result config = ServiceConfig( public_path=_path if path is None else path, @@ -393,6 +514,7 @@ def _decorator(self: Any, *args: Any, **kwargs: Any) -> Callable: roles=roles, permissions=["Guest"], warning=warning, + unwrap_on_success=unwrap_on_success, ) ServiceConfigRegistry.register(config) @@ -413,7 +535,7 @@ def __init_subclass__(cls, **kwargs: Any) -> None: cls.__object_version_registry__[mapping_string] = cls @classmethod - def versioned_class(cls, name: str, version: int) -> 
type["SyftObject"] | None: + def versioned_class(cls, name: str, version: int) -> type[SyftObject] | None: mapping_string = f"{name}_{version}" if mapping_string not in cls.__object_version_registry__: return None @@ -433,7 +555,7 @@ def add_transform( @classmethod def get_transform( - cls, type_from: type["SyftObject"], type_to: type["SyftObject"] + cls, type_from: type[SyftObject], type_to: type[SyftObject] ) -> Callable: klass_from = type_from.__canonical_name__ version_from = type_from.__version__ @@ -445,52 +567,52 @@ def get_transform( def from_api_or_context( func_or_path: str, - syft_node_location: UID | None = None, + syft_server_location: UID | None = None, syft_client_verify_key: SyftVerifyKey | None = None, -) -> Union["APIModule", SyftError, partial] | None: +) -> APIModule | partial | None: # relative from ..client.api import APIRegistry - from ..node.node import AuthNodeContextRegistry + from ..server.server import AuthServerContextRegistry if callable(func_or_path): func_or_path = func_or_path.__qualname__ - if not (syft_node_location and syft_client_verify_key): + if not (syft_server_location and syft_client_verify_key): return None api = APIRegistry.api_for( - node_uid=syft_node_location, + server_uid=syft_server_location, user_verify_key=syft_client_verify_key, ) - if api is not None: - service_method = api.services + if api.is_ok(): + service_method = api.unwrap().services for path in func_or_path.split("."): service_method = getattr(service_method, path) return service_method - node_context = AuthNodeContextRegistry.auth_context_for_user( - node_uid=syft_node_location, + server_context = AuthServerContextRegistry.auth_context_for_user( + server_uid=syft_server_location, user_verify_key=syft_client_verify_key, ) - if node_context is not None and node_context.node is not None: + if server_context is not None and server_context.server is not None: user_config_registry = UserServiceConfigRegistry.from_role( - node_context.role, + server_context.role, ) if func_or_path not in user_config_registry: if ServiceConfigRegistry.path_exists(func_or_path): - return SyftError( - message=f"As a `{node_context.role}` you have has no access to: {func_or_path}" + raise SyftException( + public_message=f"As a `{server_context.role}` you have has no access to: {func_or_path}" ) else: - return SyftError( - message=f"API call not in registered services: {func_or_path}" + raise SyftException( + public_message=f"API call not in registered services: {func_or_path}" ) _private_api_path = user_config_registry.private_path_for(func_or_path) - service_method = node_context.node.get_service_method( + service_method = server_context.server.get_service_method( _private_api_path, ) - return partial(service_method, node_context) + return partial(service_method, server_context) else: - print("Could not get method from api or context") + logger.error("Could not get method from api or context") return None diff --git a/packages/syft/src/syft/service/settings/migrations.py b/packages/syft/src/syft/service/settings/migrations.py index 3ce68583ca7..9306638cf7f 100644 --- a/packages/syft/src/syft/service/settings/migrations.py +++ b/packages/syft/src/syft/service/settings/migrations.py @@ -5,10 +5,10 @@ from ...types.transforms import TransformContext -def set_from_node_to_key(node_attr: str, key: str) -> Callable: - def extract_from_node(context: TransformContext) -> TransformContext: +def set_from_server_to_key(server_attr: str, key: str) -> Callable: + def extract_from_server(context: TransformContext) -> 
TransformContext: if context.output is not None: - context.output[key] = getattr(context.node, node_attr) + context.output[key] = getattr(context.server, server_attr) return context - return extract_from_node + return extract_from_server diff --git a/packages/syft/src/syft/service/settings/settings.py b/packages/syft/src/syft/service/settings/settings.py index 874c65b1a26..a50ca25257c 100644 --- a/packages/syft/src/syft/service/settings/settings.py +++ b/packages/syft/src/syft/service/settings/settings.py @@ -1,22 +1,105 @@ # stdlib +from collections.abc import Callable +import logging +from typing import Any + +# third party +from pydantic import EmailStr +from pydantic import field_validator +from pydantic import model_validator +from typing_extensions import Self # relative -from ...abstract_node import NodeSideType -from ...abstract_node import NodeType -from ...node.credentials import SyftVerifyKey +from ...abstract_server import ServerSideType +from ...abstract_server import ServerType from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey +from ...service.worker.utils import DEFAULT_WORKER_POOL_NAME +from ...types.syft_migration import migrate from ...types.syft_object import PartialSyftObject +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SYFT_OBJECT_VERSION_2 from ...types.syft_object import SYFT_OBJECT_VERSION_3 +from ...types.syft_object import SYFT_OBJECT_VERSION_4 from ...types.syft_object import SyftObject +from ...types.transforms import drop +from ...types.transforms import make_set_default from ...types.uid import UID +from ...util.misc_objs import HTMLObject +from ...util.misc_objs import MarkdownDescription +from ...util.schema import DEFAULT_WELCOME_MSG + +logger = logging.getLogger(__name__) + +MIN_ORG_NAME_LENGTH = 1 +MIN_SERVER_NAME_LENGTH = 1 + + +@serializable() +class PwdTokenResetConfig(SyftObject): + __canonical_name__ = "PwdTokenResetConfig" + __version__ = SYFT_OBJECT_VERSION_1 + + ascii: bool = True + numbers: bool = True + token_len: int = 12 + # Token expiration time in seconds (not minutes) + token_exp_min: int = 1800 # TODO: Rename variable to token_exp_sec + + @model_validator(mode="after") + def validate_char_types(self) -> Self: + if not self.ascii and not self.numbers: + raise ValueError( + "Invalid config, at least one of the ascii/number options must be true." 
+ ) + + return self + + @field_validator("token_len") + @classmethod + def check_token_len(cls, value: int) -> int: + if value < 4: + raise ValueError("Token length must be greater than 4.") + return value + + +@serializable() +class ServerSettingsUpdateV1(PartialSyftObject): + __canonical_name__ = "ServerSettingsUpdate" + __version__ = SYFT_OBJECT_VERSION_1 + id: UID + name: str + organization: str + description: str + on_board: bool + signup_enabled: bool + admin_email: str + association_request_auto_approval: bool + welcome_markdown: HTMLObject | MarkdownDescription + eager_execution_enabled: bool = False @serializable() -class NodeSettingsUpdate(PartialSyftObject): - __canonical_name__ = "NodeSettingsUpdate" +class ServerSettingsUpdateV2(PartialSyftObject): + __canonical_name__ = "ServerSettingsUpdate" __version__ = SYFT_OBJECT_VERSION_2 + id: UID + name: str + organization: str + description: str + on_board: bool + signup_enabled: bool + admin_email: str + association_request_auto_approval: bool + welcome_markdown: HTMLObject | MarkdownDescription + eager_execution_enabled: bool + notifications_enabled: bool + +@serializable() +class ServerSettingsUpdateV3(PartialSyftObject): + __canonical_name__ = "ServerSettingsUpdate" + __version__ = SYFT_OBJECT_VERSION_3 id: UID name: str organization: str @@ -24,29 +107,287 @@ class NodeSettingsUpdate(PartialSyftObject): on_board: bool signup_enabled: bool admin_email: str + association_request_auto_approval: bool + welcome_markdown: HTMLObject | MarkdownDescription + eager_execution_enabled: bool + notifications_enabled: bool + pwd_token_config: PwdTokenResetConfig + + +@serializable() +class ServerSettingsUpdate(PartialSyftObject): + __canonical_name__ = "ServerSettingsUpdate" + __version__ = SYFT_OBJECT_VERSION_4 + id: UID + name: str + organization: str + description: str + on_board: bool + signup_enabled: bool + admin_email: str + association_request_auto_approval: bool + welcome_markdown: HTMLObject | MarkdownDescription + eager_execution_enabled: bool + notifications_enabled: bool + pwd_token_config: PwdTokenResetConfig + allow_guest_sessions: bool + + +@serializable() +class ServerSettingsV1(SyftObject): + __canonical_name__ = "ServerSettings" + __version__ = SYFT_OBJECT_VERSION_1 + + id: UID + name: str = "Server" + deployed_on: str + organization: str = "OpenMined" + verify_key: SyftVerifyKey + on_board: bool = True + description: str = "This is the default description for a Datasite Server." + server_type: ServerType = ServerType.DATASITE + signup_enabled: bool + admin_email: str + server_side_type: ServerSideType = ServerSideType.HIGH_SIDE + show_warnings: bool + association_request_auto_approval: bool + eager_execution_enabled: bool = False + default_worker_pool: str = DEFAULT_WORKER_POOL_NAME + welcome_markdown: HTMLObject | MarkdownDescription = HTMLObject( + text=DEFAULT_WELCOME_MSG + ) + + +@serializable() +class ServerSettingsV2(SyftObject): + __canonical_name__ = "ServerSettings" + __version__ = SYFT_OBJECT_VERSION_2 + + id: UID + name: str = "Server" + deployed_on: str + organization: str = "OpenMined" + verify_key: SyftVerifyKey + on_board: bool = True + description: str = "This is the default description for a Datasite Server." 
+ server_type: ServerType = ServerType.DATASITE + signup_enabled: bool + admin_email: str + server_side_type: ServerSideType = ServerSideType.HIGH_SIDE + show_warnings: bool + association_request_auto_approval: bool + eager_execution_enabled: bool = False + default_worker_pool: str = DEFAULT_WORKER_POOL_NAME + welcome_markdown: HTMLObject | MarkdownDescription = HTMLObject( + text=DEFAULT_WELCOME_MSG + ) + notifications_enabled: bool @serializable() -class NodeSettingsV2(SyftObject): - __canonical_name__ = "NodeSettings" +class ServerSettingsV3(SyftObject): + __canonical_name__ = "ServerSettings" __version__ = SYFT_OBJECT_VERSION_3 __repr_attrs__ = [ "name", "organization", + "description", "deployed_on", "signup_enabled", "admin_email", ] id: UID - name: str = "Node" + name: str = "Server" deployed_on: str organization: str = "OpenMined" verify_key: SyftVerifyKey on_board: bool = True - description: str = "Text" - node_type: NodeType = NodeType.DOMAIN + description: str = "This is the default description for a Datasite Server." + server_type: ServerType = ServerType.DATASITE signup_enabled: bool admin_email: str - node_side_type: NodeSideType = NodeSideType.HIGH_SIDE + server_side_type: ServerSideType = ServerSideType.HIGH_SIDE + show_warnings: bool + association_request_auto_approval: bool + eager_execution_enabled: bool = False + default_worker_pool: str = DEFAULT_WORKER_POOL_NAME + welcome_markdown: HTMLObject | MarkdownDescription = HTMLObject( + text=DEFAULT_WELCOME_MSG + ) + notifications_enabled: bool + pwd_token_config: PwdTokenResetConfig = PwdTokenResetConfig() + + +@serializable() +class ServerSettings(SyftObject): + __canonical_name__ = "ServerSettings" + __version__ = SYFT_OBJECT_VERSION_4 + __repr_attrs__ = [ + "name", + "organization", + "description", + "deployed_on", + "signup_enabled", + "admin_email", + "allow_guest_sessions", + ] + + id: UID + name: str = "Server" + deployed_on: str + organization: str = "OpenMined" + verify_key: SyftVerifyKey + on_board: bool = True + description: str = "This is the default description for a Datasite Server." + server_type: ServerType = ServerType.DATASITE + signup_enabled: bool + admin_email: EmailStr + server_side_type: ServerSideType = ServerSideType.HIGH_SIDE show_warnings: bool + association_request_auto_approval: bool + eager_execution_enabled: bool = False + default_worker_pool: str = DEFAULT_WORKER_POOL_NAME + welcome_markdown: HTMLObject | MarkdownDescription = HTMLObject( + text=DEFAULT_WELCOME_MSG + ) + notifications_enabled: bool + pwd_token_config: PwdTokenResetConfig = PwdTokenResetConfig() + allow_guest_sessions: bool = True + + @field_validator("organization") + def organization_length(cls, v: str) -> str: + if len(v) < MIN_ORG_NAME_LENGTH: + raise ValueError( + f"'organization' must be at least {MIN_ORG_NAME_LENGTH} characters long" + ) + return v + + @field_validator("name") + def name_length(cls, v: str) -> str: + if len(v) < MIN_SERVER_NAME_LENGTH: + raise ValueError( + f'"name" must be at least {MIN_SERVER_NAME_LENGTH} characters long' + ) + return v + + def _repr_html_(self) -> Any: + # .api.services.notifications.settings() is how the server itself would dispatch notifications. + # .api.services.notifications.user_settings() sets if a specific user wants or not to receive notifications. + # Class NotifierSettings holds both pieces of info. 
+ # Users will get notification x where x in {email, slack, sms, app} if three things are set to True: + # 1) .....settings().active + # 2) .....settings().x_enabled + # 3) .....user_settings().x + + preferences = self._get_api().services.notifications.settings() + if not preferences: + notification_print_str = "Create notification settings using enable_notifications from user_service" + else: + notifications = [] + if preferences.email_enabled: + notifications.append("email") + if preferences.sms_enabled: + notifications.append("sms") + if preferences.slack_enabled: + notifications.append("slack") + if preferences.app_enabled: + notifications.append("app") + + # self.notifications_enabled = preferences.active + if preferences.active: + if notifications: + notification_print_str = f"Enabled via {', '.join(notifications)}" + else: + notification_print_str = "Enabled without any communication method" + else: + notification_print_str = "Disabled" + + return f""" +
+            Settings
+            Id: {self.id}
+            Name: {self.name}
+            Organization: {self.organization}
+            Description: {self.description}
+            Deployed on: {self.deployed_on}
+            Signup enabled: {self.signup_enabled}
+            Notifications enabled: {notification_print_str}
+            Admin email: {self.admin_email}
+            Enable guest sessions: {self.allow_guest_sessions}
    + + """ + + +# Server Settings Migration + + +# set +@migrate(ServerSettingsV1, ServerSettingsV2) +def migrate_server_settings_v1_to_v2() -> list[Callable]: + return [make_set_default("notifications_enabled", False)] + + +@migrate(ServerSettingsV2, ServerSettingsV3) +def migrate_server_settings_v2_to_v3() -> list[Callable]: + return [make_set_default("pwd_token_config", PwdTokenResetConfig())] + + +@migrate(ServerSettingsV3, ServerSettings) +def migrate_server_settings_v3_to_current() -> list[Callable]: + return [make_set_default("allow_guest_sessions", False)] + + +# drop +@migrate(ServerSettingsV2, ServerSettingsV1) +def migrate_server_settings_v2_to_v1() -> list[Callable]: + # Use drop function on "notifications_enabled" attrubute + return [drop(["notifications_enabled"])] + + +@migrate(ServerSettingsV3, ServerSettingsV2) +def migrate_server_settings_v3_to_v2() -> list[Callable]: + # Use drop function on "notifications_enabled" attrubute + return [drop(["pwd_token_config"])] + + +@migrate(ServerSettings, ServerSettingsV3) +def migrate_server_settings_current_to_v3() -> list[Callable]: + # Use drop function on "notifications_enabled" attrubute + return [drop(["allow_guest_sessions"])] + + +# Server Settings Update Migration + + +# set +@migrate(ServerSettingsUpdateV1, ServerSettingsUpdateV2) +def migrate_server_settings_update_v1_to_v2() -> list[Callable]: + return [make_set_default("notifications_enabled", False)] + + +@migrate(ServerSettingsUpdateV2, ServerSettingsUpdateV3) +def migrate_server_settings_update_v2_to_v3() -> list[Callable]: + return [make_set_default("pwd_token_config", PwdTokenResetConfig())] + + +@migrate(ServerSettingsUpdateV3, ServerSettingsUpdate) +def migrate_server_settings_update_v3_to_current() -> list[Callable]: + return [make_set_default("allow_guest_sessions", False)] + + +# drop +@migrate(ServerSettingsUpdateV2, ServerSettingsUpdateV1) +def migrate_server_settings_update_v2_to_v1() -> list[Callable]: + return [drop(["notifications_enabled"])] + + +@migrate(ServerSettingsUpdateV3, ServerSettingsUpdateV2) +def migrate_server_settings_update_current_to_v2() -> list[Callable]: + return [drop(["pwd_token_config"])] + + +@migrate(ServerSettingsUpdate, ServerSettingsUpdateV3) +def migrate_server_settings_update_current_to_v3() -> list[Callable]: + return [drop(["allow_guest_sessions"])] diff --git a/packages/syft/src/syft/service/settings/settings_service.py b/packages/syft/src/syft/service/settings/settings_service.py index ffe58198308..9aba353dbc7 100644 --- a/packages/syft/src/syft/service/settings/settings_service.py +++ b/packages/syft/src/syft/service/settings/settings_service.py @@ -1,87 +1,217 @@ # stdlib - -# stdlib -from typing import cast +from string import Template +from typing import Any # third party -from result import Err -from result import Ok -from result import Result +from pydantic import ValidationError # relative -from ...abstract_node import AbstractNode +from ...abstract_server import ServerSideType from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_metaclass import Empty +from ...util.assets import load_png_base64 from ...util.experimental_flags import flags +from ...util.misc_objs import HTMLObject +from ...util.misc_objs import 
MarkdownDescription +from ...util.notebook_ui.styles import FONT_CSS +from ...util.schema import DO_COMMANDS +from ...util.schema import DS_COMMANDS +from ...util.schema import GUEST_COMMANDS from ..context import AuthedServiceContext from ..context import UnauthedServiceContext -from ..response import SyftError +from ..notifier.notifier_enums import EMAIL_TYPES +from ..notifier.notifier_enums import NOTIFICATION_FREQUENCY from ..response import SyftSuccess from ..service import AbstractService from ..service import service_method from ..user.user_roles import ADMIN_ROLE_LEVEL +from ..user.user_roles import GUEST_ROLE_LEVEL +from ..user.user_roles import ServiceRole from ..warnings import HighSideCRUDWarning -from .settings import NodeSettingsUpdate -from .settings import NodeSettingsV2 +from .settings import ServerSettings +from .settings import ServerSettingsUpdate from .settings_stash import SettingsStash +# for testing purpose +_NOTIFICATIONS_ENABLED_WIHOUT_CREDENTIALS_ERROR = ( + "Failed to enable notification. " + "Email credentials are invalid or have not been set. " + "Please use `enable_notifications` from `user_service` " + "to set the correct email credentials." +) + -@serializable() +@serializable(canonical_name="SettingsService", version=1) class SettingsService(AbstractService): - store: DocumentStore stash: SettingsStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = SettingsStash(store=store) @service_method(path="settings.get", name="get") - def get(self, context: UnauthedServiceContext) -> Result[Ok, Err]: + def get(self, context: UnauthedServiceContext) -> ServerSettings: """Get Settings""" - context.node = cast(AbstractNode, context.node) - result = self.stash.get_all(context.node.signing_key.verify_key) - if result.is_ok(): - settings = result.ok() - # check if the settings list is empty - if len(settings) == 0: - return SyftError(message="No settings found") - result = settings[0] - return Ok(result) - else: - return SyftError(message=result.err()) + all_settings = self.stash.get_all( + context.server.signing_key.verify_key + ).unwrap() + + if len(all_settings) == 0: + raise NotFoundException(public_message="No settings found") + + return all_settings[0] @service_method(path="settings.set", name="set") def set( - self, context: AuthedServiceContext, settings: NodeSettingsV2 - ) -> Result[Ok, Err]: - """Set a new the Node Settings""" - print("Here!") - result = self.stash.set(context.credentials, settings) - if result.is_ok(): - return result - else: - return SyftError(message=result.err()) + self, context: AuthedServiceContext, settings: ServerSettings + ) -> ServerSettings: + """Set a new the Server Settings""" + return self.stash.set(context.credentials, settings).unwrap() - @service_method(path="settings.update", name="update") + @service_method( + path="settings.update", + name="update", + autosplat=["settings"], + unwrap_on_success=False, + roles=ADMIN_ROLE_LEVEL, + ) def update( - self, context: AuthedServiceContext, settings: NodeSettingsUpdate - ) -> Result[Ok, Err]: - result = self.stash.get_all(context.credentials) - if result.is_ok(): - current_settings = result.ok() - if len(current_settings) > 0: - new_settings = current_settings[0].copy( - update=settings.to_dict(exclude_empty=True) + self, context: AuthedServiceContext, settings: ServerSettingsUpdate + ) -> SyftSuccess: + """ + Update the Server Settings using the provided values. 
+ + Args: + name: Optional[str] + Server name + organization: Optional[str] + Organization name + description: Optional[str] + Server description + on_board: Optional[bool] + Show onboarding panel when a user logs in for the first time + signup_enabled: Optional[bool] + Enable/Disable registration + admin_email: Optional[str] + Administrator email + association_request_auto_approval: Optional[bool] + + Returns: + SyftSuccess: Message indicating the success of the operation, with the + update server settings as the value property. + + Example: + >>> server_client.update(name='foo', organization='bar', description='baz', signup_enabled=True) + SyftSuccess: Settings updated successfully. + """ + updated_settings = self._update(context, settings).unwrap() + return SyftSuccess( + message=( + "Settings updated successfully. " + + "You must call .refresh() to sync your client with the changes." + ), + value=updated_settings, + ) + + @as_result(StashException, NotFoundException, ValidationError) + def _update( + self, context: AuthedServiceContext, settings: ServerSettingsUpdate + ) -> ServerSettings: + all_settings = self.stash.get_all( + context.credentials, limit=1, sort_order="desc" + ).unwrap() + if len(all_settings) > 0: + new_settings = all_settings[0].model_copy( + update=settings.to_dict(exclude_empty=True) + ) + ServerSettings.model_validate(new_settings.to_dict()) + update_result = self.stash.update( + context.credentials, obj=new_settings + ).unwrap() + + # If notifications_enabled is present in the update, we need to update the notifier settings + if settings.notifications_enabled is not Empty: # type: ignore[comparison-overlap] + notifier_settings_res = context.server.services.notifier.settings( + context + ) + if ( + not notifier_settings_res.is_ok() + or notifier_settings_res.ok() is None + ): + raise SyftException( + public_message=( + "Notification has not been enabled. " + "Please use `enable_notifications` from `user_service`." + ) + ) + + context.server.services.notifier._set_notifier( + context, active=settings.notifications_enabled ) - update_result = self.stash.update(context.credentials, new_settings) - if update_result.is_ok(): - return result - else: - return SyftError(message=update_result.err()) - else: - return SyftError(message="No settings found") + + return update_result else: - return SyftError(message=result.err()) + raise NotFoundException(public_message="Server settings not found") + + @service_method( + path="settings.set_server_side_type_dangerous", + name="set_server_side_type_dangerous", + roles=ADMIN_ROLE_LEVEL, + unwrap_on_success=False, + ) + def set_server_side_type_dangerous( + self, context: AuthedServiceContext, server_side_type: str + ) -> SyftSuccess: + side_type_options = [e.value for e in ServerSideType] + + if server_side_type not in side_type_options: + raise SyftException( + public_message=f"Not a valid server_side_type, please use one of the options from: {side_type_options}" + ) + + current_settings = self.stash.get_all( + context.credentials, limit=1, sort_order="desc" + ).unwrap() + if len(current_settings) > 0: + new_settings = current_settings[0] + new_settings.server_side_type = ServerSideType(server_side_type) + updated_settings = self.stash.update( + context.credentials, new_settings + ).unwrap() + return SyftSuccess( + message=( + "Settings updated successfully. " + + "You must call .refresh() to sync your client with the changes." + ), + value=updated_settings, + ) + else: + # TODO: Turn this into a function? 
+ raise NotFoundException(public_message="Server settings not found") + + @service_method( + path="settings.batch_notifications", + name="batch_notifications", + roles=ADMIN_ROLE_LEVEL, + ) + def batch_notifications( + self, + context: AuthedServiceContext, + email_type: EMAIL_TYPES, + frequency: NOTIFICATION_FREQUENCY, + start_time: str = "", + ) -> SyftSuccess: + result = context.server.services.notifier.set_email_batch( + context=context, + email_type=email_type, + frequency=frequency, + start_time=start_time, + ).unwrap() + return result @service_method( path="settings.enable_notifications", @@ -96,17 +226,16 @@ def enable_notifications( email_sender: str | None = None, email_server: str | None = None, email_port: str | None = None, - ) -> SyftSuccess | SyftError: - context.node = cast(AbstractNode, context.node) - notifier_service = context.node.get_service("notifierservice") - return notifier_service.turn_on( + ) -> SyftSuccess: + context.server.services.notifier.turn_on( context=context, email_username=email_username, email_password=email_password, email_sender=email_sender, email_server=email_server, email_port=email_port, - ) + ).unwrap() + return SyftSuccess(message="Notifications enabled") @service_method( path="settings.disable_notifications", @@ -116,29 +245,207 @@ def enable_notifications( def disable_notifications( self, context: AuthedServiceContext, - ) -> SyftSuccess | SyftError: - context.node = cast(AbstractNode, context.node) - notifier_service = context.node.get_service("notifierservice") - return notifier_service.turn_off(context=context) + ) -> SyftSuccess: + context.server.services.notifier.turn_off(context=context).unwrap() + return SyftSuccess(message="Notifications disabled") @service_method( path="settings.allow_guest_signup", name="allow_guest_signup", warning=HighSideCRUDWarning(confirmation=True), + unwrap_on_success=False, ) def allow_guest_signup( self, context: AuthedServiceContext, enable: bool - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: """Enable/Disable Registration for Data Scientist or Guest Users.""" flags.CAN_REGISTER = enable - context.node = cast(AbstractNode, context.node) - method = context.node.get_service_method(SettingsService.update) - settings = NodeSettingsUpdate(signup_enabled=enable) - result = method(context=context, settings=settings) + settings = ServerSettingsUpdate(signup_enabled=enable) + self._update(context=context, settings=settings).unwrap() + message = "enabled" if enable else "disabled" + return SyftSuccess( + message=f"Registration feature successfully {message}", value=message + ) + + # NOTE: This service is disabled until we bring back Eager Execution + # @service_method( + # path="settings.enable_eager_execution", + # name="enable_eager_execution", + # roles=ADMIN_ROLE_LEVEL, + # warning=HighSideCRUDWarning(confirmation=True), + # ) + def enable_eager_execution( + self, context: AuthedServiceContext, enable: bool + ) -> SyftSuccess: + """Enable/Disable eager execution.""" + settings = ServerSettingsUpdate(eager_execution_enabled=enable) + self._update(context=context, settings=settings).unwrap() + message = "enabled" if enable else "disabled" + return SyftSuccess(message=f"Eager execution {message}", value=message) - if result.is_err(): - return SyftError(message=f"Failed to update settings: {result.err()}") + @service_method(path="settings.set_email_rate_limit", name="set_email_rate_limit") + def set_email_rate_limit( + self, context: AuthedServiceContext, email_type: EMAIL_TYPES, daily_limit: int + ) -> 
SyftSuccess: + return context.server.services.notifier.set_email_rate_limit( + context, email_type, daily_limit + ) + @service_method( + path="settings.allow_association_request_auto_approval", + name="allow_association_request_auto_approval", + unwrap_on_success=False, + ) + def allow_association_request_auto_approval( + self, context: AuthedServiceContext, enable: bool + ) -> SyftSuccess: + new_settings = ServerSettingsUpdate(association_request_auto_approval=enable) + self._update(context, settings=new_settings).unwrap() message = "enabled" if enable else "disabled" - return SyftSuccess(message=f"Registration feature successfully {message}") + return SyftSuccess( + message="Association request auto-approval successfully " + message + ) + + @service_method( + path="settings.welcome_preview", + name="welcome_preview", + ) + def welcome_preview( + self, + context: AuthedServiceContext, + markdown: str = "", + html: str = "", + ) -> MarkdownDescription | HTMLObject: + if not markdown and not html or markdown and html: + raise SyftException( + public_message="Invalid markdown/html fields. You must set one of them." + ) + + welcome_msg = None + if markdown: + welcome_msg = MarkdownDescription(text=markdown) + else: + welcome_msg = HTMLObject(text=html) + + return welcome_msg + + @service_method( + path="settings.welcome_customize", + name="welcome_customize", + unwrap_on_success=False, + ) + def welcome_customize( + self, + context: AuthedServiceContext, + markdown: str = "", + html: str = "", + ) -> SyftSuccess: + if not markdown and not html or markdown and html: + raise SyftException( + public_message="Invalid markdown/html fields. You must set one of them." + ) + + welcome_msg = None + if markdown: + welcome_msg = MarkdownDescription(text=markdown) + else: + welcome_msg = HTMLObject(text=html) + + new_settings = ServerSettingsUpdate(welcome_markdown=welcome_msg) + self._update(context=context, settings=new_settings).unwrap() + + return SyftSuccess(message="Welcome Markdown was successfully updated!") + + @service_method( + path="settings.welcome_show", + name="welcome_show", + roles=GUEST_ROLE_LEVEL, + ) + def welcome_show( + self, + context: AuthedServiceContext, + ) -> HTMLObject | MarkdownDescription: + all_settings = self.stash.get_all( + context.server.signing_key.verify_key + ).unwrap() + role = context.server.services.user.get_role_for_credentials( + context.credentials + ).unwrap() + + # check if the settings list is empty + if len(all_settings) == 0: + raise NotFoundException(public_message="Server settings not found") + settings = all_settings[0] + + if settings.welcome_markdown: + str_tmp = Template(settings.welcome_markdown.text) + welcome_msg_class = type(settings.welcome_markdown) + server_side_type = ( + "Low Side" + if context.server.metadata.server_side_type + == ServerSideType.LOW_SIDE.value + else "High Side" + ) + commands = "" + if ( + role.value == ServiceRole.NONE.value + or role.value == ServiceRole.GUEST.value + ): + commands = GUEST_COMMANDS + elif role is not None and role.value == ServiceRole.DATA_SCIENTIST.value: + commands = DS_COMMANDS + elif role is not None and role.value >= ServiceRole.DATA_OWNER.value: + commands = DO_COMMANDS + + command_list = f""" +
+            {commands}
    + """ + result = str_tmp.safe_substitute( + FONT_CSS=FONT_CSS, + server_symbol=load_png_base64("small-syft-symbol-logo.png"), + datasite_name=context.server.name, + description=context.server.metadata.description, + # server_url='http://testing:8080', + server_type=context.server.metadata.server_type.capitalize(), + server_side_type=server_side_type, + server_version=context.server.metadata.syft_version, + command_list=command_list, + ) + return welcome_msg_class(text=result) + raise SyftException(public_message="There's no welcome message") + + @service_method( + path="settings.get_server_config", + name="get_server_config", + roles=ADMIN_ROLE_LEVEL, + ) + def get_server_config( + self, + context: AuthedServiceContext, + ) -> dict[str, Any]: + server = context.server + + return { + "name": server.name, + "server_type": server.server_type, + # "deploy_to": server.deployment_type_enum, + "server_side_type": server.server_side_type, + # "port": server.port, + "processes": server.processes, + "dev_mode": server.dev_mode, + "reset": True, # we should be able to get all the objects from migration data + "tail": False, + # "host": server.host, + "enable_warnings": server.enable_warnings, + "n_consumers": server.queue_config.client_config.create_producer, + "thread_workers": server.queue_config.thread_workers, + "create_producer": server.queue_config.client_config.create_producer, + "queue_port": server.queue_config.client_config.queue_port, + "association_request_auto_approval": server.association_request_auto_approval, + "background_tasks": True, + "debug": True, # we also want to debug + "migrate": False, # I think we dont want to migrate? + } diff --git a/packages/syft/src/syft/service/settings/settings_stash.py b/packages/syft/src/syft/service/settings/settings_stash.py index 4aac62c60d7..c22c08045f3 100644 --- a/packages/syft/src/syft/service/settings/settings_stash.py +++ b/packages/syft/src/syft/service/settings/settings_stash.py @@ -1,57 +1,11 @@ -# stdlib - -# third party -from result import Result - # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...types.uid import UID +from ...store.db.stash import ObjectStash from ...util.telemetry import instrument -from ..action.action_permissions import ActionObjectPermission -from .settings import NodeSettingsV2 - -NamePartitionKey = PartitionKey(key="name", type_=str) -ActionIDsPartitionKey = PartitionKey(key="action_ids", type_=list[UID]) +from .settings import ServerSettings @instrument -@serializable() -class SettingsStash(BaseUIDStoreStash): - object_type = NodeSettingsV2 - settings: PartitionSettings = PartitionSettings( - name=NodeSettingsV2.__canonical_name__, object_type=NodeSettingsV2 - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) - - def set( - self, - credentials: SyftVerifyKey, - settings: NodeSettingsV2, - add_permission: list[ActionObjectPermission] | None = None, - add_storage_permission: bool = True, - ignore_duplicates: bool = False, - ) -> Result[NodeSettingsV2, str]: - res = self.check_type(settings, self.object_type) - # we dont use and_then logic here as it is hard because of the order of the arguments - if res.is_err(): - return res - return super().set(credentials=credentials, obj=res.ok()) - - def update( 
- self, - credentials: SyftVerifyKey, - settings: NodeSettingsV2, - has_permission: bool = False, - ) -> Result[NodeSettingsV2, str]: - res = self.check_type(settings, self.object_type) - # we dont use and_then logic here as it is hard because of the order of the arguments - if res.is_err(): - return res - return super().update(credentials=credentials, obj=res.ok()) +@serializable(canonical_name="SettingsStashSQL", version=1) +class SettingsStash(ObjectStash[ServerSettings]): + pass diff --git a/packages/syft/src/syft/service/sync/diff_state.py b/packages/syft/src/syft/service/sync/diff_state.py index 74d4eb1fb4c..1078dab3b42 100644 --- a/packages/syft/src/syft/service/sync/diff_state.py +++ b/packages/syft/src/syft/service/sync/diff_state.py @@ -1,19 +1,20 @@ -""" -How to check differences between two objects: - * by default merge every attr - * check if there is a custom implementation of the check function - * check if there are exceptions we do not want to merge - * check if there are some restrictions on the attr set -""" - # stdlib +from collections.abc import Callable +from collections.abc import Collection +from collections.abc import Iterable +from dataclasses import dataclass +import enum import html +import logging +import operator import textwrap from typing import Any from typing import ClassVar +from typing import Literal +from typing import TYPE_CHECKING # third party -from pydantic import model_validator +import pandas as pd from rich import box from rich.console import Console from rich.console import Group @@ -23,34 +24,53 @@ from typing_extensions import Self # relative -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...client.client import SyftClient +from ...client.sync_decision import SyncDecision +from ...client.sync_decision import SyncDirection +from ...server.credentials import SyftVerifyKey +from ...types.datetime import DateTime +from ...types.errors import SyftException +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject +from ...types.syft_object import short_uid from ...types.syncable_object import SyncableSyftObject from ...types.uid import LineageID from ...types.uid import UID -from ...util import options -from ...util.colors import SURFACE -from ...util.fonts import ITABLES_CSS -from ...util.fonts import fonts_css +from ...util.notebook_ui.components.sync import Label +from ...util.notebook_ui.components.sync import SyncTableObject +from ...util.notebook_ui.icons import Icon +from ...util.util import prompt_warning_message from ..action.action_object import ActionObject from ..action.action_permissions import ActionObjectPermission +from ..action.action_permissions import ActionPermission from ..action.action_permissions import StoragePermission +from ..api.api import TwinAPIEndpoint from ..code.user_code import UserCode from ..code.user_code import UserCodeStatusCollection from ..job.job_stash import Job +from ..job.job_stash import JobType from ..log.log import SyftLog from ..output.output_service import ExecutionOutput +from ..policy.policy import Constant from ..request.request import Request -from ..response import SyftError +from ..response import SyftSuccess +from ..user.user import UserView from .sync_state import SyncState +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + # relative + from .resolve_widget import PaginatedResolveWidget + from .resolve_widget import ResolveWidget + sketchy_tab = "‎ " * 4 class AttrDiff(SyftObject): # version __canonical_name__ = "AttrDiff" - 
__version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 attr_name: str low_attr: Any = None high_attr: Any = None @@ -78,7 +98,7 @@ def _coll_repr_(self) -> dict[str, Any]: class ListDiff(AttrDiff): # version __canonical_name__ = "ListDiff" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 diff_ids: list[int] = [] new_low_ids: list[int] = [] new_high_ids: list[int] = [] @@ -107,10 +127,10 @@ def from_lists(cls, attr_name: str, low_list: list, high_list: list) -> "ListDif common_length = len(low_list) for i in range(common_length): - # if hasattr(low_list[i], 'syft_eq'): - # if not low_list[i].syft_eq(high_list[i]): - # diff_ids.append(i) - if low_list[i] != high_list[i]: + if hasattr(low_list[i], "syft_eq"): + if not low_list[i].syft_eq(high_list[i]): + diff_ids.append(i) + elif low_list[i] != high_list[i]: diff_ids.append(i) change_diff = ListDiff( @@ -128,11 +148,14 @@ def recursive_attr_repr(value_attr: list | dict | bytes, num_tabs: int = 0) -> s new_num_tabs = num_tabs + 1 if isinstance(value_attr, list): - list_repr = "[\n" - for elem in value_attr: - list_repr += recursive_attr_repr(elem, num_tabs=num_tabs + 1) + "\n" - list_repr += "]" - return list_repr + if len(value_attr) == 1: + value_attr = value_attr[0] + else: + list_repr = "[\n" + for elem in value_attr: + list_repr += recursive_attr_repr(elem, num_tabs=num_tabs + 1) + "\n" + list_repr += "]" + return list_repr elif isinstance(value_attr, dict): dict_repr = "{\n" @@ -145,21 +168,29 @@ def recursive_attr_repr(value_attr: list | dict | bytes, num_tabs: int = 0) -> s value_attr = repr(value_attr) # type: ignore if len(value_attr) > 50: value_attr = value_attr[:50] + "..." # type: ignore + + if isinstance(value_attr, UID): + value_attr = short_uid(value_attr) # type: ignore + return f"{sketchy_tab*num_tabs}{str(value_attr)}" class ObjectDiff(SyftObject): # StateTuple (compare 2 objects) # version __canonical_name__ = "ObjectDiff" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 low_obj: SyncableSyftObject | None = None high_obj: SyncableSyftObject | None = None - low_node_uid: UID - high_node_uid: UID + low_server_uid: UID + high_server_uid: UID low_permissions: list[str] = [] high_permissions: list[str] = [] low_storage_permissions: set[UID] = set() high_storage_permissions: set[UID] = set() + low_status: str | None = None + high_status: str | None = None + last_sync_date_low: DateTime | None = None + last_sync_dat_high: DateTime | None = None obj_type: type diff_list: list[AttrDiff] = [] @@ -168,6 +199,7 @@ class ObjectDiff(SyftObject): # StateTuple (compare 2 objects) "low_state", "high_state", ] + __syft_include_id_coll_repr__ = False def is_mock(self, side: str) -> bool: # An object is a mock object if it exists on both sides, @@ -179,31 +211,35 @@ def is_mock(self, side: str) -> bool: obj = self.low_obj other_obj = self.high_obj permissions = self.low_storage_permissions - node_uid = self.low_node_uid + server_uid = self.low_server_uid elif side == "high": obj = self.high_obj other_obj = self.low_obj permissions = self.high_storage_permissions - node_uid = self.high_node_uid + server_uid = self.high_server_uid else: raise ValueError("Invalid side") if obj is None or other_obj is None: return False - return node_uid not in permissions + return server_uid not in permissions @classmethod def from_objects( cls, low_obj: SyncableSyftObject | None, high_obj: SyncableSyftObject | None, + low_status: str | None, + high_status: str | None, low_permissions: 
set[str], high_permissions: set[str], low_storage_permissions: set[UID], high_storage_permissions: set[UID], - low_node_uid: UID, - high_node_uid: UID, + low_server_uid: UID, + high_server_uid: UID, + last_sync_date_low: DateTime | None = None, + last_sync_date_high: DateTime | None = None, ) -> "ObjectDiff": if low_obj is None and high_obj is None: raise ValueError("Both low and high objects are None") @@ -212,20 +248,24 @@ def from_objects( res = cls( low_obj=low_obj, high_obj=high_obj, + low_status=low_status, + high_status=high_status, obj_type=obj_type, - low_node_uid=low_node_uid, - high_node_uid=high_node_uid, + low_server_uid=low_server_uid, + high_server_uid=high_server_uid, low_permissions=low_permissions, high_permissions=high_permissions, low_storage_permissions=low_storage_permissions, high_storage_permissions=high_storage_permissions, + last_sync_date_low=last_sync_date_low, + last_sync_date_high=last_sync_date_high, ) if ( low_obj is None or high_obj is None - or res.is_mock("low") - or res.is_mock("high") + or (res.is_mock("low") and high_status == "SAME") + or (res.is_mock("high") and low_status == "SAME") ): diff_list = [] else: @@ -235,15 +275,27 @@ def from_objects( return res def __hash__(self) -> int: - return hash(self.id) + hash(self.low_obj) + hash(self.high_obj) + return hash(self.object_id) + hash(self.low_obj) + hash(self.high_obj) @property - def status(self) -> str: + def last_sync_date(self) -> DateTime | None: + last_sync_low = self.last_sync_date_low if self.low_obj is not None else None + last_sync_high = self.last_sync_dat_high if self.high_obj is not None else None + + if last_sync_low is None: + return last_sync_high + elif last_sync_high is None: + return last_sync_low + else: + return max(last_sync_low, last_sync_high) + + @property + def status(self) -> Literal["NEW", "SAME", "MODIFIED"]: if self.low_obj is None or self.high_obj is None: return "NEW" if len(self.diff_list) == 0: return "SAME" - return "DIFF" + return "MODIFIED" @property def object_id(self) -> UID: @@ -256,7 +308,10 @@ def object_id(self) -> UID: @property def non_empty_object(self) -> SyftObject | None: - return self.low_obj or self.high_obj + if self.low_obj is not None: + return self.low_obj + else: + return self.high_obj @property def object_type(self) -> str: @@ -274,6 +329,55 @@ def low_state(self) -> str: def object_uid(self) -> UID: return self.low_obj.id if self.low_obj is not None else self.high_obj.id # type: ignore + def repr_attr_diffstatus_dict(self) -> dict: + # relative + from .resolve_widget import DiffStatus + + low_attrs = self.repr_attr_dict("low") + high_attrs = self.repr_attr_dict("high") + all_attrs = set(low_attrs.keys()) | set(high_attrs.keys()) + + res = {} + for attr in all_attrs: + value_low = low_attrs.get(attr, None) + value_high = high_attrs.get(attr, None) + + if value_low is None or value_high is None: + res[attr] = DiffStatus.NEW + elif isinstance(value_low, pd.DataFrame) and isinstance( + value_high, pd.DataFrame + ): + res[attr] = ( + DiffStatus.MODIFIED + if not value_low.equals(value_high) + else DiffStatus.SAME + ) + elif value_low != value_high: + res[attr] = DiffStatus.MODIFIED + else: + res[attr] = DiffStatus.SAME + return res + + def repr_attr_dict(self, side: str) -> dict[str, Any]: + obj = self.low_obj if side == "low" else self.high_obj + if isinstance(obj, ActionObject): + # Only safe for ActionObjects created by data owners + return {"value": obj.syft_action_data_repr_} + repr_attrs = getattr(obj, "__repr_attrs__", []) + res = {} + for 
attr in repr_attrs: + value = getattr(obj, attr) + res[attr] = value + + # if there are constants in UserCode input policy, add to repr + # type ignores since mypy thinks the code is unreachable for some reason + if isinstance(obj, UserCode) and obj.input_policy_init_kwargs is not None: # type: ignore + for input_policy_kwarg in obj.input_policy_init_kwargs.values(): # type: ignore + for input_val in input_policy_kwarg.values(): + if isinstance(input_val, Constant): + res[input_val.kw] = input_val.val + return res + def diff_attributes_str(self, side: str) -> str: obj = self.low_obj if side == "low" else self.high_obj @@ -291,7 +395,7 @@ def diff_attributes_str(self, side: str) -> str: attrs_str += f"{attr}: {recursive_attr_repr(value)}\n" return attrs_str - elif self.status == "DIFF": + elif self.status == "MODIFIED": attrs_str = "" for diff in self.diff_list: attrs_str += f"{diff.attr_name}: {diff.__repr_side__(side)}\n" @@ -303,7 +407,7 @@ def diff_side_str(self, side: str) -> str: obj = self.low_obj if side == "low" else self.high_obj if obj is None: return "" - res = f"{self.obj_type.__name__.upper()} #{obj.id}:\n" + res = f"{self.obj_type.__name__.upper()} #{short_uid(obj.id)}\n" res += self.diff_attributes_str(side) return res @@ -348,7 +452,7 @@ def get_obj(self) -> SyftObject | None: if self.status == "NEW": return self.low_obj if self.low_obj is not None else self.high_obj else: - raise ValueError("ERROR") + raise ValueError("Cannot get object from a diff that is not new") def _coll_repr_(self) -> dict[str, Any]: low_state = f"{self.status}\n{self.diff_side_str('low')}" @@ -360,17 +464,9 @@ def _coll_repr_(self) -> dict[str, Any]: def _repr_html_(self) -> str: if self.low_obj is None and self.high_obj is None: - return SyftError(message="Something broke") - - base_str = f""" - + raise SyftException(public_message="Something broke") + + base_str = """
    """ @@ -412,11 +508,13 @@ def _repr_html_(self) -> str: obj_repr += diff.__repr__() + "
    " obj_repr = obj_repr.replace("\n", "
    ") - # print("New lines", res) attr_text = f"

    {self.object_type} ObjectDiff:

    \n{obj_repr}" return base_str + attr_text + def __repr__(self) -> str: + return f"{self.__class__.__name__}[{self.obj_type.__name__}](#{str(self.object_id)})" + def _wrap_text(text: str, width: int, indent: int = 4) -> str: """Wrap text, preserving existing line breaks""" @@ -439,67 +537,395 @@ def _wrap_text(text: str, width: int, indent: int = 4) -> str: class ObjectDiffBatch(SyftObject): __canonical_name__ = "DiffHierarchy" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 LINE_LENGTH: ClassVar[int] = 100 INDENT: ClassVar[int] = 4 ORDER: ClassVar[dict] = {"low": 0, "high": 1} + __syft_include_id_coll_repr__ = False + + low_server_uid: UID + high_server_uid: UID + user_verify_key_low: SyftVerifyKey + user_verify_key_high: SyftVerifyKey + # Diffs are ordered in depth-first order, # the first diff is the root of the hierarchy - diffs: list[ObjectDiff] + global_diffs: dict[UID, ObjectDiff] + global_roots: list[UID] + global_batches: list["ObjectDiffBatch"] | None = None + hierarchy_levels: list[int] dependencies: dict[UID, list[UID]] = {} dependents: dict[UID, list[UID]] = {} + decision: SyncDecision | None = None + root_diff: ObjectDiff + sync_direction: SyncDirection | None + + def resolve(self, build_state: bool = True) -> "ResolveWidget": + # relative + from .resolve_widget import ResolveWidget + + return ResolveWidget(self, build_state=build_state) + + def walk_graph( + self, + deps: dict[UID, list[UID]], + include_roots: bool = False, + include_batch_root: bool = True, + ) -> list[ObjectDiff]: + root_id = self.root_diff.object_id + result = [root_id] + unvisited = [root_id] + global_roots = [x for x in self.global_roots if x is not root_id] + roots = [] + + while len(unvisited): + # Do we update this in the terminal case + new_servers = [] + for server in unvisited: + if server in global_roots: + roots.append(server) + else: + new_servers += deps.get(server, []) + + new_servers = [n for n in new_servers if n not in result] + unvisited = new_servers + result += unvisited + + if include_roots: + result += roots + + if not include_batch_root: + result.remove(root_id) + + return [self.global_diffs[r] for r in set(result)] + + @property + def target_server_uid(self) -> UID: + if self.sync_direction is None: + raise ValueError("no direction specified") + if self.sync_direction == SyncDirection.LOW_TO_HIGH: + return self.high_server_uid + else: + return self.low_server_uid + + @property + def source_server_uid(self) -> UID: + if self.sync_direction is None: + raise ValueError("no direction specified") + if self.sync_direction == SyncDirection.LOW_TO_HIGH: + return self.low_server_uid + else: + return self.high_server_uid + + @property + def target_verify_key(self) -> SyftVerifyKey: + if self.sync_direction is None: + raise ValueError("no direction specified") + if self.sync_direction == SyncDirection.LOW_TO_HIGH: + return self.user_verify_key_high + else: + return self.user_verify_key_low + + @property + def source_verify_key(self) -> SyftVerifyKey: + if self.sync_direction is None: + raise ValueError("no direction specified") + if self.sync_direction == SyncDirection.LOW_TO_HIGH: + return self.user_verify_key_low + else: + return self.user_verify_key_high + + @property + def source_client(self) -> SyftClient: + return self.build(self.source_server_uid, self.source_verify_key) + + @property + def target_client(self) -> SyftClient: + return self.build(self.target_server_uid, self.target_verify_key) + + def build(self, server_uid: UID, syft_client_verify_key: 
SyftVerifyKey): # type: ignore + # relative + from ...client.datasite_client import DatasiteClient + + api = self.get_api(server_uid, syft_client_verify_key) + + client = DatasiteClient( + api=api, + connection=api.connection, # type: ignore + credentials=api.signing_key, # type: ignore + ) + return client + + def get_dependencies( + self, + include_roots: bool = False, + include_batch_root: bool = True, + ) -> list[ObjectDiff]: + return self.walk_graph( + deps=self.dependencies, + include_roots=include_roots, + include_batch_root=include_batch_root, + ) + + @property + def status(self) -> str: + if self.root_diff.status == "NEW": + return "NEW" + + batch_statuses = [ + diff.status for diff in self.get_dependencies(include_roots=False) + ] + if all(status == "SAME" for status in batch_statuses): + return "SAME" + + return "MODIFIED" + + @property + def is_unchanged(self) -> bool: + return self.status == "SAME" + + def get_dependents( + self, include_roots: bool = False, include_batch_root: bool = True + ) -> list[ObjectDiff]: + return self.walk_graph( + deps=self.dependents, + include_roots=include_roots, + include_batch_root=include_batch_root, + ) + + def __hash__(self) -> int: + diffs = self.get_dependents(include_roots=False) + return sum(hash(x) for x in diffs) + + def ignore(self) -> SyftSuccess: + # relative + from ...client.syncing import handle_ignore_batch + + return handle_ignore_batch(self, self.global_batches) + + def unignore(self) -> SyftSuccess: + # relative + from ...client.syncing import handle_unignore_batch + + return handle_unignore_batch(self, self.global_batches) + + @property + def root_id(self) -> UID: + return self.root_diff.object_id + + @property + def root_type(self) -> type: + return self.root_diff.obj_type + + def decision_badge(self) -> str: + if self.decision is None: + return "" + if self.decision == SyncDecision.IGNORE: + decision_str = "IGNORED" + badge_color = "label-red" + elif self.decision == SyncDecision.SKIP: + decision_str = "SKIPPED" + badge_color = "label-gray" + else: + decision_str = "SYNCED" + badge_color = "label-green" + + return Label(value=decision_str, label_class=badge_color).to_html() + + @property + def is_ignored(self) -> bool: + return self.decision == SyncDecision.IGNORE + + @property + def is_skipped(self) -> bool: + return self.decision == SyncDecision.SKIP + + def create_new_resolved_states( + self, + ) -> tuple["ResolvedSyncState", "ResolvedSyncState"]: + """ + Returns new ResolvedSyncState objects for the source and target servers + """ + resolved_state_low = ResolvedSyncState( + server_uid=self.low_server_uid, alias="low" + ) + resolved_state_high = ResolvedSyncState( + server_uid=self.high_server_uid, alias="high" + ) + + # Return source, target + if self.sync_direction == SyncDirection.LOW_TO_HIGH: + return resolved_state_low, resolved_state_high + else: + return resolved_state_high, resolved_state_low + + @classmethod + def from_dependencies( + cls, + root_uid: UID, + obj_dependencies: dict[UID, list[UID]], + obj_dependents: dict[UID, list[UID]], + obj_uid_to_diff: dict[UID, ObjectDiff], + root_ids: list[UID], + low_server_uid: UID, + high_server_uid: UID, + user_verify_key_low: SyftVerifyKey, + user_verify_key_high: SyftVerifyKey, + sync_direction: SyncDirection, + ) -> "ObjectDiffBatch": + def _build_hierarchy_helper( + uid: UID, level: int = 0, visited: set | None = None + ) -> list: + visited = visited if visited is not None else set() + + if uid in visited: + return [] + + result = [(uid, level)] + visited.add(uid) + 
if uid in obj_dependencies: + deps = obj_dependencies[uid] + for dep_uid in obj_dependencies[uid]: + if dep_uid not in visited: + # NOTE we pass visited + deps to recursive calls, to have + # all objects at the highest level in the hierarchy + # Example: + # ExecutionOutput + # -- Job + # ---- Result + # -- Result + # We want to omit Job.Result, because it's already in ExecutionOutput.Result + result.extend( + _build_hierarchy_helper( + uid=dep_uid, + level=level + 1, + visited=visited | set(deps) - {dep_uid}, + ) + ) + return result + + batch_uids = _build_hierarchy_helper(root_uid) + # levels in the tree that we create + levels = [level for _, level in batch_uids] + + batch_uids = {uid for uid, _ in batch_uids} # type: ignore + + return cls( + global_diffs=obj_uid_to_diff, + global_roots=root_ids, + hierarchy_levels=levels, + dependencies=obj_dependencies, + dependents=obj_dependents, + root_diff=obj_uid_to_diff[root_uid], + low_server_uid=low_server_uid, + high_server_uid=high_server_uid, + user_verify_key_low=user_verify_key_low, + user_verify_key_high=user_verify_key_high, + sync_direction=sync_direction, + ) + + def flatten_visual_hierarchy(self) -> list[ObjectDiff]: + def flatten_dict(d: dict) -> list: + if len(d) == 0: + return [] + else: + result = [] + for diff, child in d.items(): + result.append(diff) + result += flatten_dict(child) + return result + + return flatten_dict(self.get_visual_hierarchy()) + + def _repr_html_(self) -> str: + try: + diffs = self.flatten_visual_hierarchy() + except Exception as _: + raise SyftException( + public_message=html.escape( + "Could not render batch, please use resolve() instead." + ) + ) + + return f""" +

ObjectBatchDiff
    +{diffs._repr_html_()} +""" + + def status_badge(self) -> dict[str, str]: + status = self.status + if status == "NEW": + badge_color = "label-green" + elif status == "SAME": + badge_color = "label-gray" + else: + badge_color = "label-orange" + return {"value": status.upper(), "type": badge_color} + + def _coll_repr_(self) -> dict[str, Any]: + no_obj_html = "

No object
    " + if self.root_diff.low_obj is None: + low_html = no_obj_html + else: + low_html = SyncTableObject(object=self.root_diff.low_obj).to_html() + + if self.root_diff.high_obj is None: + high_html = no_obj_html + else: + high_html = SyncTableObject(object=self.root_diff.high_obj).to_html() + + return { + "Diff status": self.status_badge(), + "Public Sync State": low_html, + "Private sync state": high_html, + "Decision": self.decision_badge(), + } @property def visual_hierarchy(self) -> tuple[type, dict]: # Returns - root_obj: Request | UserCodeStatusCollection | ExecutionOutput | Any = ( + root_obj = ( self.root.low_obj if self.root.low_obj is not None else self.root.high_obj ) if isinstance(root_obj, Request): return Request, { Request: [UserCode], - UserCode: [UserCode], } - if isinstance(root_obj, UserCodeStatusCollection): - return UserCode, { - UserCode: [UserCodeStatusCollection], + elif isinstance(root_obj, UserCode): + return UserCode, { # type: ignore + UserCode: [UserCodeStatusCollection, UserCode], } - if isinstance(root_obj, ExecutionOutput): - return UserCode, { - UserCode: [Job], - Job: [ExecutionOutput, SyftLog, Job], - ExecutionOutput: [ActionObject], + elif isinstance(root_obj, Job): + return UserCode, { # type: ignore + UserCode: [ExecutionOutput, UserCode], + ExecutionOutput: [Job], + Job: [ActionObject, SyftLog, Job], } - raise ValueError(f"Unknown root type: {self.root.obj_type}") - - @model_validator(mode="after") - def make_dependents(self) -> Self: - dependents: dict = {} - for parent, children in self.dependencies.items(): - for child in children: - dependents[child] = dependents.get(child, []) + [parent] - self.dependents = dependents - return self + elif isinstance(root_obj, TwinAPIEndpoint): + return TwinAPIEndpoint, { # type: ignore + TwinAPIEndpoint: [], + } + else: + raise ValueError(f"Unknown root type: {self.root.obj_type}") @property def root(self) -> ObjectDiff: - return self.diffs[0] + return self.root_diff - def __len__(self) -> int: - return len(self.diffs) - - def __repr__(self) -> str: - return f"""{self.hierarchy_str('low')} - -{self.hierarchy_str('high')} -""" + def __repr__(self) -> Any: + return f"{self.__class__.__name__}[{self.root_type.__name__}](#{str(self.root_id)})" def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: return "" # Turns off the _repr_markdown_ of SyftObject - def _get_visual_hierarchy(self, node: ObjectDiff) -> dict[ObjectDiff, dict]: + def _get_visual_hierarchy( + self, node: ObjectDiff, visited: set[UID] | None = None + ) -> dict[ObjectDiff, dict]: + visited = visited if visited is not None else set() + visited.add(node.object_id) + _, child_types_map = self.visual_hierarchy child_types = child_types_map.get(node.obj_type, []) dep_ids = self.dependencies.get(node.object_id, []) + self.dependents.get( @@ -510,25 +936,64 @@ def _get_visual_hierarchy(self, node: ObjectDiff) -> dict[ObjectDiff, dict]: for child_type in child_types: children = [ n - for n in self.diffs + for n in self.global_diffs.values() if n.object_id in dep_ids and isinstance(n.low_obj or n.high_obj, child_type) ] for child in children: - result[child] = self._get_visual_hierarchy(child) + if child.object_id not in visited: + result[child] = self._get_visual_hierarchy(child, visited=visited) return result - def get_visual_hierarchy(self) -> "ObjectDiffBatch": + @property + def visual_root(self) -> ObjectDiff: + dependecies: list[ObjectDiff] = self.get_dependencies(include_roots=True) visual_root_type = self.visual_hierarchy[0] - 
# First diff with a visual root type is the visual root - # because diffs are in depth-first order - visual_root = [ + + visual_roots = [ diff - for diff in self.diffs + for diff in dependecies if isinstance(diff.low_obj or diff.high_obj, visual_root_type) - ][0] - return {visual_root: self._get_visual_hierarchy(visual_root)} # type: ignore + ] + if not len(visual_roots): + raise ValueError("No visual root found") + + return visual_roots[0] + + @property + def user_code_high(self) -> UserCode | None: + """return the user code of the high side of this batch, if it exists""" + user_code_diff = self.user_code_diff + if user_code_diff is not None and isinstance(user_code_diff.high_obj, UserCode): + return user_code_diff.high_obj + return None + + @property + def user_code_diff(self) -> ObjectDiff | None: + """return the main user code diff of the high side of this batch, if it exists""" + user_code_diffs: list[ObjectDiff] = [ + diff + for diff in self.get_dependencies(include_roots=True) + if issubclass(diff.obj_type, UserCode) + ] + + if len(user_code_diffs) == 0: + return None + else: + # main usercode is always the first, batches are sorted in depth-first order + return user_code_diffs[0] + + @property + def user(self) -> UserView: + user_code_diff = self.user_code_diff + if user_code_diff is not None and isinstance(user_code_diff.low_obj, UserCode): + return user_code_diff.low_obj.user + raise SyftException(public_message="No user found") + + def get_visual_hierarchy(self) -> dict[ObjectDiff, dict]: + visual_root = self.visual_root + return {visual_root: self._get_visual_hierarchy(self.visual_root)} # type: ignore def _get_obj_str(self, diff_obj: ObjectDiff, level: int, side: str) -> str: obj = diff_obj.low_obj if side == "low" else diff_obj.high_obj @@ -549,8 +1014,8 @@ def _get_obj_str(self, diff_obj: ObjectDiff, level: int, side: str) -> str: def hierarchy_str(self, side: str) -> str: def _hierarchy_str_recursive(tree: dict, level: int) -> str: result = "" - for node, children in tree.items(): - result += self._get_obj_str(node, level, side) + for server, children in tree.items(): + result += self._get_obj_str(server, level, side) result += _hierarchy_str_recursive(children, level + 1) return result @@ -563,61 +1028,331 @@ def _hierarchy_str_recursive(tree: dict, level: int) -> str: {res}""" -class NodeDiff(SyftObject): - __canonical_name__ = "NodeDiff" - __version__ = SYFT_OBJECT_VERSION_2 +class IgnoredBatchView(SyftObject): + __canonical_name__ = "IgnoredBatchView" + __version__ = SYFT_OBJECT_VERSION_1 + batch: ObjectDiffBatch + other_batches: list[ObjectDiffBatch] - low_node_uid: UID - high_node_uid: UID + def _coll_repr_(self) -> dict[str, Any]: + return self.batch._coll_repr_() + + def _repr_html_(self) -> str: + return self.batch._repr_html_() + + def stage_change(self) -> None: + self.batch.decision = None + required_dependencies = { + d.object_id for d in self.batch.get_dependencies(include_roots=True) + } + + for other_batch in self.other_batches: + if ( + other_batch.decision == SyncDecision.IGNORE + and other_batch.root_id in required_dependencies + ): + logger.debug(f"ignoring other batch ({other_batch.root_type.__name__})") + other_batch.decision = None + + +class FilterProperty(enum.Enum): + USER = enum.auto() + TYPE = enum.auto() + STATUS = enum.auto() + IGNORED = enum.auto() + + def from_batch(self, batch: ObjectDiffBatch) -> Any: + if self == FilterProperty.USER: + user = batch.user + return user.email + elif self == FilterProperty.TYPE: + return 
batch.root_diff.obj_type.__name__.lower() + elif self == FilterProperty.STATUS: + return batch.status.lower() + elif self == FilterProperty.IGNORED: + return batch.is_ignored + else: + raise ValueError(f"Invalid property: {property}") + + +@dataclass +class ServerDiffFilter: + """ + Filter to apply to a ServerDiff object to determine if it should be included in a batch. + + Checks for `property op value` , where + property: FilterProperty - property to filter on + value: Any - value to compare against + op: callable[[Any, Any], bool] - comparison operator. Default is `operator.eq` + + If the comparison fails, the batch is excluded. + """ + + filter_property: FilterProperty + filter_value: Any + op: Callable[[Any, Any], bool] = operator.eq + + def __call__(self, batch: ObjectDiffBatch) -> bool: + filter_value = self.filter_value + if isinstance(filter_value, str): + filter_value = filter_value.lower() + + try: + p = self.filter_property.from_batch(batch) + if self.op == operator.contains: + # Contains check has reversed arg order: check if p in self.filter_value + return p in filter_value + else: + return self.op(p, filter_value) + except Exception as e: + # By default, exclude the batch if there is an error + logger.debug(f"Error filtering batch {batch} with {self}: {e}") + return False + + +class ServerDiff(SyftObject): + __canonical_name__ = "ServerDiff" + __version__ = SYFT_OBJECT_VERSION_1 + + low_server_uid: UID + high_server_uid: UID + user_verify_key_low: SyftVerifyKey + user_verify_key_high: SyftVerifyKey obj_uid_to_diff: dict[UID, ObjectDiff] = {} - dependencies: dict[UID, list[UID]] = {} + obj_dependencies: dict[UID, list[UID]] = {} + batches: list[ObjectDiffBatch] = [] + all_batches: list[ObjectDiffBatch] = [] + low_state: SyncState + high_state: SyncState + direction: SyncDirection | None + filters: list[ServerDiffFilter] = [] + + include_ignored: bool = False + + def resolve( + self, build_state: bool = True, filter_ignored: bool = True + ) -> "PaginatedResolveWidget | SyftSuccess": + if len(self.batches) == 0: + return SyftSuccess(message="No batches to resolve") + + # relative + from .resolve_widget import PaginatedResolveWidget + + if filter_ignored: + batches = [b for b in self.batches if b.decision != SyncDecision.IGNORE] + else: + batches = self.batches + + return PaginatedResolveWidget(batches=batches, build_state=build_state) + + def __getitem__(self, idx: Any) -> ObjectDiffBatch: + return self.batches[idx] + + @property + def ignored_batches(self) -> list[ObjectDiffBatch]: + return [ + batch for batch in self.all_batches if batch.decision == SyncDecision.IGNORE + ] + + @property + def active_batches(self) -> Iterable[ObjectDiffBatch]: + decisions_to_skip = {SyncDecision.IGNORE, SyncDecision.SKIP} + # self.batches might be modified during iteration + for batch in self.batches: + if batch.decision not in decisions_to_skip: + yield batch + + @property + def ignored_changes(self) -> list[IgnoredBatchView]: + result = [] + for ignored_batch in self.ignored_batches: + other_batches = [b for b in self.all_batches if b is not ignored_batch] + result.append( + IgnoredBatchView(batch=ignored_batch, other_batches=other_batches) + ) + return result @classmethod def from_sync_state( - cls: type["NodeDiff"], low_state: SyncState, high_state: SyncState - ) -> "NodeDiff": + cls: type["ServerDiff"], + low_state: SyncState, + high_state: SyncState, + direction: SyncDirection, + include_ignored: bool = False, + include_same: bool = False, + filter_by_email: str | None = None, + 
include_types: Collection[type | str] | None = None, + exclude_types: Collection[type | str] | None = None, + _include_server_status: bool = False, + ) -> "ServerDiff": obj_uid_to_diff = {} + show_deletion_warning = False for obj_id in set(low_state.objects.keys()) | set(high_state.objects.keys()): low_obj = low_state.objects.get(obj_id, None) - low_permissions = low_state.permissions.get(obj_id, set()) - low_storage_permissions = low_state.storage_permissions.get(obj_id, set()) high_obj = high_state.objects.get(obj_id, None) + + low_permissions = low_state.permissions.get(obj_id, set()) high_permissions = high_state.permissions.get(obj_id, set()) + + low_storage_permissions = low_state.storage_permissions.get(obj_id, set()) high_storage_permissions = high_state.storage_permissions.get(obj_id, set()) + + last_sync_date_low = low_state.object_sync_dates.get(obj_id, None) + last_sync_date_high = high_state.object_sync_dates.get(obj_id, None) + + if _include_server_status: + low_status = low_state.get_status(obj_id) + high_status = high_state.get_status(obj_id) + else: + low_status = "NEW" + high_status = "NEW" + + # We don't support deletion of objects yet. + # So, skip if the object is not present on the *source* side + source_obj = low_obj if direction == SyncDirection.LOW_TO_HIGH else high_obj + if source_obj is None: + show_deletion_warning = True + continue + diff = ObjectDiff.from_objects( low_obj=low_obj, high_obj=high_obj, + low_status=low_status, + high_status=high_status, low_permissions=low_permissions, high_permissions=high_permissions, low_storage_permissions=low_storage_permissions, high_storage_permissions=high_storage_permissions, - low_node_uid=low_state.node_uid, - high_node_uid=high_state.node_uid, + low_server_uid=low_state.server_uid, + high_server_uid=high_state.server_uid, + last_sync_date_low=last_sync_date_low, + last_sync_date_high=last_sync_date_high, ) obj_uid_to_diff[diff.object_id] = diff - node_diff = cls( - low_node_uid=low_state.node_uid, - high_node_uid=high_state.node_uid, + # TODO move static methods to ServerDiff __init__ + obj_dependencies = ServerDiff.dependencies_from_states(low_state, high_state) + all_batches = ServerDiff._create_batches( + low_state, + high_state, + obj_dependencies, + obj_uid_to_diff, + direction=direction, + ) + + # TODO: Check if high and low ignored batches are the same else error + previously_ignored_batches = low_state.ignored_batches + ServerDiff.apply_previous_ignore_state(all_batches, previously_ignored_batches) + ServerDiff.ignore_high_side_code(all_batches) + + res = cls( + low_server_uid=low_state.server_uid, + high_server_uid=high_state.server_uid, + user_verify_key_low=low_state.syft_client_verify_key, + user_verify_key_high=high_state.syft_client_verify_key, obj_uid_to_diff=obj_uid_to_diff, + obj_dependencies=obj_dependencies, + batches=all_batches, + all_batches=all_batches, + low_state=low_state, + high_state=high_state, + direction=direction, + filters=[], + ) + + res._filter( + user_email=filter_by_email, + include_types=include_types, + include_ignored=include_ignored, + include_same=include_same, + exclude_types=exclude_types, + inplace=True, ) - node_diff._init_dependencies(low_state, high_state) - return node_diff + if show_deletion_warning: + prompt_warning_message( + message=( + "The target server has objects not found on the source server. " + "These objects cannot be deleted via syncing and only manual deletion is possible." 
+ ), + confirm=False, + ) + + return res - def _init_dependencies(self, low_state: SyncState, high_state: SyncState) -> None: + @staticmethod + def apply_previous_ignore_state( + batches: list[ObjectDiffBatch], previously_ignored_batches: dict[UID, int] + ) -> None: + """ + Loop through all ignored batches in syncstate. If batch did not change, set to ignored + If another batch needs to exist in order to accept that changed batch: also unignore + e.g. if a job changed, also unignore the usercode + """ + + for root_id, batch_hash in previously_ignored_batches.items(): + for batch in batches: + if batch.root_id == root_id: + if hash(batch) == batch_hash: + batch.decision = SyncDecision.IGNORE + else: + logger.debug( + f"""A batch with type {batch.root_type.__name__} was previously ignored but has changed +It will be available for review again.""" + ) + # batch has changed, so unignore + batch.decision = None + # then we also set the dependent batches to unignore + # currently we dont do this recusively + required_dependencies = { + d.object_id + for d in batch.get_dependencies(include_roots=True) + } + + for other_batch in batches: + if other_batch is not batch: + other_batch_root_id = {other_batch.root_id} + # if there is overlap + if len(required_dependencies & other_batch_root_id): + other_batch.decision = None + + @staticmethod + def ignore_high_side_code(batches: list[ObjectDiffBatch]) -> None: + # relative + from ...abstract_server import ServerSideType + from ...client.syncing import get_other_ignore_batches + + for batch in batches: + if not issubclass(batch.root_type, UserCode): + continue + + user_code: UserCode = batch.root.non_empty_object # type: ignore + if user_code.origin_server_side_type == ServerSideType.HIGH_SIDE: + batch.decision = SyncDecision.IGNORE + other_batches = get_other_ignore_batches(batch, batches) + for other_batch in other_batches: + other_batch.decision = SyncDecision.IGNORE + + @staticmethod + def dependencies_from_states( + low_state: SyncState, high_state: SyncState + ) -> dict[UID, list[UID]]: + dependencies = {} all_parents = set(low_state.dependencies.keys()) | set( high_state.dependencies.keys() ) for parent in all_parents: low_deps = low_state.dependencies.get(parent, []) high_deps = high_state.dependencies.get(parent, []) - self.dependencies[parent] = list(set(low_deps) | set(high_deps)) + dependencies[parent] = list(set(low_deps) | set(high_deps)) + return dependencies @property def diffs(self) -> list[ObjectDiff]: diffs_depthfirst = [ - diff for hierarchy in self.hierarchies for diff in hierarchy.diffs + diff + for hierarchy in self.batches + for diff in hierarchy.get_dependents(include_roots=False) ] # deduplicate diffs = [] @@ -628,17 +1363,37 @@ def diffs(self) -> list[ObjectDiff]: ids.add(diff.object_id) return diffs + def _repr_markdown_(self) -> None: # type: ignore + return None + def _repr_html_(self) -> Any: - return self.diffs._repr_html_() + n = len(self.batches) + if self.direction == SyncDirection.LOW_TO_HIGH: + name1 = "Low-side Server" + name2 = "High-side Server" + else: + name1 = "High-side Server" + name2 = "Low-side Server" + repr_html = f""" +

+        Comparing sync states
+        {name1} {Icon.ARROW.svg} {name2}
+        This would sync {n} batches from {name1} to {name2}
    + """ # noqa: E501 + repr_html = repr_html.replace("\n", "") + + res = repr_html + self.batches._repr_html_() + return res - def _sort_hierarchies( - self, hierarchies: list[ObjectDiffBatch] - ) -> list[ObjectDiffBatch]: + @staticmethod + def _sort_batches(hierarchies: list[ObjectDiffBatch]) -> list[ObjectDiffBatch]: without_usercode = [] grouped_by_usercode: dict[UID, list[ObjectDiffBatch]] = {} for hierarchy in hierarchies: has_usercode = False - for diff in hierarchy.diffs: + for diff in hierarchy.get_dependencies(include_roots=True): obj = diff.low_obj if diff.low_obj is not None else diff.high_obj if isinstance(obj, UserCode): usercode_id = obj.id @@ -651,7 +1406,7 @@ def _sort_hierarchies( without_usercode.append(hierarchy) # Order of hierarchies, by root object type - hierarchy_order = [UserCodeStatusCollection, Request, ExecutionOutput] + hierarchy_order = [UserCode, Request, Job] # Sort group by hierarchy_order, then by root object id for hierarchy_group in grouped_by_usercode.values(): hierarchy_group.sort( @@ -668,122 +1423,281 @@ def _sort_hierarchies( sorted_hierarchies.extend(without_usercode) return sorted_hierarchies - @property - def hierarchies(self) -> list[ObjectDiffBatch]: - # Returns a list of hierarchies, where each hierarchy is a list of tuples (ObjectDiff, level), - # in depth-first order. + @staticmethod + def _create_batches( + low_sync_state: SyncState, + high_sync_state: SyncState, + obj_dependencies: dict[UID, list[UID]], + obj_uid_to_diff: dict[UID, ObjectDiff], + direction: SyncDirection, + ) -> list[ObjectDiffBatch]: + batches: list[ObjectDiffBatch] = [] + root_ids = [] + + for diff in obj_uid_to_diff.values(): + diff_obj = diff.low_obj if diff.low_obj is not None else diff.high_obj + if isinstance(diff_obj, Request | UserCode | TwinAPIEndpoint): + # TODO: Figure out nested user codes, do we even need that? 
+ + root_ids.append(diff.object_id) # type: ignore + elif ( # type: ignore[unreachable] + isinstance(diff_obj, Job) # type: ignore + and diff_obj.parent_job_id is None + # ignore Job objects created by TwinAPIEndpoint + and diff_obj.job_type != JobType.TWINAPIJOB + ): + root_ids.append(diff.object_id) # type: ignore + + # Dependents are the reverse edges of the dependency graph + obj_dependents: dict = {} + for parent, children in obj_dependencies.items(): + for child in children: + obj_dependents[child] = obj_dependents.get(child, []) + [parent] - # Each hierarchy only contains one root, at the first position - # Example: [(Diff1, 0), (Diff2, 1), (Diff3, 2), (Diff4, 1)] - # Diff1 - # -- Diff2 - # ---- Diff3 - # -- Diff4 + for root_uid in root_ids: + batch = ObjectDiffBatch.from_dependencies( + root_uid, + obj_dependencies, + obj_dependents, + obj_uid_to_diff, + root_ids, + low_sync_state.server_uid, + high_sync_state.server_uid, + low_sync_state.syft_client_verify_key, + high_sync_state.syft_client_verify_key, + sync_direction=direction, + ) + batches.append(batch) - def _build_hierarchy_helper( - uid: UID, level: int = 0, visited: set | None = None - ) -> list: - visited = visited if visited is not None else set() + # TODO ref back to ServerDiff would clean up a lot of logic, + # No need to save ServerDiff state on every batch + for batch in batches: + batch.global_batches = batches - if uid in visited: - return [] + hierarchies_sorted = ServerDiff._sort_batches(batches) + return hierarchies_sorted - result = [(uid, level)] - visited.add(uid) - if uid in self.dependencies: - deps = self.dependencies[uid] - for dep_uid in self.dependencies[uid]: - if dep_uid not in visited: - # NOTE we pass visited + deps to recursive calls, to have - # all objects at the highest level in the hierarchy - # Example: - # ExecutionOutput - # -- Job - # ---- Result - # -- Result - # We want to omit Job.Result, because it's already in ExecutionOutput.Result - result.extend( - _build_hierarchy_helper( - uid=dep_uid, - level=level + 1, - visited=visited | set(deps) - {dep_uid}, - ) - ) - return result + @property + def is_same(self) -> bool: + return all(object_diff.status == "SAME" for object_diff in self.diffs) - hierarchies = [] - all_ids = set(self.obj_uid_to_diff.keys()) - child_ids = {child for deps in self.dependencies.values() for child in deps} - # Root ids are object ids with no parents - root_ids = list(all_ids - child_ids) + def _apply_filters( + self, filters: list[ServerDiffFilter], inplace: bool = True + ) -> Self: + """ + Apply filters to the ServerDiff object and return a new ServerDiff object + """ + batches = self.all_batches + for filter in filters: + batches = [b for b in batches if filter(b)] + + if inplace: + self.filters = filters + self.batches = batches + return self + else: + return ServerDiff( + low_server_uid=self.low_server_uid, + high_server_uid=self.high_server_uid, + user_verify_key_low=self.user_verify_key_low, + user_verify_key_high=self.user_verify_key_high, + obj_uid_to_diff=self.obj_uid_to_diff, + obj_dependencies=self.obj_dependencies, + batches=batches, + all_batches=self.all_batches, + low_state=self.low_state, + high_state=self.high_state, + direction=self.direction, + filters=filters, + ) - for root_uid in root_ids: - uid_hierarchy = _build_hierarchy_helper(root_uid) - diffs = [self.obj_uid_to_diff[uid] for uid, _ in uid_hierarchy] - levels = [level for _, level in uid_hierarchy] - - batch_uids = {uid for uid, _ in uid_hierarchy} - dependencies = { - uid: [d for d in 
self.dependencies.get(uid, []) if d in batch_uids] - for uid in batch_uids + def _filter( + self, + user_email: str | None = None, + include_ignored: bool = False, + include_same: bool = False, + include_types: Collection[str | type] | None = None, + exclude_types: Collection[type | str] | None = None, + inplace: bool = True, + ) -> Self: + new_filters = [] + if user_email is not None: + new_filters.append( + ServerDiffFilter(FilterProperty.USER, user_email, operator.eq) + ) + if not include_ignored: + new_filters.append( + ServerDiffFilter(FilterProperty.IGNORED, True, operator.ne) + ) + if not include_same: + new_filters.append( + ServerDiffFilter(FilterProperty.STATUS, "SAME", operator.ne) + ) + if include_types is not None: + include_types_ = { + t.__name__.lower() if isinstance(t, type) else t.lower() + for t in include_types } - - batch = ObjectDiffBatch( - diffs=diffs, - hierarchy_levels=levels, - dependencies=dependencies, + new_filters.append( + ServerDiffFilter(FilterProperty.TYPE, include_types_, operator.contains) ) - hierarchies.append(batch) - - hierarchies_sorted = self._sort_hierarchies(hierarchies) - return hierarchies_sorted + if exclude_types: + for exclude_type in exclude_types: + if isinstance(exclude_type, type): + exclude_type = exclude_type.__name__ + new_filters.append( + ServerDiffFilter(FilterProperty.TYPE, exclude_type, operator.ne) + ) - def objs_to_sync(self) -> list[SyftObject]: - objs: list[SyftObject] = [] - for diff in self.diffs: - if diff.status == "NEW": - objs.append(diff.get_obj()) - return objs + return self._apply_filters(new_filters, inplace=inplace) -class SyncDecision(SyftObject): +class SyncInstruction(SyftObject): __canonical_name__ = "SyncDecision" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 diff: ObjectDiff - decision: str | None - new_permissions_lowside: list[ActionObjectPermission] + decision: SyncDecision | None + new_permissions_lowside: dict[type, list[ActionObjectPermission]] + new_permissions_highside: dict[type, list[ActionObjectPermission]] new_storage_permissions_lowside: list[StoragePermission] new_storage_permissions_highside: list[StoragePermission] + unignore: bool = False mockify: bool + @classmethod + def from_batch_decision( + cls, + diff: ObjectDiff, + sync_direction: SyncDirection, + share_private_data: bool, + mockify: bool, + decision: SyncDecision, + share_to_user: SyftVerifyKey | None, + ) -> Self: + # read widget state + new_permissions_low_side = {} + new_permissions_high_side = {} + # read permissions + if sync_direction == SyncDirection.HIGH_TO_LOW: + # To create read permissions for the object + # job/usercode/request/TwinAPIEndpoint + if share_private_data: # or diff.object_type == "Job": + if share_to_user is None: + # job ran by another user + if ( + diff.object_type != "Job" + and diff.object_type != "ExecutionOutput" + ): + raise ValueError( + "share_to_user is required to share private data" + ) + else: + new_permissions_low_side = { + diff.obj_type: [ + ActionObjectPermission( + uid=diff.object_id, + permission=ActionPermission.READ, + credentials=share_to_user, + ) + ] + } + if diff.obj_type in [Job, SyftLog, Request] or issubclass( + diff.obj_type, ActionObject + ): + new_permissions_high_side = { + diff.obj_type: [ + ActionObjectPermission( + uid=diff.object_id, + permission=ActionPermission.READ, + credentials=share_to_user, + ) + ] + } + + # storage permissions + new_storage_permissions = [] + + if sync_direction == SyncDirection.HIGH_TO_LOW: + # TODO: apply storage 
permissions on both ends + if not mockify: + new_storage_permissions.append( + StoragePermission( + uid=diff.object_id, server_uid=diff.low_server_uid + ) + ) + elif sync_direction == SyncDirection.LOW_TO_HIGH: + new_storage_permissions.append( + StoragePermission(uid=diff.object_id, server_uid=diff.high_server_uid) + ) + + return cls( + diff=diff, + decision=decision, + new_permissions_lowside=new_permissions_low_side, + new_permissions_highside=new_permissions_high_side, + new_storage_permissions_lowside=new_storage_permissions, + new_storage_permissions_highside=new_storage_permissions, + mockify=mockify, + ) + class ResolvedSyncState(SyftObject): __canonical_name__ = "SyncUpdate" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 - node_uid: UID + server_uid: UID create_objs: list[SyncableSyftObject] = [] update_objs: list[SyncableSyftObject] = [] delete_objs: list[SyftObject] = [] - new_permissions: list[ActionObjectPermission] = [] + new_permissions: dict[type, list[ActionObjectPermission]] = {} new_storage_permissions: list[StoragePermission] = [] + ignored_batches: dict[UID, int] = {} # batch root uid -> hash of the batch + unignored_batches: set[UID] = set() alias: str - def add_sync_decision(self, sync_decision: SyncDecision) -> None: - diff = sync_decision.diff + @classmethod + def from_client(cls, client: SyftClient) -> "ResolvedSyncState": + alias: str = client.metadata.server_side_type # type: ignore + if alias not in ["low", "high"]: + raise ValueError( + "can only create resolved sync state for high, low side deployments" + ) + return cls(server_uid=client.id, alias=alias) + + def add_ignored(self, batch: ObjectDiffBatch) -> None: + self.ignored_batches[batch.root_id] = hash(batch) + + def add_unignored(self, root_id: UID) -> None: + self.unignored_batches.add(root_id) + + def add_sync_instruction(self, sync_instruction: SyncInstruction) -> None: + if ( + sync_instruction.decision == SyncDecision.IGNORE + or sync_instruction.decision == SyncDecision.SKIP + ): + return + diff = sync_instruction.diff + + if sync_instruction.unignore: + self.unignored_batches.add(sync_instruction.batch_diff.root_id) - if diff.status == "SAME": + if ( + diff.status == "SAME" + and len(sync_instruction.new_permissions_highside) == 0 + ): return my_obj = diff.low_obj if self.alias == "low" else diff.high_obj other_obj = diff.low_obj if self.alias == "high" else diff.high_obj - if other_obj is not None and sync_decision.mockify: + if other_obj is not None and sync_instruction.mockify: other_obj = other_obj.create_shareable_sync_copy(mock=True) - if sync_decision.decision != self.alias: # chose for the other - if diff.status == "DIFF": + if ( + sync_instruction.decision and sync_instruction.decision.value != self.alias + ): # chose for the other + if diff.status == "MODIFIED": # keep IDs comparison here, otherwise it will break with actionobjects if other_obj.id not in [x.id for x in self.update_objs]: # type: ignore self.update_objs.append(other_obj) @@ -800,17 +1714,44 @@ def add_sync_decision(self, sync_decision: SyncDecision) -> None: self.delete_objs.append(my_obj) if self.alias == "low": - self.new_permissions.extend(sync_decision.new_permissions_lowside) + for obj_type in sync_instruction.new_permissions_lowside.keys(): + if obj_type in self.new_permissions: + self.new_permissions[obj_type].extend( + sync_instruction.new_permissions_lowside[obj_type] + ) + else: + self.new_permissions[obj_type] = ( + sync_instruction.new_permissions_lowside[obj_type] + ) 
self.new_storage_permissions.extend( - sync_decision.new_storage_permissions_lowside + sync_instruction.new_storage_permissions_lowside ) elif self.alias == "high": + for obj_type in sync_instruction.new_permissions_highside.keys(): + if obj_type in self.new_permissions: + self.new_permissions[obj_type].extend( + sync_instruction.new_permissions_highside[obj_type] + ) + else: + self.new_permissions[obj_type] = ( + sync_instruction.new_permissions_highside[obj_type] + ) self.new_storage_permissions.extend( - sync_decision.new_storage_permissions_highside + sync_instruction.new_storage_permissions_highside ) else: raise ValueError("Invalid alias") + @property + def is_empty(self) -> bool: + return ( + len(self.create_objs) == 0 + and len(self.update_objs) == 0 + and len(self.delete_objs) == 0 + and len(self.new_permissions) == 0 + and len(self.new_storage_permissions) == 0 + ) + def __repr__(self) -> str: return ( f"ResolvedSyncState(\n" @@ -818,6 +1759,8 @@ def __repr__(self) -> str: f" update_objs={self.update_objs},\n" f" delete_objs={self.delete_objs}\n" f" new_permissions={self.new_permissions}\n" + f" new_storage_permissions={self.new_storage_permissions}\n" + f" ignored_batches={list(self.ignored_batches.keys())}\n" f")" ) diff --git a/packages/syft/src/syft/service/sync/resolve_widget.py b/packages/syft/src/syft/service/sync/resolve_widget.py new file mode 100644 index 00000000000..1f4b7b1bf24 --- /dev/null +++ b/packages/syft/src/syft/service/sync/resolve_widget.py @@ -0,0 +1,854 @@ +# stdlib +from collections.abc import Callable +from enum import Enum +from enum import auto +from functools import partial +import html +import secrets +from typing import Any +from uuid import uuid4 + +# third party +from IPython import display +import ipywidgets as widgets +from ipywidgets import Button +from ipywidgets import Checkbox +from ipywidgets import HBox +from ipywidgets import HTML +from ipywidgets import Layout +from ipywidgets import VBox + +# relative +from ...client.sync_decision import SyncDecision +from ...client.sync_decision import SyncDirection +from ...types.errors import SyftException +from ...types.uid import UID +from ...util.notebook_ui.components.sync import Alert +from ...util.notebook_ui.components.sync import CopyIDButton +from ...util.notebook_ui.components.sync import MainDescription +from ...util.notebook_ui.components.sync import SyncWidgetHeader +from ...util.notebook_ui.components.sync import TypeLabel +from ...util.notebook_ui.components.tabulator_template import build_tabulator_table +from ...util.notebook_ui.components.tabulator_template import highlight_single_row +from ...util.notebook_ui.components.tabulator_template import update_table_cell +from ...util.notebook_ui.styles import CSS_CODE +from ..action.action_object import ActionObject +from ..api.api import TwinAPIEndpoint +from ..log.log import SyftLog +from ..response import SyftSuccess +from .diff_state import ObjectDiff +from .diff_state import ObjectDiffBatch +from .widget_output import Output + +# Standard div Jupyter Lab uses for notebook outputs +# This is needed to use alert styles from SyftSuccess and SyftException +NOTEBOOK_OUTPUT_DIV = """ + +""" + + +class DiffStatus(Enum): + NEW = auto() + SAME = auto() + MODIFIED = auto() + DELETED = auto() + + +background_colors = { + DiffStatus.NEW: "#D5F1D5;", + DiffStatus.SAME: "transparent", + DiffStatus.MODIFIED: "#FEE9CD;", + DiffStatus.DELETED: "#ffdddd;", +} + + +colors = { + DiffStatus.NEW: "#256B24;", + DiffStatus.SAME: "#353243", + 
DiffStatus.MODIFIED: "#B8520A;", + DiffStatus.DELETED: "#353243", +} + + +def create_diff_html( + title: str, + properties: dict[str, str], + statuses: dict[str, DiffStatus], +) -> str: + html_str = f"
    {title}
    " + html_str += "
    " + + for attr, val in properties.items(): + status = statuses[attr] + val = val if val is not None else "" + style = f"background-color: {background_colors[status]}; color: {colors[status]}; display: block; white-space: pre-wrap; margin-bottom: 5px;" # noqa: E501 + content = html.escape(f"{attr}: {val}") + html_str += f"
    {content}
    " + + html_str += "
    " + + return html_str + + +# TODO move CSS/HTML/JS outside function + + +class MainObjectDiffWidget: + def __init__( + self, + diff: ObjectDiff, + direction: SyncDirection, + with_box: bool = True, + show_share_warning: bool = False, + build_state: bool = True, + ): + build_state = build_state + + if build_state: + self.low_properties = diff.repr_attr_dict("low") + self.high_properties = diff.repr_attr_dict("high") + self.statuses = diff.repr_attr_diffstatus_dict() + else: + self.low_properties = {} + self.high_properties = {} + self.statuses = {} + + self.direction = direction + self.diff: ObjectDiff = diff + self.with_box = with_box + self.show_share_warning = show_share_warning + self.sync = True + self.is_main_widget: bool = True + + self.widget = self.build() + + def set_share_private_data(self) -> None: + # No-op for main widget + pass + + @property + def mockify(self) -> bool: + return not self.share_private_data + + @property + def share_private_data(self) -> bool: + # there are TwinAPIEndpoint.__private_sync_attr_mocks__ + return not isinstance(self.diff.non_empty_object, TwinAPIEndpoint) + + @property + def warning_html(self) -> str: + if isinstance(self.diff.non_empty_object, TwinAPIEndpoint): + message = "Only the public function of a TwinAPI will be synced to the low-side server." + return Alert(message=message).to_html() + elif self.show_share_warning: + message = ( + "By default only the object wrapper will be synced. " + "If you would like to sync the real data please " + 'activate the "Sync Real Data" button above.' + ) + return Alert(message=message).to_html() + else: + return "" + + def build(self) -> widgets.HBox: + all_keys = list(self.low_properties.keys()) + list(self.high_properties.keys()) + low_properties = {} + high_properties = {} + for k in all_keys: + low_properties[k] = self.low_properties.get(k, None) + high_properties[k] = self.high_properties.get(k, None) + + if self.direction == SyncDirection.LOW_TO_HIGH: + from_properties = low_properties + to_properties = high_properties + source_side = "Low side" + target_side = "High side" + else: + from_properties = high_properties + to_properties = low_properties + source_side = "High side" + target_side = "Low side" + + html_from = create_diff_html( + f"From {source_side} (new values)", from_properties, self.statuses + ) + html_to = create_diff_html( + f"To {target_side} (old values)", to_properties, self.statuses + ) + + widget_from = widgets.HTML( + value=html_from, layout=widgets.Layout(width="50%", overflow="auto") + ) + widget_to = widgets.HTML( + value=html_to, layout=widgets.Layout(width="50%", overflow="auto") + ) + css_accordion = """ + + """ + + result = widgets.HBox([HTML(css_accordion), widget_from, widget_to]) + + warning = self.warning_html + if warning: + result = VBox([widgets.HTML(warning), result]) + + if self.with_box: + result._dom_classes = result._dom_classes + ("diff-container",) + + return result + + +class CollapsableObjectDiffWidget: + def __init__( + self, + diff: ObjectDiff, + direction: SyncDirection, + build_state: bool = True, + ): + self.direction = direction + self.build_state = build_state + self.share_private_data = False + self.diff: ObjectDiff = diff + self.sync: bool = False + self.is_main_widget: bool = False + self.has_private_data = isinstance( + self.diff.non_empty_object, SyftLog | ActionObject | TwinAPIEndpoint + ) + self.widget = self.build() + self.set_and_disable_sync() + + @property + def mockify(self) -> bool: + if self.has_private_data and not 
self.share_private_data: + return True + else: + return False + + @property + def warning_html(self) -> str: + if self.show_share_button: + message = ( + "By default only the object wrapper will be synced. " + "If you would like to sync the real log data please " + "activate the “Real Data” button above." + ) + return Alert(message=message).to_html() + return "" + + @property + def show_share_button(self) -> bool: + return self.has_private_data + + @property + def title(self) -> str: + object = self.diff.non_empty_object + if object is None: + return "n/a" + type_html = TypeLabel(object=object).to_html() + description_html = MainDescription(object=object).to_html() + copy_id_button = CopyIDButton(copy_text=str(object.id.id), max_width=60) + + second_line_html = f""" +
+            {type_html} {description_html}
+            {copy_id_button.to_html()}
    + """ # noqa: E501 + return second_line_html + + def set_and_disable_sync(self) -> None: + self._sync_checkbox.disabled = True + self._sync_checkbox.value = True + + def enable_sync(self) -> None: + if self.show_sync_button: + self._sync_checkbox.disabled = False + + def set_share_private_data(self) -> None: + if self.show_share_button: + self._share_private_checkbox.value = True + + def build(self) -> widgets.VBox: + content = MainObjectDiffWidget( + self.diff, + self.direction, + with_box=False, + show_share_warning=self.show_share_button, + build_state=self.build_state, + ).widget + + accordion, share_private_checkbox, sync_checkbox = self.build_accordion( + accordion_body=content, + show_sync_checkbox=True, + show_share_private_checkbox=self.show_share_button, + ) + + self._sync_checkbox = sync_checkbox + self._sync_checkbox.observe(self._on_sync_change, "value") + + self._share_private_checkbox = share_private_checkbox + self._share_private_checkbox.observe( + self._on_share_private_data_change, "value" + ) + + return accordion + + def create_accordion_css( + self, header_id: str, body_id: str, class_name: str + ) -> str: + css_accordion = f""" + + """ + return css_accordion + + def build_accordion( + self, + accordion_body: MainObjectDiffWidget, + show_sync_checkbox: bool = True, + show_share_private_checkbox: bool = True, + ) -> VBox: + uid = str(uuid4()) + body_id = f"accordion-body-{uid}" + header_id = f"accordion-header-{uid}" + class_name = f"accordion-{uid}" + caret_id = f"caret-{uid}" + + toggle_hide_body_js = f""" + var body = document.getElementsByClassName('{body_id}')[0]; + var caret = document.getElementById('{caret_id}'); + if (body.classList.contains('body-hidden')) {{ + var vbox = document.getElementsByClassName('{class_name}-folded')[0]; + body.classList.remove('body-hidden'); + body.classList.add('body-visible'); + vbox.classList.remove('{class_name}-folded'); + vbox.classList.add('{class_name}-unfolded'); + caret.classList.remove('fa-caret-right'); + caret.classList.add('fa-caret-down'); + }} else {{ + var vbox = document.getElementsByClassName('{class_name}-unfolded')[0]; + body.classList.remove('body-visible'); + body.classList.add('body-hidden'); + vbox.classList.remove('{class_name}-unfolded'); + vbox.classList.add('{class_name}-folded'); + caret.classList.remove('fa-caret-down'); + caret.classList.add('fa-caret-right'); + }} + """ + caret = f'' + title_html = HTML( + value=f"
    {caret} {self.title}
    ", # noqa: E501 + layout=Layout(flex="1"), + ) + + if isinstance(self.diff.non_empty_object, ActionObject): + share_data_description = "Share real data and approve" + else: + share_data_description = "Share real data" + share_private_data_checkbox = Checkbox( + description=share_data_description, + layout=Layout(width="auto", margin="0 2px 0 0"), + ) + sync_checkbox = Checkbox( + description="Sync", layout=Layout(width="auto", margin="0 2px 0 0") + ) + + checkboxes = [] + if show_share_private_checkbox: + checkboxes.append(share_private_data_checkbox) + if show_sync_checkbox: + checkboxes.append(sync_checkbox) + + accordion_header = HBox( + [title_html] + checkboxes, + layout=Layout(width="100%", justify_content="space-between"), + ) + + accordion_body.add_class(body_id) + accordion_body.add_class("body-hidden") + + style = HTML(value=self.create_accordion_css(header_id, body_id, class_name)) + + accordion = VBox( + [style, accordion_header, accordion_body], + _dom_classes=(f"accordion-{uid}-folded", "accordion"), + ) + return accordion, share_private_data_checkbox, sync_checkbox + + def _on_sync_change(self, change: Any) -> None: + self.sync = change["new"] + + def _on_share_private_data_change(self, change: Any) -> None: + self.share_private_data = change["new"] + + +class ResolveWidget: + def __init__( + self, + obj_diff_batch: ObjectDiffBatch, + on_sync_callback: Callable | None = None, + on_ignore_callback: Callable | None = None, + build_state: bool = True, + ): + self.build_state = build_state + self.obj_diff_batch: ObjectDiffBatch = obj_diff_batch + self.id2widget: dict[ + UID, CollapsableObjectDiffWidget | MainObjectDiffWidget + ] = {} + self.on_sync_callback = on_sync_callback + self.on_ignore_callback = on_ignore_callback + self.main_widget = self.build() + self.result_widget = VBox() # Placeholder for SyftSuccess / SyftException + self.widget = VBox( + [self.build_css_widget(), self.main_widget, self.result_widget] + ) + self.is_synced = False + self.hide_result_widget() + + def build_css_widget(self) -> HTML: + return widgets.HTML(value=CSS_CODE) + + def _repr_mimebundle_(self, **kwargs: dict) -> dict[str, str] | None: + return self.widget._repr_mimebundle_(**kwargs) + + def click_share_all_private_data(self) -> None: + for widget in self.id2widget.values(): + widget.set_share_private_data() + + def click_share_private_data(self, uid: UID | str) -> SyftSuccess: + if isinstance(uid, str): + uid = UID(uid) + if uid not in self.id2widget: + raise SyftException(public_message="Object not found in this widget") + + widget = self.id2widget[uid] + widget.set_share_private_data() + return SyftSuccess(message="Private data shared") + + def get_share_private_data_state(self) -> dict[UID, bool]: + return { + uid: widget.share_private_data for uid, widget in self.id2widget.items() + } + + def get_mockify_state(self) -> dict[UID, bool]: + return {uid: widget.mockify for uid, widget in self.id2widget.items()} + + def ignore(self) -> None: + # self.obj_diff_batch.ignore() + self.obj_diff_batch.ignore() + if self.on_ignore_callback: + self.on_ignore_callback() + + def deny_and_ignore(self, reason: str) -> None: + self.ignore() + batch = self.obj_diff_batch + # relative + from ..request.request import Request + + assert batch.root_type == Request, "method can only be excecuted on requests" # nosec: B101 + request = batch.root.low_obj + assert request is not None # nosec: B101 + request.deny(reason) + + def click_sync(self, *args: list, **kwargs: dict) -> SyftSuccess: + # relative + from 
...client.syncing import handle_sync_batch + + if self.is_synced: + raise SyftException( + public_message="The changes in this widget have already been synced." + ) + + res = handle_sync_batch( + obj_diff_batch=self.obj_diff_batch, + share_private_data=self.get_share_private_data_state(), + mockify=self.get_mockify_state(), + ) + + self.set_widget_result_state(res) + if self.on_sync_callback: + self.on_sync_callback() + return res + + @property + def batch_diff_widgets(self) -> list[CollapsableObjectDiffWidget]: + dependents = self.obj_diff_batch.get_dependents( + include_roots=False, include_batch_root=False + ) + dependent_diff_widgets = [ + CollapsableObjectDiffWidget( + diff, + direction=self.obj_diff_batch.sync_direction, + build_state=self.build_state, + ) + for diff in dependents + ] + return dependent_diff_widgets + + @property + def dependency_root_diff_widgets(self) -> list[CollapsableObjectDiffWidget]: + dependencies = self.obj_diff_batch.get_dependencies( + include_roots=True, include_batch_root=False + ) + + # we show these above the line + dependents = self.obj_diff_batch.get_dependents( + include_roots=False, include_batch_root=False + ) + dependent_ids = [x.object_id for x in dependents] + # we skip the ones we already show above the line in the widget + context_diffs = [d for d in dependencies if d.object_id not in dependent_ids] + widgets = [ + CollapsableObjectDiffWidget( + diff, + direction=self.obj_diff_batch.sync_direction, + build_state=self.build_state, + ) + for diff in context_diffs + ] + return widgets + + @property + def main_object_diff_widget(self) -> MainObjectDiffWidget: + obj_diff_widget = MainObjectDiffWidget( + self.obj_diff_batch.root_diff, + direction=self.obj_diff_batch.sync_direction, + build_state=self.build_state, + ) + return obj_diff_widget + + def set_widget_result_state(self, res: SyftSuccess) -> None: + self.is_synced = True + self.set_result_message(res) + self.hide_main_widget() + self.show_result_widget() + + def set_result_message(self, result: SyftSuccess) -> None: + result_html = result._repr_html_() + # Wrap in div to match Jupyter Lab output styling + result_html = NOTEBOOK_OUTPUT_DIV.format(content=result_html) + self.result_widget.children = [widgets.HTML(value=result_html)] + + def hide_main_widget(self) -> None: + self.main_widget.layout.display = "none" + + def show_main_widget(self) -> None: + self.main_widget.layout.display = "block" + + def hide_result_widget(self) -> None: + self.result_widget.layout.display = "none" + + def show_result_widget(self) -> None: + self.result_widget.layout.display = "block" + + def build(self) -> VBox: + self.id2widget = {} + + batch_diff_widgets = self.batch_diff_widgets + dependent_batch_diff_widgets = self.dependency_root_diff_widgets + main_object_diff_widget = self.main_object_diff_widget + + self.id2widget[main_object_diff_widget.diff.object_id] = main_object_diff_widget + + for widget in batch_diff_widgets: + self.id2widget[widget.diff.object_id] = widget + + for widget in dependent_batch_diff_widgets: + self.id2widget[widget.diff.object_id] = widget + + # put a 4px spacer between each item + main_batch_items = widgets.VBox( + children=[d.widget for d in batch_diff_widgets], + ) + + dependency_items = widgets.VBox( + children=[d.widget for d in dependent_batch_diff_widgets], + ) + + full_widget = widgets.VBox( + [ + self.build_header(), + self.main_object_diff_widget.widget, + self.spacer(8), + main_batch_items, + self.separator(), + dependency_items, + self.spacer(8), + self.sync_button(), 
+ ] + ) + return full_widget + + def sync_button(self) -> Button: + sync_button = Button( + description="Apply Selected Changes", + style={ + "text_color": "#464A91", + "button_color": "transparent", + "float": "right", + }, + layout=Layout(border="#464A91 solid 1.5px", width="200px"), + ) + sync_button.on_click(self.click_sync) + return sync_button + + def spacer(self, height: int) -> widgets.HTML: + return widgets.HTML(f"
    ") + + def separator(self) -> widgets.HTML: + return widgets.HTML( + value='
    ', + layout=Layout(width="100%"), + ) + + def build_header(self) -> HTML: + header_html = SyncWidgetHeader(diff_batch=self.obj_diff_batch).to_html() + return HTML(value=header_html) + + +class PaginationControl: + def __init__(self, data: list, callback: Callable[[int], None]): + self.data = data + self.callback = callback + self.current_index = 0 + self.index_label = widgets.Label(value=f"Index: {self.current_index}") + + self.first_button = widgets.Button(description="First") + self.previous_button = widgets.Button(description="Previous") + self.next_button = widgets.Button(description="Next") + self.last_button = widgets.Button(description="Last") + + self.first_button.on_click(self.go_to_first) + self.previous_button.on_click(self.go_to_previous) + self.next_button.on_click(self.go_to_next) + self.last_button.on_click(self.go_to_last) + self.output = Output() + + self.buttons = widgets.HBox( + [ + self.first_button, + self.previous_button, + self.next_button, + self.last_button, + ] + ) + self.update_buttons() + self.update_index_callback() + + def update_index_label(self) -> None: + self.index_label.value = f"Current: {self.current_index}" + + def update_buttons(self) -> None: + self.first_button.disabled = self.current_index == 0 + self.previous_button.disabled = self.current_index == 0 + self.next_button.disabled = self.current_index == len(self.data) - 1 + self.last_button.disabled = self.current_index == len(self.data) - 1 + + def go_to_first(self, b: Button | None) -> None: + self.current_index = 0 + self.update_index_callback() + + def go_to_previous(self, b: Button | None) -> None: + if self.current_index > 0: + self.current_index -= 1 + self.update_index_callback() + + def go_to_next(self, b: Button | None) -> None: + if self.current_index < len(self.data) - 1: + self.current_index += 1 + self.update_index_callback() + + def go_to_last(self, b: Button | None) -> None: + self.current_index = len(self.data) - 1 + self.update_index_callback() + + def update_index_callback(self) -> None: + self.update_index_label() + self.update_buttons() + + # NOTE self.output is required to display IPython.display.HTML + # IPython.display.HTML is used to execute JS code + with self.output: + self.callback(self.current_index) + + def build(self) -> widgets.VBox: + return widgets.VBox( + [widgets.HBox([self.buttons, self.index_label]), self.output] + ) + + +class PaginatedWidget: + def __init__( + self, children: list, on_paginate_callback: Callable[[int], None] | None = None + ): + # on_paginate_callback is an optional secondary callback, + # called after updating the page index and displaying the new widget + self.children = children + self.on_paginate_callback = on_paginate_callback + self.current_index = 0 + self.container = widgets.VBox() + + self.pagination_control = PaginationControl(children, self.on_paginate) + + # Initial display + self.on_paginate(self.pagination_control.current_index) + + def __getitem__(self, index: int) -> widgets.Widget: + return self.children[index] + + def on_paginate(self, index: int) -> None: + self.container.children = [self.children[index]] if self.children else [] + if self.on_paginate_callback: + self.on_paginate_callback(index) + + def spacer(self, height: int) -> widgets.HTML: + return widgets.HTML(f"
    ") + + def build(self) -> widgets.VBox: + return widgets.VBox( + [self.pagination_control.build(), self.spacer(8), self.container] + ) + + +class PaginatedResolveWidget: + """ + PaginatedResolveWidget is a widget that displays + a ResolveWidget for each ObjectDiffBatch, + paginated by a PaginationControl widget. + """ + + def __init__(self, batches: list[ObjectDiffBatch], build_state: bool = True): + self.build_state = build_state + self.batches = batches + self.resolve_widgets: list[ResolveWidget] = [ + ResolveWidget( + batch, + on_sync_callback=partial(self.on_click_sync, i), + on_ignore_callback=partial(self.on_ignore, i), + build_state=build_state, + ) + for i, batch in enumerate(self.batches) + ] + + self.table_uid = secrets.token_hex(4) + + # Disable the table pagination to avoid the double pagination buttons + + self.paginated_widget = PaginatedWidget( + children=[widget.widget for widget in self.resolve_widgets], + on_paginate_callback=self.on_paginate, + ) + + self.table_output = Output() + self.draw_table() + + self.widget = self.build() + + def draw_table(self) -> None: + self.batch_table = build_tabulator_table( + obj=self.batches, + uid=self.table_uid, + max_height=500, + pagination=False, + header_sort=False, + ) + self.table_output.clear_output() + with self.table_output: + display.display(display.HTML(self.batch_table)) + highlight_single_row( + self.table_uid, self.paginated_widget.current_index, jump_to_row=True + ) + + def on_ignore(self, index: int) -> None: + self.update_table_sync_decision(index) + self.draw_table() + + def on_click_sync(self, index: int) -> None: + self.update_table_sync_decision(index) + if self.batches[index].decision is not None: + self.paginated_widget.pagination_control.go_to_next(None) + + def update_table_sync_decision(self, index: int) -> None: + new_decision = self.batches[index].decision_badge() + with self.table_output: + update_table_cell( + uid=self.table_uid, + index=index, + field="Decision", + value=new_decision, + ) + + def __getitem__(self, index: int) -> ResolveWidget: + return self.resolve_widgets[index] + + def __len__(self) -> int: + return len(self.resolve_widgets) + + def on_paginate(self, index: int) -> None: + return highlight_single_row(self.table_uid, index, jump_to_row=True) + + def build(self) -> widgets.VBox: + return widgets.VBox([self.table_output, self.paginated_widget.build()]) + + def click_sync(self, index: int) -> SyftSuccess: + return self.resolve_widgets[index].click_sync() + + def click_share_all_private_data(self, index: int) -> None: + self.resolve_widgets[index].click_share_all_private_data() + + def _share_all(self) -> None: + for widget in self.resolve_widgets: + widget.click_share_all_private_data() + + def _sync_all(self) -> None: + for idx, widget in enumerate(self.resolve_widgets): + if widget.obj_diff_batch.decision in [ + SyncDecision.IGNORE, + SyncDecision.SKIP, + ]: + print(f"skipping row {idx} (skipped/ignored)") + else: + widget.click_sync() + + def _repr_mimebundle_(self, **kwargs: dict) -> dict[str, str] | None: + return self.widget._repr_mimebundle_(**kwargs) diff --git a/packages/syft/src/syft/service/sync/sync_service.py b/packages/syft/src/syft/service/sync/sync_service.py index 16720f50e9e..b6cc955ac4f 100644 --- a/packages/syft/src/syft/service/sync/sync_service.py +++ b/packages/syft/src/syft/service/sync/sync_service.py @@ -1,19 +1,18 @@ # stdlib from collections import defaultdict +import logging from typing import Any -from typing import cast - -# third party -from result import 
Result # relative -from ...abstract_node import AbstractNode -from ...client.api import NodeIdentity -from ...node.credentials import SyftVerifyKey +from ...client.api import ServerIdentity from ...serde.serializable import serializable -from ...store.document_store import BaseStash -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException from ...store.linked_obj import LinkedObject +from ...types.datetime import DateTime +from ...types.errors import SyftException +from ...types.result import as_result from ...types.syft_object import SyftObject from ...types.syncable_object import SyncableSyftObject from ...types.uid import UID @@ -22,11 +21,12 @@ from ..action.action_permissions import ActionObjectPermission from ..action.action_permissions import ActionPermission from ..action.action_permissions import StoragePermission +from ..api.api import TwinAPIEndpoint from ..code.user_code import UserCodeStatusCollection from ..context import AuthedServiceContext from ..job.job_stash import Job -from ..output.output_service import ExecutionOutput -from ..response import SyftError +from ..log.log import SyftLog +from ..request.request import Request from ..response import SyftSuccess from ..service import AbstractService from ..service import TYPE_TO_SERVICE @@ -35,51 +35,51 @@ from .sync_stash import SyncStash from .sync_state import SyncState +logger = logging.getLogger(__name__) + + +def get_store(context: AuthedServiceContext, item: SyncableSyftObject) -> ObjectStash: + return get_store_by_type(context=context, obj_type=type(item)) -def get_store(context: AuthedServiceContext, item: SyncableSyftObject) -> Any: - if isinstance(item, ActionObject): - service = context.node.get_service("actionservice") # type: ignore - return service.store # type: ignore - service = context.node.get_service(TYPE_TO_SERVICE[type(item)]) # type: ignore - return service.stash.partition + +def get_store_by_type(context: AuthedServiceContext, obj_type: type) -> ObjectStash: + if issubclass(obj_type, ActionObject): + service = context.server.services.action # type: ignore + return service.stash # type: ignore + service = context.server.get_service(TYPE_TO_SERVICE[obj_type]) # type: ignore + return service.stash @instrument -@serializable() +@serializable(canonical_name="SyncService", version=1) class SyncService(AbstractService): - store: DocumentStore stash: SyncStash - def __init__(self, store: DocumentStore): - self.store = store + def __init__(self, store: DBManager): self.stash = SyncStash(store=store) def add_actionobject_read_permissions( self, context: AuthedServiceContext, action_object: ActionObject, - permissions_other: list[str], + new_permissions: list[ActionObjectPermission], ) -> None: - read_permissions = [x for x in permissions_other if "READ" in x] + action_stash = context.server.services.action.stash + for permission in new_permissions: + if permission.permission == ActionPermission.READ: + action_stash.add_permission(permission) - _id = action_object.id.id blob_id = action_object.syft_blob_storage_entry_id - - store_to = context.node.get_service("actionservice").store # type: ignore - store_to_blob = context.node.get_service("blobstorageservice").stash.partition # type: ignore - - for read_permission in read_permissions: - creds, perm_str = read_permission.split("_") - perm = ActionPermission[perm_str] - permission = ActionObjectPermission( - uid=_id, 
permission=perm, credentials=SyftVerifyKey(creds) - ) - store_to.add_permission(permission) - - permission_blob = ActionObjectPermission( - uid=blob_id, permission=perm, credentials=SyftVerifyKey(creds) - ) - store_to_blob.add_permission(permission_blob) + if blob_id: + blob_stash = context.server.services.blob_storage.stash + for permission in new_permissions: + if permission.permission == ActionPermission.READ: + permission_blob = ActionObjectPermission( + uid=blob_id, + permission=permission.permission, + credentials=permission.credentials, + ) + blob_stash.add_permission(permission_blob) def set_obj_ids(self, context: AuthedServiceContext, x: Any) -> None: if hasattr(x, "__dict__") and isinstance(x, SyftObject): @@ -92,29 +92,32 @@ def set_obj_ids(self, context: AuthedServiceContext, x: Any) -> None: self.set_obj_ids(context, v) else: self.set_obj_ids(context, val) - x.syft_node_location = context.node.id # type: ignore + x.syft_server_location = context.server.id # type: ignore x.syft_client_verify_key = context.credentials - if hasattr(x, "node_uid"): - x.node_uid = context.node.id # type: ignore + if hasattr(x, "server_uid"): + x.server_uid = context.server.id # type: ignore def transform_item( - self, context: AuthedServiceContext, item: SyftObject + self, + context: AuthedServiceContext, + item: SyncableSyftObject, ) -> SyftObject: if isinstance(item, UserCodeStatusCollection): - identity = NodeIdentity.from_node(context.node) + identity = ServerIdentity.from_server(context.server) res = {} - for key in item.status_dict.keys(): - # todo, check if they are actually only two nodes - res[identity] = item.status_dict[key] + for approval_decision in item.status_dict.values(): + # todo, check if they are actually only two servers + res[identity] = approval_decision item.status_dict = res self.set_obj_ids(context, item) return item + @as_result(ValueError) def get_stash_for_item( self, context: AuthedServiceContext, item: SyftObject - ) -> BaseStash: - services = list(context.node.service_path_map.values()) # type: ignore + ) -> ObjectStash: + services = list(context.server.service_path_map.values()) # type: ignore all_stashes = {} for serv in services: @@ -122,56 +125,63 @@ def get_stash_for_item( all_stashes[_stash.object_type] = _stash stash = all_stashes.get(type(item), None) + if stash is None: + raise ValueError(f"Could not find stash for {type(item)}") return stash def add_permissions_for_item( self, context: AuthedServiceContext, item: SyftObject, - permissions_other: set[ActionObjectPermission], + new_permissions: list[ActionObjectPermission], ) -> None: - if isinstance(item, Job) and context.node.node_side_type.value == "low": # type: ignore - _id = item.id - read_permissions = [x for x in permissions_other if "READ" in x] # type: ignore - job_store = context.node.get_service("jobservice").stash.partition # type: ignore - for read_permission in read_permissions: - creds, perm_str = read_permission.split("_") - perm = ActionPermission[perm_str] - permission = ActionObjectPermission( - uid=_id, permission=perm, credentials=SyftVerifyKey(creds) - ) - job_store.add_permission(permission) + if isinstance(item, ActionObject): + raise ValueError("ActionObject permissions should be added separately") + else: + store = get_store(context, item) # type: ignore + for permission in new_permissions: + if permission.permission == ActionPermission.READ: + store.add_permission(permission) def add_storage_permissions_for_item( self, context: AuthedServiceContext, item: SyftObject, - 
permissions_other: set[UID], + new_permissions: list[StoragePermission], ) -> None: - _id = item.id.id - permissions = [ - StoragePermission(uid=_id, node_uid=p) for p in permissions_other - ] - store = get_store(context, item) - store.add_storage_permissions(permissions) + store.add_storage_permissions(new_permissions) + @as_result(SyftException) def set_object( self, context: AuthedServiceContext, item: SyncableSyftObject - ) -> Result[SyftObject, str]: - stash = self.get_stash_for_item(context, item) + ) -> SyftObject: + stash = self.get_stash_for_item(context, item).unwrap() creds = context.credentials - exists = stash.get_by_uid(context.credentials, item.id).ok() is not None + obj = None + try: + obj = stash.get_by_uid(context.credentials, item.id).unwrap() + except (SyftException, KeyError): + obj = None + + exists = obj is not None + + if isinstance(item, TwinAPIEndpoint): + # we need the side effect of set function + # to create an action object + res = context.server.services.api.set(context=context, endpoint=item) + return item + if exists: - res = stash.update(creds, item) + res = stash.update(creds, item).unwrap() else: # Storage permissions are added separately res = stash.set( creds, item, add_storage_permission=False, - ) + ).unwrap() return res @@ -179,19 +189,52 @@ def set_object( path="sync.sync_items", name="sync_items", roles=ADMIN_ROLE_LEVEL, + unwrap_on_success=False, ) def sync_items( self, context: AuthedServiceContext, - items: list[ActionObject | SyftObject], - permissions: dict[UID, set[str]], - storage_permissions: dict[UID, set[UID]], - ) -> SyftSuccess | SyftError: - permissions = defaultdict(set, permissions) - storage_permissions = defaultdict(set, storage_permissions) + items: list[SyncableSyftObject], + permissions: dict[type, list[ActionObjectPermission]], + storage_permissions: list[StoragePermission], + ignored_batches: dict[UID, int], + unignored_batches: set[UID], + ) -> SyftSuccess: + permissions_dict = defaultdict(list) + for permission_list in permissions.values(): + for permission in permission_list: + permissions_dict[permission.uid].append(permission) + + item_ids = [item.id.id for item in items] + + # If we just want to add permissions without having an object + # This should happen only for the high side when we sync results but + # we need to add permissions for the DS to properly show the status of the requests + for obj_type, permission_list in permissions.items(): + for permission in permission_list: + if permission.uid in item_ids: + continue + if obj_type not in [Job, SyftLog, Request] and not issubclass( + obj_type, ActionObject + ): + raise SyftException( + public_message="Permission for object type not supported!" 
+ ) + if issubclass(obj_type, ActionObject): + store = context.server.services.action.stash + else: + service = context.server.get_service(TYPE_TO_SERVICE[obj_type]) + store = service.stash # type: ignore[assignment] + if permission.permission == ActionPermission.READ: + store.add_permission(permission) + + storage_permissions_dict = defaultdict(list) + for storage_permission in storage_permissions: + storage_permissions_dict[storage_permission.uid].append(storage_permission) + for item in items: - new_permissions = permissions[item.id.id] - new_storage_permissions = storage_permissions[item.id.id] + new_permissions = permissions_dict[item.id.id] + new_storage_permissions = storage_permissions_dict[item.id.id] if isinstance(item, ActionObject): self.add_actionobject_read_permissions(context, item, new_permissions) self.add_storage_permissions_for_item( @@ -199,16 +242,30 @@ def sync_items( ) else: item = self.transform_item(context, item) # type: ignore[unreachable] - res = self.set_object(context, item) + self.set_object(context, item).unwrap() + self.add_permissions_for_item(context, item, new_permissions) + self.add_storage_permissions_for_item( + context, item, new_storage_permissions + ) - if res.is_ok(): - self.add_permissions_for_item(context, item, new_permissions) - self.add_storage_permissions_for_item( - context, item, new_storage_permissions - ) - else: - return SyftError(message=f"Failed to sync {res.err()}") - return SyftSuccess(message=f"Synced {len(items)} items") + # NOTE include_items=False to avoid snapshotting the database + # Snapshotting is disabled to avoid mongo size limit and performance issues + new_state = self.build_current_state( + context, + new_items=items, + new_ignored_batches=ignored_batches, + new_unignored_batches=unignored_batches, + include_items=False, + ).unwrap() + + self.stash.set(context.credentials, new_state).unwrap() + + message = f"Synced {len(items)} items" + if len(ignored_batches) > 0: + message += f", ignored {len(ignored_batches)} batches" + if len(unignored_batches) > 0: + message += f", unignored {len(unignored_batches)} batches" + return SyftSuccess(message=message) @service_method( path="sync.get_permissions", @@ -219,79 +276,181 @@ def get_permissions( self, context: AuthedServiceContext, items: list[SyncableSyftObject], - ) -> tuple[dict[UID, set[str]], dict[UID, set[str]]]: - permissions = {} - storage_permissions = {} + ) -> tuple[dict[UID, set[str]], dict[UID, set[UID]]]: + permissions: dict[UID, set[str]] = {} + storage_permissions: dict[UID, set[UID]] = {} for item in items: store = get_store(context, item) if store is not None: - _id = item.id.id - permissions[_id] = store.permissions[_id] - storage_permissions[_id] = store.storage_permissions[_id] + # TODO fix error handling + uid = item.id.id + permissions[uid] = store._get_permissions_for_uid(uid).unwrap() + + # TODO fix error handling for storage permissions + storage_permissions[uid] = store._get_storage_permissions_for_uid( + uid + ).unwrap() return permissions, storage_permissions - @service_method( - path="sync._get_state", - name="_get_state", - roles=ADMIN_ROLE_LEVEL, - ) - def _get_state( - self, context: AuthedServiceContext, add_to_store: bool = False - ) -> SyncState | SyftError: - node = cast(AbstractNode, context.node) + @as_result(SyftException) + def _get_all_items_for_jobs( + self, + context: AuthedServiceContext, + ) -> tuple[list[SyncableSyftObject], dict[UID, str]]: + """ + Returns all Jobs, along with their Logs, ExecutionOutputs and ActionObjects + """ 
+ items_for_jobs: list[SyncableSyftObject] = [] + errors = {} + jobs = context.server.services.job.get_all(context) + + for job in jobs: + try: + job_items = self._get_job_batch(context, job).unwrap() + items_for_jobs.extend(job_items) + except SyftException as exc: + logger.info( + f"Job {job.id} could not be added to SyncState: {exc._private_message or exc.public_message}" + ) + errors[job.id] = str(exc) + + return (items_for_jobs, errors) + + @as_result(SyftException) + def _get_job_batch( + self, context: AuthedServiceContext, job: Job + ) -> list[SyncableSyftObject]: + job_batch = [job] + + log = context.server.services.log.get(context, job.log_id) + job_batch.append(log) - new_state = SyncState(node_uid=node.id) + try: + output = context.server.services.output.get_by_job_id(context, job.id) + except NotFoundException: + output = None + if output is not None: + job_batch.append(output) + job_result_ids = set(output.output_id_list) + else: + job_result_ids = set() + + if isinstance(job.result, ActionObject): + job_result_ids.add(job.result.id.id) + + for result_id in job_result_ids: + # TODO: unwrap + action_object = context.server.services.action.get(context, result_id) + job_batch.append(action_object) + + return job_batch + + @as_result(SyftException) + def get_all_syncable_items( + self, context: AuthedServiceContext + ) -> tuple[list[SyncableSyftObject], dict[UID, str]]: + all_items: list[SyncableSyftObject] = [] + + # NOTE Jobs are handled separately services_to_sync = [ "requestservice", "usercodeservice", - "jobservice", - "logservice", - "outputservice", "usercodestatusservice", + "apiservice", ] for service_name in services_to_sync: - service = node.get_service(service_name) + service = context.server.get_service(service_name) items = service.get_all(context) - new_state.add_objects(items, api=node.root_client.api) # type: ignore - - # TODO workaround, we only need action objects from outputs for now - action_object_ids = set() - for obj in new_state.objects.values(): - if isinstance(obj, ExecutionOutput): - action_object_ids |= set(obj.output_id_list) - elif isinstance(obj, Job) and obj.result is not None: - if isinstance(obj.result, ActionObject): - obj.result = obj.result.as_empty() - action_object_ids.add(obj.result.id) - - action_objects = [] - for uid in action_object_ids: - action_object = node.get_service("actionservice").get(context, uid) # type: ignore - if action_object.is_err(): - return SyftError(message=action_object.err()) - action_objects.append(action_object.ok()) - new_state.add_objects(action_objects) - - new_state._build_dependencies(api=node.root_client.api) # type: ignore - - permissions, storage_permissions = self.get_permissions( - context, new_state.objects.values() + all_items.extend(items) + + # Gather jobs, logs, outputs and action objects + items_for_jobs, errors = self._get_all_items_for_jobs(context).unwrap() + # items_for_jobs, errors = items_for_jobs + all_items.extend(items_for_jobs) + + return (all_items, errors) + + @as_result(SyftException) + def build_current_state( + self, + context: AuthedServiceContext, + new_items: list[SyncableSyftObject] | None = None, + new_ignored_batches: dict[UID, int] | None = None, + new_unignored_batches: set[UID] | None = None, + include_items: bool = True, + ) -> SyncState: + new_items = new_items if new_items is not None else [] + new_ignored_batches = ( + new_ignored_batches if new_ignored_batches is not None else {} ) - new_state.permissions = permissions - new_state.storage_permissions = 
storage_permissions + unignored_batches: set[UID] = ( + new_unignored_batches if new_unignored_batches is not None else set() + ) + if include_items: + objects, errors = self.get_all_syncable_items(context).unwrap() + permissions, storage_permissions = self.get_permissions(context, objects) + else: + objects = [] + errors = {} + permissions = {} + storage_permissions = {} + + try: + previous_state = self.stash.get_latest( + credentials=context.credentials + ).unwrap() + except NotFoundException: + previous_state = None - previous_state = self.stash.get_latest(context=context) if previous_state is not None: - new_state.previous_state_link = LinkedObject.from_obj( + previous_state_link = LinkedObject.from_obj( obj=previous_state, service_type=SyncService, - node_uid=context.node.id, # type: ignore + server_uid=context.server.id, # type: ignore ) + previous_ignored_batches = previous_state.ignored_batches + else: + previous_state_link = None + previous_ignored_batches = {} + + ignored_batches = { + **previous_ignored_batches, + **new_ignored_batches, + } - if add_to_store: - self.stash.set(context.credentials, new_state) + ignored_batches = { + k: v for k, v in ignored_batches.items() if k not in unignored_batches + } + + object_sync_dates = ( + previous_state.object_sync_dates.copy() if previous_state else {} + ) + for obj in new_items: + object_sync_dates[obj.id.id] = DateTime.now() + + new_state = SyncState( + server_uid=context.server.id, # type: ignore + server_name=context.server.name, # type: ignore + server_side_type=context.server.server_side_type, # type: ignore + previous_state_link=previous_state_link, + permissions=permissions, + storage_permissions=storage_permissions, + ignored_batches=ignored_batches, + object_sync_dates=object_sync_dates, + errors=errors, + ) + + new_state.add_objects(objects, context) return new_state + + @service_method( + path="sync._get_state", + name="_get_state", + roles=ADMIN_ROLE_LEVEL, + ) + def _get_state(self, context: AuthedServiceContext) -> SyncState: + return self.build_current_state(context).unwrap() diff --git a/packages/syft/src/syft/service/sync/sync_stash.py b/packages/syft/src/syft/service/sync/sync_stash.py index 208af56fa6a..81bc26db263 100644 --- a/packages/syft/src/syft/service/sync/sync_stash.py +++ b/packages/syft/src/syft/service/sync/sync_stash.py @@ -1,45 +1,31 @@ -# stdlib - # relative from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...types.datetime import DateTime -from ...util.telemetry import instrument -from ..context import AuthedServiceContext -from ..response import SyftError +from ...server.credentials import SyftVerifyKey +from ...store.db.db import DBManager +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import StashException +from ...types.result import as_result from .sync_state import SyncState -OrderByDatePartitionKey = PartitionKey(key="created_at", type_=DateTime) - - -@instrument -@serializable() -class SyncStash(BaseUIDStoreStash): - object_type = SyncState - settings: PartitionSettings = PartitionSettings( - name=SyncState.__canonical_name__, - object_type=SyncState, - ) - def __init__(self, store: DocumentStore): +@serializable(canonical_name="SyncStash", version=1) +class SyncStash(ObjectStash[SyncState]): + def __init__(self, store: DBManager) -> None: 
super().__init__(store) - self.store = store - self.settings = self.settings - self._object_type = self.object_type - - def get_latest(self, context: AuthedServiceContext) -> SyncState | None | SyftError: - all_states = self.get_all( - credentials=context.node.verify_key, # type: ignore - order_by=OrderByDatePartitionKey, - ) - - if all_states.is_err(): - return SyftError(message=all_states.err()) - - all_states = all_states.ok() - if len(all_states) > 0: - return all_states[-1] + self.last_state: SyncState | None = None + + @as_result(StashException) + def get_latest(self, credentials: SyftVerifyKey) -> SyncState | None: + if self.last_state is not None: + return self.last_state + + states = self.get_all( + credentials=credentials, + order_by="created_at", + sort_order="desc", + limit=1, + ).unwrap() + + if len(states) > 0: + return states[0] return None diff --git a/packages/syft/src/syft/service/sync/sync_state.py b/packages/syft/src/syft/service/sync/sync_state.py index 7886a3acef5..0e00070fe64 100644 --- a/packages/syft/src/syft/service/sync/sync_state.py +++ b/packages/syft/src/syft/service/sync/sync_state.py @@ -1,10 +1,13 @@ # stdlib -import html +from datetime import timedelta from typing import Any from typing import Optional -from typing import TYPE_CHECKING + +# third party +from pydantic import Field # relative +from ...abstract_server import ServerSideType from ...serde.serializable import serializable from ...store.linked_obj import LinkedObject from ...types.datetime import DateTime @@ -13,10 +16,8 @@ from ...types.syncable_object import SyncableSyftObject from ...types.uid import LineageID from ...types.uid import UID - -if TYPE_CHECKING: - # relative - from .diff_state import NodeDiff +from ...util.notebook_ui.components.sync import SyncTableObject +from ..context import AuthedServiceContext def get_hierarchy_level_prefix(level: int) -> str: @@ -26,7 +27,6 @@ def get_hierarchy_level_prefix(level: int) -> str: return "--" * level + " " -@serializable() class SyncStateRow(SyftObject): """A row in the SyncState table""" @@ -37,20 +37,47 @@ class SyncStateRow(SyftObject): previous_object: SyftObject | None = None current_state: str previous_state: str + status: str level: int = 0 + last_sync_date: DateTime | None = None + + __syft_include_id_coll_repr__ = False # TODO table formatting __repr_attrs__ = [ "previous_state", "current_state", ] + __table_coll_widths__ = ["min-content", "auto", "auto", "auto"] + + def status_badge(self) -> dict[str, str]: + status = self.status + if status == "NEW": + badge_color = "label-green" + elif status == "SAME": + badge_color = "label-gray" + else: + badge_color = "label-orange" + return {"value": status.upper(), "type": badge_color} def _coll_repr_(self) -> dict[str, Any]: - current_state = f"{self.status}\n{self.current_state}" - previous_state = f"{self.status}\n{self.previous_state}" + obj_view = SyncTableObject(object=self.object) + + if self.last_sync_date is not None: + last_sync_date = self.last_sync_date + last_sync_delta = timedelta( + seconds=DateTime.now().utc_timestamp - last_sync_date.utc_timestamp + ) + last_sync_delta_str = td_format(last_sync_delta) + last_sync_html = ( + f"

<p>{last_sync_delta_str} ago</p>" +            ) +        else: +            last_sync_html = "<p>n/a</p>
    " return { - "previous_state": html.escape(previous_state), - "current_state": html.escape(current_state), + "Status": self.status_badge(), + "Summary": obj_view.to_html(), + "Last Sync": last_sync_html, } @property @@ -58,15 +85,29 @@ def object_type(self) -> str: prefix = get_hierarchy_level_prefix(self.level) return f"{prefix}{type(self.object).__name__}" - @property - def status(self) -> str: - # TODO use Diffs to determine status - if self.previous_object is None: - return "NEW" - elif self.previous_object.syft_eq(ext_obj=self.object): - return "SAME" - else: - return "UPDATED" + +def td_format(td_object: timedelta) -> str: + seconds = int(td_object.total_seconds()) + if seconds == 0: + return "0 seconds" + + periods = [ + ("year", 60 * 60 * 24 * 365), + ("month", 60 * 60 * 24 * 30), + ("day", 60 * 60 * 24), + ("hour", 60 * 60), + ("minute", 60), + ("second", 1), + ] + + strings = [] + for period_name, period_seconds in periods: + if seconds >= period_seconds: + period_value, seconds = divmod(seconds, period_seconds) + has_s = "s" if period_value > 1 else "" + strings.append(f"{period_value} {period_name}{has_s}") + + return ", ".join(strings) @serializable() @@ -74,16 +115,48 @@ class SyncState(SyftObject): __canonical_name__ = "SyncState" __version__ = SYFT_OBJECT_VERSION_1 - node_uid: UID + server_uid: UID + server_name: str + server_side_type: ServerSideType objects: dict[UID, SyncableSyftObject] = {} dependencies: dict[UID, list[UID]] = {} - created_at: DateTime = DateTime.now() + created_at: DateTime = Field(default_factory=DateTime.now) previous_state_link: LinkedObject | None = None permissions: dict[UID, set[str]] = {} storage_permissions: dict[UID, set[UID]] = {} + ignored_batches: dict[UID, int] = {} + object_sync_dates: dict[UID, DateTime] = {} + errors: dict[UID, str] = {} + + # NOTE importing ServerDiff annotation with TYPE_CHECKING does not work here, + # since typing.get_type_hints does not check for TYPE_CHECKING-imported types + _previous_state_diff: Any = None __attr_searchable__ = ["created_at"] + def _set_previous_state_diff(self) -> None: + # relative + from .diff_state import ServerDiff + + # Re-use ServerDiff to compare to previous state + # Low = previous state, high = current state + # NOTE No previous sync state means everything is new + previous_state = self.previous_state or SyncState( + server_uid=self.server_uid, + server_name=self.server_name, + server_side_type=self.server_side_type, + syft_client_verify_key=self.syft_client_verify_key, + ) + self._previous_state_diff = ServerDiff.from_sync_state( + previous_state, self, _include_server_status=False, direction=None + ) + + def get_previous_state_diff(self) -> Any: + if self._previous_state_diff is None: + self._set_previous_state_diff() + + return self._previous_state_diff + @property def previous_state(self) -> Optional["SyncState"]: if self.previous_state_link is not None: @@ -94,37 +167,43 @@ def previous_state(self) -> Optional["SyncState"]: def all_ids(self) -> set[UID]: return set(self.objects.keys()) - def add_objects(self, objects: list[SyncableSyftObject], api: Any = None) -> None: + def get_status(self, uid: UID) -> str | None: + previous_state_diff = self.get_previous_state_diff() + if previous_state_diff is None: + return None + diff = previous_state_diff.obj_uid_to_diff.get(uid) + + if diff is None: + return None + return diff.status + + def add_objects( + self, objects: list[SyncableSyftObject], context: AuthedServiceContext + ) -> None: for obj in objects: if isinstance(obj.id, 
LineageID): self.objects[obj.id.id] = obj - else: + elif isinstance(obj.id, UID): self.objects[obj.id] = obj + else: + raise ValueError(f"Unsupported id type: {type(obj.id)}") # TODO might get slow with large states, # need to build dependencies every time to not have UIDs # in dependencies that are not in objects - self._build_dependencies(api=api) + self._build_dependencies(context=context) - def _build_dependencies(self, api: Any = None) -> None: + def _build_dependencies(self, context: AuthedServiceContext) -> None: self.dependencies = {} all_ids = self.all_ids for obj in self.objects.values(): if hasattr(obj, "get_sync_dependencies"): - deps = obj.get_sync_dependencies(api=api) + deps = obj.get_sync_dependencies(context=context) deps = [d.id for d in deps if d.id in all_ids] # type: ignore + # TODO: Why is this en check here? here? if len(deps): - self.dependencies[obj.id] = deps - - def get_previous_state_diff(self) -> "NodeDiff": - # Re-use DiffState to compare to previous state - # Low = previous, high = current - # relative - from .diff_state import NodeDiff - - previous_state = self.previous_state or SyncState(node_uid=self.node_uid) - return NodeDiff.from_sync_state(previous_state, self) + self.dependencies[obj.id.id] = deps @property def rows(self) -> list[SyncStateRow]: @@ -132,20 +211,54 @@ def rows(self) -> list[SyncStateRow]: ids = set() previous_diff = self.get_previous_state_diff() - for hierarchy in previous_diff.hierarchies: - for diff, level in zip(hierarchy.diffs, hierarchy.hierarchy_levels): - if diff.object_id in ids: - continue - ids.add(diff.object_id) - row = SyncStateRow( - object=diff.high_obj, - previous_object=diff.low_obj, - current_state=diff.diff_side_str("high"), - previous_state=diff.diff_side_str("low"), - level=level, - ) - result.append(row) + if previous_diff is None: + raise ValueError("No previous state to compare to") + for batch in previous_diff.batches: + # NOTE we re-use ServerDiff to compare to previous state, + # low_obj is previous state, high_obj is current state + diff = batch.root_diff + + # If there is no high object, it means it was deleted + # and we don't need to show it in the table + if diff.high_obj is None: + continue + if diff.object_id in ids: + continue + + ids.add(diff.object_id) + row = SyncStateRow( + object=diff.high_obj, + previous_object=diff.low_obj, + current_state=diff.diff_side_str("high"), + previous_state=diff.diff_side_str("low"), + level=0, # TODO add levels to table + status=batch.status, + last_sync_date=diff.last_sync_date, + ) + result.append(row) return result def _repr_html_(self) -> str: - return self.rows._repr_html_() + prop_template = ( + "

<p>{}: {}</p>
    " + ) + name_html = prop_template.format("name", self.server_name) + if self.previous_state_link is not None: + previous_state = self.previous_state_link.resolve + delta = timedelta( + seconds=self.created_at.utc_timestamp + - previous_state.created_at.utc_timestamp + ) + val = f"{td_format(delta)} ago" + date_html = prop_template.format("last sync", val) + else: + date_html = prop_template.format("last sync", "not synced yet") + + repr = f""" +
<div> +            {name_html} +            {date_html} + </div>
    +""" + return repr + self.rows._repr_html_() diff --git a/packages/syft/src/syft/service/sync/widget_output.py b/packages/syft/src/syft/service/sync/widget_output.py new file mode 100644 index 00000000000..53c22493f49 --- /dev/null +++ b/packages/syft/src/syft/service/sync/widget_output.py @@ -0,0 +1,20 @@ +# stdlib +import os + +# third party +from ipywidgets.widgets import Output as ipyOutput + + +class Output(ipyOutput): + # This is a workaround for the issue that + # the Output widget causes halt when running in Jupyter Notebook + # from cli, e.g. tests. + # + # No-op when running in Jupyter Notebook. + if "JPY_PARENT_PID" in os.environ: + + def __enter__(self): # type: ignore + pass + + def __exit__(self, *args, **kwargs): # type: ignore + pass diff --git a/packages/syft/src/syft/service/user/errors.py b/packages/syft/src/syft/service/user/errors.py new file mode 100644 index 00000000000..218bd0207b4 --- /dev/null +++ b/packages/syft/src/syft/service/user/errors.py @@ -0,0 +1,50 @@ +# relative +from ...types.errors import SyftException + + +class UserError(SyftException): + public_message = "UserError. Please contact the admin." + + +class UserCreateError(UserError): + public_message = "Failed to create user." + + +class UserDeleteError(UserError): + public_message = "Failed to delete user." + + +class UserUpdateError(UserError): + public_message = "Failed to update user." + + +class UserPasswordMismatchError(UserError): + public_message = "Passwords do not match!" + + +class UserInvalidEmailError(UserError): + public_message = "Invalid email address." + + +class UserSearchBadParamsError(UserError): ... + + +# public_message = ( +# f"Invalid Search parameters. Allowed params: " +# f"{list(UserSearch.model_fields.keys())}" +# ) + + +class UserPermissionError(UserError): + public_message = "You are not permitted to perform this action." + + +class UserExchangeCredentials(UserError): + public_message = "Invalid credential exchange. Please contact the admin." + + +class UserEnclaveAdminLoginError(UserError): + public_message = ( + "Admins are not allowed to login to Enclaves.\n" + "Kindly register a new data scientist account via `client.register`." 
+ ) diff --git a/packages/syft/src/syft/service/user/user.py b/packages/syft/src/syft/service/user/user.py index aa10737c3a4..0fb8a87e7fd 100644 --- a/packages/syft/src/syft/service/user/user.py +++ b/packages/syft/src/syft/service/user/user.py @@ -1,6 +1,8 @@ # stdlib from collections.abc import Callable +from datetime import datetime from getpass import getpass +import re from typing import Any # third party @@ -9,17 +11,17 @@ from bcrypt import hashpw from pydantic import EmailStr from pydantic import ValidationError -from pydantic import field_validator # relative -from ...client.api import APIRegistry -from ...node.credentials import SyftSigningKey -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable +from ...server.credentials import SyftSigningKey +from ...server.credentials import SyftVerifyKey +from ...types.errors import SyftException from ...types.syft_metaclass import Empty +from ...types.syft_migration import migrate from ...types.syft_object import PartialSyftObject +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SYFT_OBJECT_VERSION_3 from ...types.syft_object import SyftObject from ...types.transforms import TransformContext from ...types.transforms import drop @@ -30,16 +32,16 @@ from ...types.transforms import validate_email from ...types.uid import UID from ..notifier.notifier_enums import NOTIFIERS -from ..response import SyftError from ..response import SyftSuccess +from .errors import UserPasswordMismatchError from .user_roles import ServiceRole @serializable() -class User(SyftObject): +class UserV1(SyftObject): # version __canonical_name__ = "User" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 id: UID | None = None # type: ignore[assignment] @@ -69,10 +71,77 @@ class User(SyftObject): __repr_attrs__ = ["name", "email"] +@serializable() +class User(SyftObject): + # version + __canonical_name__ = "User" + __version__ = SYFT_OBJECT_VERSION_2 + + id: UID | None = None # type: ignore[assignment] + + # fields + notifications_enabled: dict[NOTIFIERS, bool] = { + NOTIFIERS.EMAIL: True, + NOTIFIERS.SMS: False, + NOTIFIERS.SLACK: False, + NOTIFIERS.APP: False, + } + email: EmailStr | None = None + name: str | None = None + hashed_password: str | None = None + salt: str | None = None + signing_key: SyftSigningKey | None = None + verify_key: SyftVerifyKey | None = None + role: ServiceRole | None = None + institution: str | None = None + website: str | None = None + created_at: str | None = None + # TODO where do we put this flag? 
+ mock_execution_permission: bool = False + reset_token: str | None = None + reset_token_date: datetime | None = None + # serde / storage rules + __attr_searchable__ = ["name", "email", "verify_key", "role", "reset_token"] + __attr_unique__ = ["email", "signing_key", "verify_key"] + __repr_attrs__ = ["name", "email"] + + +@migrate(UserV1, User) +def migrate_server_user_update_v1_current() -> list[Callable]: + return [ + make_set_default("reset_token", None), + make_set_default("reset_token_date", None), + drop("__attr_searchable__"), + make_set_default( + "__attr_searchable__", + ["name", "email", "verify_key", "role", "reset_token"], + ), + ] + + +@migrate(User, UserV1) +def migrate_server_user_downgrade_current_v1() -> list[Callable]: + return [ + drop("reset_token"), + drop("reset_token_date"), + drop("__attr_searchable__"), + make_set_default( + "__attr_searchable__", ["name", "email", "verify_key", "role"] + ), + ] + + def default_role(role: ServiceRole) -> Callable: return make_set_default(key="role", value=role) +def validate_password(password: str) -> bool: + # Define the regex pattern for the password + pattern = re.compile(r"^(?=.*[a-z])(?=.*[A-Z])(?=.*\d).{8,}$") + + return bool(pattern.match(password)) + + def hash_password(context: TransformContext) -> TransformContext: if context.output is None: return context @@ -116,14 +185,7 @@ def check_pwd(password: str, hashed_password: str) -> bool: @serializable() class UserUpdate(PartialSyftObject): __canonical_name__ = "UserUpdate" - __version__ = SYFT_OBJECT_VERSION_3 - - @field_validator("role", mode="before") - @classmethod - def str_to_role(cls, v: Any) -> Any: - if isinstance(v, str) and hasattr(ServiceRole, v.upper()): - return getattr(ServiceRole, v.upper()) - return v + __version__ = SYFT_OBJECT_VERSION_1 email: EmailStr name: str @@ -139,7 +201,7 @@ def str_to_role(cls, v: Any) -> Any: @serializable() class UserCreate(SyftObject): __canonical_name__ = "UserCreate" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 email: EmailStr name: str @@ -158,7 +220,7 @@ class UserCreate(SyftObject): @serializable() class UserSearch(PartialSyftObject): __canonical_name__ = "UserSearch" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID email: EmailStr @@ -169,7 +231,7 @@ class UserSearch(PartialSyftObject): @serializable() class UserView(SyftObject): __canonical_name__ = "UserView" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 notifications_enabled: dict[NOTIFIERS, bool] = { NOTIFIERS.EMAIL: True, @@ -206,25 +268,18 @@ def _coll_repr_(self) -> dict[str, Any]: ), } - def _set_password(self, new_password: str) -> SyftError | SyftSuccess: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError(message=f"You must login to {self.node_uid}") + def _set_password(self, new_password: str) -> SyftSuccess: + client = self.get_api() + + client.services.user.update(uid=self.id, password=new_password) - api.services.user.update( - uid=self.id, user_update=UserUpdate(password=new_password) - ) return SyftSuccess( - message=f"Successfully updated password for " - f"user '{self.name}' with email '{self.email}'." + message=f"Successfully updated password for user '{self.email}'." 
) def set_password( self, new_password: str | None = None, confirm: bool = True - ) -> SyftError | SyftSuccess: + ) -> SyftSuccess: """Set a new password interactively with confirmed password from user input""" # TODO: Add password validation for special characters if not new_password: @@ -233,33 +288,22 @@ def set_password( if confirm: confirmed_password: str = getpass("Please confirm your password: ") if confirmed_password != new_password: - return SyftError(message="Passwords do not match !") - return self._set_password(new_password) + raise UserPasswordMismatchError - def set_email(self, email: str) -> SyftSuccess | SyftError: - # validate email address - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError(message=f"You must login to {self.node_uid}") + return self._set_password(new_password) + def set_email(self, email: str) -> SyftSuccess: try: user_update = UserUpdate(email=email) except ValidationError: - return SyftError(message="{email} is not a valid email address.") + raise SyftException(public_message=f"Invalid email: '{email}'.") - result = api.services.user.update(uid=self.id, user_update=user_update) + api = self.get_api() - if isinstance(result, SyftError): - return result + # TODO: Shouldn't this trigger an update on self? + result = api.services.user.update(uid=self.id, email=user_update.email) - self.email = email - return SyftSuccess( - message=f"Successfully updated email for the user " - f"'{self.name}' to '{self.email}'." - ) + return SyftSuccess(message=f"Email updated to '{result.email}'.") def update( self, @@ -268,39 +312,32 @@ def update( website: type[Empty] | str = Empty, role: type[Empty] | str = Empty, mock_execution_permission: type[Empty] | bool = Empty, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: """Used to update name, institution, website of a user.""" - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError(message=f"You must login to {self.node_uid}") - user_update = UserUpdate( + api = self.get_api() + + result = api.services.user.update( + uid=self.id, name=name, institution=institution, website=website, role=role, mock_execution_permission=mock_execution_permission, ) - result = api.services.user.update(uid=self.id, user_update=user_update) - - if isinstance(result, SyftError): - return result for attr, val in result.to_dict(exclude_empty=True).items(): setattr(self, attr, val) return SyftSuccess(message="User details successfully updated.") - def allow_mock_execution(self, allow: bool = True) -> SyftSuccess | SyftError: + def allow_mock_execution(self, allow: bool = True) -> SyftSuccess: return self.update(mock_execution_permission=allow) @serializable() class UserViewPage(SyftObject): __canonical_name__ = "UserViewPage" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 users: list[UserView] total: int @@ -328,6 +365,24 @@ def user_create_to_user() -> list[Callable]: ] +@transform(UserV1, UserView) +def userv1_to_view_user() -> list[Callable]: + return [ + keep( + [ + "id", + "email", + "name", + "role", + "institution", + "website", + "mock_execution_permission", + "notifications_enabled", + ] + ) + ] + + @transform(User, UserView) def user_to_view_user() -> list[Callable]: return [ @@ -349,13 +404,18 @@ def user_to_view_user() -> list[Callable]: @serializable() class UserPrivateKey(SyftObject): __canonical_name__ = 
"UserPrivateKey" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 email: str signing_key: SyftSigningKey role: ServiceRole +@transform(UserV1, UserPrivateKey) +def userv1_to_user_verify() -> list[Callable]: + return [keep(["email", "signing_key", "id", "role"])] + + @transform(User, UserPrivateKey) def user_to_user_verify() -> list[Callable]: return [keep(["email", "signing_key", "id", "role"])] diff --git a/packages/syft/src/syft/service/user/user_roles.py b/packages/syft/src/syft/service/user/user_roles.py index 6ed7f4a9796..61cafd4b999 100644 --- a/packages/syft/src/syft/service/user/user_roles.py +++ b/packages/syft/src/syft/service/user/user_roles.py @@ -3,6 +3,9 @@ from typing import Any # third party +from pydantic import GetCoreSchemaHandler +from pydantic_core import CoreSchema +from pydantic_core import core_schema from typing_extensions import Self # relative @@ -19,10 +22,16 @@ class ServiceRoleCapability(Enum): CAN_MANAGE_INFRASTRUCTURE = 64 CAN_UPLOAD_DATA = 128 CAN_UPLOAD_LEGAL_DOCUMENT = 256 - CAN_EDIT_DOMAIN_SETTINGS = 512 + CAN_EDIT_DATASITE_SETTINGS = 512 -@serializable() +def _str_to_role(v: Any) -> Any: + if isinstance(v, str) and hasattr(ServiceRole, v_upper := v.upper()): + return getattr(ServiceRole, v_upper) + return v + + +@serializable(canonical_name="ServiceRole", version=1) class ServiceRole(Enum): NONE = 0 GUEST = 1 @@ -34,9 +43,7 @@ class ServiceRole(Enum): # @property @classmethod def roles_descending(cls) -> list[tuple[int, Self]]: - tuples = [] - for x in cls: - tuples.append((x.value, x)) + tuples = [(x.value, x) for x in cls] return sorted(tuples, reverse=True) @classmethod @@ -56,6 +63,19 @@ def roles_for_level(cls, level: int | Self) -> list[Self]: level_float = level_float % role_num return roles + @classmethod + def __get_pydantic_core_schema__( + cls, + _source_type: Any, + _handler: GetCoreSchemaHandler, + ) -> CoreSchema: + return core_schema.chain_schema( + [ + core_schema.no_info_plain_validator_function(_str_to_role), + core_schema.is_instance_schema(cls), + ] + ) + def capabilities(self) -> list[ServiceRoleCapability]: return ROLE_TO_CAPABILITIES[self] diff --git a/packages/syft/src/syft/service/user/user_service.py b/packages/syft/src/syft/service/user/user_service.py index 3e10325c858..47c41d1ab84 100644 --- a/packages/syft/src/syft/service/user/user_service.py +++ b/packages/syft/src/syft/service/user/user_service.py @@ -1,157 +1,379 @@ # stdlib +from datetime import datetime +from datetime import timedelta +import secrets +import string +from typing import TypeVar from typing import cast # relative -from ...abstract_node import AbstractNode -from ...abstract_node import NodeType -from ...exceptions.user import UserAlreadyExistsException -from ...node.credentials import SyftSigningKey -from ...node.credentials import SyftVerifyKey -from ...node.credentials import UserLoginCredentials +from ...abstract_server import ServerType from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...server.credentials import SyftSigningKey +from ...server.credentials import SyftVerifyKey +from ...store.db.db import DBManager +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException from ...store.linked_obj import LinkedObject +from ...types.errors import CredentialsError +from ...types.errors import SyftException +from ...types.result import as_result from ...types.syft_metaclass import Empty from ...types.uid import UID 
-from ...util.telemetry import instrument from ..action.action_permissions import ActionObjectPermission from ..action.action_permissions import ActionPermission from ..context import AuthedServiceContext -from ..context import NodeServiceContext +from ..context import ServerServiceContext from ..context import UnauthedServiceContext from ..notification.email_templates import OnBoardEmailTemplate +from ..notification.email_templates import PasswordResetTemplate from ..notification.notification_service import CreateNotification -from ..notification.notification_service import NotificationService from ..notifier.notifier_enums import NOTIFIERS -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES from ..service import TYPE_TO_SERVICE from ..service import service_method +from ..settings.settings import PwdTokenResetConfig from ..settings.settings_stash import SettingsStash +from .errors import UserEnclaveAdminLoginError +from .errors import UserError +from .errors import UserPermissionError +from .errors import UserUpdateError from .user import User from .user import UserCreate from .user import UserPrivateKey from .user import UserSearch from .user import UserUpdate from .user import UserView -from .user import UserViewPage from .user import check_pwd from .user import salt_and_hash_password +from .user import validate_password +from .user_roles import ADMIN_ROLE_LEVEL from .user_roles import DATA_OWNER_ROLE_LEVEL +from .user_roles import DATA_SCIENTIST_ROLE_LEVEL from .user_roles import GUEST_ROLE_LEVEL from .user_roles import ServiceRole from .user_roles import ServiceRoleCapability from .user_stash import UserStash +T = TypeVar("T") -@instrument -@serializable() + +def _paginate( + list_objs: list[T], page_size: int | None = 0, page_index: int | None = 0 +) -> list[T]: + # If chunk size is defined, then split list into evenly sized chunks + if page_size: + _list_objs = [ + list_objs[i : i + page_size] for i in range(0, len(list_objs), page_size) + ] + + # Return the proper slice using chunk_index + if page_index is not None: + _list_objs = _list_objs[page_index] # type: ignore + else: + _list_objs = _list_objs[0] # type: ignore + return _list_objs # type: ignore + + return list_objs + + +@serializable(canonical_name="UserService", version=1) class UserService(AbstractService): - store: DocumentStore stash: UserStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = UserStash(store=store) - @service_method(path="user.create", name="create") + @as_result(StashException) + def _add_user(self, credentials: SyftVerifyKey, user: User) -> User: + return self.stash.set( + credentials=credentials, + obj=user, + add_permissions=[ + ActionObjectPermission( + uid=user.id, permission=ActionPermission.ALL_READ + ), + ], + ).unwrap() + + def _check_if_email_exists(self, credentials: SyftVerifyKey, email: str) -> bool: + try: + self.stash.get_by_email(credentials=credentials, email=email).unwrap() + return True + except NotFoundException: + return False + + @service_method(path="user.create", name="create", autosplat="user_create") def create( self, context: AuthedServiceContext, user_create: UserCreate - ) -> UserView | SyftError: + ) -> UserView: """Create a new user""" user = user_create.to(User) - result = self.stash.get_by_email( + + user_exists = self._check_if_email_exists( credentials=context.credentials, email=user.email 
) - if result.is_err(): - return SyftError(message=str(result.err())) - user_exists = result.ok() is not None + + # TODO: Ensure we don't leak out the existence of a user if user_exists: - return SyftError(message=f"User already exists with email: {user.email}") + raise SyftException(public_message=f"User {user.email} already exists") - result = self.stash.set( - credentials=context.credentials, - user=user, - add_permissions=[ - ActionObjectPermission( - uid=user.id, permission=ActionPermission.ALL_READ - ), - ], + new_user = self._add_user(context.credentials, user).unwrap() + return new_user.to(UserView) + + def forgot_password( + self, context: UnauthedServiceContext, email: str + ) -> SyftSuccess: + success_msg = ( + "If the email is valid, we sent a password " + + "reset token to your email or a password request to the admin." ) + root_key = self.root_verify_key + + root_context = AuthedServiceContext(server=context.server, credentials=root_key) + + result = self.stash.get_by_email(credentials=root_key, email=email) + + # Isn't a valid email if result.is_err(): - return SyftError(message=str(result.err())) + return SyftSuccess(message=success_msg) user = result.ok() - return user.to(UserView) - @service_method(path="user.view", name="view") - def view( - self, context: AuthedServiceContext, uid: UID - ) -> UserView | None | SyftError: - """Get user for given uid""" - result = self.stash.get_by_uid(credentials=context.credentials, uid=uid) - if result.is_ok(): - user = result.ok() - if user is None: - return SyftError(message=f"No user exists for given: {uid}") - return user.to(UserView) + if user is None: + return SyftSuccess(message=success_msg) + + user_role = self.get_role_for_credentials(user.verify_key).unwrap() + if user_role == ServiceRole.ADMIN: + raise SyftException( + public_message="You can't request password reset for an Admin user." + ) + + # Email is valid + # Notifications disabled + # We should just sent a notification to the admin/user about password reset + # Notifications Enabled + # Instead of changing the password here, we would change it in email template generation. + link = LinkedObject.with_context(user, context=root_context) + # Notifier is active + notifier = root_context.server.services.notifier.settings( + context=root_context + ).unwrap() + notification_is_enabled = notifier.active + # Email is enabled + email_is_enabled = notifier.email_enabled + # User Preferences allow email notification + user_allow_email_notifications = user.notifications_enabled[NOTIFIERS.EMAIL] + + # This checks if the user will safely receive the email reset. + not_receive_emails = ( + not notification_is_enabled + or not email_is_enabled + or not user_allow_email_notifications + ) + + # If notifier service is not enabled. + if not_receive_emails: + message = CreateNotification( + subject="You requested password reset.", + from_user_verify_key=root_key, + to_user_verify_key=user.verify_key, + linked_obj=link, + ) + result = root_context.server.services.notification.send( + context=root_context, notification=message + ) + message = CreateNotification( + subject="User requested password reset.", + from_user_verify_key=user.verify_key, + to_user_verify_key=root_key, + linked_obj=link, + ) - return SyftError(message=str(result.err())) + result = root_context.server.services.notification.send( + context=root_context, notification=message + ) + else: + # Email notification is Enabled + # Therefore, we can directly send a message to the + # user with its new password. 
+ message = CreateNotification( + subject="You requested a password reset.", + from_user_verify_key=root_key, + to_user_verify_key=user.verify_key, + linked_obj=link, + notifier_types=[NOTIFIERS.EMAIL], + email_template=PasswordResetTemplate, + ) + result = root_context.server.services.notification.send( + context=root_context, notification=message + ) + + return SyftSuccess(message=success_msg) @service_method( - path="user.get_all", - name="get_all", - roles=DATA_OWNER_ROLE_LEVEL, + path="user.request_password_reset", + name="request_password_reset", + roles=ADMIN_ROLE_LEVEL, ) + def request_password_reset(self, context: AuthedServiceContext, uid: UID) -> str: + user = self.stash.get_by_uid(credentials=context.credentials, uid=uid).unwrap() + user_role = self.get_role_for_credentials(user.verify_key).unwrap() + + if user_role == ServiceRole.ADMIN: + raise SyftException( + public_message="You can't request password reset for an Admin user." + ) + + user.reset_token = self.generate_new_password_reset_token( + context.server.settings.pwd_token_config + ) + user.reset_token_date = datetime.now() + + self.stash.update( + credentials=context.credentials, obj=user, has_permission=True + ).unwrap() + + return user.reset_token + + def reset_password( + self, context: UnauthedServiceContext, token: str, new_password: str + ) -> SyftSuccess: + """Resets a certain user password using a temporary token.""" + root_key = self.root_verify_key + + root_context = AuthedServiceContext(server=context.server, credentials=root_key) + try: + user = self.stash.get_by_reset_token( + credentials=root_context.credentials, token=token + ).unwrap() + except NotFoundException: + raise SyftException( + public_message="Failed to reset user password. Token is invalid or expired." + ) + # + if user is None: + raise SyftException( + public_message="Failed to reset user password. Token is invalid or expired." + ) + now = datetime.now() + if user.reset_token_date is not None: + time_difference = now - user.reset_token_date + else: + raise SyftException( + public_message="Failed to reset user password. Reset Token Invalid!" + ) + + # If token expired + expiration_time = root_context.server.settings.pwd_token_config.token_exp_min + if time_difference > timedelta(seconds=expiration_time): + raise SyftException( + public_message="Failed to reset user password. Token is invalid or expired." + ) + + if not validate_password(new_password): + raise SyftException( + public_message=( + "Your new password must have at least 8 characters, an upper case " + "and lower case character; and at least one number." 
+ ) + ) + + salt, hashed = salt_and_hash_password(new_password, 12) + user.hashed_password = hashed + user.salt = salt + + user.reset_token = None + user.reset_token_date = None + + self.stash.update( + credentials=root_context.credentials, obj=user, has_permission=True + ).unwrap() + + return SyftSuccess(message="User Password updated successfully.") + + def generate_new_password_reset_token( + self, token_config: PwdTokenResetConfig + ) -> str: + valid_characters = "" + if token_config.ascii: + valid_characters += string.ascii_letters + + if token_config.numbers: + valid_characters += string.digits + + generated_token = "".join( + secrets.choice(valid_characters) for _ in range(token_config.token_len) + ) + + return generated_token + + @service_method(path="user.view", name="view", roles=DATA_SCIENTIST_ROLE_LEVEL) + def view(self, context: AuthedServiceContext, uid: UID) -> UserView: + """Get user for given uid""" + user = self.stash.get_by_uid(credentials=context.credentials, uid=uid).unwrap() + return user.to(UserView) + + @service_method(path="user.get_all", name="get_all", roles=DATA_OWNER_ROLE_LEVEL) def get_all( self, context: AuthedServiceContext, + order_by: str | None = None, + sort_order: str | None = None, page_size: int | None = 0, page_index: int | None = 0, - ) -> list[UserView] | UserViewPage | UserView | SyftError: - if context.role in [ServiceRole.DATA_OWNER, ServiceRole.ADMIN]: - result = self.stash.get_all(context.credentials, has_permission=True) - else: - result = self.stash.get_all(context.credentials) - if result.is_ok(): - results = [user.to(UserView) for user in result.ok()] - - # If chunk size is defined, then split list into evenly sized chunks - if page_size: - total = len(results) - results = [ - results[i : i + page_size] - for i in range(0, len(results), page_size) - ] - # Return the proper slice using chunk_index - if page_index is not None: - results = results[page_index] - results = UserViewPage(users=results, total=total) - - return results - - # 🟡 TODO: No user exists will happen when result.ok() is empty list - return SyftError(message="No users exists") + ) -> list[UserView]: + users = self.stash.get_all( + context.credentials, + order_by=order_by, + sort_order=sort_order, + ).unwrap() + users = [user.to(UserView) for user in users] + return _paginate(users, page_size, page_index) + + @service_method( + path="user.get_index", name="get_index", roles=DATA_OWNER_ROLE_LEVEL + ) + def get_index( + self, + context: AuthedServiceContext, + index: int, + ) -> UserView: + return ( + self.stash.get_index(credentials=context.credentials, index=index) + .unwrap() + .to(UserView) + ) + def signing_key_for_verify_key(self, verify_key: SyftVerifyKey) -> UserPrivateKey: + user = self.stash.get_by_verify_key( + credentials=self.stash.root_verify_key, verify_key=verify_key + ).unwrap() + + return user.to(UserPrivateKey) + + @as_result(SyftException) def get_role_for_credentials( self, credentials: SyftVerifyKey | SyftSigningKey - ) -> ServiceRole | None | SyftError: - # they could be different - if isinstance(credentials, SyftVerifyKey): - result = self.stash.get_by_verify_key( - credentials=credentials, verify_key=credentials - ) - else: - result = self.stash.get_by_signing_key( - credentials=credentials, signing_key=credentials - ) - if result.is_ok(): - # this seems weird that we get back None as Ok(None) - user = result.ok() - if user: - return user.role - return ServiceRole.GUEST + ) -> ServiceRole: + try: + # they could be different + # TODO: This fn is cryptic 
-- when does each situation occur? + if isinstance(credentials, SyftVerifyKey): + role = self.stash.get_role(credentials=credentials) + return role + elif isinstance(credentials, SyftSigningKey): + user = self.stash.get_by_signing_key( + credentials=credentials.verify_key, + signing_key=credentials, # type: ignore + ).unwrap() + else: + raise CredentialsError + except NotFoundException: + return ServiceRole.GUEST + + return cast(ServiceRole, user.role) @service_method(path="user.search", name="search", autosplat=["user_search"]) def search( @@ -160,100 +382,108 @@ def search( user_search: UserSearch, page_size: int | None = 0, page_index: int | None = 0, - ) -> UserViewPage | None | list[UserView] | SyftError: + ) -> list[UserView]: kwargs = user_search.to_dict(exclude_empty=True) - + kwargs.pop("created_date") + kwargs.pop("updated_date") + kwargs.pop("deleted_date") if len(kwargs) == 0: - valid_search_params = list(UserSearch.__fields__.keys()) - return SyftError( - message=f"Invalid Search parameters. \ - Allowed params: {valid_search_params}" - ) - result = self.stash.find_all(credentials=context.credentials, **kwargs) + raise SyftException(public_message="Invalid search parameters") - if result.is_err(): - return SyftError(message=str(result.err())) - users = result.ok() - results = [user.to(UserView) for user in users] if users is not None else [] - - # If page size is defined, then split list into evenly sized chunks - if page_size: - total = len(results) - results = [ - results[i : i + page_size] for i in range(0, len(results), page_size) - ] - - # Return the proper slice using page_index - if page_index is not None: - results = results[page_index] - results = UserViewPage(users=results, total=total) - - return results - - # @service_method(path="user.get_admin", name="get_admin", roles=GUEST_ROLE_LEVEL) - # def get_admin(self, context: AuthedServiceContext) -> UserView: - # result = self.stash.admin_user() - # if result.is_ok(): - # user = result.ok() - # if user: - # return user - # return SyftError(message=str(result.err())) + users = self.stash.get_all( + credentials=context.credentials, filters=kwargs + ).unwrap() + + users = [user.to(UserView) for user in users] if users is not None else [] + return _paginate(users, page_size, page_index) + + @as_result(StashException, NotFoundException) + def get_user_id_for_credentials(self, credentials: SyftVerifyKey) -> UID: + user = self.stash.get_by_verify_key( + credentials=credentials, verify_key=credentials + ).unwrap() + return cast(UID, user.id) @service_method( path="user.get_current_user", name="get_current_user", roles=GUEST_ROLE_LEVEL ) - def get_current_user(self, context: AuthedServiceContext) -> UserView | SyftError: - result = self.stash.get_by_verify_key( + def get_current_user(self, context: AuthedServiceContext) -> UserView: + user = self.stash.get_by_verify_key( credentials=context.credentials, verify_key=context.credentials - ) - if result.is_ok(): - # this seems weird that we get back None as Ok(None) - user = result.ok() - if user: - return user.to(UserView) - else: - SyftError(message="User not found!") - return SyftError(message=str(result.err())) + ).unwrap() + return user.to(UserView) + + @service_method( + path="user.get_by_verify_key", name="get_by_verify_key", roles=ADMIN_ROLE_LEVEL + ) + def get_by_verify_key_endpoint( + self, context: AuthedServiceContext, verify_key: SyftVerifyKey + ) -> UserView: + user = self.stash.get_by_verify_key( + credentials=context.credentials, verify_key=verify_key + ).unwrap() + 
return user.to(UserView) @service_method( path="user.update", name="update", roles=GUEST_ROLE_LEVEL, + autosplat="user_update", ) def update( self, context: AuthedServiceContext, uid: UID, user_update: UserUpdate - ) -> UserView | SyftError: + ) -> UserView: updates_role = user_update.role is not Empty # type: ignore[comparison-overlap] can_edit_roles = ServiceRoleCapability.CAN_EDIT_ROLES in context.capabilities() if updates_role and not can_edit_roles: - return SyftError(message=f"{context.role} is not allowed to edit roles") + raise UserPermissionError( + f"User {context.credentials} tried to update user {uid} with {user_update}." + ) + if (user_update.mock_execution_permission is not Empty) and not can_edit_roles: # type: ignore[comparison-overlap] - return SyftError( - message=f"{context.role} is not allowed to update permissions" + raise UserPermissionError( + f"User {context.credentials} with role {context.role} is not allowed" + " to update permissions." ) # Get user to be updated by its UID - result = self.stash.get_by_uid(credentials=context.credentials, uid=uid) + user = self.stash.get_by_uid(credentials=context.credentials, uid=uid).unwrap() - # check if the email already exists (with root's key) - if user_update.email is not Empty: - user_with_email_exists: bool = self.stash.email_exists( - email=user_update.email - ) - if user_with_email_exists: - raise UserAlreadyExistsException.raise_with_context(context=context) + immutable_fields = {"created_date", "updated_date", "deleted_date"} + updated_fields = user_update.to_dict( + exclude_none=True, exclude_empty=True + ).keys() - if result.is_err(): - error_msg = ( - f"Failed to find user with UID: {uid}. Error: {str(result.err())}" - ) - return SyftError(message=error_msg) + for field_name in immutable_fields: + if field_name in updated_fields: + raise SyftException( + public_message=f"You are not allowed to modify '{field_name}'." + ) - user = result.ok() + # important to prevent root admins from shooting themselves in the foot + if ( + user_update.role is not Empty # type: ignore + and user.verify_key == context.server.verify_key + ): + raise SyftException(public_message="Cannot update root role") - if user is None: - return SyftError(message=f"No user exists for given UID: {uid}") + if ( + user_update.verify_key is not Empty + and user.verify_key == context.server.verify_key + ): + raise SyftException(public_message="Cannot update root verify key") + + if user_update.name is not Empty and user_update.name.strip() == "": # type: ignore[comparison-overlap] + raise SyftException(public_message="Name can't be an empty string.") + + # check if the email already exists (with root's key) + if user_update.email is not Empty: + user_exists = self.stash.email_exists(email=user_update.email).unwrap() + if user_exists: + raise UserUpdateError( + public_message=f"User {user_update.email} already exists" + ) if updates_role: if context.role == ServiceRole.ADMIN: @@ -261,29 +491,31 @@ def update( pass elif ( context.role == ServiceRole.DATA_OWNER - and context.role > user.role - and context.role > user_update.role + and user.role is not None + and context.role.value > user.role.value + and context.role.value > user_update.role.value ): # as a data owner, only update lower roles to < data owner pass else: - return SyftError( - message=f"As a {context.role}, you are not allowed to edit {user.role} to {user_update.role}" + raise UserPermissionError( + f"User {context.credentials} tried to update user {uid}" + f" with {user_update}." 
) edits_non_role_attrs = any( getattr(user_update, attr) is not Empty - for attr in user_update.dict() - if attr != "role" + for attr in user_update.to_dict() + if attr not in ["role", "created_date", "updated_date", "deleted_date"] ) - if ( edits_non_role_attrs and user.verify_key != context.credentials and ServiceRoleCapability.CAN_MANAGE_USERS not in context.capabilities() ): - return SyftError( - message=f"As a {context.role}, you are not allowed to edit users" + raise UserPermissionError( + f"User {context.credentials} tried to update user {uid}" + f" with {user_update}." ) # Fill User Update fields that will not be changed by replacing it @@ -296,175 +528,137 @@ def update( elif not name.startswith("__") and value is not None: setattr(user, name, value) - result = self.stash.update( - credentials=context.credentials, user=user, has_permission=True - ) - - if result.is_err(): - error_msg = ( - f"Failed to update user with UID: {uid}. Error: {str(result.err())}" - ) - return SyftError(message=error_msg) + user = self.stash.update( + credentials=context.credentials, obj=user, has_permission=True + ).unwrap() - user = result.ok() if user.role == ServiceRole.ADMIN: - settings_stash = SettingsStash(store=self.store) - settings = settings_stash.get_all(context.credentials) - if settings.is_ok() and len(settings.ok()) > 0: - settings_data = settings.ok()[0] + settings_stash = SettingsStash(store=self.stash.db) + settings = settings_stash.get_all( + context.credentials, limit=1, sort_order="desc" + ).unwrap() + + # TODO: Chance to refactor here in settings, as we're always doing get_att[0] + if len(settings) > 0: + settings_data = settings[0] settings_data.admin_email = user.email settings_stash.update( - credentials=context.credentials, settings=settings_data + credentials=context.credentials, obj=settings_data ) return user.to(UserView) - def get_target_object( - self, credentials: SyftVerifyKey, uid: UID - ) -> User | SyftError: - user_result = self.stash.get_by_uid(credentials=credentials, uid=uid) - if user_result.is_err(): - return SyftError(message=str(user_result.err())) - user = user_result.ok() - if user is None: - return SyftError(message=f"No user exists for given id: {uid}") - else: - return user - @service_method(path="user.delete", name="delete", roles=GUEST_ROLE_LEVEL) - def delete(self, context: AuthedServiceContext, uid: UID) -> bool | SyftError: - # third party - user = self.get_target_object(context.credentials, uid) - if isinstance(user, SyftError): - return user - - permission_error = SyftError( - message=str( - f"As a {context.role} you have no permission to delete user with {user.role} permission" + def delete(self, context: AuthedServiceContext, uid: UID) -> UID: + user_to_delete = self.stash.get_by_uid( + credentials=context.credentials, uid=uid + ).unwrap() + + # Cannot delete root user + if user_to_delete.verify_key == self.root_verify_key: + raise UserPermissionError( + private_message=f"User {context.credentials} attempted to delete root user." 
) - ) - if context.role == ServiceRole.DATA_OWNER and user.role in [ - ServiceRole.GUEST, - ServiceRole.DATA_SCIENTIST, - ]: - pass - elif context.role == ServiceRole.ADMIN: - pass - else: - return permission_error - result = self.stash.delete_by_uid( - credentials=context.credentials, uid=uid, has_permission=True + # - Admins can delete any user + # - Data Owners can delete Data Scientists and Guests + has_delete_permissions = ( + context.role == ServiceRole.ADMIN + or context.role == ServiceRole.DATA_OWNER + and user_to_delete.role in [ServiceRole.GUEST, ServiceRole.DATA_SCIENTIST] ) - if result.is_err(): - return SyftError(message=str(result.err())) - # TODO: Remove notifications for the deleted user + if not has_delete_permissions: + raise UserPermissionError( + private_message=( + f"User {context.credentials} ({context.role}) tried to delete user " + f"{uid} ({user_to_delete.role})" + ) + ) - return result.ok() + # TODO: Remove notifications for the deleted user + return self.stash.delete_by_uid( + credentials=context.credentials, uid=uid + ).unwrap() - def exchange_credentials( - self, context: UnauthedServiceContext - ) -> UserLoginCredentials | SyftError: + def exchange_credentials(self, context: UnauthedServiceContext) -> SyftSuccess: """Verify user TODO: We might want to use a SyftObject instead """ - result = self.stash.get_by_email( - credentials=self.admin_verify_key(), email=context.login_credentials.email - ) - if result.is_ok(): - user = result.ok() - if user is not None and check_pwd( - context.login_credentials.password, - user.hashed_password, - ): - if ( - context.node - and context.node.node_type == NodeType.ENCLAVE - and user.role == ServiceRole.ADMIN - ): - return SyftError( - message="Admins are not allowed to login to Enclaves." - "\n Kindly register a new data scientist account by your_client.register." - ) - return user.to(UserPrivateKey) - - return SyftError( - message="No user exists with " - f"{context.login_credentials.email} and supplied password." 
- ) - return SyftError( - message="Failed to retrieve user with " - f"{context.login_credentials.email} with error: {result.err()}" - ) + if context.login_credentials is None: + raise SyftException(public_message="Invalid login credentials") - def admin_verify_key(self) -> SyftVerifyKey | SyftError: - try: - result = self.stash.admin_verify_key() - if result.is_ok(): - return result.ok() - else: - return SyftError(message="failed to get admin verify_key") + user = self.stash.get_by_email( + credentials=self.root_verify_key, email=context.login_credentials.email + ).unwrap() + + if check_pwd(context.login_credentials.password, user.hashed_password): + if ( + context.server + and context.server.server_type == ServerType.ENCLAVE + and user.role == ServiceRole.ADMIN + ): + # FIX: Replace with SyftException + raise SyftException( + public_message=UserEnclaveAdminLoginError.public_message + ) + else: + # FIX: Replace this below + raise SyftException(public_message=CredentialsError.public_message) + + return SyftSuccess(message="Login successful.", value=user.to(UserPrivateKey)) - except Exception as e: - return SyftError(message=str(e)) + @property + def root_verify_key(self) -> SyftVerifyKey: + return self.stash.root_verify_key def register( - self, context: NodeServiceContext, new_user: UserCreate - ) -> tuple[SyftSuccess, UserPrivateKey] | SyftError: + self, context: ServerServiceContext, new_user: UserCreate + ) -> SyftSuccess: """Register new user""" - context.node = cast(AbstractNode, context.node) + # this method handles errors in a slightly different way as it is directly called instead of + # going through Server.handle_message request_user_role = ( ServiceRole.GUEST if new_user.created_by is None - else self.get_role_for_credentials(new_user.created_by) + else self.get_role_for_credentials(new_user.created_by).unwrap() ) can_user_register = ( - context.node.settings.signup_enabled + context.server.settings.signup_enabled or request_user_role in DATA_OWNER_ROLE_LEVEL ) if not can_user_register: - return SyftError( - message=f"You don't have permission to create an account " - f"on the domain: {context.node.name}. Please contact the Domain Owner." + raise SyftException( + public_message="You have no permission to create an account. Please contact the Datasite owner." ) user = new_user.to(User) - result = self.stash.get_by_email(credentials=user.verify_key, email=user.email) - if result.is_err(): - return SyftError(message=str(result.err())) - user_exists = result.ok() is not None - if user_exists: - return SyftError(message=f"User already exists with email: {user.email}") - - result = self.stash.set( - credentials=user.verify_key, - user=user, - add_permissions=[ - ActionObjectPermission( - uid=user.id, permission=ActionPermission.ALL_READ - ), - ], + user_exists = self._check_if_email_exists( + credentials=user.verify_key, email=user.email ) - if result.is_err(): - return SyftError(message=str(result.err())) - user = result.ok() + if user_exists: + raise SyftException(public_message=f"User {user.email} already exists") + user = self._add_user(credentials=user.verify_key, user=user).unwrap( + public_message=f"Failed to create user {user.email}" + ) success_message = f"User '{user.name}' successfully registered!" 
# Notification Step - root_key = self.admin_verify_key() - root_context = AuthedServiceContext(node=context.node, credentials=root_key) + root_key = self.root_verify_key + root_context = AuthedServiceContext(server=context.server, credentials=root_key) link = None + if new_user.created_by: link = LinkedObject.with_context(user, context=root_context) + message = CreateNotification( subject=success_message, from_user_verify_key=root_key, @@ -473,84 +667,65 @@ def register( notifier_types=[NOTIFIERS.EMAIL], email_template=OnBoardEmailTemplate, ) - - method = context.node.get_service_method(NotificationService.send) - result = method(context=root_context, notification=message) + context.server.services.notification.send( + context=root_context, notification=message + ) if request_user_role in DATA_OWNER_ROLE_LEVEL: success_message += " To see users, run `[your_client].users`" - # TODO: Add a notifications for the new user - - msg = SyftSuccess(message=success_message) - return (msg, user.to(UserPrivateKey)) + return SyftSuccess(message=success_message, value=user.to(UserPrivateKey)) - def user_verify_key(self, email: str) -> SyftVerifyKey | SyftError: + @as_result(StashException) + def user_verify_key(self, email: str) -> SyftVerifyKey: # we are bypassing permissions here, so dont use to return a result directly to the user - credentials = self.admin_verify_key() - result = self.stash.get_by_email(credentials=credentials, email=email) - if result.ok() is not None: - return result.ok().verify_key - return SyftError(message=f"No user with email: {email}") - - def get_by_verify_key(self, verify_key: SyftVerifyKey) -> UserView | SyftError: + credentials = self.root_verify_key + user = self.stash.get_by_email(credentials=credentials, email=email).unwrap() + if user.verify_key is None: + raise UserError(f"User {email} has no verify key") + return user.verify_key + + @as_result(StashException) + def get_by_verify_key(self, verify_key: SyftVerifyKey) -> UserView: # we are bypassing permissions here, so dont use to return a result directly to the user - credentials = self.admin_verify_key() - result = self.stash.get_by_verify_key( + credentials = self.root_verify_key + user = self.stash.get_by_verify_key( credentials=credentials, verify_key=verify_key - ) - if result.is_ok(): - return result.ok() - return SyftError(message=f"No User with verify_key: {verify_key}") - - # TODO: This exposed service is only for the development phase. 
- # enable/disable notifications will be called from Notifier Service + ).unwrap() + return user.to(UserView) + @as_result(StashException) def _set_notification_status( self, notifier_type: NOTIFIERS, new_status: bool, verify_key: SyftVerifyKey, - ) -> SyftError | None: - result = self.stash.get_by_verify_key( + ) -> None: + user = self.stash.get_by_verify_key( credentials=verify_key, verify_key=verify_key - ) - if result.is_ok(): - # this seems weird that we get back None as Ok(None) - user = result.ok() - else: - return SyftError(message=str(result.err())) - + ).unwrap() user.notifications_enabled[notifier_type] = new_status + self.stash.update(credentials=user.verify_key, obj=user).unwrap() - result = self.stash.update( - credentials=user.verify_key, - user=user, - ) - if result.is_err(): - return SyftError(message=str(result.err())) - else: - return None - + @as_result(SyftException) def enable_notifications( self, context: AuthedServiceContext, notifier_type: NOTIFIERS - ) -> SyftSuccess | SyftError: - result = self._set_notification_status(notifier_type, True, context.credentials) - if result is not None: - return result - else: - return SyftSuccess(message="Notifications enabled successfully!") + ) -> SyftSuccess: + self._set_notification_status( + notifier_type=notifier_type, new_status=True, verify_key=context.credentials + ).unwrap() + return SyftSuccess(message="Notifications enabled successfully!") def disable_notifications( self, context: AuthedServiceContext, notifier_type: NOTIFIERS - ) -> SyftSuccess | SyftError: - result = self._set_notification_status( - notifier_type, False, context.credentials - ) - if result is not None: - return result - else: - return SyftSuccess(message="Notifications disabled successfully!") + ) -> SyftSuccess: + self._set_notification_status( + notifier_type=notifier_type, + new_status=False, + verify_key=context.credentials, + ).unwrap() + + return SyftSuccess(message="Notifications disabled successfully!") TYPE_TO_SERVICE[User] = UserService diff --git a/packages/syft/src/syft/service/user/user_stash.py b/packages/syft/src/syft/service/user/user_stash.py index 3bc8ed2dcfe..92fb87d37b3 100644 --- a/packages/syft/src/syft/service/user/user_stash.py +++ b/packages/syft/src/syft/service/user/user_stash.py @@ -1,132 +1,91 @@ -# stdlib - -# third party -from result import Ok -from result import Result - # relative -from ...node.credentials import SyftSigningKey -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ...store.document_store import UIDPartitionKey -from ...types.uid import UID -from ...util.telemetry import instrument -from ..action.action_permissions import ActionObjectPermission -from ..response import SyftSuccess +from ...server.credentials import SyftSigningKey +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.result import as_result from .user import User from .user_roles import ServiceRole -# 🟡 TODO 27: it would be nice if these could be defined closer to the User -EmailPartitionKey = PartitionKey(key="email", type_=str) -RolePartitionKey = PartitionKey(key="role", 
type_=ServiceRole) -SigningKeyPartitionKey = PartitionKey(key="signing_key", type_=SyftSigningKey) -VerifyKeyPartitionKey = PartitionKey(key="verify_key", type_=SyftVerifyKey) - - -@instrument -@serializable() -class UserStash(BaseStash): - object_type = User - settings: PartitionSettings = PartitionSettings( - name=User.__canonical_name__, - object_type=User, - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) - def set( - self, - credentials: SyftVerifyKey, - user: User, - add_permissions: list[ActionObjectPermission] | None = None, - add_storage_permission: bool = True, - ignore_duplicates: bool = False, - ) -> Result[User, str]: - res = self.check_type(user, self.object_type) - # we dont use and_then logic here as it is hard because of the order of the arguments - if res.is_err(): - return res - return super().set( - credentials=credentials, - obj=res.ok(), - add_permissions=add_permissions, - ignore_duplicates=ignore_duplicates, - add_storage_permission=add_storage_permission, - ) - - def admin_verify_key(self) -> Result[SyftVerifyKey | None, str]: - return Ok(self.partition.root_verify_key) - - def admin_user(self) -> Result[User | None, str]: +@serializable(canonical_name="UserStashSQL", version=1) +class UserStash(ObjectStash[User]): + @as_result(StashException, NotFoundException) + def admin_user(self) -> User: + # TODO: This returns only one user, the first user with the role ADMIN + admin_credentials = self.root_verify_key return self.get_by_role( - credentials=self.admin_verify_key().ok(), role=ServiceRole.ADMIN - ) + credentials=admin_credentials, role=ServiceRole.ADMIN + ).unwrap() - def get_by_uid( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[User | None, str]: - qks = QueryKeys(qks=[UIDPartitionKey.with_obj(uid)]) - return self.query_one(credentials=credentials, qks=qks) + @as_result(StashException, NotFoundException) + def get_by_reset_token(self, credentials: SyftVerifyKey, token: str) -> User: + return self.get_one( + credentials=credentials, + filters={"reset_token": token}, + ).unwrap() - def get_by_email( - self, credentials: SyftVerifyKey, email: str - ) -> Result[User | None, str]: - qks = QueryKeys(qks=[EmailPartitionKey.with_obj(email)]) - return self.query_one(credentials=credentials, qks=qks) + @as_result(StashException, NotFoundException) + def get_by_email(self, credentials: SyftVerifyKey, email: str) -> User: + return self.get_one( + credentials=credentials, + filters={"email": email}, + ).unwrap() + @as_result(StashException) def email_exists(self, email: str) -> bool: - res = self.get_by_email(credentials=self.admin_verify_key().ok(), email=email) - if res.ok() is None: - return False - else: + try: + self.get_by_email(credentials=self.root_verify_key, email=email).unwrap() return True + except NotFoundException: + return False - def get_by_role( - self, credentials: SyftVerifyKey, role: ServiceRole - ) -> Result[User | None, str]: - qks = QueryKeys(qks=[RolePartitionKey.with_obj(role)]) - return self.query_one(credentials=credentials, qks=qks) + @as_result(StashException) + def verify_key_exists(self, verify_key: SyftVerifyKey) -> bool: + try: + self.get_by_verify_key( + credentials=self.root_verify_key, verify_key=verify_key + ).unwrap() + return True + except NotFoundException: + return False + @as_result(StashException, NotFoundException) + def get_by_role(self, credentials: SyftVerifyKey, role: ServiceRole) -> User: + try: + return self.get_one( + credentials=credentials, + filters={"role": role}, + 
).unwrap() + except NotFoundException as exc: + private_msg = f"User with role {role} not found" + raise NotFoundException.from_exception(exc, private_message=private_msg) + + @as_result(StashException, NotFoundException) def get_by_signing_key( - self, credentials: SyftVerifyKey, signing_key: SyftSigningKey - ) -> Result[User | None, str]: - if isinstance(signing_key, str): - signing_key = SyftSigningKey.from_string(signing_key) - qks = QueryKeys(qks=[SigningKeyPartitionKey.with_obj(signing_key)]) - return self.query_one(credentials=credentials, qks=qks) - + self, credentials: SyftVerifyKey, signing_key: SyftSigningKey | str + ) -> User: + try: + return self.get_one( + credentials=credentials, + filters={"signing_key": signing_key}, + ).unwrap() + except NotFoundException as exc: + private_msg = f"User with signing key {signing_key} not found" + raise NotFoundException.from_exception(exc, private_message=private_msg) + + @as_result(StashException, NotFoundException) def get_by_verify_key( self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey - ) -> Result[User | None, str]: - if isinstance(verify_key, str): - verify_key = SyftVerifyKey.from_string(verify_key) - qks = QueryKeys(qks=[VerifyKeyPartitionKey.with_obj(verify_key)]) - return self.query_one(credentials=credentials, qks=qks) - - def delete_by_uid( - self, credentials: SyftVerifyKey, uid: UID, has_permission: bool = False - ) -> Result[SyftSuccess, str]: - qk = UIDPartitionKey.with_obj(uid) - result = super().delete( - credentials=credentials, qk=qk, has_permission=has_permission - ) - if result.is_ok(): - return Ok(SyftSuccess(message=f"ID: {uid} deleted")) - return result - - def update( - self, credentials: SyftVerifyKey, user: User, has_permission: bool = False - ) -> Result[User, str]: - res = self.check_type(user, self.object_type) - # we dont use and_then logic here as it is hard because of the order of the arguments - if res.is_err(): - return res - return super().update( - credentials=credentials, obj=res.ok(), has_permission=has_permission - ) + ) -> User: + try: + return self.get_one( + credentials=credentials, + filters={"verify_key": verify_key}, + ).unwrap() + + except NotFoundException as exc: + private_msg = f"User with verify key {verify_key} not found" + raise NotFoundException.from_exception(exc, private_message=private_msg) diff --git a/packages/syft/src/syft/service/user/utils.py b/packages/syft/src/syft/service/user/utils.py new file mode 100644 index 00000000000..191fc4fe181 --- /dev/null +++ b/packages/syft/src/syft/service/user/utils.py @@ -0,0 +1,63 @@ +# stdlib +import logging + +# relative +from ...abstract_server import AbstractServer +from .user import User +from .user import UserCreate +from .user_roles import ServiceRole + +logger = logging.getLogger(__name__) + + +def create_root_admin_if_not_exists( + name: str, + email: str, + password: str, + server: AbstractServer, +) -> User | None: + """ + If no root admin exists: + - all exists checks on the user stash will fail, as we cannot get the role for the admin to check if it exists + - result: a new admin is always created + + If a root admin exists with a different email: + - cause: DEFAULT_USER_EMAIL env variable is set to a different email than the root admin in the db + - verify_key_exists will return True + - result: no new admin is created, as the server already has a root admin + """ + user_stash = server.services.user.stash + + email_exists = user_stash.email_exists(email=email).unwrap() + if email_exists: + logger.debug("Admin not 
created, a user with this email already exists") + return None + + verify_key_exists = user_stash.verify_key_exists(server.verify_key).unwrap() + if verify_key_exists: + logger.debug("Admin not created, this server already has a root admin") + return None + + create_user = UserCreate( + name=name, + email=email, + password=password, + password_verify=password, + role=ServiceRole.ADMIN, + ) + + # New User Initialization + # 🟡 TODO: change later but for now this gives the main user super user automatically + user = create_user.to(User) + user.signing_key = server.signing_key + user.verify_key = server.verify_key + + new_user = user_stash.set( + credentials=server.verify_key, + obj=user, + ignore_duplicates=False, + ).unwrap() + + logger.debug(f"Created admin {new_user.email}") + + return new_user diff --git a/packages/syft/src/syft/service/veilid/__init__.py b/packages/syft/src/syft/service/veilid/__init__.py deleted file mode 100644 index 93f60cd6213..00000000000 --- a/packages/syft/src/syft/service/veilid/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# stdlib -import os -from typing import Any - -# relative -from ...util.util import str_to_bool - -VEILID_ENABLED: bool = str_to_bool(os.environ.get("VEILID_ENABLED", "False")) - - -# Any because circular import -def VeilidServiceProvider(*args: Any, **kwargs: Any) -> Any | None: - if VEILID_ENABLED: - # relative - from .veilid_service import VeilidService - - return VeilidService(*args, **kwargs) - return None diff --git a/packages/syft/src/syft/service/veilid/veilid_endpoints.py b/packages/syft/src/syft/service/veilid/veilid_endpoints.py deleted file mode 100644 index 0e37226dd27..00000000000 --- a/packages/syft/src/syft/service/veilid/veilid_endpoints.py +++ /dev/null @@ -1,8 +0,0 @@ -VEILID_SERVICE_URL = "http://veilid:80" -# Service name of our traefik service -# TODO: Remove this once when we remove reverse proxy in Veilid Connection -VEILID_SYFT_PROXY_URL = "http://proxy:80" -HEALTHCHECK_ENDPOINT = "/healthcheck" -GEN_VLD_KEY_ENDPOINT = "/generate_vld_key" -RET_VLD_KEY_ENDPOINT = "/retrieve_vld_key" -VEILID_PROXY_PATH = "/proxy" diff --git a/packages/syft/src/syft/service/veilid/veilid_service.py b/packages/syft/src/syft/service/veilid/veilid_service.py deleted file mode 100644 index 3fbcd064291..00000000000 --- a/packages/syft/src/syft/service/veilid/veilid_service.py +++ /dev/null @@ -1,92 +0,0 @@ -# stdlib -from collections.abc import Callable - -# third party -import requests - -# relative -from ...serde.serializable import serializable -from ...store.document_store import DocumentStore -from ...util.telemetry import instrument -from ..context import AuthedServiceContext -from ..network.routes import VeilidNodeRoute -from ..response import SyftError -from ..response import SyftSuccess -from ..service import AbstractService -from ..service import service_method -from ..user.user_roles import DATA_OWNER_ROLE_LEVEL -from .veilid_endpoints import GEN_VLD_KEY_ENDPOINT -from .veilid_endpoints import HEALTHCHECK_ENDPOINT -from .veilid_endpoints import RET_VLD_KEY_ENDPOINT -from .veilid_endpoints import VEILID_SERVICE_URL - - -@instrument -@serializable() -class VeilidService(AbstractService): - store: DocumentStore - - def __init__(self, store: DocumentStore) -> None: - self.store = store - - def perform_request( - self, method: Callable, endpoint: str, raw: bool = False - ) -> SyftSuccess | SyftError | str: - try: - response = method(f"{VEILID_SERVICE_URL}{endpoint}") - response.raise_for_status() - message = response.json().get("message") 
- return message if raw else SyftSuccess(message=message) - except requests.HTTPError: - return SyftError(message=f"{response.json()['detail']}") - except requests.RequestException as e: - return SyftError(message=f"Failed to perform request. {e}") - - def is_veilid_service_healthy(self) -> bool: - res = self.perform_request( - method=requests.get, endpoint=HEALTHCHECK_ENDPOINT, raw=True - ) - return res == "OK" - - @service_method( - path="veilid.generate_vld_key", - name="generate_vld_key", - roles=DATA_OWNER_ROLE_LEVEL, - ) - def generate_vld_key(self, context: AuthedServiceContext) -> str | SyftError: - if not self.is_veilid_service_healthy(): - return SyftError( - message="Veilid service is not healthy. Please try again later." - ) - return self.perform_request( - method=requests.post, - endpoint=GEN_VLD_KEY_ENDPOINT, - ) - - @service_method( - path="veilid.retrieve_vld_key", - name="retrieve_vld_key", - roles=DATA_OWNER_ROLE_LEVEL, - ) - def retrieve_vld_key(self, context: AuthedServiceContext) -> str | SyftError: - if not self.is_veilid_service_healthy(): - return SyftError( - message="Veilid service is not healthy. Please try again later." - ) - return self.perform_request( - method=requests.get, - endpoint=RET_VLD_KEY_ENDPOINT, - raw=True, - ) - - @service_method( - path="veilid.get_veilid_route", - name="get_veilid_route", - ) - def get_veilid_route( - self, context: AuthedServiceContext - ) -> VeilidNodeRoute | SyftError: - vld_key = self.retrieve_vld_key(context) - if isinstance(vld_key, SyftError): - return vld_key - return VeilidNodeRoute(vld_key=vld_key) diff --git a/packages/syft/src/syft/service/warnings.py b/packages/syft/src/syft/service/warnings.py index 36d8cf8a651..c028ca81f68 100644 --- a/packages/syft/src/syft/service/warnings.py +++ b/packages/syft/src/syft/service/warnings.py @@ -8,11 +8,11 @@ from typing_extensions import Self # relative -from ..abstract_node import AbstractNode -from ..abstract_node import NodeSideType -from ..abstract_node import NodeType -from ..node.credentials import SyftCredentials +from ..abstract_server import AbstractServer +from ..abstract_server import ServerSideType +from ..abstract_server import ServerType from ..serde.serializable import serializable +from ..server.credentials import SyftCredentials from ..types.base import SyftBaseModel from ..types.syft_object import Context from .user.user_roles import ServiceRole @@ -21,12 +21,12 @@ class WarningContext( Context, ): - node: AbstractNode | None = None + server: AbstractServer | None = None credentials: SyftCredentials | None = None role: ServiceRole -@serializable() +@serializable(canonical_name="APIEndpointWarning", version=1) class APIEndpointWarning(SyftBaseModel): confirmation: bool = False message: str | None = None @@ -68,33 +68,33 @@ def show(self) -> bool: return True -@serializable() +@serializable(canonical_name="CRUDWarning", version=1) class CRUDWarning(APIEndpointWarning): def message_from(self, context: WarningContext | None = None) -> Self: message = None confirmation = self.confirmation if context is not None: - node = context.node - if node is not None: - node_side_type = cast(NodeSideType, node.node_side_type) - node_type = node.node_type + server = context.server + if server is not None: + server_side_type = cast(ServerSideType, server.server_side_type) + server_type = server.server_type _msg = ( "which could host datasets with private information." 
- if node_side_type.value == NodeSideType.HIGH_SIDE.value + if server_side_type.value == ServerSideType.HIGH_SIDE.value else "which only hosts mock or synthetic data." ) - if node_type is not None: + if server_type is not None: message = ( "You're performing an operation on " - f"{node_side_type.value} side {node_type.value}, {_msg}" + f"{server_side_type.value} side {server_type.value}, {_msg}" ) - confirmation = node_side_type.value == NodeSideType.HIGH_SIDE.value + confirmation = server_side_type.value == ServerSideType.HIGH_SIDE.value return CRUDWarning(confirmation=confirmation, message=message) -@serializable() +@serializable(canonical_name="CRUDReminder", version=1) class CRUDReminder(CRUDWarning): confirmation: bool = False @@ -102,59 +102,59 @@ def message_from(self, context: WarningContext | None = None) -> Self: message = None confirmation = self.confirmation if context is not None: - node = context.node - if node is not None: - node_side_type = cast(NodeSideType, node.node_side_type) - node_type = node.node_type + server = context.server + if server is not None: + server_side_type = cast(ServerSideType, server.server_side_type) + server_type = server.server_type _msg = ( "which could host datasets with private information." - if node_side_type.value == NodeSideType.HIGH_SIDE.value + if server_side_type.value == ServerSideType.HIGH_SIDE.value else "which only hosts mock or synthetic data." ) - if node_type is not None: + if server_type is not None: message = ( "You're performing an operation on " - f"{node_side_type.value} side {node_type.value}, {_msg}" + f"{server_side_type.value} side {server_type.value}, {_msg}" ) return CRUDReminder(confirmation=confirmation, message=message) -@serializable() +@serializable(canonical_name="LowSideCRUDWarning", version=1) class LowSideCRUDWarning(APIEndpointWarning): def message_from(self, context: WarningContext | None = None) -> Self: confirmation = self.confirmation message = None if context is not None: - node = context.node - if node is not None: - node_side_type = cast(NodeSideType, node.node_side_type) - node_type = cast(NodeType, node.node_type) - if node_side_type.value == NodeSideType.LOW_SIDE.value: + server = context.server + if server is not None: + server_side_type = cast(ServerSideType, server.server_side_type) + server_type = cast(ServerType, server.server_type) + if server_side_type.value == ServerSideType.LOW_SIDE.value: message = ( "You're performing an operation on " - f"{node_side_type.value} side {node_type.value} " + f"{server_side_type.value} side {server_type.value} " "which only hosts mock or synthetic data." 
) return LowSideCRUDWarning(confirmation=confirmation, message=message) -@serializable() +@serializable(canonical_name="HighSideCRUDWarning", version=1) class HighSideCRUDWarning(APIEndpointWarning): def message_from(self, context: WarningContext | None = None) -> Self: confirmation = self.confirmation message = None if context is not None: - node = context.node - if node is not None: - node_side_type = cast(NodeSideType, node.node_side_type) - node_type = cast(NodeType, node.node_type) - if node_side_type.value == NodeSideType.HIGH_SIDE.value: + server = context.server + if server is not None: + server_side_type = cast(ServerSideType, server.server_side_type) + server_type = cast(ServerType, server.server_type) + if server_side_type.value == ServerSideType.HIGH_SIDE.value: message = ( "You're performing an operation on " - f"{node_side_type.value} side {node_type.value} " + f"{server_side_type.value} side {server_type.value} " "which could host datasets with private information." ) diff --git a/packages/syft/src/syft/service/worker/image_identifier.py b/packages/syft/src/syft/service/worker/image_identifier.py index 38025752710..85ea2414244 100644 --- a/packages/syft/src/syft/service/worker/image_identifier.py +++ b/packages/syft/src/syft/service/worker/image_identifier.py @@ -10,7 +10,7 @@ from .image_registry import SyftImageRegistry -@serializable() +@serializable(canonical_name="SyftWorkerImageIdentifier", version=1) class SyftWorkerImageIdentifier(SyftBaseModel): """ Class to identify syft worker images. diff --git a/packages/syft/src/syft/service/worker/image_registry.py b/packages/syft/src/syft/service/worker/image_registry.py index e96af35e372..2f7d52fb2c8 100644 --- a/packages/syft/src/syft/service/worker/image_registry.py +++ b/packages/syft/src/syft/service/worker/image_registry.py @@ -1,6 +1,5 @@ # stdlib import re -from urllib.parse import urlparse # third party from pydantic import field_validator @@ -8,24 +7,27 @@ # relative from ...serde.serializable import serializable -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject -from ...types.uid import UID -REGX_DOMAIN = re.compile(r"^(localhost|([a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*))(\:\d{1,5})?$") +# Checks for +# - localhost:[port] +# - (sub.)*.name.tld +# - (sub.)*.name.tld:[port] +REGX_DATASITE = re.compile( + r"^(localhost|([a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*))(\:\d{1,5})?" +) @serializable() class SyftImageRegistry(SyftObject): __canonical_name__ = "SyftImageRegistry" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __attr_searchable__ = ["url"] __attr_unique__ = ["url"] - __repr_attrs__ = ["url"] - id: UID url: str @field_validator("url") @@ -34,20 +36,14 @@ def validate_url(cls, val: str) -> str: if not val: raise ValueError("Invalid Registry URL. Must not be empty") - if not bool(re.match(REGX_DOMAIN, val)): - raise ValueError("Invalid Registry URL. Must be a valid domain.") + if not bool(re.match(REGX_DATASITE, val)): + raise ValueError("Invalid Registry URL. 
Must be a valid datasite.") return val @classmethod def from_url(cls, full_str: str) -> Self: - # this is only for urlparse - if "://" not in full_str: - full_str = f"http://{full_str}" - parsed = urlparse(full_str) - - # netloc includes the host & port, so local dev should work as expected - return cls(id=UID(), url=parsed.netloc) + return cls(url=full_str) def __hash__(self) -> int: return hash(self.url + str(self.tls_enabled)) diff --git a/packages/syft/src/syft/service/worker/image_registry_service.py b/packages/syft/src/syft/service/worker/image_registry_service.py index 00963f629bb..f87e9818027 100644 --- a/packages/syft/src/syft/service/worker/image_registry_service.py +++ b/packages/syft/src/syft/service/worker/image_registry_service.py @@ -2,10 +2,10 @@ # relative from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager +from ...types.errors import SyftException from ...types.uid import UID from ..context import AuthedServiceContext -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES @@ -18,37 +18,33 @@ __all__ = ["SyftImageRegistryService"] -@serializable() +@serializable(canonical_name="SyftImageRegistryService", version=1) class SyftImageRegistryService(AbstractService): - store: DocumentStore stash: SyftImageRegistryStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = SyftImageRegistryStash(store=store) @service_method( path="image_registry.add", name="add", roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, ) def add( self, context: AuthedServiceContext, url: str, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: try: registry = SyftImageRegistry.from_url(url) except Exception as e: - return SyftError(message=f"Failed to create registry. {e}") - - res = self.stash.set(context.credentials, registry) - - if res.is_err(): - return SyftError(message=f"Failed to create registry. {res.err()}") + raise SyftException(public_message=f"Failed to create registry. {e}") + stored_registry = self.stash.set(context.credentials, registry).unwrap() return SyftSuccess( - message=f"Image Registry ID: {registry.id} created successfully" + message=f"Image Registry ID: {registry.id} created successfully", + value=stored_registry, ) @service_method( @@ -61,28 +57,24 @@ def delete( context: AuthedServiceContext, uid: UID | None = None, url: str | None = None, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: # TODO - we need to make sure that there are no workers running an image bound to this registry # if url is provided, get uid from url if url: - res = self.stash.delete_by_url(context.credentials, url) - if res.is_err(): - return SyftError(message=res.err()) + self.stash.delete_by_url(context.credentials, url).unwrap() return SyftSuccess( message=f"Image Registry URL: {url} successfully deleted." ) # if uid is provided, delete by uid if uid: - res = self.stash.delete_by_uid(context.credentials, uid) - if res.is_err(): - return SyftError(message=res.err()) + self.stash.delete_by_uid(context.credentials, uid).unwrap() return SyftSuccess( message=f"Image Registry ID: {uid} successfully deleted." 
) else: - return SyftError(message="Either UID or URL must be provided.") + raise SyftException(message="Either UID or URL must be provided.") @service_method( path="image_registry.get_all", @@ -92,24 +84,16 @@ def delete( def get_all( self, context: AuthedServiceContext, - ) -> list[SyftImageRegistry] | SyftError: - result = self.stash.get_all(context.credentials) - if result.is_err(): - return SyftError(message=result.err()) - return result + ) -> list[SyftImageRegistry]: + return self.stash.get_all(context.credentials).unwrap() @service_method( path="image_registry.get_by_id", name="get_by_id", roles=DATA_OWNER_ROLE_LEVEL, ) - def get_by_id( - self, context: AuthedServiceContext, uid: UID - ) -> SyftImageRegistry | SyftError: - result = self.stash.get_by_uid(context.credentials, uid) - if result.is_err(): - return SyftError(message=result.err()) - return result + def get_by_id(self, context: AuthedServiceContext, uid: UID) -> SyftImageRegistry: + return self.stash.get_by_uid(context.credentials, uid).unwrap() TYPE_TO_SERVICE[SyftImageRegistry] = SyftImageRegistryService diff --git a/packages/syft/src/syft/service/worker/image_registry_stash.py b/packages/syft/src/syft/service/worker/image_registry_stash.py index b60aa7374e2..cfb71b9848b 100644 --- a/packages/syft/src/syft/service/worker/image_registry_stash.py +++ b/packages/syft/src/syft/service/worker/image_registry_stash.py @@ -1,50 +1,34 @@ # stdlib - -# third party -from result import Ok -from result import Result +from typing import Literal # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys -from ..response import SyftSuccess +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.errors import SyftException +from ...types.result import as_result from .image_registry import SyftImageRegistry -__all__ = ["SyftImageRegistryStash"] - - -URLPartitionKey = PartitionKey(key="url", type_=str) - - -@serializable() -class SyftImageRegistryStash(BaseUIDStoreStash): - object_type = SyftImageRegistry - settings: PartitionSettings = PartitionSettings( - name=SyftImageRegistry.__canonical_name__, - object_type=SyftImageRegistry, - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +@serializable(canonical_name="SyftImageRegistrySQLStash", version=1) +class SyftImageRegistryStash(ObjectStash[SyftImageRegistry]): + @as_result(SyftException, StashException, NotFoundException) def get_by_url( self, credentials: SyftVerifyKey, url: str, - ) -> Result[SyftImageRegistry | None, str]: - qks = QueryKeys(qks=[URLPartitionKey.with_obj(url)]) - return self.query_one(credentials=credentials, qks=qks) - - def delete_by_url( - self, credentials: SyftVerifyKey, url: str - ) -> Result[SyftSuccess, str]: - qk = URLPartitionKey.with_obj(url) - result = super().delete(credentials=credentials, qk=qk) - if result.is_ok(): - return Ok(SyftSuccess(message=f"URL: {url} deleted")) - return result + ) -> SyftImageRegistry: + return self.get_one( + credentials=credentials, + filters={"url": url}, + ).unwrap() + + @as_result(SyftException, StashException) + def 
delete_by_url(self, credentials: SyftVerifyKey, url: str) -> Literal[True]: + item = self.get_by_url(credentials=credentials, url=url).unwrap() + self.delete_by_uid(credentials=credentials, uid=item.id).unwrap() + + # TODO standardize delete return type + return True diff --git a/packages/syft/src/syft/service/worker/utils.py b/packages/syft/src/syft/service/worker/utils.py index 93ab483e9c8..d5967364593 100644 --- a/packages/syft/src/syft/service/worker/utils.py +++ b/packages/syft/src/syft/service/worker/utils.py @@ -1,5 +1,6 @@ # stdlib import contextlib +import logging import os from pathlib import Path import socket @@ -13,19 +14,19 @@ from kr8s.objects import Pod # relative -from ...abstract_node import AbstractNode +from ...abstract_server import AbstractServer from ...custom_worker.builder import CustomWorkerBuilder from ...custom_worker.builder_types import ImageBuildResult from ...custom_worker.builder_types import ImagePushResult -from ...custom_worker.config import DockerWorkerConfig from ...custom_worker.config import PrebuiltWorkerConfig from ...custom_worker.k8s import KubeUtils from ...custom_worker.k8s import PodStatus from ...custom_worker.runner_k8s import KubernetesRunner -from ...node.credentials import SyftVerifyKey +from ...server.credentials import SyftVerifyKey +from ...types.errors import SyftException +from ...types.result import as_result from ...types.uid import UID from ...util.util import get_queue_address -from ..response import SyftError from .image_identifier import SyftWorkerImageIdentifier from .worker_image import SyftWorkerImage from .worker_image_stash import SyftWorkerImageStash @@ -35,9 +36,11 @@ from .worker_pool import WorkerOrchestrationType from .worker_pool import WorkerStatus +logger = logging.getLogger(__name__) + DEFAULT_WORKER_IMAGE_TAG = "openmined/default-worker-image-cpu:0.0.1" DEFAULT_WORKER_POOL_NAME = "default-pool" -K8S_NODE_CREDS_NAME = "node-creds" +K8S_SERVER_CREDS_NAME = "server-creds" def backend_container_name() -> str: @@ -82,21 +85,36 @@ def extract_config_from_backend( environment = details["Config"]["Env"] # Extract Volume Binds + vol_binds = {} + + # ignore any irrelevant binds for the worker like + # packages/grid/backend/grid:/root/app/grid + # packages/syft:/root/app/syft + # packages/grid/data/package-cache:/root/.cache + valid_binds = { + "/var/run/docker.sock", + "/root/.cache", + } + for vol in host_config["Binds"]: parts = vol.split(":") key = parts[0] bind = parts[1] mode = parts[2] - if "/storage" in bind: - # we need this because otherwise we are using the same node private key + + if "/root/data/creds" in vol: + # we need this because otherwise we are using the same server private key # which will make account creation fail - worker_postfix = worker_name.split("-", 1)[1] - key = f"{key}-{worker_postfix}" - extracted_config["volume_binds"][key] = {"bind": bind, "mode": mode} + key = f"{key}-{worker_name}" + elif bind not in valid_binds: + continue + + vol_binds[key] = {"bind": bind, "mode": mode} # Extract Environment Variables extracted_config["environment"] = dict([e.split("=", 1) for e in environment]) extracted_config["network_mode"] = f"container:{backend_container.id}" + extracted_config["volume_binds"] = vol_binds return extracted_config @@ -222,7 +240,7 @@ def run_container_using_docker( def run_workers_in_threads( - node: AbstractNode, + server: AbstractServer, pool_name: str, number: int, start_idx: int = 0, @@ -233,23 +251,24 @@ def run_workers_in_threads( error = None worker_name = 
f"{pool_name}-{worker_count}" worker = SyftWorker( + id=UID.with_seed(worker_name), name=worker_name, status=WorkerStatus.RUNNING, worker_pool_name=pool_name, healthcheck=WorkerHealth.HEALTHY, ) try: - port = node.queue_config.client_config.queue_port + port = server.queue_config.client_config.queue_port address = get_queue_address(port) - node.add_consumer_for_service( + server.add_consumer_for_service( service_name=pool_name, syft_worker_id=worker.id, address=address, ) except Exception as e: - print( - "Failed to start consumer for " - f"pool={pool_name} worker={worker_name}. Error: {e}" + logger.error( + f"Failed to start consumer for pool={pool_name} worker={worker_name}", + exc_info=e, ) worker.status = WorkerStatus.STOPPED error = str(e) @@ -282,9 +301,9 @@ def prepare_kubernetes_pool_env( if creds_path is not None and not creds_path.exists(): raise ValueError("Credentials file does not exist") - # create a secret for the node credentials owned by the backend, not the pool. - node_secret = KubeUtils.create_secret( - secret_name=K8S_NODE_CREDS_NAME, + # create a secret for the server credentials owned by the backend, not the pool. + server_secret = KubeUtils.create_secret( + secret_name=K8S_SERVER_CREDS_NAME, type="Opaque", component=backend_pod_name, data={creds_path.name: creds_path.read_text()}, @@ -295,7 +314,7 @@ def prepare_kubernetes_pool_env( backend_env = runner.get_pod_env_vars(backend_pod_name) or [] env_vars_: list = KubeUtils.patch_env_vars(backend_env, env_vars) mount_secrets = { - node_secret.metadata.name: { + server_secret.metadata.name: { "mountPath": str(creds_path), "subPath": creds_path.name, }, @@ -304,6 +323,7 @@ def prepare_kubernetes_pool_env( return env_vars_, mount_secrets +@as_result(SyftException) def create_kubernetes_pool( runner: KubernetesRunner, tag: str, @@ -311,21 +331,17 @@ def create_kubernetes_pool( replicas: int, queue_port: int, debug: bool, - reg_username: str | None = None, - reg_password: str | None = None, + registry_username: str | None = None, + registry_password: str | None = None, reg_url: str | None = None, + pod_annotations: dict[str, str] | None = None, + pod_labels: dict[str, str] | None = None, **kwargs: Any, -) -> list[Pod] | SyftError: +) -> list[Pod]: pool = None - error = False try: - print( - "Creating new pool " - f"name={pool_name} " - f"tag={tag} " - f"replicas={replicas}" - ) + logger.info(f"Creating new pool name={pool_name} tag={tag} replicas={replicas}") env_vars, mount_secrets = prepare_kubernetes_pool_env( runner, @@ -337,6 +353,13 @@ def create_kubernetes_pool( "N_CONSUMERS": "1", "CREATE_PRODUCER": "False", "INMEMORY_WORKERS": "False", + "OTEL_SERVICE_NAME": f"{pool_name}", + "OTEL_EXPORTER_OTLP_ENDPOINT": os.environ.get( + "OTEL_EXPORTER_OTLP_ENDPOINT" + ), + "OTEL_EXPORTER_OTLP_PROTOCOL": os.environ.get( + "OTEL_EXPORTER_OTLP_PROTOCOL" + ), }, ) @@ -347,38 +370,50 @@ def create_kubernetes_pool( replicas=replicas, env_vars=env_vars, mount_secrets=mount_secrets, - reg_username=reg_username, - reg_password=reg_password, + registry_username=registry_username, + registry_password=registry_password, reg_url=reg_url, + pod_annotations=pod_annotations, + pod_labels=pod_labels, ) except Exception as e: - error = True - return SyftError(message=f"Failed to start workers {e}") - finally: - if error and pool: - pool.delete() + if pool: + try: + pool.delete() # this raises another exception if the pool never starts + except Exception as e2: + logger.error( + f"Failed to delete pool {pool_name} after failed creation. 
{e2}" + ) + # stdlib + import traceback + + raise SyftException( + public_message=f"Failed to start workers {e} {e.__class__} {e.args} {traceback.format_exc()}." + ) return runner.get_pool_pods(pool_name=pool_name) +@as_result(SyftException) def scale_kubernetes_pool( runner: KubernetesRunner, pool_name: str, replicas: int, -) -> list[Pod] | SyftError: +) -> list[Pod]: pool = runner.get_pool(pool_name) if not pool: - return SyftError(message=f"Pool does not exist. name={pool_name}") + raise SyftException(public_message=f"Pool does not exist. name={pool_name}") try: - print(f"Scaling pool name={pool_name} to replicas={replicas}") + logger.info(f"Scaling pool name={pool_name} to replicas={replicas}") runner.scale_pool(pool_name=pool_name, replicas=replicas) except Exception as e: - return SyftError(message=f"Failed to scale workers {e}") + raise SyftException(public_message=f"Failed to scale workers {e}") return runner.get_pool_pods(pool_name=pool_name) +@as_result(SyftException) def run_workers_in_kubernetes( worker_image: SyftWorkerImage, worker_count: int, @@ -386,11 +421,13 @@ def run_workers_in_kubernetes( queue_port: int, start_idx: int = 0, debug: bool = False, - reg_username: str | None = None, - reg_password: str | None = None, + registry_username: str | None = None, + registry_password: str | None = None, reg_url: str | None = None, + pod_annotations: dict[str, str] | None = None, + pod_labels: dict[str, str] | None = None, **kwargs: Any, -) -> list[ContainerSpawnStatus] | SyftError: +) -> list[ContainerSpawnStatus]: spawn_status = [] runner = KubernetesRunner() @@ -403,24 +440,27 @@ def run_workers_in_kubernetes( replicas=worker_count, queue_port=queue_port, debug=debug, - reg_username=reg_username, - reg_password=reg_password, + registry_username=registry_username, + registry_password=registry_password, reg_url=reg_url, - ) + pod_annotations=pod_annotations, + pod_labels=pod_labels, + ).unwrap() else: - return SyftError( - message=f"image with uid {worker_image.id} does not have an image identifier" + raise SyftException( + public_message=f"image with uid {worker_image.id} does not have an image identifier" ) else: - pool_pods = scale_kubernetes_pool(runner, pool_name, worker_count) + # TODO: see if this is resultify-able... looks like it. 
+ try: + pool_pods = scale_kubernetes_pool(runner, pool_name, worker_count).unwrap() + except SyftException as exc: + raise SyftException(public_message=exc.public_message) if isinstance(pool_pods, list) and len(pool_pods) > 0: # slice only those pods that we're interested in pool_pods = pool_pods[start_idx:] - if isinstance(pool_pods, SyftError): - return pool_pods - # create worker object for pod in pool_pods: status: PodStatus | WorkerStatus | None = runner.get_pod_status(pod) @@ -477,6 +517,7 @@ def map_pod_to_worker_status( return worker_status, worker_healthcheck, worker_error +@as_result(SyftException) def run_containers( pool_name: str, worker_image: SyftWorkerImage, @@ -485,16 +526,18 @@ def run_containers( queue_port: int, dev_mode: bool = False, start_idx: int = 0, - reg_username: str | None = None, - reg_password: str | None = None, + registry_username: str | None = None, + registry_password: str | None = None, reg_url: str | None = None, -) -> list[ContainerSpawnStatus] | SyftError: + pod_annotations: dict[str, str] | None = None, + pod_labels: dict[str, str] | None = None, +) -> list[ContainerSpawnStatus]: results = [] if not worker_image.is_built: - return SyftError(message="Image must be built before running it.") + raise SyftException(public_message="Image must be built before running it.") - print(f"Starting workers with start_idx={start_idx} count={number}") + logger.info(f"Starting workers with start_idx={start_idx} count={number}") if orchestration == WorkerOrchestrationType.DOCKER: with contextlib.closing(docker.from_env()) as client: @@ -508,8 +551,8 @@ def run_containers( pool_name=pool_name, queue_port=queue_port, debug=dev_mode, - username=reg_username, - password=reg_password, + username=registry_username, + password=registry_password, registry_url=reg_url, ) results.append(spawn_result) @@ -521,75 +564,46 @@ def run_containers( queue_port=queue_port, debug=dev_mode, start_idx=start_idx, - reg_username=reg_username, - reg_password=reg_password, + registry_username=registry_username, + registry_password=registry_password, reg_url=reg_url, - ) + pod_annotations=pod_annotations, + pod_labels=pod_labels, + ).unwrap() return results +@as_result(SyftException) def create_default_image( credentials: SyftVerifyKey, image_stash: SyftWorkerImageStash, tag: str, in_kubernetes: bool = False, -) -> SyftError | SyftWorkerImage: - # TODO: Hardcode worker dockerfile since not able to COPY - # worker_cpu.dockerfile to backend in backend.dockerfile. 
- - # default_cpu_dockerfile = get_syft_cpu_dockerfile() - # DockerWorkerConfig.from_path(default_cpu_dockerfile) - +) -> SyftWorkerImage: if not in_kubernetes: - default_cpu_dockerfile = f"""ARG SYFT_VERSION_TAG='{tag}' \n""" - default_cpu_dockerfile += """FROM openmined/grid-backend:${SYFT_VERSION_TAG} - ARG PYTHON_VERSION="3.12" - ARG SYSTEM_PACKAGES="" - ARG PIP_PACKAGES="pip --dry-run" - ARG CUSTOM_CMD='echo "No custom commands passed"' - - # Worker specific environment variables go here - ENV SYFT_WORKER="true" - ENV DOCKER_TAG=${SYFT_VERSION_TAG} - - RUN apk update && \ - apk add ${SYSTEM_PACKAGES} && \ - pip install --user ${PIP_PACKAGES} && \ - bash -c "$CUSTOM_CMD" - """ - worker_config = DockerWorkerConfig(dockerfile=default_cpu_dockerfile) - _new_image = SyftWorkerImage( - config=worker_config, - created_by=credentials, - ) - else: - # in k8s we don't need to build the image, just the tag of backend is enough - worker_config = PrebuiltWorkerConfig( - tag=tag, - description="Prebuilt default worker image", - ) + tag = f"openmined/syft-backend:{tag}" + + worker_config = PrebuiltWorkerConfig( + tag=tag, + description="Prebuilt default worker image", + ) + result = image_stash.get_by_worker_config( + credentials=credentials, + config=worker_config, + ) + if result.is_err(): # create SyftWorkerImage from a pre-built image _new_image = SyftWorkerImage( config=worker_config, created_by=credentials, image_identifier=SyftWorkerImageIdentifier.from_str(tag), ) - - result = image_stash.get_by_docker_config( - credentials=credentials, - config=worker_config, - ) - - if result.ok() is None: - result = image_stash.set(credentials, _new_image) - if result.is_err(): - return SyftError(message=f"Failed to save image stash: {result.err()}") - - default_syft_image = result.ok() - - return default_syft_image + return image_stash.set(credentials, _new_image).unwrap( + public_message="Failed to save image stash" + ) + return result.unwrap() def _get_healthcheck_based_on_status(status: WorkerStatus) -> WorkerHealth: @@ -599,9 +613,8 @@ def _get_healthcheck_based_on_status(status: WorkerStatus) -> WorkerHealth: return WorkerHealth.UNHEALTHY -def image_build( - image: SyftWorkerImage, **kwargs: dict[str, Any] -) -> ImageBuildResult | SyftError: +@as_result(SyftException) +def image_build(image: SyftWorkerImage, **kwargs: dict[str, Any]) -> ImageBuildResult: if image.image_identifier is not None: full_tag = image.image_identifier.full_name_with_tag try: @@ -609,33 +622,31 @@ def image_build( return builder.build_image( config=image.config, tag=full_tag, - # rm=True, - # forcerm=True, **kwargs, ) except docker.errors.APIError as e: - return SyftError( - message=f"Docker API error when building '{full_tag}'. Reason - {e}" + raise SyftException( + public_message=f"Docker API error when building '{full_tag}'. Reason - {e}" ) except docker.errors.DockerException as e: - return SyftError( - message=f"Docker exception when building '{full_tag}'. Reason - {e}" + raise SyftException( + public_message=f"Docker exception when building '{full_tag}'. Reason - {e}" ) except Exception as e: - return SyftError( - message=f"Unknown exception when building '{full_tag}'. Reason - {e}" + raise SyftException( + public_message=f"Unknown exception when building '{full_tag}'. 
Reason - {e}" ) - else: - return SyftError( - message=f"image with uid {image.id} does not have an image identifier" - ) + raise SyftException( + public_message=f"image with uid {image.id} does not have an image identifier" + ) +@as_result(SyftException) def image_push( image: SyftWorkerImage, username: str | None = None, password: str | None = None, -) -> ImagePushResult | SyftError: +) -> ImagePushResult: if image.image_identifier is not None: full_tag = image.image_identifier.full_name_with_tag try: @@ -648,29 +659,30 @@ def image_push( password=password, ) - if "error" in result.logs.lower() or result.exit_code: - return SyftError( - message=f"Failed to push {full_tag}. " + if "error" in result.logs.lower() or result.has_failed: + raise SyftException( + public_message=f"Failed to push {full_tag}. " f"Exit code: {result.exit_code}. " f"Logs:\n{result.logs}" ) return result except docker.errors.APIError as e: - return SyftError(message=f"Docker API error when pushing {full_tag}. {e}") + raise SyftException( + public_message=f"Docker API error when pushing {full_tag}. {e}" + ) except docker.errors.DockerException as e: - return SyftError( - message=f"Docker exception when pushing {full_tag}. Reason - {e}" + raise SyftException( + public_message=f"Docker exception when pushing {full_tag}. Reason - {e}" ) except Exception as e: - return SyftError( - message=f"Unknown exception when pushing {image.image_identifier}. Reason - {e}" + raise SyftException( + public_message=f"Unknown exception when pushing {image.image_identifier}. Reason - {e}" ) - else: - return SyftError( - message=f"image with uid {image.id} does not have an " - "image identifier and tag, hence we can't push it." - ) + raise SyftException( + public_message=f"image with uid {image.id} does not have an " + "image identifier and tag, hence we can't push it." 
+ ) def get_orchestration_type() -> WorkerOrchestrationType: diff --git a/packages/syft/src/syft/service/worker/worker.py b/packages/syft/src/syft/service/worker/worker.py index d318dc1469b..9a0fdcb4ca2 100644 --- a/packages/syft/src/syft/service/worker/worker.py +++ b/packages/syft/src/syft/service/worker/worker.py @@ -1,36 +1,18 @@ # stdlib -from collections.abc import Callable from typing import Any # relative from ...serde.serializable import serializable -from ...store.document_store import SYFT_OBJECT_VERSION_2 +from ...store.document_store import SYFT_OBJECT_VERSION_1 from ...store.document_store import SyftObject from ...types.datetime import DateTime -from ...types.syft_migration import migrate -from ...types.transforms import drop -from ...types.transforms import make_set_default - - -@serializable() -class DockerWorkerV1(SyftObject): - # version - __canonical_name__ = "ContainerImage" - __version__ = SYFT_OBJECT_VERSION_2 - - __attr_searchable__ = ["container_id"] - __attr_unique__ = ["container_id"] - __repr_attrs__ = ["container_id", "created_at"] - - container_id: str - created_at: DateTime = DateTime.now() @serializable() class DockerWorker(SyftObject): # version __canonical_name__ = "ContainerImage" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __attr_searchable__ = ["container_id", "container_name"] __attr_unique__ = ["container_id"] @@ -46,13 +28,3 @@ def _coll_repr_(self) -> dict[str, Any]: "container_id": self.container_id, "created_at": self.created_at, } - - -@migrate(DockerWorker, DockerWorkerV1) -def downgrade_job_v2_to_v1() -> list[Callable]: - return [drop(["container_name"])] - - -@migrate(DockerWorkerV1, DockerWorker) -def upgrade_job_v2_to_v3() -> list[Callable]: - return [make_set_default("job_consumer_id", None)] diff --git a/packages/syft/src/syft/service/worker/worker_image.py b/packages/syft/src/syft/service/worker/worker_image.py index eb5066d932c..10a581439fa 100644 --- a/packages/syft/src/syft/service/worker/worker_image.py +++ b/packages/syft/src/syft/service/worker/worker_image.py @@ -1,11 +1,16 @@ # stdlib +# stdlib +from collections.abc import Callable + # relative from ...custom_worker.config import PrebuiltWorkerConfig from ...custom_worker.config import WorkerConfig -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey from ...types.datetime import DateTime +from ...types.syft_migration import migrate +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SYFT_OBJECT_VERSION_2 from ...types.syft_object import SyftObject from ...types.uid import UID @@ -13,12 +18,13 @@ @serializable() -class SyftWorkerImage(SyftObject): +class SyftWorkerImageV1(SyftObject): __canonical_name__ = "SyftWorkerImage" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __attr_unique__ = ["config"] __attr_searchable__ = ["config", "image_hash", "created_by"] + __repr_attrs__ = [ "image_identifier", "image_hash", @@ -35,6 +41,44 @@ class SyftWorkerImage(SyftObject): image_hash: str | None = None built_at: DateTime | None = None + @property + def is_prebuilt(self) -> bool: + return isinstance(self.config, PrebuiltWorkerConfig) + + +@serializable() +class SyftWorkerImage(SyftObject): + __canonical_name__ = "SyftWorkerImage" + __version__ = SYFT_OBJECT_VERSION_2 + + __attr_unique__ = ["config_hash"] + __attr_searchable__ = [ + "config", + "image_hash", + "created_by", + "config_hash", + ] + + 
__repr_attrs__ = [ + "image_identifier", + "image_hash", + "created_at", + "built_at", + "config", + ] + + id: UID + config: WorkerConfig + created_by: SyftVerifyKey + created_at: DateTime = DateTime.now() + image_identifier: SyftWorkerImageIdentifier | None = None + image_hash: str | None = None + built_at: DateTime | None = None + + @property + def config_hash(self) -> str: + return self.config.hash() + @property def is_built(self) -> bool: """Returns True if the image has been built or is prebuilt.""" @@ -52,3 +96,8 @@ def built_image_tag(self) -> str | None: if self.is_built and self.image_identifier: return self.image_identifier.full_name_with_tag return None + + +@migrate(SyftWorkerImageV1, SyftWorkerImage) +def migrate_syft_worker_image_v1_to_v2() -> list[Callable]: + return [] # no migrations needed at data level, only unique and searchable attributes changed diff --git a/packages/syft/src/syft/service/worker/worker_image_service.py b/packages/syft/src/syft/service/worker/worker_image_service.py index 21c14ba2ea5..dd0795cf8df 100644 --- a/packages/syft/src/syft/service/worker/worker_image_service.py +++ b/packages/syft/src/syft/service/worker/worker_image_service.py @@ -1,29 +1,27 @@ # stdlib import contextlib -from typing import cast # third party import docker import pydantic # relative -from ...abstract_node import AbstractNode -from ...custom_worker.config import DockerWorkerConfig +from ...custom_worker.config import PrebuiltWorkerConfig +from ...custom_worker.config import WorkerConfig from ...custom_worker.k8s import IN_KUBERNETES from ...serde.serializable import serializable -from ...store.document_store import DocumentStore +from ...store.db.db import DBManager from ...types.datetime import DateTime from ...types.dicttuple import DictTuple +from ...types.errors import SyftException from ...types.uid import UID from ..context import AuthedServiceContext -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import service_method from ..user.user_roles import DATA_OWNER_ROLE_LEVEL from ..user.user_roles import DATA_SCIENTIST_ROLE_LEVEL from .image_registry import SyftImageRegistry -from .image_registry_service import SyftImageRegistryService from .utils import image_build from .utils import image_push from .worker_image import SyftWorkerImage @@ -31,40 +29,58 @@ from .worker_image_stash import SyftWorkerImageStash -@serializable() +@serializable(canonical_name="SyftWorkerImageService", version=1) class SyftWorkerImageService(AbstractService): - store: DocumentStore stash: SyftWorkerImageStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = SyftWorkerImageStash(store=store) @service_method( - path="worker_image.submit_dockerfile", - name="submit_dockerfile", + path="worker_image.submit", + name="submit", roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, ) - def submit_dockerfile( - self, context: AuthedServiceContext, docker_config: DockerWorkerConfig - ) -> SyftSuccess | SyftError: + def submit( + self, context: AuthedServiceContext, worker_config: WorkerConfig + ) -> SyftSuccess: + image_identifier: SyftWorkerImageIdentifier | None = None + if isinstance(worker_config, PrebuiltWorkerConfig): + try: + image_identifier = SyftWorkerImageIdentifier.from_str(worker_config.tag) + except Exception: + raise SyftException( + public_message=( + f"Invalid Docker image name: {worker_config.tag}.\n" + + "Please specify the image 
name in this format /:." + ) + ) worker_image = SyftWorkerImage( - config=docker_config, + config=worker_config, created_by=context.credentials, + image_identifier=image_identifier, ) - res = self.stash.set(context.credentials, worker_image) - if res.is_err(): - return SyftError(message=res.err()) + # TODO: I think this was working in python mode due to a bug because + # it wasn't saying it was duplicate + # why can we only have a prebuilt or a non prebuilt with the same tag? + # bigquery uses prebuilt but we need to build and then test that prebuilt works + # so we kind of need to use one then the other and have it pull from the first + stored_image = self.stash.set( + context.credentials, worker_image, ignore_duplicates=True + ).unwrap() return SyftSuccess( - message=f"Dockerfile ID: {worker_image.id} successfully submitted." + message=f"Dockerfile ID: {worker_image.id} successfully submitted.", + value=stored_image, ) @service_method( path="worker_image.build", name="build", roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, ) def build( self, @@ -72,32 +88,24 @@ def build( image_uid: UID, tag: str, registry_uid: UID | None = None, - pull: bool = True, - ) -> SyftSuccess | SyftError: + pull_image: bool = True, + force_build: bool = False, + ) -> SyftSuccess: registry: SyftImageRegistry | None = None - context.node = cast(AbstractNode, context.node) - if IN_KUBERNETES and registry_uid is None: - return SyftError(message="Registry UID is required in Kubernetes mode.") - - result = self.stash.get_by_uid(credentials=context.credentials, uid=image_uid) - if result.is_err(): - return SyftError( - message=f"Failed to get image for uid: {image_uid}. Error: {result.err()}" + raise SyftException( + public_message="Registry UID is required in Kubernetes mode." 
) - worker_image: SyftWorkerImage = result.ok() - + worker_image = self.stash.get_by_uid( + credentials=context.credentials, uid=image_uid + ).unwrap() if registry_uid: # get registry from image registry service - image_registry_service: AbstractService = context.node.get_service( - SyftImageRegistryService + registry = context.server.services.syft_image_registry.get_by_id( + context, registry_uid ) - registry_result = image_registry_service.get_by_id(context, registry_uid) - if registry_result.is_err(): - return registry_result - registry = registry_result.ok() try: if registry: @@ -107,7 +115,7 @@ def build( else: image_identifier = SyftWorkerImageIdentifier.from_str(tag=tag) except pydantic.ValidationError as e: - return SyftError(message=f"Failed to create tag: {e}") + raise SyftException(public_message=f"Failed to create tag: {e}") # if image is already built and identifier is unchanged, return an error if ( @@ -115,16 +123,17 @@ def build( and worker_image.image_identifier and worker_image.image_identifier.full_name_with_tag == image_identifier.full_name_with_tag + and not force_build ): - return SyftError(message=f"Image ID: {image_uid} is already built") + raise SyftException( + public_message=f"Image ID: {image_uid} is already built" + ) worker_image.image_identifier = image_identifier result = None - if not context.node.in_memory_workers: - build_result = image_build(worker_image, pull=pull) - if isinstance(build_result, SyftError): - return build_result + if not context.server.in_memory_workers: + build_result = image_build(worker_image, pull=pull_image).unwrap() worker_image.image_hash = build_result.image_hash worker_image.built_at = DateTime.now() @@ -137,52 +146,39 @@ def build( message="Image building skipped, since using in-memory workers." ) - update_result = self.stash.update(context.credentials, obj=worker_image) - - if update_result.is_err(): - return SyftError( - message=f"Failed to update image meta information: {update_result.err()}" - ) - + self.stash.update(context.credentials, obj=worker_image).unwrap() return result @service_method( path="worker_image.push", name="push", roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, ) def push( self, context: AuthedServiceContext, - image: UID, + image_uid: UID, username: str | None = None, password: str | None = None, - ) -> SyftSuccess | SyftError: - result = self.stash.get_by_uid(credentials=context.credentials, uid=image) - if result.is_err(): - return SyftError( - message=f"Failed to get Image ID: {image}. Error: {result.err()}" - ) - worker_image: SyftWorkerImage = result.ok() + ) -> SyftSuccess: + worker_image = self.stash.get_by_uid( + credentials=context.credentials, uid=image_uid + ).unwrap() if not worker_image.is_built: - return SyftError(message=f"Image ID: {worker_image.id} is not built yet.") + raise SyftException( + public_message=f"Image ID: {worker_image.id} is not built yet." + ) elif ( worker_image.image_identifier is None or worker_image.image_identifier.registry_host == "" ): - return SyftError( - message=f"Image ID: {worker_image.id} does not have a valid registry host." + raise SyftException( + public_message=f"Image ID: {worker_image.id} does not have a valid registry host." 
) - result = image_push( - image=worker_image, - username=username, - password=password, - ) - - if isinstance(result, SyftError): - return result + image_push(image=worker_image, username=username, password=password).unwrap() return SyftSuccess( message=f'Pushed Image ID: {worker_image.id} to "{worker_image.image_identifier.full_name_with_tag}".' @@ -193,50 +189,29 @@ def push( name="get_all", roles=DATA_SCIENTIST_ROLE_LEVEL, ) - def get_all( - self, context: AuthedServiceContext - ) -> DictTuple[str, SyftWorkerImage] | SyftError: + def get_all(self, context: AuthedServiceContext) -> DictTuple[str, SyftWorkerImage]: """ One image one docker file for now """ - result = self.stash.get_all(credentials=context.credentials) - if result.is_err(): - return SyftError(message=f"{result.err()}") - images: list[SyftWorkerImage] = result.ok() - - res = {} - # if image is built, index it by full_name_with_tag - for im in images: - if im.is_built and im.image_identifier is not None: - res[im.image_identifier.full_name_with_tag] = im - # and then index all images by id - # TODO: jupyter repr needs to be updated to show unique values - # (even if multiple keys point to same value) - res.update({im.id.to_string(): im for im in images if not im.is_built}) - - return DictTuple(res) + images = self.stash.get_all(credentials=context.credentials).unwrap() + return DictTuple({image.id.to_string(): image for image in images}) @service_method( path="worker_image.remove", name="remove", roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, ) - def remove( - self, context: AuthedServiceContext, uid: UID - ) -> SyftSuccess | SyftError: + def remove(self, context: AuthedServiceContext, uid: UID) -> SyftSuccess: # Delete Docker image given image tag - res = self.stash.get_by_uid(credentials=context.credentials, uid=uid) - if res.is_err(): - return SyftError(message=f"{res.err()}") - image: SyftWorkerImage = res.ok() + image = self.stash.get_by_uid(credentials=context.credentials, uid=uid).unwrap() - context.node = cast(AbstractNode, context.node) - if context.node.in_memory_workers: + if context.server.in_memory_workers: pass elif IN_KUBERNETES: # TODO: Implement image deletion in kubernetes - return SyftError( - message="Image Deletion is not yet implemented in Kubernetes !!" + raise SyftException( + public_message="Image Deletion is not yet implemented in Kubernetes !!" ) elif image and image.image_identifier: try: @@ -244,38 +219,22 @@ def remove( with contextlib.closing(docker.from_env()) as client: client.images.remove(image=full_tag) except docker.errors.ImageNotFound: - return SyftError(message=f"Image Tag: {full_tag} not found.") + raise SyftException(public_message=f"Image Tag: {full_tag} not found.") except Exception as e: - return SyftError( - message=f"Failed to delete Image Tag: {full_tag}. Error: {e}" + raise SyftException( + public_message=f"Failed to delete Image Tag: {full_tag}. Error: {e}" ) - result = self.stash.delete_by_uid(credentials=context.credentials, uid=uid) - - if result.is_err(): - return SyftError(message=f"{result.err()}") - - returned_message: str = ( - result.ok().message + f". Image ID: {uid} deleted successfully." 
- ) - - return SyftSuccess(message=returned_message) + self.stash.delete_by_uid(credentials=context.credentials, uid=uid).unwrap() + return SyftSuccess(message=f"Image ID: {uid} deleted successfully.") @service_method( path="worker_image.get_by_uid", name="get_by_uid", roles=DATA_SCIENTIST_ROLE_LEVEL, ) - def get_by_uid( - self, context: AuthedServiceContext, uid: UID - ) -> SyftWorkerImage | SyftError: - res = self.stash.get_by_uid(credentials=context.credentials, uid=uid) - if res.is_err(): - return SyftError( - message=f"Failed to get image with uid {uid}. Error: {res.err()}" - ) - image: SyftWorkerImage = res.ok() - return image + def get_by_uid(self, context: AuthedServiceContext, uid: UID) -> SyftWorkerImage: + return self.stash.get_by_uid(credentials=context.credentials, uid=uid).unwrap() @service_method( path="worker_image.get_by_config", @@ -283,14 +242,8 @@ def get_by_uid( roles=DATA_SCIENTIST_ROLE_LEVEL, ) def get_by_config( - self, context: AuthedServiceContext, docker_config: DockerWorkerConfig - ) -> SyftWorkerImage | SyftError: - res = self.stash.get_by_docker_config( - credentials=context.credentials, config=docker_config - ) - if res.is_err(): - return SyftError( - message=f"Failed to get image with docker config {docker_config}. Error: {res.err()}" - ) - image: SyftWorkerImage = res.ok() - return image + self, context: AuthedServiceContext, worker_config: WorkerConfig + ) -> SyftWorkerImage: + return self.stash.get_by_worker_config( + credentials=context.credentials, config=worker_config + ).unwrap() diff --git a/packages/syft/src/syft/service/worker/worker_image_stash.py b/packages/syft/src/syft/service/worker/worker_image_stash.py index 900bcdd7cd6..29755e9ef07 100644 --- a/packages/syft/src/syft/service/worker/worker_image_stash.py +++ b/packages/syft/src/syft/service/worker/worker_image_stash.py @@ -1,37 +1,30 @@ # stdlib # third party -from result import Err -from result import Result + +# third party +from sqlalchemy.orm import Session # relative from ...custom_worker.config import DockerWorkerConfig from ...custom_worker.config import WorkerConfig -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.db.stash import with_session +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.errors import SyftException +from ...types.result import as_result from ..action.action_permissions import ActionObjectPermission from ..action.action_permissions import ActionPermission from .worker_image import SyftWorkerImage -WorkerConfigPK = PartitionKey(key="config", type_=WorkerConfig) - - -@serializable() -class SyftWorkerImageStash(BaseUIDStoreStash): - object_type = SyftWorkerImage - settings: PartitionSettings = PartitionSettings( - name=SyftWorkerImage.__canonical_name__, - object_type=SyftWorkerImage, - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +@serializable(canonical_name="SyftWorkerImageSQLStash", version=1) +class SyftWorkerImageStash(ObjectStash[SyftWorkerImage]): + @as_result(SyftException, StashException, NotFoundException) + @with_session def 
set( self, credentials: SyftVerifyKey, @@ -39,31 +32,56 @@ def set( add_permissions: list[ActionObjectPermission] | None = None, add_storage_permission: bool = True, ignore_duplicates: bool = False, - ) -> Result[SyftWorkerImage, str]: - add_permissions = [] if add_permissions is None else add_permissions - + session: Session = None, + skip_check_type: bool = False, + ) -> SyftWorkerImage: # By default syft images have all read permission + add_permissions = [] if add_permissions is None else add_permissions add_permissions.append( ActionObjectPermission(uid=obj.id, permission=ActionPermission.ALL_READ) ) if isinstance(obj.config, DockerWorkerConfig): - result = self.get_by_docker_config( + worker_config_exists = self.worker_config_exists( credentials=credentials, config=obj.config - ) - if result.is_ok() and result.ok() is not None: - return Err(f"Image already exists for: {obj.config}") + ).unwrap() + if worker_config_exists: + raise SyftException( + public_message=f"Worker Image with config {obj.config} already exists" + ) - return super().set( - credentials, - obj, - add_permissions=add_permissions, - add_storage_permission=add_storage_permission, - ignore_duplicates=ignore_duplicates, + return ( + super() + .set( + credentials, + obj, + add_permissions=add_permissions, + add_storage_permission=add_storage_permission, + ignore_duplicates=ignore_duplicates, + session=session, + ) + .unwrap() ) - def get_by_docker_config( - self, credentials: SyftVerifyKey, config: DockerWorkerConfig - ) -> Result[SyftWorkerImage | None, str]: - qks = QueryKeys(qks=[WorkerConfigPK.with_obj(config)]) - return self.query_one(credentials=credentials, qks=qks) + @as_result(StashException, NotFoundException) + def worker_config_exists( + self, credentials: SyftVerifyKey, config: WorkerConfig + ) -> bool: + try: + self.get_by_worker_config(credentials=credentials, config=config).unwrap() + return True + except NotFoundException: + return False + + @as_result(StashException, NotFoundException) + def get_by_worker_config( + self, credentials: SyftVerifyKey, config: WorkerConfig + ) -> SyftWorkerImage: + # TODO cannot search on fields containing objects + all_images = self.get_all(credentials=credentials).unwrap() + for image in all_images: + if image.config == config: + return image + raise NotFoundException( + public_message=f"Worker Image with config {config} not found" + ) diff --git a/packages/syft/src/syft/service/worker/worker_pool.py b/packages/syft/src/syft/service/worker/worker_pool.py index 4b90c8db679..fffa0deba70 100644 --- a/packages/syft/src/syft/service/worker/worker_pool.py +++ b/packages/syft/src/syft/service/worker/worker_pool.py @@ -1,4 +1,5 @@ # stdlib +from collections.abc import Callable from enum import Enum from typing import Any from typing import cast @@ -8,24 +9,25 @@ from docker.models.containers import Container # relative -from ...client.api import APIRegistry from ...serde.serializable import serializable from ...store.linked_obj import LinkedObject from ...types.base import SyftBaseModel from ...types.datetime import DateTime +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_migration import migrate +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SYFT_OBJECT_VERSION_2 from ...types.syft_object import SyftObject from ...types.syft_object import short_uid +from ...types.transforms import TransformContext from ...types.uid import UID -from ...util import options -from ...util.colors import 
SURFACE -from ...util.fonts import ITABLES_CSS -from ...util.fonts import fonts_css from ..response import SyftError from .worker_image import SyftWorkerImage +from .worker_image import SyftWorkerImageV1 -@serializable() +@serializable(canonical_name="WorkerStatus", version=1) class WorkerStatus(Enum): PENDING = "Pending" RUNNING = "Running" @@ -33,26 +35,56 @@ class WorkerStatus(Enum): RESTARTED = "Restarted" -@serializable() +@serializable(canonical_name="ConsumerState", version=1) class ConsumerState(Enum): IDLE = "Idle" CONSUMING = "Consuming" DETACHED = "Detached" -@serializable() +@serializable(canonical_name="WorkerHealth", version=1) class WorkerHealth(Enum): HEALTHY = "✅" UNHEALTHY = "❌" +@serializable() +class SyftWorkerV1(SyftObject): + __canonical_name__ = "SyftWorker" + __version__ = SYFT_OBJECT_VERSION_1 + + __attr_unique__ = ["name"] + __attr_searchable__ = ["name", "container_id", "to_be_deleted"] + __repr_attrs__ = [ + "name", + "container_id", + "image", + "status", + "healthcheck", + "worker_pool_name", + "created_at", + ] + + id: UID + name: str + container_id: str | None = None + created_at: DateTime = DateTime.now() + healthcheck: WorkerHealth | None = None + status: WorkerStatus + image: SyftWorkerImageV1 | None = None + worker_pool_name: str + consumer_state: ConsumerState = ConsumerState.DETACHED + job_id: UID | None = None + to_be_deleted: bool = False + + @serializable() class SyftWorker(SyftObject): __canonical_name__ = "SyftWorker" __version__ = SYFT_OBJECT_VERSION_2 __attr_unique__ = ["name"] - __attr_searchable__ = ["name", "container_id"] + __attr_searchable__ = ["name", "container_id", "to_be_deleted"] __repr_attrs__ = [ "name", "container_id", @@ -73,25 +105,15 @@ class SyftWorker(SyftObject): worker_pool_name: str consumer_state: ConsumerState = ConsumerState.DETACHED job_id: UID | None = None + to_be_deleted: bool = False @property - def logs(self) -> str | SyftError: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError(message=f"You must login to {self.node_uid}") - return api.services.worker.logs(uid=self.id) + def logs(self) -> str: + return self.get_api().services.worker.logs(uid=self.id) def get_job_repr(self) -> str: if self.job_id is not None: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError(message=f"You must login to {self.node_uid}") + api = self.get_api() job = api.services.job.get(self.job_id) if job.action.user_code_id is not None: func_name = api.services.code.get_by_id( @@ -103,18 +125,8 @@ def get_job_repr(self) -> str: else: return "" - def refresh_status(self) -> SyftError | None: - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - return SyftError(message=f"You must login to {self.node_uid}") - - res = api.services.worker.status(uid=self.id) - if isinstance(res, SyftError): - return res - + def refresh_status(self) -> None: + res = self.get_api().services.worker.status(uid=self.id) self.status, self.healthcheck = res return None @@ -143,7 +155,7 @@ def _coll_repr_(self) -> dict[str, Any]: @serializable() class WorkerPool(SyftObject): __canonical_name__ = "WorkerPool" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __attr_unique__ = ["name"] __attr_searchable__ = ["name", "image_id"] @@ -154,6 +166,7 @@ class 
WorkerPool(SyftObject): "workers", "created_at", ] + __table_sort_attr__ = "Created at" name: str image_id: UID | None = None @@ -162,40 +175,37 @@ class WorkerPool(SyftObject): created_at: DateTime = DateTime.now() @property - def image(self) -> SyftWorkerImage | SyftError | None: + def image(self) -> SyftWorkerImage | None: """ Get the pool's image using the worker_image service API. This way we get the latest state of the image from the SyftWorkerImageStash """ - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api is not None and api.services is not None: + api = self.get_api_wrapped() + if api.is_ok() and api.unwrap().services is not None: + api = api.unwrap() return api.services.worker_image.get_by_uid(uid=self.image_id) else: return None @property - def running_workers(self) -> list[SyftWorker] | SyftError: + def running_workers(self) -> list[SyftWorker]: """Query the running workers using an API call to the server""" - _running_workers = [] - for worker in self.workers: - if worker.status == WorkerStatus.RUNNING: - _running_workers.append(worker) + _running_workers = [ + worker for worker in self.workers if worker.status == WorkerStatus.RUNNING + ] return _running_workers @property - def healthy_workers(self) -> list[SyftWorker] | SyftError: + def healthy_workers(self) -> list[SyftWorker]: """ Query the healthy workers using an API call to the server """ - _healthy_workers = [] - - for worker in self.workers: - if worker.healthcheck == WorkerHealth.HEALTHY: - _healthy_workers.append(worker) + _healthy_workers = [ + worker + for worker in self.workers + if worker.healthcheck == WorkerHealth.HEALTHY + ] return _healthy_workers @@ -213,16 +223,8 @@ def _coll_repr_(self) -> dict[str, Any]: "Created at": str(self.created_at), } - def _repr_html_(self) -> Any: + def _repr_html_(self) -> str: return f""" -
    {self.name}
    @@ -244,22 +246,25 @@ def _repr_html_(self) -> Any: def workers(self) -> list[SyftWorker]: resolved_workers = [] for worker in self.worker_list: - resolved_worker = worker.resolve - if isinstance(resolved_worker, SyftError) or resolved_worker is None: + try: + resolved_worker = worker.resolve + except SyftException: + resolved_worker = None + if resolved_worker is None: continue resolved_worker.refresh_status() resolved_workers.append(resolved_worker) return resolved_workers -@serializable() +@serializable(canonical_name="WorkerOrchestrationType", version=1) class WorkerOrchestrationType(Enum): DOCKER = "docker" KUBERNETES = "k8s" PYTHON = "python" -@serializable() +@serializable(canonical_name="ContainerSpawnStatus", version=1) class ContainerSpawnStatus(SyftBaseModel): __repr_attrs__ = ["worker_name", "worker", "error"] @@ -268,17 +273,20 @@ class ContainerSpawnStatus(SyftBaseModel): error: str | None = None +@as_result(SyftException) def _get_worker_container( client: docker.DockerClient, worker: SyftWorker, -) -> Container | SyftError: +) -> Container: try: return cast(Container, client.containers.get(worker.container_id)) except docker.errors.NotFound as e: - return SyftError(message=f"Worker {worker.id} container not found. Error {e}") + raise SyftException( + public_message=f"Worker {worker.id} container not found. Error {e}" + ) except docker.errors.APIError as e: - return SyftError( - message=f"Unable to access worker {worker.id} container. " + raise SyftException( + public_message=f"Unable to access worker {worker.id} container. " + f"Container server error {e}" ) @@ -296,20 +304,33 @@ def _get_worker_container( ) +@as_result(SyftException) def _get_worker_container_status( client: docker.DockerClient, worker: SyftWorker, container: Container | None = None, -) -> Container | SyftError: +) -> Container: if container is None: - container = _get_worker_container(client, worker) - - if isinstance(container, SyftError): - return container - + container = _get_worker_container(client, worker).unwrap() container_status = container.status return _CONTAINER_STATUS_TO_WORKER_STATUS.get( container_status, SyftError(message=f"Unknown container status: {container_status}"), ) + + +def migrate_worker_image_v1_to_v2(context: TransformContext) -> TransformContext: + old_image = context["image"] + if isinstance(old_image, SyftWorkerImageV1): + new_image = old_image.migrate_to( + version=SYFT_OBJECT_VERSION_2, + context=context.to_server_context(), + ) + context["image"] = new_image + return context + + +@migrate(SyftWorkerV1, SyftWorker) +def migrate_worker_v1_to_v2() -> list[Callable]: + return [migrate_worker_image_v1_to_v2] diff --git a/packages/syft/src/syft/service/worker/worker_pool_service.py b/packages/syft/src/syft/service/worker/worker_pool_service.py index 9ffd6122f33..3db794a2834 100644 --- a/packages/syft/src/syft/service/worker/worker_pool_service.py +++ b/packages/syft/src/syft/service/worker/worker_pool_service.py @@ -1,21 +1,24 @@ # stdlib +import logging from typing import Any -from typing import cast # third party import pydantic -from result import OkErr # relative -from ...abstract_node import AbstractNode -from ...custom_worker.config import CustomWorkerConfig +from ...custom_worker.config import DockerWorkerConfig +from ...custom_worker.config import PrebuiltWorkerConfig from ...custom_worker.config import WorkerConfig from ...custom_worker.k8s import IN_KUBERNETES from ...custom_worker.runner_k8s import KubernetesRunner from ...serde.serializable import serializable 
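The change above, and throughout this PR, swaps SyftError return values for functions decorated with as_result whose results are later .unwrap()-ed. A minimal, self-contained sketch of that wrap-and-unwrap pattern is shown below, assuming nothing beyond the standard library; DemoResult, demo_as_result and parse_port are illustrative stand-ins, not the real syft.types.result implementation, which differs in detail.

from __future__ import annotations

# Illustrative sketch only: a tiny stand-in for the as_result / .unwrap() pattern this
# diff migrates to. The real syft.types.result implementation differs in detail.
from collections.abc import Callable
from functools import wraps
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class DemoResult(Generic[T]):
    def __init__(self, value: T | None = None, error: Exception | None = None) -> None:
        self.value, self.error = value, error

    def unwrap(self, public_message: str | None = None) -> T:
        # Re-raise the boxed error, optionally overriding its public message.
        if self.error is not None:
            raise type(self.error)(public_message or str(self.error))
        return self.value  # type: ignore[return-value]


def demo_as_result(*exc_types: type[Exception]) -> Callable:
    # Decorator: catch only the declared exception types and box them into a result,
    # so the caller decides where the error surfaces by calling .unwrap().
    def decorator(func: Callable[..., T]) -> Callable[..., DemoResult[T]]:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> DemoResult[T]:
            try:
                return DemoResult(value=func(*args, **kwargs))
            except exc_types as exc:
                return DemoResult(error=exc)
        return wrapper
    return decorator


@demo_as_result(ValueError)
def parse_port(raw: str) -> int:
    port = int(raw)  # raises ValueError on non-numeric input -> boxed by the decorator
    if not 0 < port < 65536:
        raise ValueError(f"port out of range: {port}")
    return port


assert parse_port("8080").unwrap() == 8080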
-from ...store.document_store import DocumentStore +from ...store.db.db import DBManager +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException from ...store.linked_obj import LinkedObject from ...types.dicttuple import DictTuple +from ...types.errors import SyftException +from ...types.result import as_result from ...types.uid import UID from ..context import AuthedServiceContext from ..request.request import Change @@ -23,8 +26,6 @@ from ..request.request import CreateCustomWorkerPoolChange from ..request.request import Request from ..request.request import SubmitRequest -from ..request.request_service import RequestService -from ..response import SyftError from ..response import SyftSuccess from ..service import AbstractService from ..service import SERVICE_TO_TYPES @@ -46,17 +47,33 @@ from .worker_service import WorkerService from .worker_stash import WorkerStash +logger = logging.getLogger(__name__) -@serializable() + +@serializable(canonical_name="SyftWorkerPoolService", version=1) class SyftWorkerPoolService(AbstractService): - store: DocumentStore stash: SyftWorkerPoolStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = SyftWorkerPoolStash(store=store) self.image_stash = SyftWorkerImageStash(store=store) + @as_result(StashException) + def pool_exists(self, context: AuthedServiceContext, pool_name: str) -> bool: + try: + self.stash.get_by_name(context.credentials, pool_name=pool_name).unwrap() + return True + except NotFoundException: + return False + + @as_result(StashException) + def image_exists(self, context: AuthedServiceContext, uid: UID) -> bool: + try: + self.image_stash.get_by_uid(context.credentials, uid=uid).unwrap() + return True + except NotFoundException: + return False + @service_method( path="worker_pool.launch", name="launch", @@ -65,12 +82,14 @@ def __init__(self, store: DocumentStore) -> None: def launch( self, context: AuthedServiceContext, - name: str, + pool_name: str, image_uid: UID | None, num_workers: int, - reg_username: str | None = None, - reg_password: str | None = None, - ) -> list[ContainerSpawnStatus] | SyftError: + registry_username: str | None = None, + registry_password: str | None = None, + pod_annotations: dict[str, str] | None = None, + pod_labels: dict[str, str] | None = None, + ) -> list[ContainerSpawnStatus]: """Creates a pool of workers from the given SyftWorkerImage. - Retrieves the image for the given UID @@ -85,69 +104,52 @@ def launch( num_workers (int): the number of SyftWorker that needs to be created in the pool """ - result = self.stash.get_by_name(context.credentials, pool_name=name) - - if result.is_err(): - return SyftError(message=f"{result.err()}") - - if result.ok() is not None: - return SyftError(message=f"Worker Pool with name: {name} already exists !!") + pool_exists = self.pool_exists(context, pool_name=pool_name).unwrap() + if pool_exists: + raise SyftException( + public_message=f"Worker Pool with name: {pool_name} already exists !!" 
+ ) # If image uid is not passed, then use the default worker image # to create the worker pool if image_uid is None: - result = self.stash.get_by_name( + default_worker_pool = self.stash.get_by_name( context.credentials, pool_name=DEFAULT_WORKER_POOL_NAME - ) - default_worker_pool = result.ok() + ).unwrap() image_uid = default_worker_pool.image_id # Get the image object for the given image id - result = self.image_stash.get_by_uid( + worker_image = self.image_stash.get_by_uid( credentials=context.credentials, uid=image_uid - ) - if result.is_err(): - return SyftError( - message=f"Failed to retrieve Worker Image with id: {image_uid}. Error: {result.err()}" - ) + ).unwrap() - worker_image: SyftWorkerImage = result.ok() - context.node = cast(AbstractNode, context.node) - worker_service: AbstractService = context.node.get_service("WorkerService") - worker_stash = worker_service.stash + worker_stash = context.server.services.worker.stash # Create worker pool from given image, with the given worker pool # and with the desired number of workers - result = _create_workers_in_pool( + worker_list, container_statuses = _create_workers_in_pool( context=context, - pool_name=name, + pool_name=pool_name, existing_worker_cnt=0, worker_cnt=num_workers, worker_image=worker_image, worker_stash=worker_stash, - reg_username=reg_username, - reg_password=reg_password, - ) - - if isinstance(result, SyftError): - return result - - worker_list, container_statuses = result + registry_username=registry_username, + registry_password=registry_password, + pod_annotations=pod_annotations, + pod_labels=pod_labels, + ).unwrap() # Update the Database with the pool information worker_pool = WorkerPool( - name=name, + name=pool_name, max_count=num_workers, image_id=worker_image.id, worker_list=worker_list, - syft_node_location=context.node.id, + syft_server_location=context.server.id, syft_client_verify_key=context.credentials, ) - result = self.stash.set(credentials=context.credentials, obj=worker_pool) - - if result.is_err(): - return SyftError(message=f"Failed to save Worker Pool: {result.err()}") - + self.stash.set(credentials=context.credentials, obj=worker_pool).unwrap() return container_statuses @service_method( @@ -162,7 +164,9 @@ def create_pool_request( num_workers: int, image_uid: UID, reason: str | None = "", - ) -> SyftError | SyftSuccess: + pod_annotations: dict[str, str] | None = None, + pod_labels: dict[str, str] | None = None, + ) -> Request: """ Create a request to launch the worker pool based on a built image. @@ -174,34 +178,21 @@ def create_pool_request( reason (Optional[str], optional): The reason for creating the worker pool. Defaults to "". 
""" - # Check if image exists for the given image id - search_result = self.image_stash.get_by_uid( - credentials=context.credentials, uid=image_uid - ) - - if search_result.is_err(): - return SyftError(message=str(search_result.err())) - - worker_image: SyftWorkerImage | None = search_result.ok() + worker_image_exists = self.image_exists(context, uid=image_uid).unwrap() # Raise error if worker image doesn't exists - if worker_image is None: - return SyftError( - message=f"No image exists for given image uid : {image_uid}" + if not worker_image_exists: + raise SyftException( + public_message=f"No image exists for given image uid : {image_uid}" ) # Check if pool already exists for the given pool name - result = self.stash.get_by_name(context.credentials, pool_name=pool_name) - - if result.is_err(): - return SyftError(message=f"{result.err()}") - - worker_pool = result.ok() + worker_pool_exists = self.pool_exists(context, pool_name=pool_name).unwrap() - if worker_pool is not None: - return SyftError( - message=f"Worker pool already exists for given pool name: {pool_name}" + if worker_pool_exists: + raise SyftException( + public_message=f"Worker pool already exists for given pool name: {pool_name}" ) # If no worker pool exists for given pool name @@ -211,18 +202,17 @@ def create_pool_request( pool_name=pool_name, num_workers=num_workers, image_uid=image_uid, + pod_annotations=pod_annotations, + pod_labels=pod_labels, ) - changes: list[Change] = [create_worker_pool_change] # Create a the request object with the changes and submit it # for approval. request = SubmitRequest(changes=changes) - context.node = cast(AbstractNode, context.node) - method = context.node.get_service_method(RequestService.submit) - result = method(context=context, request=request, reason=reason) - - return result + return context.server.services.request.submit( + context=context, request=request, reason=reason + ) @service_method( path="worker_pool.create_image_and_pool_request", @@ -234,12 +224,14 @@ def create_image_and_pool_request( context: AuthedServiceContext, pool_name: str, num_workers: int, - tag: str, config: WorkerConfig, + tag: str | None = None, registry_uid: UID | None = None, reason: str | None = "", pull_image: bool = True, - ) -> SyftError | SyftSuccess: + pod_annotations: dict[str, str] | None = None, + pod_labels: dict[str, str] | None = None, + ) -> Request: """ Create a request to launch the worker pool based on a built image. @@ -248,38 +240,44 @@ def create_image_and_pool_request( pool_name (str): The name of the worker pool. num_workers (int): The number of workers in the pool. config: (WorkerConfig): Config of the image to be built. - tag (str): human-readable manifest identifier that is typically a specific version or variant of an image - reason (Optional[str], optional): The reason for creating the worker image and pool. Defaults to "". + tag (str | None, optional): + a human-readable manifest identifier that is typically a specific version or variant of an image, + only needed for `DockerWorkerConfig` to tag the image after it is built. + reason (str | None, optional): The reason for creating the worker image and pool. Defaults to "". """ + if not isinstance(config, DockerWorkerConfig | PrebuiltWorkerConfig): + raise SyftException( + public_message="We only support either `DockerWorkerConfig` or `PrebuiltWorkerConfig`." 
+ ) - if isinstance(config, CustomWorkerConfig): - return SyftError(message="We only support DockerWorkerConfig.") + if isinstance(config, DockerWorkerConfig): + if tag is None: + raise SyftException( + public_message="`tag` is required for `DockerWorkerConfig`." + ) + + # Validate image tag + try: + SyftWorkerImageIdentifier.from_str(tag=tag) + except pydantic.ValidationError as e: + raise SyftException(public_message=f"Invalid `tag`: {e}.") - if IN_KUBERNETES and registry_uid is None: - return SyftError(message="Registry UID is required in Kubernetes mode.") + if IN_KUBERNETES and registry_uid is None: + raise SyftException( + public_message="`registry_uid` is required in Kubernetes mode for `DockerWorkerConfig`." + ) # Check if an image already exists for given docker config - search_result = self.image_stash.get_by_docker_config( + worker_image_exists = self.image_stash.worker_config_exists( credentials=context.credentials, config=config - ) + ).unwrap() - if search_result.is_err(): - return SyftError(message=str(search_result.err())) - - worker_image: SyftWorkerImage | None = search_result.ok() - - if worker_image is not None: - return SyftError( - message="Image already exists for given config. \ + if worker_image_exists: + raise SyftException( + public_message="Image already exists for given config. \ Please use `worker_pool.create_pool_request` to request pool creation." ) - # Validate Image Tag - try: - SyftWorkerImageIdentifier.from_str(tag=tag) - except pydantic.ValidationError as e: - return SyftError(message=f"Failed to create tag: {e}") - # create a list of Change objects and submit a # request for these changes for approval changes: list[Change] = [] @@ -294,16 +292,13 @@ def create_image_and_pool_request( ) # Check if a pool already exists for given pool name - result = self.stash.get_by_name(context.credentials, pool_name=pool_name) - - if result.is_err(): - return SyftError(message=f"{result.err()}") + worker_pool_exists = self.pool_exists(context, pool_name=pool_name).unwrap() # Raise an error if worker pool already exists for the given worker pool name - if result.ok() is not None: - return SyftError( - message=f"Worker Pool with name: {pool_name} already " - f"exists. Please choose another name!" + if worker_pool_exists: + raise SyftException( + public_message=f"Worker Pool with name: {pool_name} already" + f" exists. Please choose another name!" ) # Add create worker pool change @@ -313,35 +308,28 @@ def create_image_and_pool_request( pool_name=pool_name, num_workers=num_workers, config=config, + pod_annotations=pod_annotations, + pod_labels=pod_labels, ) changes += [create_custom_image_change, create_worker_pool_change] # Create a request object and submit a request for approval request = SubmitRequest(changes=changes) - context.node = cast(AbstractNode, context.node) - method = context.node.get_service_method(RequestService.submit) - result = method(context=context, request=request, reason=reason) - - return result + return context.server.services.request.submit( + context=context, request=request, reason=reason + ) @service_method( path="worker_pool.get_all", name="get_all", roles=DATA_SCIENTIST_ROLE_LEVEL, ) - def get_all( - self, context: AuthedServiceContext - ) -> DictTuple[str, WorkerPool] | SyftError: + def get_all(self, context: AuthedServiceContext) -> DictTuple[str, WorkerPool]: # TODO: During get_all, we should dynamically make a call to docker to get the status of the containers # and update the status of the workers in the pool. 
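The TODO above notes that get_all could refresh worker status live from Docker instead of returning the stored snapshot. A hedged sketch of such a refresh using the docker SDK follows; refresh_worker_statuses is a hypothetical helper that is not part of this diff, and the plain-string statuses merely stand in for the WorkerStatus enum mapped by _CONTAINER_STATUS_TO_WORKER_STATUS in worker_pool.py.

# Hypothetical helper, not part of this change: read container state back from the
# Docker daemon and map it onto worker records (plain strings stand in for WorkerStatus).
import contextlib

import docker


def refresh_worker_statuses(workers) -> None:
    status_map = {
        "created": "Pending",
        "running": "Running",
        "exited": "Stopped",
        "restarting": "Restarted",
    }
    with contextlib.closing(docker.from_env()) as client:
        for worker in workers:
            if worker.container_id is None:
                continue  # in-memory workers have no container to inspect
            try:
                container = client.containers.get(worker.container_id)
            except docker.errors.NotFound:
                worker.status = "Stopped"
                continue
            # container.status is the daemon-reported state, e.g. "running" or "exited"
            worker.status = status_map.get(container.status, worker.status)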
- result = self.stash.get_all(credentials=context.credentials) - if result.is_err(): - return SyftError(message=f"{result.err()}") - worker_pools: list[WorkerPool] = result.ok() - - res: list[tuple] = [] - for pool in worker_pools: - res.append((pool.name, pool)) + worker_pools = self.stash.get_all(credentials=context.credentials).unwrap() + + res = ((pool.name, pool) for pool in worker_pools) return DictTuple(res) @service_method( @@ -355,9 +343,9 @@ def add_workers( number: int, pool_id: UID | None = None, pool_name: str | None = None, - reg_username: str | None = None, - reg_password: str | None = None, - ) -> list[ContainerSpawnStatus] | SyftError: + registry_username: str | None = None, + registry_password: str | None = None, + ) -> list[ContainerSpawnStatus]: """Add workers to existing worker pool. Worker pool is fetched either using the unique pool id or pool name. @@ -369,78 +357,55 @@ def add_workers( pool_name (Optional[str], optional): Unique name of the pool. Defaults to None. Returns: - Union[List[ContainerSpawnStatus], SyftError]: List of spawned workers with their status and error if any. + List[ContainerSpawnStatus]: List of spawned workers with their status and error if any. """ if number <= 0: - return SyftError(message=f"Invalid number of workers: {number}") + raise SyftException(public_message=f"Invalid number of workers: {number}") # Extract pool using either using pool id or pool name if pool_id: - result = self.stash.get_by_uid(credentials=context.credentials, uid=pool_id) + worker_pool = self.stash.get_by_uid( + credentials=context.credentials, uid=pool_id + ).unwrap() elif pool_name: - result = self.stash.get_by_name( + worker_pool = self.stash.get_by_name( credentials=context.credentials, pool_name=pool_name, - ) - - if result.is_err(): - return SyftError(message=f"{result.err()}") - - worker_pool = result.ok() + ).unwrap() existing_worker_cnt = len(worker_pool.worker_list) - result = self.image_stash.get_by_uid( + worker_image = self.image_stash.get_by_uid( credentials=context.credentials, uid=worker_pool.image_id, - ) + ).unwrap() - if result.is_err(): - return SyftError( - message=f"Failed to retrieve image for worker pool: {worker_pool.name}" - ) - - worker_image: SyftWorkerImage = result.ok() - - context.node = cast(AbstractNode, context.node) - worker_service: AbstractService = context.node.get_service("WorkerService") - worker_stash = worker_service.stash + worker_stash = context.server.services.worker.stash # Add workers to given pool from the given image - result = _create_workers_in_pool( + worker_list, container_statuses = _create_workers_in_pool( context=context, pool_name=worker_pool.name, existing_worker_cnt=existing_worker_cnt, worker_cnt=number, worker_image=worker_image, worker_stash=worker_stash, - reg_username=reg_username, - reg_password=reg_password, - ) - - if isinstance(result, SyftError): - return result - - worker_list, container_statuses = result + registry_username=registry_username, + registry_password=registry_password, + ).unwrap() worker_pool.worker_list += worker_list worker_pool.max_count = existing_worker_cnt + number - update_result = self.stash.update( - credentials=context.credentials, obj=worker_pool - ) - if update_result.is_err(): - return SyftError( - message=f"Failed update worker pool: {worker_pool.name} with err: {result.err()}" - ) - + self.stash.update(credentials=context.credentials, obj=worker_pool).unwrap() return container_statuses @service_method( path="worker_pool.scale", name="scale", 
roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, ) def scale( self, @@ -448,50 +413,46 @@ def scale( number: int, pool_id: UID | None = None, pool_name: str | None = None, - ) -> SyftError | SyftSuccess: + ) -> SyftSuccess: """ Scale the worker pool to the given number of workers in Kubernetes. Allows both scaling up and down the worker pool. """ - context.node = cast(AbstractNode, context.node) + + client_warning = "" + if not IN_KUBERNETES: - return SyftError(message="Scaling is only supported in Kubernetes mode") + raise SyftException( + public_message="Scaling is only supported in Kubernetes mode" + ) elif number < 0: # zero is a valid scale down - return SyftError(message=f"Invalid number of workers: {number}") + raise SyftException(public_message=f"Invalid number of workers: {number}") - result: Any = self._get_worker_pool(context, pool_id, pool_name) - if isinstance(result, SyftError): - return result - - worker_pool = result + worker_pool: Any = self._get_worker_pool(context, pool_id, pool_name).unwrap() current_worker_count = len(worker_pool.worker_list) if current_worker_count == number: return SyftSuccess(message=f"Worker pool already has {number} workers") elif number > current_worker_count: workers_to_add = number - current_worker_count - result = self.add_workers( + self.add_workers( context=context, number=workers_to_add, pool_id=pool_id, pool_name=pool_name, # kube scaling doesn't require password as it replicates an existing deployment - reg_username=None, - reg_password=None, + registry_username=None, + registry_password=None, ) - if isinstance(result, SyftError): - return result else: # scale down at kubernetes control plane runner = KubernetesRunner() - result = scale_kubernetes_pool( + scale_kubernetes_pool( runner, pool_name=worker_pool.name, replicas=number, - ) - if isinstance(result, SyftError): - return result + ).unwrap() # scale down removes the last "n" workers # workers to delete = len(workers) - number @@ -499,33 +460,33 @@ def scale( -(current_worker_count - number) : ] - worker_stash = context.node.get_service("WorkerService").stash + worker_stash = context.server.services.worker.stash # delete linkedobj workers for worker in workers_to_delete: - delete_result = worker_stash.delete_by_uid( + worker_stash.delete_by_uid( credentials=context.credentials, uid=worker.object_uid, - ) - if delete_result.is_err(): - print(f"Failed to delete worker: {worker.object_uid}") + ).unwrap() + + client_warning += "Scaling down workers doesn't kill the associated jobs. Please delete them manually." 
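The scale-down branch above reduces to list arithmetic: remove the last (current - target) workers and truncate the pool metadata to match. A standalone sketch of that arithmetic, with plain strings standing in for the worker LinkedObjects kept in worker_pool.worker_list, is below.

# Standalone sketch of the scale-down bookkeeping used above; plain strings stand in
# for the worker LinkedObjects held in worker_pool.worker_list.
def scale_down(worker_list: list[str], target: int) -> tuple[list[str], list[str]]:
    current = len(worker_list)
    if target >= current:
        return worker_list, []  # nothing to remove
    workers_to_delete = worker_list[-(current - target) :]  # the last n workers
    remaining = worker_list[:target]
    return remaining, workers_to_delete


remaining, deleted = scale_down(["w1", "w2", "w3", "w4"], target=2)
assert remaining == ["w1", "w2"] and deleted == ["w3", "w4"]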
# update worker_pool worker_pool.max_count = number worker_pool.worker_list = worker_pool.worker_list[:number] - update_result = self.stash.update( + self.stash.update( credentials=context.credentials, obj=worker_pool, - ) - - if update_result.is_err(): - return SyftError( - message=( - f"Pool {worker_pool.name} was scaled down, " - f"but failed update the stash with err: {update_result.err()}" - ) + ).unwrap( + public_message=( + f"Pool {worker_pool.name} was scaled down, " + f"but failed to update the stash" ) + ) - return SyftSuccess(message=f"Worker pool scaled to {number} workers") + return SyftSuccess( + message=f"Worker pool scaled to {number} workers", + client_warnings=[client_warning] if client_warning else [], + ) @service_method( path="worker_pool.filter_by_image_id", @@ -534,13 +495,8 @@ def scale( ) def filter_by_image_id( self, context: AuthedServiceContext, image_uid: UID - ) -> list[WorkerPool] | SyftError: - result = self.stash.get_by_image_uid(context.credentials, image_uid) - - if result.is_err(): - return SyftError(message=f"Failed to get worker pool for uid: {image_uid}") - - return result.ok() + ) -> list[WorkerPool]: + return self.stash.get_by_image_uid(context.credentials, image_uid).unwrap() @service_method( path="worker_pool.get_by_name", @@ -549,34 +505,28 @@ def filter_by_image_id( ) def get_by_name( self, context: AuthedServiceContext, pool_name: str - ) -> list[WorkerPool] | SyftError: - result = self.stash.get_by_name(context.credentials, pool_name) - - if result.is_err(): - return SyftError( - message=f"Failed to get worker pool with name: {pool_name}" - ) - - return result.ok() + ) -> list[WorkerPool]: + return self.stash.get_by_name(context.credentials, pool_name).unwrap() @service_method( path="worker_pool.sync_pool_from_request", name="sync_pool_from_request", roles=DATA_SCIENTIST_ROLE_LEVEL, + unwrap_on_success=False, ) def sync_pool_from_request( self, context: AuthedServiceContext, request: Request, - ) -> SyftSuccess | SyftError: - """Re-submit request from a different node""" + ) -> Request: + """Re-submit request from a different server""" num_of_changes = len(request.changes) pool_name, num_workers, config, image_uid, tag = None, None, None, None, None if num_of_changes > 2: - return SyftError( - message=f"Invalid pool request object. Only pool request changes allowed. {request.changes}" + raise SyftException( + public_message=f"Invalid pool request object. Only pool request changes allowed. {request.changes}" ) for change in request.changes: @@ -584,6 +534,8 @@ def sync_pool_from_request( pool_name = change.pool_name num_workers = change.num_workers image_uid = change.image_uid + pod_annotations = change.pod_annotations + pod_labels = change.pod_labels elif isinstance(change, CreateCustomImageChange): # type: ignore[unreachable] config = change.config tag = change.tag @@ -594,6 +546,8 @@ def sync_pool_from_request( pool_name=pool_name, num_workers=num_workers, image_uid=image_uid, + pod_annotations=pod_annotations, + pod_labels=pod_labels, ) elif config is not None: return self.create_image_and_pool_request( # type: ignore[unreachable] @@ -602,43 +556,145 @@ def sync_pool_from_request( num_workers=num_workers, config=config, tag=tag, + pod_annotations=pod_annotations, + pod_labels=pod_labels, ) else: - return SyftError( - message=f"Invalid request object. Invalid image uid or config in the request changes. 
{request.changes}" + raise SyftException( + public_message=( + f"Invalid request object: invalid image uid or config in the request changes: " + f"{request.changes}" + ) ) - def _get_worker_pool( + @service_method( + path="worker_pool.delete", + name="delete", + roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, + ) + def delete( self, context: AuthedServiceContext, pool_id: UID | None = None, pool_name: str | None = None, - ) -> WorkerPool | SyftError: - if pool_id: - result = self.stash.get_by_uid( + ) -> SyftSuccess: + worker_pool = self._get_worker_pool( + context, pool_id=pool_id, pool_name=pool_name + ).unwrap(public_message=f"Failed to get WorkerPool: {pool_id or pool_name}") + + uid = worker_pool.id + + self.purge_workers(context=context, pool_id=pool_id, pool_name=pool_name) + + self.stash.delete_by_uid(credentials=context.credentials, uid=uid).unwrap( + public_message=f"Failed to delete WorkerPool: {worker_pool.name} from stash" + ) + + return SyftSuccess(message=f"Successfully deleted worker pool with id {uid}") + + @service_method( + path="worker_pool.purge_workers", + name="purge_workers", + roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, + ) + def purge_workers( + self, + context: AuthedServiceContext, + pool_id: UID | None = None, + pool_name: str | None = None, + ) -> SyftSuccess: + worker_pool = self._get_worker_pool( + context, pool_id=pool_id, pool_name=pool_name + ).unwrap(public_message=f"Failed to get WorkerPool: {pool_id or pool_name}") + + uid = worker_pool.id + + # relative + from ..queue.queue_stash import Status + + queue_items = context.server.services.queue.stash._get_by_worker_pool( + credentials=context.credentials, + worker_pool=LinkedObject.from_obj( + obj=worker_pool, + service_type=self.__class__, + server_uid=context.server.id, + ), + ).unwrap( + public_message=f"Failed to get queue items mapped to WorkerPool: {worker_pool.name}" + ) + + items_to_interrupt = ( + item + for item in queue_items + if item.status in (Status.CREATED, Status.PROCESSING) + ) + + for item in items_to_interrupt: + item.status = Status.INTERRUPTED + context.server.services.queue.stash.update( credentials=context.credentials, - uid=pool_id, - ) + obj=item, + ).unwrap() + + if IN_KUBERNETES: + # Scale the workers to zero + runner = KubernetesRunner() + if runner.exists(worker_pool.name): + self.scale(context=context, number=0, pool_id=uid) + runner.delete_pool(pool_name=worker_pool.name) else: - result = self.stash.get_by_name( - credentials=context.credentials, - pool_name=pool_name, + workers = ( + worker.resolve_with_context(context=context).unwrap() + for worker in worker_pool.worker_list ) - if result.is_err(): - return SyftError(message=f"{result.err()}") + worker_ids = [] + for worker in workers: + worker_ids.append(worker.id) - worker_pool = result.ok() + for id_ in worker_ids: + context.server.services.worker.delete( + context=context, uid=id_, force=True + ) - return ( - SyftError( - message=f"worker pool : {pool_id if pool_id else pool_name} does not exist" + worker_pool.max_count = 0 + worker_pool.worker_list = [] + self.stash.update( + credentials=context.credentials, + obj=worker_pool, + ).unwrap( + public_message=( + f"Pool {worker_pool.name} was purged, " + f"but failed to update the stash" ) - if worker_pool is None - else worker_pool ) + return SyftSuccess(message=f"Successfully Purged worker pool with id {uid}") + + @as_result(StashException, SyftException) + def _get_worker_pool( + self, + context: AuthedServiceContext, + pool_id: UID | None = None, 
+ pool_name: str | None = None, + ) -> WorkerPool: + if pool_id: + worker_pool = self.stash.get_by_uid( + credentials=context.credentials, + uid=pool_id, + ).unwrap() + else: + worker_pool = self.stash.get_by_name( + credentials=context.credentials, + pool_name=pool_name, + ).unwrap() + + return worker_pool + +@as_result(SyftException) def _create_workers_in_pool( context: AuthedServiceContext, pool_name: str, @@ -646,19 +702,20 @@ def _create_workers_in_pool( worker_cnt: int, worker_image: SyftWorkerImage, worker_stash: WorkerStash, - reg_username: str | None = None, - reg_password: str | None = None, -) -> tuple[list[LinkedObject], list[ContainerSpawnStatus]] | SyftError: - context.node = cast(AbstractNode, context.node) - queue_port = context.node.queue_config.client_config.queue_port + registry_username: str | None = None, + registry_password: str | None = None, + pod_annotations: dict[str, str] | None = None, + pod_labels: dict[str, str] | None = None, +) -> tuple[list[LinkedObject], list[ContainerSpawnStatus]]: + queue_port = context.server.queue_config.client_config.queue_port # Check if workers needs to be run in memory or as containers - start_workers_in_memory = context.node.in_memory_workers + start_workers_in_memory = context.server.in_memory_workers if start_workers_in_memory: # Run in-memory workers in threads container_statuses: list[ContainerSpawnStatus] = run_workers_in_threads( - node=context.node, + server=context.server, pool_name=pool_name, start_idx=existing_worker_cnt, number=worker_cnt + existing_worker_cnt, @@ -669,21 +726,20 @@ def _create_workers_in_pool( if worker_image.image_identifier is not None else None ) - result = run_containers( + container_statuses = run_containers( pool_name=pool_name, worker_image=worker_image, start_idx=existing_worker_cnt, number=worker_cnt + existing_worker_cnt, orchestration=get_orchestration_type(), queue_port=queue_port, - dev_mode=context.node.dev_mode, - reg_username=reg_username, - reg_password=reg_password, + dev_mode=context.server.dev_mode, + registry_username=registry_username, + registry_password=registry_password, reg_url=registry_host, - ) - if isinstance(result, SyftError): - return result - container_statuses = result + pod_annotations=pod_annotations, + pod_labels=pod_labels, + ).unwrap() linked_worker_list = [] @@ -691,23 +747,24 @@ def _create_workers_in_pool( worker = container_status.worker if worker is None: continue - result = worker_stash.set( - credentials=context.credentials, - obj=worker, - ) - if isinstance(result, OkErr): - node = context.node - if result.is_ok(): - worker_obj = LinkedObject.from_obj( - obj=result.ok(), - service_type=WorkerService, - node_uid=node.id, - ) - linked_worker_list.append(worker_obj) - elif isinstance(result, SyftError): - container_status.error = result.err() + server = context.server + + try: + obj = worker_stash.set( + credentials=context.credentials, + obj=worker, + ).unwrap() + + worker_obj = LinkedObject.from_obj( + obj=obj, + service_type=WorkerService, + server_uid=server.id, + ) + linked_worker_list.append(worker_obj) + except SyftException as exc: + container_status.error = exc.public_message return linked_worker_list, container_statuses diff --git a/packages/syft/src/syft/service/worker/worker_pool_stash.py b/packages/syft/src/syft/service/worker/worker_pool_stash.py index 4901f4f4d86..3ae0a2d9ec2 100644 --- a/packages/syft/src/syft/service/worker/worker_pool_stash.py +++ b/packages/syft/src/syft/service/worker/worker_pool_stash.py @@ -1,42 +1,39 @@ # stdlib # 
third party -from result import Result + +# third party +from sqlalchemy.orm import Session # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.db.stash import with_session +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.result import as_result from ...types.uid import UID from ..action.action_permissions import ActionObjectPermission from ..action.action_permissions import ActionPermission from .worker_pool import WorkerPool -PoolNamePartitionKey = PartitionKey(key="name", type_=str) -PoolImageIDPartitionKey = PartitionKey(key="image_id", type_=UID) - -@serializable() -class SyftWorkerPoolStash(BaseUIDStoreStash): - object_type = WorkerPool - settings: PartitionSettings = PartitionSettings( - name=WorkerPool.__canonical_name__, - object_type=WorkerPool, - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +@serializable(canonical_name="SyftWorkerPoolSQLStash", version=1) +class SyftWorkerPoolStash(ObjectStash[WorkerPool]): + @as_result(StashException, NotFoundException) + def get_by_name(self, credentials: SyftVerifyKey, pool_name: str) -> WorkerPool: + result = self.get_one( + credentials=credentials, + filters={"name": pool_name}, + ) - def get_by_name( - self, credentials: SyftVerifyKey, pool_name: str - ) -> Result[WorkerPool | None, str]: - qks = QueryKeys(qks=[PoolNamePartitionKey.with_obj(pool_name)]) - return self.query_one(credentials=credentials, qks=qks) + return result.unwrap( + public_message=f"WorkerPool with name {pool_name} not found" + ) + @as_result(StashException) + @with_session def set( self, credentials: SyftVerifyKey, @@ -44,22 +41,32 @@ def set( add_permissions: list[ActionObjectPermission] | None = None, add_storage_permission: bool = True, ignore_duplicates: bool = False, - ) -> Result[WorkerPool, str]: + session: Session = None, + skip_check_type: bool = False, + ) -> WorkerPool: # By default all worker pools have all read permission add_permissions = [] if add_permissions is None else add_permissions add_permissions.append( ActionObjectPermission(uid=obj.id, permission=ActionPermission.ALL_READ) ) - return super().set( - credentials, - obj, - add_permissions=add_permissions, - add_storage_permission=add_storage_permission, - ignore_duplicates=ignore_duplicates, + return ( + super() + .set( + credentials, + obj, + add_permissions=add_permissions, + add_storage_permission=add_storage_permission, + ignore_duplicates=ignore_duplicates, + session=session, + ) + .unwrap() ) + @as_result(StashException) def get_by_image_uid( self, credentials: SyftVerifyKey, image_uid: UID ) -> list[WorkerPool]: - qks = QueryKeys(qks=[PoolImageIDPartitionKey.with_obj(image_uid)]) - return self.query_all(credentials=credentials, qks=qks) + return self.get_all( + credentials=credentials, + filters={"image_id": image_uid}, + ).unwrap() diff --git a/packages/syft/src/syft/service/worker/worker_service.py b/packages/syft/src/syft/service/worker/worker_service.py index 94a5e1d72db..5f806108b26 100644 --- 
a/packages/syft/src/syft/service/worker/worker_service.py +++ b/packages/syft/src/syft/service/worker/worker_service.py @@ -8,19 +8,19 @@ from docker.models.containers import Container # relative -from ...abstract_node import AbstractNode from ...custom_worker.k8s import IN_KUBERNETES from ...custom_worker.k8s import PodStatus from ...custom_worker.runner_k8s import KubernetesRunner -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import DocumentStore -from ...store.document_store import SyftSuccess +from ...server.credentials import SyftVerifyKey +from ...store.db.db import DBManager +from ...store.document_store_errors import StashException +from ...types.errors import SyftException +from ...types.result import as_result from ...types.uid import UID -from ...util.telemetry import instrument +from ..response import SyftSuccess from ..service import AbstractService from ..service import AuthedServiceContext -from ..service import SyftError from ..service import service_method from ..user.user_roles import ADMIN_ROLE_LEVEL from ..user.user_roles import DATA_OWNER_ROLE_LEVEL @@ -37,14 +37,11 @@ from .worker_stash import WorkerStash -@instrument -@serializable() +@serializable(canonical_name="WorkerService", version=1) class WorkerService(AbstractService): - store: DocumentStore stash: WorkerStash - def __init__(self, store: DocumentStore) -> None: - self.store = store + def __init__(self, store: DBManager) -> None: self.stash = WorkerStash(store=store) @service_method( @@ -54,32 +51,27 @@ def __init__(self, store: DocumentStore) -> None: ) def start_workers( self, context: AuthedServiceContext, n: int = 1 - ) -> list[ContainerSpawnStatus] | SyftError: + ) -> list[ContainerSpawnStatus]: """Add a Container Image.""" - context.node = cast(AbstractNode, context.node) - worker_pool_service = context.node.get_service("SyftWorkerPoolService") - return worker_pool_service.add_workers( + + return context.server.services.syft_worker_pool.add_workers( context, number=n, pool_name=DEFAULT_WORKER_POOL_NAME ) @service_method( path="worker.get_all", name="get_all", roles=DATA_SCIENTIST_ROLE_LEVEL ) - def list(self, context: AuthedServiceContext) -> list[SyftWorker] | SyftError: + def list(self, context: AuthedServiceContext) -> list[SyftWorker]: """List all the workers.""" - result = self.stash.get_all(context.credentials) - - if result.is_err(): - return SyftError(message=f"Failed to fetch workers. 
{result.err()}") + workers = self.stash.get_all(context.credentials).unwrap() - workers: list[SyftWorker] = result.ok() - - if context.node is not None and context.node.in_memory_workers: + if context.server is not None and context.server.in_memory_workers: return workers else: # If container workers, check their statuses - workers = refresh_worker_status(workers, self.stash, context.credentials) - + workers = refresh_worker_status( + workers, self.stash, context.as_root_context().credentials + ).unwrap() return workers @service_method( @@ -89,12 +81,8 @@ def status( self, context: AuthedServiceContext, uid: UID, - ) -> tuple[WorkerStatus, WorkerHealth] | SyftError: + ) -> tuple[WorkerStatus, WorkerHealth | None]: result = self.get(context=context, uid=uid) - - if isinstance(result, SyftError): - return result - return result.status, result.healthcheck @service_method( @@ -102,15 +90,16 @@ def status( name="get", roles=DATA_SCIENTIST_ROLE_LEVEL, ) - def get(self, context: AuthedServiceContext, uid: UID) -> SyftWorker | SyftError: - worker = self._get_worker(context=context, uid=uid) - if isinstance(worker, SyftError): - return worker + def get(self, context: AuthedServiceContext, uid: UID) -> SyftWorker: + worker = self._get_worker(context=context, uid=uid).unwrap() - if context.node is not None and context.node.in_memory_workers: + if context.server is not None and context.server.in_memory_workers: return worker else: - return refresh_worker_status([worker], self.stash, context.credentials)[0] + workers = refresh_worker_status( + [worker], self.stash, context.as_root_context().credentials + ).unwrap() + return workers[0] @service_method( path="worker.logs", @@ -122,71 +111,37 @@ def logs( context: AuthedServiceContext, uid: UID, raw: bool = False, - ) -> bytes | str | SyftError: - worker = self._get_worker(context=context, uid=uid) - if isinstance(worker, SyftError): - return worker + ) -> bytes | str: + worker = self._get_worker(context=context, uid=uid).unwrap() - if context.node is not None and context.node.in_memory_workers: + if context.server is not None and context.server.in_memory_workers: logs = b"Logs not implemented for In Memory Workers" elif IN_KUBERNETES: runner = KubernetesRunner() return runner.get_pod_logs(pod_name=worker.name) else: with contextlib.closing(docker.from_env()) as client: - docker_container = _get_worker_container(client, worker) - if isinstance(docker_container, SyftError): - return docker_container - + docker_container = _get_worker_container(client, worker).unwrap() try: logs = cast(bytes, docker_container.logs()) except docker.errors.APIError as e: - return SyftError( - f"Failed to get worker {worker.id} container logs. Error {e}" + raise SyftException( + public_message=f"Failed to get worker {worker.id} container logs. 
Error {e}" ) return logs if raw else logs.decode(errors="ignore") - @service_method( - path="worker.delete", - name="delete", - roles=DATA_OWNER_ROLE_LEVEL, - ) - def delete( - self, - context: AuthedServiceContext, - uid: UID, - force: bool = False, - ) -> SyftSuccess | SyftError: - worker = self._get_worker(context=context, uid=uid) - if isinstance(worker, SyftError): - return worker - context.node = cast(AbstractNode, context.node) - worker_pool_name = worker.worker_pool_name - - # relative - from .worker_pool_service import SyftWorkerPoolService + def _delete( + self, context: AuthedServiceContext, worker: SyftWorker, force: bool = False + ) -> SyftSuccess: + uid = worker.id + if force and worker.job_id is not None: + context.server.services.job.kill(context=context, id=worker.job_id) - worker_pool_service: AbstractService = context.node.get_service( - SyftWorkerPoolService - ) - worker_pool_stash = worker_pool_service.stash - result = worker_pool_stash.get_by_name( + worker_pool_stash = context.server.services.syft_worker_pool.stash + worker_pool = worker_pool_stash.get_by_name( credentials=context.credentials, pool_name=worker.worker_pool_name - ) - - if result.is_err(): - return SyftError( - f"Failed to retrieved WorkerPool {worker_pool_name} " - f"associated with SyftWorker {uid}" - ) - - worker_pool = result.ok() - if worker_pool is None: - return SyftError( - f"Failed to retrieved WorkerPool {worker_pool_name} " - f"associated with SyftWorker {uid}" - ) + ).unwrap() if IN_KUBERNETES: # Kubernetes will only restart the worker NOT REMOVE IT @@ -199,16 +154,14 @@ def delete( f"Removing and re-creating worker id={worker.id}" ) ) - elif not context.node.in_memory_workers: + elif not context.server.in_memory_workers: # delete the worker using docker client sdk with contextlib.closing(docker.from_env()) as client: - docker_container = _get_worker_container(client, worker) - if isinstance(docker_container, SyftError): - return docker_container - - stopped = _stop_worker_container(worker, docker_container, force) - if stopped is not None: - return stopped + docker_container = _get_worker_container(client, worker).unwrap() + _stop_worker_container(worker, docker_container, force=force).unwrap() + else: + # kill the in memory worker thread + context.server.remove_consumer_with_id(syft_worker_id=worker.id) # remove the worker from the pool try: @@ -220,94 +173,98 @@ def delete( pass # Delete worker from worker stash - result = self.stash.delete_by_uid(credentials=context.credentials, uid=uid) - if result.is_err(): - return SyftError(message=f"Failed to delete worker with uid: {uid}") + self.stash.delete_by_uid(credentials=context.credentials, uid=uid).unwrap() # Update worker pool - result = worker_pool_stash.update(context.credentials, obj=worker_pool) - if result.is_err(): - return SyftError(message=f"Failed to update worker pool: {result.err()}") + worker_pool_stash.update(context.credentials, obj=worker_pool).unwrap() return SyftSuccess( message=f"Worker with id: {uid} deleted successfully from pool: {worker_pool.name}" ) - def _get_worker( - self, context: AuthedServiceContext, uid: UID - ) -> SyftWorker | SyftError: - result = self.stash.get_by_uid(credentials=context.credentials, uid=uid) - if result.is_err(): - return SyftError(message=f"Failed to retrieve worker with UID {uid}") + @service_method( + path="worker.delete", + name="delete", + roles=DATA_OWNER_ROLE_LEVEL, + unwrap_on_success=False, + ) + def delete( + self, + context: AuthedServiceContext, + uid: UID, + force: bool = 
False, + ) -> SyftSuccess: + worker = self._get_worker(context=context, uid=uid).unwrap() + worker.to_be_deleted = True + + self.stash.update(context.credentials, worker).unwrap() + if not force: + # relative + return SyftSuccess(message=f"Worker {uid} has been marked for deletion.") - worker = result.ok() - if worker is None: - return SyftError(message=f"Worker does not exist for UID {uid}") + return self._delete(context, worker, force=True) - return worker + @as_result(SyftException, StashException) + def _get_worker(self, context: AuthedServiceContext, uid: UID) -> SyftWorker: + return self.stash.get_by_uid(credentials=context.credentials, uid=uid).unwrap() +@as_result(SyftException) def refresh_worker_status( workers: list[SyftWorker], worker_stash: WorkerStash, credentials: SyftVerifyKey, ) -> list[SyftWorker]: if IN_KUBERNETES: - result = refresh_status_kubernetes(workers) + workers = refresh_status_kubernetes(workers).unwrap() else: - result = refresh_status_docker(workers) + workers = refresh_status_docker(workers).unwrap() - if isinstance(result, SyftError): - return result - - for worker in result: - stash_result = worker_stash.update( + for worker in workers: + worker_stash.update( credentials=credentials, obj=worker, - ) - if stash_result.is_err(): - return SyftError( - message=f"Failed to update status for worker: {worker.id}. Error: {stash_result.err()}" - ) + ).unwrap() - return result + return workers +@as_result(SyftException) def refresh_status_kubernetes(workers: list[SyftWorker]) -> list[SyftWorker]: updated_workers = [] runner = KubernetesRunner() for worker in workers: status: PodStatus | WorkerStatus | None = runner.get_pod_status(pod=worker.name) if not status: - return SyftError(message=f"Pod does not exist. name={worker.name}") - status, health, _ = map_pod_to_worker_status(status) - worker.status = status - worker.healthcheck = health - updated_workers.append(worker) + worker.status = WorkerStatus.STOPPED + worker.healthcheck = WorkerHealth.UNHEALTHY + else: + status, health, _ = map_pod_to_worker_status(status) + worker.status = status + worker.healthcheck = health + updated_workers.append(worker) return updated_workers +@as_result(SyftException) def refresh_status_docker(workers: list[SyftWorker]) -> list[SyftWorker]: updated_workers = [] - with contextlib.closing(docker.from_env()) as client: for worker in workers: - status = _get_worker_container_status(client, worker) - if isinstance(status, SyftError): - return status + status = _get_worker_container_status(client, worker).unwrap() worker.status = status worker.healthcheck = _get_healthcheck_based_on_status(status=status) updated_workers.append(worker) - return updated_workers +@as_result(SyftException) def _stop_worker_container( worker: SyftWorker, container: Container, force: bool, -) -> SyftError | None: +) -> None: try: # stop the container container.stop() @@ -315,8 +272,8 @@ def _stop_worker_container( _remove_worker_container(container, force=force, v=True) return None except Exception as e: - return SyftError( - message=f"Failed to delete worker with id: {worker.id}. Error: {e}" + raise SyftException( + public_message=f"Failed to delete worker with id: {worker.id}. 
Error: {e}" ) diff --git a/packages/syft/src/syft/service/worker/worker_stash.py b/packages/syft/src/syft/service/worker/worker_stash.py index 77e7dfd281a..11d2d66bacc 100644 --- a/packages/syft/src/syft/service/worker/worker_stash.py +++ b/packages/syft/src/syft/service/worker/worker_stash.py @@ -1,39 +1,29 @@ # stdlib # third party -from result import Err -from result import Ok -from result import Result + +# third party +from sqlalchemy.orm import Session # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable -from ...store.document_store import BaseUIDStoreStash -from ...store.document_store import DocumentStore -from ...store.document_store import PartitionKey -from ...store.document_store import PartitionSettings -from ...store.document_store import QueryKeys +from ...server.credentials import SyftVerifyKey +from ...store.db.stash import ObjectStash +from ...store.db.stash import with_session +from ...store.document_store_errors import NotFoundException +from ...store.document_store_errors import StashException +from ...types.result import as_result from ...types.uid import UID -from ...util.telemetry import instrument from ..action.action_permissions import ActionObjectPermission from ..action.action_permissions import ActionPermission from .worker_pool import ConsumerState from .worker_pool import SyftWorker -WorkerContainerNamePartitionKey = PartitionKey(key="container_name", type_=str) - - -@instrument -@serializable() -class WorkerStash(BaseUIDStoreStash): - object_type = SyftWorker - settings: PartitionSettings = PartitionSettings( - name=SyftWorker.__canonical_name__, object_type=SyftWorker - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) +@serializable(canonical_name="WorkerSQLStash", version=1) +class WorkerStash(ObjectStash[SyftWorker]): + @as_result(StashException) + @with_session def set( self, credentials: SyftVerifyKey, @@ -41,41 +31,31 @@ def set( add_permissions: list[ActionObjectPermission] | None = None, add_storage_permission: bool = True, ignore_duplicates: bool = False, - ) -> Result[SyftWorker, str]: + session: Session = None, + skip_check_type: bool = False, + ) -> SyftWorker: # By default all worker pools have all read permission add_permissions = [] if add_permissions is None else add_permissions add_permissions.append( ActionObjectPermission(uid=obj.id, permission=ActionPermission.ALL_READ) ) - return super().set( - credentials, - obj, - add_permissions=add_permissions, - ignore_duplicates=ignore_duplicates, - add_storage_permission=add_storage_permission, + return ( + super() + .set( + credentials, + obj, + add_permissions=add_permissions, + ignore_duplicates=ignore_duplicates, + add_storage_permission=add_storage_permission, + session=session, + ) + .unwrap() ) - def get_worker_by_name( - self, credentials: SyftVerifyKey, worker_name: str - ) -> Result[SyftWorker | None, str]: - qks = QueryKeys(qks=[WorkerContainerNamePartitionKey.with_obj(worker_name)]) - return self.query_one(credentials=credentials, qks=qks) - + @as_result(StashException, NotFoundException) def update_consumer_state( self, credentials: SyftVerifyKey, worker_uid: UID, consumer_state: ConsumerState - ) -> Result[str, str]: - res = self.get_by_uid(credentials=credentials, uid=worker_uid) - if res.is_err(): - return Err( - f"Failed to retrieve Worker with id: {worker_uid}. 
Error: {res.err()}" - ) - worker: SyftWorker | None = res.ok() - if worker is None: - return Err(f"Worker with id: {worker_uid} not found") + ) -> SyftWorker: + worker = self.get_by_uid(credentials=credentials, uid=worker_uid).unwrap() worker.consumer_state = consumer_state - update_res = self.update(credentials=credentials, obj=worker) - if update_res.is_err(): - return Err( - f"Failed to update Worker with id: {worker_uid}. Error: {update_res.err()}" - ) - return Ok(f"Successfully updated Worker with id: {worker_uid}") + return self.update(credentials=credentials, obj=worker).unwrap() diff --git a/packages/syft/src/syft/stable_version.py b/packages/syft/src/syft/stable_version.py index 6ab7dba0f59..56baa2221d3 100644 --- a/packages/syft/src/syft/stable_version.py +++ b/packages/syft/src/syft/stable_version.py @@ -1 +1 @@ -LATEST_STABLE_SYFT = "0.8.5" +LATEST_STABLE_SYFT = "0.9.5" diff --git a/packages/syft/src/syft/store/__init__.py b/packages/syft/src/syft/store/__init__.py index 2369be33ea4..42ff4bbd825 100644 --- a/packages/syft/src/syft/store/__init__.py +++ b/packages/syft/src/syft/store/__init__.py @@ -1,3 +1,3 @@ # relative -from .mongo_document_store import MongoDict # noqa: F401 -from .mongo_document_store import MongoStoreConfig # noqa: F401 +from . import mongo_document_store # noqa: F401 +from . import sqlite_document_store # noqa: F401 diff --git a/packages/syft/src/syft/store/blob_storage/__init__.py b/packages/syft/src/syft/store/blob_storage/__init__.py index 9dab53341a3..f32bcf61e8d 100644 --- a/packages/syft/src/syft/store/blob_storage/__init__.py +++ b/packages/syft/src/syft/store/blob_storage/__init__.py @@ -41,8 +41,10 @@ """ # stdlib +from collections.abc import Callable from collections.abc import Generator from io import BytesIO +import logging from typing import Any # third party @@ -53,7 +55,6 @@ # relative from ...serde.deserialize import _deserialize as deserialize from ...serde.serializable import serializable -from ...service.response import SyftError from ...service.response import SyftSuccess from ...types.base import SyftBaseModel from ...types.blob_storage import BlobFile @@ -62,13 +63,18 @@ from ...types.blob_storage import CreateBlobStorageEntry from ...types.blob_storage import DEFAULT_CHUNK_SIZE from ...types.blob_storage import SecureFilePathLocation -from ...types.grid_url import GridURL -from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SYFT_OBJECT_VERSION_3 -from ...types.syft_object import SYFT_OBJECT_VERSION_4 +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.server_url import ServerURL +from ...types.syft_migration import migrate +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SyftObject +from ...types.transforms import drop +from ...types.transforms import make_set_default from ...types.uid import UID +logger = logging.getLogger(__name__) + DEFAULT_TIMEOUT = 10 MAX_RETRIES = 20 @@ -76,7 +82,7 @@ @serializable() class BlobRetrieval(SyftObject): __canonical_name__ = "BlobRetrieval" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 type_: type | None = None file_name: str @@ -87,7 +93,7 @@ class BlobRetrieval(SyftObject): @serializable() class SyftObjectRetrieval(BlobRetrieval): __canonical_name__ = "SyftObjectRetrieval" - __version__ = SYFT_OBJECT_VERSION_4 + __version__ = SYFT_OBJECT_VERSION_1 syft_object: bytes @@ -106,21 +112,21 @@ def _read_data( else: return res - def read(self, 
_deserialize: bool = True) -> SyftObject | SyftError: + def read(self, _deserialize: bool = True) -> SyftObject: return self._read_data(_deserialize=_deserialize) def syft_iter_content( - blob_url: str | GridURL, + blob_url: str | ServerURL, chunk_size: int, max_retries: int = MAX_RETRIES, timeout: int = DEFAULT_TIMEOUT, ) -> Generator: - """custom iter content with smart retries (start from last byte read)""" + """Custom iter content with smart retries (start from last byte read)""" current_byte = 0 for attempt in range(max_retries): + headers = {"Range": f"bytes={current_byte}-"} try: - headers = {"Range": f"bytes={current_byte}-"} with requests.get( str(blob_url), stream=True, headers=headers, timeout=(timeout, timeout) ) as response: @@ -130,31 +136,31 @@ def syft_iter_content( ): current_byte += len(chunk) yield chunk - return - + return # If successful, exit the function except requests.exceptions.RequestException as e: if attempt < max_retries: - print( + logger.debug( f"Attempt {attempt}/{max_retries} failed: {e} at byte {current_byte}. Retrying..." ) else: - print(f"Max retries reached. Failed with error: {e}") - raise + logger.error(f"Max retries reached - {e}") + raise SyftException(public_message=f"Max retries reached - {e}") @serializable() class BlobRetrievalByURL(BlobRetrieval): __canonical_name__ = "BlobRetrievalByURL" - __version__ = SYFT_OBJECT_VERSION_4 + __version__ = SYFT_OBJECT_VERSION_1 - url: GridURL | str + url: ServerURL | str + proxy_server_uid: UID | None = None - def read(self) -> SyftObject | SyftError: + def read(self) -> SyftObject: if self.type_ is BlobFileType: return BlobFile( file_name=self.file_name, syft_client_verify_key=self.syft_client_verify_key, - syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, syft_blob_storage_entry_id=self.syft_blob_storage_entry_id, file_size=self.file_size, ) @@ -171,44 +177,55 @@ def _read_data( # relative from ...client.api import APIRegistry - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) - if api and api.connection and isinstance(self.url, GridURL): - blob_url = api.connection.to_blob_route( - self.url.url_path, host=self.url.host_or_ip - ) + api = self.get_api_wrapped() + + if api.is_ok() and api.unwrap().connection and isinstance(self.url, ServerURL): + api = api.unwrap() + if self.proxy_server_uid is None: + blob_url = api.connection.to_blob_route( # type: ignore [union-attr] + self.url.url_path, host=self.url.host_or_ip + ) + else: + blob_url = api.connection.stream_via( # type: ignore [union-attr] + self.proxy_server_uid, self.url.url_path + ) + stream = True else: blob_url = self.url + try: - if self.type_ is BlobFileType: - if stream: - return syft_iter_content(blob_url, chunk_size) - else: - response = requests.get(str(blob_url), stream=False) # nosec - response.raise_for_status() - return response.content - else: - response = requests.get(str(blob_url), stream=stream) # nosec - response.raise_for_status() - return deserialize(response.content, from_bytes=True) + is_blob_file = self.type_ is not None and issubclass( + self.type_, BlobFileType + ) + if is_blob_file and stream: + return syft_iter_content(blob_url, chunk_size) + + response = requests.get(str(blob_url), stream=stream) # nosec + resp_content = response.content + response.raise_for_status() + + return ( + resp_content + if is_blob_file + else deserialize(resp_content, from_bytes=True) + ) except requests.RequestException as e: - return 
SyftError(message=f"Failed to retrieve with Error: {e}") + raise SyftException(public_message=f"Failed to retrieve with error: {e}") @serializable() class BlobDeposit(SyftObject): __canonical_name__ = "BlobDeposit" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 blob_storage_entry_id: UID - def write(self, data: BytesIO) -> SyftSuccess | SyftError: + @as_result(SyftException) + def write(self, data: BytesIO) -> SyftSuccess: raise NotImplementedError -@serializable() +@serializable(canonical_name="BlobStorageClientConfig", version=1) class BlobStorageClientConfig(BaseModel): pass @@ -223,9 +240,7 @@ def __exit__(self, *exc: Any) -> None: def read(self, fp: SecureFilePathLocation, type_: type | None) -> BlobRetrieval: raise NotImplementedError - def allocate( - self, obj: CreateBlobStorageEntry - ) -> SecureFilePathLocation | SyftError: + def allocate(self, obj: CreateBlobStorageEntry) -> SecureFilePathLocation: raise NotImplementedError def write(self, obj: BlobStorageEntry) -> BlobDeposit: @@ -235,7 +250,7 @@ def delete(self, fp: SecureFilePathLocation) -> bool: raise NotImplementedError -@serializable() +@serializable(canonical_name="BlobStorageClient", version=1) class BlobStorageClient(SyftBaseModel): config: BlobStorageClientConfig @@ -243,7 +258,8 @@ def connect(self) -> BlobStorageConnection: raise NotImplementedError -@serializable() +@serializable(canonical_name="BlobStorageConfig", version=1) class BlobStorageConfig(SyftBaseModel): client_type: type[BlobStorageClient] client_config: BlobStorageClientConfig + min_blob_size: int = 0 # in MB diff --git a/packages/syft/src/syft/store/blob_storage/on_disk.py b/packages/syft/src/syft/store/blob_storage/on_disk.py index 4369b46db4f..33990042c43 100644 --- a/packages/syft/src/syft/store/blob_storage/on_disk.py +++ b/packages/syft/src/syft/store/blob_storage/on_disk.py @@ -15,30 +15,32 @@ from . import BlobStorageConnection from . 
import SyftObjectRetrieval from ...serde.serializable import serializable -from ...service.response import SyftError from ...service.response import SyftSuccess from ...types.blob_storage import BlobStorageEntry from ...types.blob_storage import CreateBlobStorageEntry from ...types.blob_storage import SecureFilePathLocation -from ...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_object import SYFT_OBJECT_VERSION_1 @serializable() class OnDiskBlobDeposit(BlobDeposit): __canonical_name__ = "OnDiskBlobDeposit" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 - def write(self, data: BytesIO) -> SyftSuccess | SyftError: + @as_result(SyftException) + def write(self, data: BytesIO) -> SyftSuccess: # relative from ...service.service import from_api_or_context write_to_disk_method = from_api_or_context( func_or_path="blob_storage.write_to_disk", - syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, syft_client_verify_key=self.syft_client_verify_key, ) if write_to_disk_method is None: - return SyftError(message="write_to_disk_method is None") + raise SyftException(public_message="write_to_disk_method is None") return write_to_disk_method(data=data.read(), uid=self.blob_storage_entry_id) @@ -64,33 +66,31 @@ def read( type_=type_, ) - def allocate( - self, obj: CreateBlobStorageEntry - ) -> SecureFilePathLocation | SyftError: + def allocate(self, obj: CreateBlobStorageEntry) -> SecureFilePathLocation: try: return SecureFilePathLocation( path=str((self._base_directory / obj.file_name).absolute()) ) except Exception as e: - return SyftError(message=f"Failed to allocate: {e}") + raise SyftException(public_message=f"Failed to allocate: {e}") def write(self, obj: BlobStorageEntry) -> BlobDeposit: return OnDiskBlobDeposit(blob_storage_entry_id=obj.id) - def delete(self, fp: SecureFilePathLocation) -> SyftSuccess | SyftError: + def delete(self, fp: SecureFilePathLocation) -> SyftSuccess: try: (self._base_directory / fp.path).unlink() return SyftSuccess(message="Successfully deleted file.") except FileNotFoundError as e: - return SyftError(message=f"Failed to delete file: {e}") + raise SyftException(public_message=f"Failed to delete file: {e}") -@serializable() +@serializable(canonical_name="OnDiskBlobStorageClientConfig", version=1) class OnDiskBlobStorageClientConfig(BlobStorageClientConfig): base_directory: Path -@serializable() +@serializable(canonical_name="OnDiskBlobStorageClient", version=1) class OnDiskBlobStorageClient(BlobStorageClient): config: OnDiskBlobStorageClientConfig @@ -102,7 +102,7 @@ def connect(self) -> BlobStorageConnection: return OnDiskBlobStorageConnection(self.config.base_directory) -@serializable() +@serializable(canonical_name="OnDiskBlobStorageConfig", version=1) class OnDiskBlobStorageConfig(BlobStorageConfig): client_type: type[BlobStorageClient] = OnDiskBlobStorageClient client_config: OnDiskBlobStorageClientConfig diff --git a/packages/syft/src/syft/store/blob_storage/seaweedfs.py b/packages/syft/src/syft/store/blob_storage/seaweedfs.py index 6254c03811e..99ae35e6cea 100644 --- a/packages/syft/src/syft/store/blob_storage/seaweedfs.py +++ b/packages/syft/src/syft/store/blob_storage/seaweedfs.py @@ -1,6 +1,7 @@ # stdlib from collections.abc import Generator from io import BytesIO +import logging import math from queue import Queue import threading @@ -11,7 +12,12 @@ from botocore.client import BaseClient as 
S3BaseClient from botocore.client import ClientError as BotoClientError from botocore.client import Config +from botocore.exceptions import ConnectionError import requests +from tenacity import retry +from tenacity import retry_if_exception_type +from tenacity import stop_after_delay +from tenacity import wait_fixed from tqdm import tqdm from typing_extensions import Self @@ -24,38 +30,43 @@ from . import BlobStorageConnection from ...serde.serializable import serializable from ...service.blob_storage.remote_profile import AzureRemoteProfile -from ...service.response import SyftError from ...service.response import SyftSuccess from ...service.service import from_api_or_context from ...types.blob_storage import BlobStorageEntry from ...types.blob_storage import CreateBlobStorageEntry from ...types.blob_storage import SeaweedSecureFilePathLocation from ...types.blob_storage import SecureFilePathLocation -from ...types.grid_url import GridURL -from ...types.syft_object import SYFT_OBJECT_VERSION_3 +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.server_url import ServerURL +from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ...types.uid import UID from ...util.constants import DEFAULT_TIMEOUT +from ...util.telemetry import instrument_botocore +MAX_QUEUE_SIZE = 100 WRITE_EXPIRATION_TIME = 900 # seconds -DEFAULT_FILE_PART_SIZE = (1024**3) * 5 # 5GB -DEFAULT_UPLOAD_CHUNK_SIZE = 819200 +DEFAULT_FILE_PART_SIZE = 1024**3 # 1GB +DEFAULT_UPLOAD_CHUNK_SIZE = 1024 * 800 # 800KB + +logger = logging.getLogger(__name__) + +instrument_botocore() @serializable() class SeaweedFSBlobDeposit(BlobDeposit): __canonical_name__ = "SeaweedFSBlobDeposit" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 - urls: list[GridURL] + urls: list[ServerURL] size: int + proxy_server_uid: UID | None = None - def write(self, data: BytesIO) -> SyftSuccess | SyftError: + @as_result(SyftException) + def write(self, data: BytesIO) -> SyftSuccess: # relative - from ...client.api import APIRegistry - - api = APIRegistry.api_for( - node_uid=self.syft_node_location, - user_verify_key=self.syft_client_verify_key, - ) + api = self.get_api_wrapped() etags = [] @@ -74,15 +85,22 @@ def write(self, data: BytesIO) -> SyftSuccess | SyftError: with tqdm( total=total_iterations, desc=f"Uploading progress", # noqa + colour="green", ) as pbar: for part_no, url in enumerate( self.urls, start=1, ): - if api is not None and api.connection is not None: - blob_url = api.connection.to_blob_route( - url.url_path, host=url.host_or_ip - ) + if api.is_ok() and api.unwrap().connection is not None: + api = api.unwrap() + if self.proxy_server_uid is None: + blob_url = api.connection.to_blob_route( # type: ignore [union-attr] + url.url_path, host=url.host_or_ip + ) + else: + blob_url = api.connection.stream_via( # type: ignore [union-attr] + self.proxy_server_uid, url.url_path + ) else: blob_url = url @@ -94,7 +112,7 @@ def __init__(self) -> None: def async_generator( self, chunk_size: int = DEFAULT_UPLOAD_CHUNK_SIZE ) -> Generator: - item_queue: Queue = Queue() + item_queue: Queue = Queue(maxsize=MAX_QUEUE_SIZE) threading.Thread( target=self.add_chunks_to_queue, kwargs={"queue": item_queue, "chunk_size": chunk_size}, @@ -114,16 +132,17 @@ def add_chunks_to_queue( """Creates a data geneator for the part""" n = 0 - while n * chunk_size <= part_size: - try: + try: + while n * chunk_size <= part_size: chunk = data.read(chunk_size) + if not chunk: + break self.no_lines += 
chunk.count(b"\n") n += 1 queue.put(chunk) - except BlockingIOError: - # if end of file, stop - queue.put(0) - # if end of part, stop + except BlockingIOError: + pass + # if end of file or part, stop queue.put(0) gen = PartGenerator() @@ -141,22 +160,23 @@ def add_chunks_to_queue( etags.append({"ETag": etag, "PartNumber": part_no}) except requests.RequestException as e: - print(e) - return SyftError(message=str(e)) + raise SyftException( + public_message=f"Failed to upload file to SeaweedFS - {e}" + ) mark_write_complete_method = from_api_or_context( func_or_path="blob_storage.mark_write_complete", - syft_node_location=self.syft_node_location, + syft_server_location=self.syft_server_location, syft_client_verify_key=self.syft_client_verify_key, ) if mark_write_complete_method is None: - return SyftError(message="mark_write_complete_method is None") + raise SyftException(public_message="mark_write_complete_method is None") return mark_write_complete_method( etags=etags, uid=self.blob_storage_entry_id, no_lines=no_lines ) -@serializable() +@serializable(canonical_name="SeaweedFSClientConfig", version=1) class SeaweedFSClientConfig(BlobStorageClientConfig): host: str port: int @@ -169,8 +189,8 @@ class SeaweedFSClientConfig(BlobStorageClientConfig): @property def endpoint_url(self) -> str: - grid_url = GridURL(host_or_ip=self.host, port=self.port) - return grid_url.url + server_url = ServerURL(host_or_ip=self.host, port=self.port) + return server_url.url @property def mount_url(self) -> str: @@ -179,7 +199,7 @@ def mount_url(self) -> str: return f"http://{self.host}:{self.mount_port}/configure_azure" -@serializable() +@serializable(canonical_name="SeaweedFSClient", version=1) class SeaweedFSClient(BlobStorageClient): config: SeaweedFSClientConfig @@ -198,7 +218,7 @@ def connect(self) -> BlobStorageConnection: ) -@serializable() +@serializable(canonical_name="SeaweedFSConnection", version=1) class SeaweedFSConnection(BlobStorageConnection): client: S3BaseClient default_bucket_name: str @@ -214,12 +234,22 @@ def __init__( self.default_bucket_name = default_bucket_name self.config = config + self._check_connection() + def __enter__(self) -> Self: return self def __exit__(self, *exc: Any) -> None: self.client.close() + @retry( + wait=wait_fixed(5), + stop=stop_after_delay(60), + retry=retry_if_exception_type(ConnectionError), + ) + def _check_connection(self) -> dict: + return self.client.list_buckets() + def read( self, fp: SecureFilePathLocation, @@ -232,9 +262,7 @@ def read( # that decides whether to use a direct connection to azure/aws/gcp or via seaweed return fp.generate_url(self, type_, bucket_name) - def allocate( - self, obj: CreateBlobStorageEntry - ) -> SecureFilePathLocation | SyftError: + def allocate(self, obj: CreateBlobStorageEntry) -> SecureFilePathLocation: try: file_name = obj.file_name result = self.client.create_multipart_upload( @@ -244,15 +272,15 @@ def allocate( upload_id = result["UploadId"] return SeaweedSecureFilePathLocation(upload_id=upload_id, path=file_name) except BotoClientError as e: - return SyftError( - message=f"Failed to allocate space for {obj} with error: {e}" + raise SyftException( + public_message=f"Failed to allocate space for {obj} with error: {e}" ) def write(self, obj: BlobStorageEntry) -> BlobDeposit: total_parts = math.ceil(obj.file_size / DEFAULT_FILE_PART_SIZE) urls = [ - GridURL.from_url( + ServerURL.from_url( self.client.generate_presigned_url( ClientMethod="upload_part", Params={ @@ -274,7 +302,7 @@ def complete_multipart_upload( self, 
blob_entry: BlobStorageEntry, etags: list, - ) -> SyftError | SyftSuccess: + ) -> SyftSuccess: try: self.client.complete_multipart_upload( Bucket=self.default_bucket_name, @@ -284,20 +312,20 @@ def complete_multipart_upload( ) return SyftSuccess(message="Successfully saved file.") except BotoClientError as e: - return SyftError(message=str(e)) + raise SyftException(public_message=str(e)) def delete( self, fp: SecureFilePathLocation, - ) -> SyftSuccess | SyftError: + ) -> SyftSuccess: try: self.client.delete_object(Bucket=self.default_bucket_name, Key=fp.path) return SyftSuccess(message="Successfully deleted file.") except BotoClientError as e: - return SyftError(message=str(e)) + raise SyftException(public_message=str(e)) -@serializable() +@serializable(canonical_name="SeaweedFSConfig", version=1) class SeaweedFSConfig(BlobStorageConfig): client_type: type[BlobStorageClient] = SeaweedFSClient client_config: SeaweedFSClientConfig diff --git a/packages/syft/src/syft/store/db/__init__.py b/packages/syft/src/syft/store/db/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft/src/syft/store/db/db.py b/packages/syft/src/syft/store/db/db.py new file mode 100644 index 00000000000..5de906c3a6a --- /dev/null +++ b/packages/syft/src/syft/store/db/db.py @@ -0,0 +1,85 @@ +# stdlib +import logging +from pathlib import Path +from typing import Generic +from typing import TypeVar +from urllib.parse import urlparse + +# third party +from pydantic import BaseModel +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker + +# relative +from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey +from ...types.uid import UID +from ...util.telemetry import instrument_sqlalchemny +from .schema import PostgresBase +from .schema import SQLiteBase + +logger = logging.getLogger(__name__) +instrument_sqlalchemny() + + +@serializable(canonical_name="DBConfig", version=1) +class DBConfig(BaseModel): + @property + def connection_string(self) -> str: + raise NotImplementedError("Subclasses must implement this method.") + + @classmethod + def from_connection_string(cls, conn_str: str) -> "DBConfig": + # relative + from .postgres import PostgresDBConfig + from .sqlite import SQLiteDBConfig + + parsed = urlparse(conn_str) + if parsed.scheme == "postgresql": + return PostgresDBConfig( + host=parsed.hostname, + port=parsed.port, + user=parsed.username, + password=parsed.password, + database=parsed.path.lstrip("/"), + ) + elif parsed.scheme == "sqlite": + path = Path(parsed.path) + return SQLiteDBConfig(path=path.parent, filename=path.name) + else: + raise ValueError(f"Unsupported database scheme: {parsed.scheme}") + + +ConfigT = TypeVar("ConfigT", bound=DBConfig) + + +class DBManager(Generic[ConfigT]): + def __init__( + self, + config: ConfigT, + server_uid: UID, + root_verify_key: SyftVerifyKey, + ) -> None: + self.config = config + self.root_verify_key = root_verify_key + self.server_uid = server_uid + self.engine = create_engine( + config.connection_string, + # json_serializer=dumps, + # json_deserializer=loads, + ) + logger.info(f"Connecting to {config.connection_string}") + self.sessionmaker = sessionmaker(bind=self.engine) + self.update_settings() + logger.info(f"Successfully connected to {config.connection_string}") + + def update_settings(self) -> None: + pass + + def init_tables(self, reset: bool = False) -> None: + Base = SQLiteBase if self.engine.dialect.name == "sqlite" else PostgresBase + + with 
self.sessionmaker().begin() as _: + if reset: + Base.metadata.drop_all(bind=self.engine) + Base.metadata.create_all(self.engine) diff --git a/packages/syft/src/syft/store/db/errors.py b/packages/syft/src/syft/store/db/errors.py new file mode 100644 index 00000000000..8f9a4ca048a --- /dev/null +++ b/packages/syft/src/syft/store/db/errors.py @@ -0,0 +1,31 @@ +# stdlib +import logging + +# third party +from sqlalchemy.exc import DatabaseError +from typing_extensions import Self + +# relative +from ..document_store_errors import StashException + +logger = logging.getLogger(__name__) + + +class StashDBException(StashException): + """ + See https://docs.sqlalchemy.org/en/20/errors.html#databaseerror + + StashDBException converts a SQLAlchemy DatabaseError into a StashException, + DatabaseErrors are errors thrown by the database itself, for example when a + query fails because a table is missing. + """ + + public_message = "There was an error retrieving data. Contact your admin." + + @classmethod + def from_sqlalchemy_error(cls, e: DatabaseError) -> Self: + logger.exception(e) + + error_type = e.__class__.__name__ + private_message = f"{error_type}: {str(e)}" + return cls(private_message=private_message) diff --git a/packages/syft/src/syft/store/db/postgres.py b/packages/syft/src/syft/store/db/postgres.py new file mode 100644 index 00000000000..630155e29de --- /dev/null +++ b/packages/syft/src/syft/store/db/postgres.py @@ -0,0 +1,48 @@ +# third party +from sqlalchemy import URL + +# relative +from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey +from ...types.uid import UID +from .db import DBManager +from .sqlite import DBConfig + + +@serializable(canonical_name="PostgresDBConfig", version=1) +class PostgresDBConfig(DBConfig): + host: str + port: int + user: str + password: str + database: str + + @property + def connection_string(self) -> str: + return URL.create( + "postgresql", + username=self.user, + password=self.password, + host=self.host, + port=self.port, + database=self.database, + ).render_as_string(hide_password=False) + + +class PostgresDBManager(DBManager[PostgresDBConfig]): + def update_settings(self) -> None: + return super().update_settings() + + @classmethod + def random( + cls: type, + *, + config: PostgresDBConfig, + server_uid: UID | None = None, + root_verify_key: SyftVerifyKey | None = None, + ) -> "PostgresDBManager": + root_verify_key = root_verify_key or SyftVerifyKey.generate() + server_uid = server_uid or UID() + return PostgresDBManager( + config=config, server_uid=server_uid, root_verify_key=root_verify_key + ) diff --git a/packages/syft/src/syft/store/db/query.py b/packages/syft/src/syft/store/db/query.py new file mode 100644 index 00000000000..04864a4a74a --- /dev/null +++ b/packages/syft/src/syft/store/db/query.py @@ -0,0 +1,383 @@ +# stdlib +from abc import ABC +from abc import abstractmethod +import enum +from typing import Any +from typing import Literal + +# third party +import sqlalchemy as sa +from sqlalchemy import Column +from sqlalchemy import Dialect +from sqlalchemy import Result +from sqlalchemy import Select +from sqlalchemy import Table +from sqlalchemy import func +from sqlalchemy.exc import DatabaseError +from sqlalchemy.orm import Session +from typing_extensions import Self + +# relative +from ...serde.json_serde import serialize_json +from ...server.credentials import SyftVerifyKey +from ...service.action.action_permissions import ActionObjectPermission +from ...service.action.action_permissions import 
ActionPermission +from ...service.user.user_roles import ServiceRole +from ...types.syft_object import SyftObject +from ...types.uid import UID +from .errors import StashDBException +from .schema import PostgresBase +from .schema import SQLiteBase + + +class FilterOperator(enum.Enum): + EQ = "eq" + CONTAINS = "contains" + + +class Query(ABC): + def __init__(self, object_type: type[SyftObject]) -> None: + self.object_type: type = object_type + self.table: Table = self._get_table(object_type) + self.stmt: Select = self.table.select() + + @abstractmethod + def _get_table(self, object_type: type[SyftObject]) -> Table: + raise NotImplementedError + + @staticmethod + def get_query_class(dialect: str | Dialect) -> "type[Query]": + if isinstance(dialect, Dialect): + dialect = dialect.name + + if dialect == "sqlite": + return SQLiteQuery + elif dialect == "postgresql": + return PostgresQuery + else: + raise ValueError(f"Unsupported dialect {dialect}") + + @classmethod + def create(cls, object_type: type[SyftObject], dialect: str | Dialect) -> "Query": + """Create a query object for the given object type and dialect.""" + query_class = cls.get_query_class(dialect) + return query_class(object_type) + + def execute(self, session: Session) -> Result: + """Execute the query using the given session.""" + try: + return session.execute(self.stmt) + except DatabaseError as e: + raise StashDBException.from_sqlalchemy_error(e) from e + + def with_permissions( + self, + credentials: SyftVerifyKey, + role: ServiceRole, + permission: ActionPermission = ActionPermission.READ, + ) -> Self: + """Add a permission check to the query. + + If the user has a role below DATA_OWNER, the query will be filtered to only include objects + that the user has the specified permission on. + + Args: + credentials (SyftVerifyKey): user verify key + role (ServiceRole): role of the user + permission (ActionPermission, optional): Type of permission to check for. + Defaults to ActionPermission.READ. + + Returns: + Self: The query object with the permission check applied + """ + if role in (ServiceRole.ADMIN, ServiceRole.DATA_OWNER): + return self + + ao_permission = ActionObjectPermission( + uid=UID(), # dummy uid, we just need the permission string + credentials=credentials, + permission=permission, + ) + + permission_clause = self._make_permissions_clause(ao_permission) + self.stmt = self.stmt.where(permission_clause) + + return self + + def filter(self, field: str, operator: str | FilterOperator, value: Any) -> Self: + """Add a filter to the query. + + example usage: + Query(User).filter("name", "eq", "Alice") + Query(User).filter("friends", "contains", "Bob") + + Args: + field (str): Field to filter on + operator (str): Operator to use for the filter + value (Any): Value to filter on + + Raises: + ValueError: If the operator is not supported + + Returns: + Self: The query object with the filter applied + """ + filter = self._create_filter_clause(self.table, field, operator, value) + self.stmt = self.stmt.where(filter) + return self + + def filter_and(self, *filters: tuple[str, str | FilterOperator, Any]) -> Self: + """Add filters to the query using an AND clause. 
+ + example usage: + Query(User).filter_and( + ("name", "eq", "Alice"), + ("age", "eq", 30), + ) + + Args: + field (str): Field to filter on + operator (str): Operator to use for the filter + value (Any): Value to filter on + + Raises: + ValueError: If the operator is not supported + + Returns: + Self: The query object with the filter applied + """ + filter_clauses = [ + self._create_filter_clause(self.table, field, operator, value) + for field, operator, value in filters + ] + + self.stmt = self.stmt.where(sa.and_(*filter_clauses)) + return self + + def filter_or(self, *filters: tuple[str, str | FilterOperator, Any]) -> Self: + """Add filters to the query using an OR clause. + + example usage: + Query(User).filter_or( + ("name", "eq", "Alice"), + ("age", "eq", 30), + ) + + Args: + field (str): Field to filter on + operator (str): Operator to use for the filter + value (Any): Value to filter on + + Raises: + ValueError: If the operator is not supported + + Returns: + Self: The query object with the filter applied + """ + filter_clauses = [ + self._create_filter_clause(self.table, field, operator, value) + for field, operator, value in filters + ] + + self.stmt = self.stmt.where(sa.or_(*filter_clauses)) + return self + + def _create_filter_clause( + self, + table: Table, + field: str, + operator: str | FilterOperator, + value: Any, + ) -> sa.sql.elements.BinaryExpression: + if isinstance(operator, str): + try: + operator = FilterOperator(operator.lower()) + except ValueError: + raise ValueError(f"Filter operator {operator} not supported") + + if operator == FilterOperator.EQ: + return self._eq_filter(table, field, value) + elif operator == FilterOperator.CONTAINS: + return self._contains_filter(table, field, value) + + def order_by( + self, + field: str | None = None, + order: Literal["asc", "desc"] | None = None, + ) -> Self: + """Add an order by clause to the query, with sensible defaults if field or order is not provided. + + Args: + field (Optional[str]): field to order by. If None, uses the default field. + order (Optional[Literal["asc", "desc"]]): Order to use ("asc" or "desc"). + Defaults to 'asc' if field is provided and order is not, or the default order otherwise. + + Raises: + ValueError: If the order is not "asc" or "desc" + + Returns: + Self: The query object with the order by clause applied. 
+ """ + # Determine the field and order defaults if not provided + if field is None: + if hasattr(self.object_type, "__order_by__"): + default_field, default_order = self.object_type.__order_by__ + else: + default_field, default_order = "_created_at", "desc" + field = default_field + else: + # If field is provided but order is not, default to 'asc' + default_order = "asc" + order = order or default_order + + column = self._get_column(field) + + if isinstance(column.type, sa.JSON): + column = sa.cast(column, sa.String) + + if order.lower() == "asc": + self.stmt = self.stmt.order_by(column.asc()) + + elif order.lower() == "desc": + self.stmt = self.stmt.order_by(column.desc()) + else: + raise ValueError(f"Invalid sort order {order}") + + return self + + def limit(self, limit: int | None) -> Self: + """Add a limit clause to the query.""" + if limit is None: + return self + + if limit < 0: + raise ValueError("Limit must be a positive integer") + self.stmt = self.stmt.limit(limit) + + return self + + def offset(self, offset: int) -> Self: + """Add an offset clause to the query.""" + if offset < 0: + raise ValueError("Offset must be a positive integer") + + self.stmt = self.stmt.offset(offset) + return self + + @abstractmethod + def _make_permissions_clause( + self, + permission: ActionObjectPermission, + ) -> sa.sql.elements.BinaryExpression: + pass + + @abstractmethod + def _contains_filter( + self, + table: Table, + field: str, + value: Any, + ) -> sa.sql.elements.BinaryExpression: + pass + + def _get_column(self, column: str) -> Column: + if column == "id": + return self.table.c.id + if column == "created_date" or column == "_created_at": + return self.table.c._created_at + elif column == "updated_date" or column == "_updated_at": + return self.table.c._updated_at + elif column == "deleted_date" or column == "_deleted_at": + return self.table.c._deleted_at + + return self.table.c.fields[column] + + +class SQLiteQuery(Query): + def _make_permissions_clause( + self, + permission: ActionObjectPermission, + ) -> sa.sql.elements.BinaryExpression: + permission_string = permission.permission_string + compound_permission_string = permission.compound_permission_string + return sa.or_( + self.table.c.permissions.contains(permission_string), + self.table.c.permissions.contains(compound_permission_string), + ) + + def _get_table(self, object_type: type[SyftObject]) -> Table: + cname = object_type.__canonical_name__ + if cname not in SQLiteBase.metadata.tables: + raise ValueError(f"Table for {cname} not found") + return SQLiteBase.metadata.tables[cname] + + def _contains_filter( + self, + table: Table, + field: str, + value: Any, + ) -> sa.sql.elements.BinaryExpression: + field_value = serialize_json(value) + return table.c.fields[field].contains(func.json_quote(field_value)) + + def _eq_filter( + self, + table: Table, + field: str, + value: Any, + ) -> sa.sql.elements.BinaryExpression: + if field == "id": + return table.c.id == UID(value) + + if "." in field: + # magic! 
+            field = field.split(".")  # type: ignore
+
+        json_value = serialize_json(value)
+        return table.c.fields[field] == func.json_quote(json_value)
+
+
+class PostgresQuery(Query):
+    def _make_permissions_clause(
+        self, permission: ActionObjectPermission
+    ) -> sa.sql.elements.BinaryExpression:
+        permission_string = [permission.permission_string]
+        compound_permission_string = [permission.compound_permission_string]
+        return sa.or_(
+            self.table.c.permissions.contains(permission_string),
+            self.table.c.permissions.contains(compound_permission_string),
+        )
+
+    def _contains_filter(
+        self,
+        table: Table,
+        field: str,
+        value: Any,
+    ) -> sa.sql.elements.BinaryExpression:
+        field_value = serialize_json(value)
+        col = sa.cast(table.c.fields[field], sa.Text)
+        val = sa.cast(field_value, sa.Text)
+        return col.contains(val)
+
+    def _get_table(self, object_type: type[SyftObject]) -> Table:
+        cname = object_type.__canonical_name__
+        if cname not in PostgresBase.metadata.tables:
+            raise ValueError(f"Table for {cname} not found")
+        return PostgresBase.metadata.tables[cname]
+
+    def _eq_filter(
+        self,
+        table: Table,
+        field: str,
+        value: Any,
+    ) -> sa.sql.elements.BinaryExpression:
+        if field == "id":
+            return table.c.id == UID(value)
+
+        if "." in field:
+            # a dotted field such as "a.b" is split into ["a", "b"] and used as a
+            # JSON path index into the fields column, i.e. fields["a"]["b"]
+            field = field.split(".")  # type: ignore
+
+        json_value = serialize_json(value)
+        # NOTE: there might be a bug with casting everything to text
+        return table.c.fields[field].astext == sa.cast(json_value, sa.Text)
diff --git a/packages/syft/src/syft/store/db/schema.py b/packages/syft/src/syft/store/db/schema.py
new file mode 100644
index 00000000000..7f81e39802e
--- /dev/null
+++ b/packages/syft/src/syft/store/db/schema.py
@@ -0,0 +1,87 @@
+# stdlib
+
+# stdlib
+import uuid
+
+# third party
+import sqlalchemy as sa
+from sqlalchemy import Column
+from sqlalchemy import Dialect
+from sqlalchemy import Table
+from sqlalchemy import TypeDecorator
+from sqlalchemy.dialects import postgresql
+from sqlalchemy.orm import DeclarativeBase
+from sqlalchemy.types import JSON
+
+# relative
+from ...types.syft_object import SyftObject
+from ...types.uid import UID
+
+
+class SQLiteBase(DeclarativeBase):
+    pass
+
+
+class PostgresBase(DeclarativeBase):
+    pass
+
+
+class UIDTypeDecorator(TypeDecorator):
+    """Converts between Syft UID and UUID."""
+
+    impl = sa.UUID
+    cache_ok = True
+
+    def process_bind_param(self, value, dialect):  # type: ignore
+        if value is not None:
+            return value.value
+
+    def process_result_value(self, value, dialect):  # type: ignore
+        if value is not None:
+            return UID(value)
+
+
+def create_table(
+    object_type: type[SyftObject],
+    dialect: Dialect,
+) -> Table:
+    """Create a table for a given SyftObject type, and add it to the metadata.
+
+    To create the table on the database, you must call `Base.metadata.create_all(engine)`.
+
+    Args:
+        object_type (type[SyftObject]): The type of the object to create a table for.
+        dialect (Dialect): The dialect of the database.
+
+    Returns:
+        Table: The created table.
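+
+    Example (illustrative; ``User`` is any SyftObject subclass, ``engine`` a SQLAlchemy Engine):
+        table = create_table(User, engine.dialect)
+        SQLiteBase.metadata.create_all(engine)  # or PostgresBase.metadata for Postgres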
+ """ + table_name = object_type.__canonical_name__ + dialect_name = dialect.name + + fields_type = JSON if dialect_name == "sqlite" else postgresql.JSON + permissions_type = JSON if dialect_name == "sqlite" else postgresql.JSONB + storage_permissions_type = JSON if dialect_name == "sqlite" else postgresql.JSONB + + Base = SQLiteBase if dialect_name == "sqlite" else PostgresBase + + if table_name not in Base.metadata.tables: + Table( + object_type.__canonical_name__, + Base.metadata, + Column("id", UIDTypeDecorator, primary_key=True, default=uuid.uuid4), + Column("fields", fields_type, default={}), + Column("permissions", permissions_type, default=[]), + Column( + "storage_permissions", + storage_permissions_type, + default=[], + ), + Column( + "_created_at", sa.DateTime, server_default=sa.func.now(), index=True + ), + Column("_updated_at", sa.DateTime, server_onupdate=sa.func.now()), + Column("_deleted_at", sa.DateTime, index=True), + ) + + return Base.metadata.tables[table_name] diff --git a/packages/syft/src/syft/store/db/sqlite.py b/packages/syft/src/syft/store/db/sqlite.py new file mode 100644 index 00000000000..fbcf87ce47b --- /dev/null +++ b/packages/syft/src/syft/store/db/sqlite.py @@ -0,0 +1,61 @@ +# stdlib +from pathlib import Path +import tempfile +import uuid + +# third party +from pydantic import Field +import sqlalchemy as sa + +# relative +from ...serde.serializable import serializable +from ...server.credentials import SyftSigningKey +from ...server.credentials import SyftVerifyKey +from ...types.uid import UID +from .db import DBConfig +from .db import DBManager + + +@serializable(canonical_name="SQLiteDBConfig", version=1) +class SQLiteDBConfig(DBConfig): + filename: str = Field(default_factory=lambda: f"{uuid.uuid4()}.db") + path: Path = Field(default_factory=lambda: Path(tempfile.gettempdir())) + + @property + def connection_string(self) -> str: + """ + NOTE in-memory sqlite is not shared between connections, so: + - using 2 workers (high/low) will not share a db + - re-using a connection (e.g. 
for a Job worker) will not share a db + """ + if self.path == Path("."): + # Use in-memory database, only for unittests + return "sqlite://" + filepath = self.path / self.filename + return f"sqlite:///{filepath.resolve()}" + + +class SQLiteDBManager(DBManager[SQLiteDBConfig]): + def update_settings(self) -> None: + connection = self.engine.connect() + connection.execute(sa.text("PRAGMA journal_mode = WAL")) + connection.execute(sa.text("PRAGMA busy_timeout = 5000")) + connection.execute(sa.text("PRAGMA temp_store = 2")) + connection.execute(sa.text("PRAGMA synchronous = 1")) + + @classmethod + def random( + cls, + *, + config: SQLiteDBConfig | None = None, + server_uid: UID | None = None, + root_verify_key: SyftVerifyKey | None = None, + ) -> "SQLiteDBManager": + root_verify_key = root_verify_key or SyftSigningKey.generate().verify_key + server_uid = server_uid or UID() + config = config or SQLiteDBConfig() + return SQLiteDBManager( + config=config, + server_uid=server_uid, + root_verify_key=root_verify_key, + ) diff --git a/packages/syft/src/syft/store/db/stash.py b/packages/syft/src/syft/store/db/stash.py new file mode 100644 index 00000000000..f2083a1a424 --- /dev/null +++ b/packages/syft/src/syft/store/db/stash.py @@ -0,0 +1,878 @@ +# stdlib +from collections.abc import Callable +from functools import wraps +import inspect +from typing import Any +from typing import Generic +from typing import ParamSpec +from typing import Set # noqa: UP035 +from typing import cast +from typing import get_args + +# third party +from pydantic import ValidationError +import sqlalchemy as sa +from sqlalchemy import Row +from sqlalchemy import Table +from sqlalchemy import func +from sqlalchemy import select +from sqlalchemy.orm import Session +from typing_extensions import Self +from typing_extensions import TypeVar + +# relative +from ...serde.json_serde import deserialize_json +from ...serde.json_serde import is_json_primitive +from ...serde.json_serde import serialize_json +from ...server.credentials import SyftVerifyKey +from ...service.action.action_permissions import ActionObjectEXECUTE +from ...service.action.action_permissions import ActionObjectOWNER +from ...service.action.action_permissions import ActionObjectPermission +from ...service.action.action_permissions import ActionObjectREAD +from ...service.action.action_permissions import ActionObjectWRITE +from ...service.action.action_permissions import ActionPermission +from ...service.action.action_permissions import StoragePermission +from ...service.user.user_roles import ServiceRole +from ...types.errors import SyftException +from ...types.result import as_result +from ...types.syft_metaclass import Empty +from ...types.syft_object import PartialSyftObject +from ...types.syft_object import SyftObject +from ...types.uid import UID +from ...util.telemetry import instrument +from ..document_store_errors import NotFoundException +from ..document_store_errors import StashException +from ..document_store_errors import UniqueConstraintException +from .db import DBManager +from .query import Query +from .schema import PostgresBase +from .schema import SQLiteBase +from .schema import create_table +from .sqlite import SQLiteDBManager + +StashT = TypeVar("StashT", bound=SyftObject) +T = TypeVar("T") +P = ParamSpec("P") + + +def parse_filters(filter_dict: dict[str, Any] | None) -> list[tuple[str, str, Any]]: + # NOTE using django style filters, e.g. 
{"age__gt": 18} + if filter_dict is None: + return [] + filters = [] + for key, value in filter_dict.items(): + key_split = key.split("__") + # Operator is eq if not specified + if len(key_split) == 1: + field, operator = key, "eq" + elif len(key_split) == 2: + field, operator = key_split + filters.append((field, operator, value)) + return filters + + +def with_session(func: Callable[P, T]) -> Callable[P, T]: # type: ignore + """ + Decorator to inject a session into the function kwargs if it is not provided. + + Make sure to pass session as a keyword argument to the function. + + TODO: This decorator is a temporary fix, we want to move to a DI approach instead: + move db connection and session to context, and pass context to all stash methods. + """ + + # inspect if the function has a session kwarg + sig = inspect.signature(func) + inject_session: bool = "session" in sig.parameters + + @wraps(func) + def wrapper(self: "ObjectStash[StashT]", *args: Any, **kwargs: Any) -> Any: + if inject_session and kwargs.get("session") is None: + with self.sessionmaker() as session: + with session.begin(): + kwargs["session"] = session + return func(self, *args, **kwargs) + return func(self, *args, **kwargs) + + return wrapper # type: ignore + + +@instrument +class ObjectStash(Generic[StashT]): + allow_any_type: bool = False + + def __init__(self, store: DBManager) -> None: + self.db = store + self.object_type = self.get_object_type() + self.table = create_table(self.object_type, self.dialect) + self.sessionmaker: Callable[[], Session] = self.db.sessionmaker + + @property + def dialect(self) -> sa.engine.interfaces.Dialect: + return self.db.engine.dialect + + @classmethod + def get_object_type(cls) -> type[StashT]: + """ + Get the object type this stash is storing. This is the generic argument of the + ObjectStash class. + """ + generic_args = get_args(cls.__orig_bases__[0]) + if len(generic_args) != 1: + raise TypeError("ObjectStash must have a single generic argument") + elif not issubclass(generic_args[0], SyftObject): + raise TypeError( + "ObjectStash generic argument must be a subclass of SyftObject" + ) + return generic_args[0] + + @with_session + def __len__(self, session: Session = None) -> int: + return session.query(self.table).count() + + @classmethod + def random(cls, **kwargs: dict) -> Self: + """Create a random stash with a random server_uid and root_verify_key. 
Useful for development.""" + db_manager = SQLiteDBManager.random(**kwargs) + stash = cls(store=db_manager) + stash.db.init_tables() + return stash + + def _is_sqlite(self) -> bool: + return self.db.engine.dialect.name == "sqlite" + + @property + def server_uid(self) -> UID: + return self.db.server_uid + + @property + def root_verify_key(self) -> SyftVerifyKey: + return self.db.root_verify_key + + @property + def _data(self) -> list[StashT]: + return self.get_all(self.root_verify_key, has_permission=True).unwrap() + + def query(self, object_type: type[SyftObject] | None = None) -> Query: + """Creates a query for this stash's object type and SQL dialect.""" + object_type = object_type or self.object_type + return Query.create(object_type, self.dialect) + + @as_result(StashException) + def check_type(self, obj: T, type_: type) -> T: + if not isinstance(obj, type_): + raise StashException(f"{type(obj)} does not match required type: {type_}") + return cast(T, obj) + + @property + def session(self) -> Session: + return self.db.session + + def _print_query(self, stmt: sa.sql.select) -> None: + print( + stmt.compile( + compile_kwargs={"literal_binds": True}, + dialect=self.db.engine.dialect, + ) + ) + + @property + def unique_fields(self) -> list[str]: + return getattr(self.object_type, "__attr_unique__", []) + + @with_session + def is_unique(self, obj: StashT, session: Session = None) -> bool: + unique_fields = self.unique_fields + if not unique_fields: + return True + + filters = [] + for field_name in unique_fields: + field_value = getattr(obj, field_name, None) + if not is_json_primitive(field_value): + raise StashException( + f"Cannot check uniqueness of non-primitive field {field_name}" + ) + if field_value is None: + continue + filters.append((field_name, "eq", field_value)) + + query = self.query() + query = query.filter_or( + *filters, + ) + + results = query.execute(session).all() + + if len(results) > 1: + return False + elif len(results) == 1: + result = results[0] + res = result.id == obj.id + return res + return True + + @with_session + def exists( + self, credentials: SyftVerifyKey, uid: UID, session: Session = None + ) -> bool: + # TODO should be @as_result + # TODO needs credentials check? 
+ # TODO use COUNT(*) instead of SELECT + query = self.query().filter("id", "eq", uid) + result = query.execute(session).first() + return result is not None + + @as_result(SyftException, StashException, NotFoundException) + @with_session + def get_by_uid( + self, + credentials: SyftVerifyKey, + uid: UID, + has_permission: bool = False, + session: Session = None, + ) -> StashT: + return self.get_one( + credentials=credentials, + filters={"id": uid}, + has_permission=has_permission, + session=session, + ).unwrap() + + def _get_field_filter( + self, + field_name: str, + field_value: Any, + table: Table | None = None, + ) -> sa.sql.elements.BinaryExpression: + table = table if table is not None else self.table + if field_name == "id": + uid_field_value = UID(field_value) + return table.c.id == uid_field_value + + json_value = serialize_json(field_value) + if self.db.engine.dialect.name == "sqlite": + return table.c.fields[field_name] == func.json_quote(json_value) + elif self.db.engine.dialect.name == "postgresql": + return table.c.fields[field_name].astext == cast(json_value, sa.String) + + @as_result(SyftException, StashException, NotFoundException) + def get_index( + self, credentials: SyftVerifyKey, index: int, has_permission: bool = False + ) -> StashT: + order_by, sort_order = self.object_type.__order_by__ + if index < 0: + index = -1 - index + sort_order = "desc" if sort_order == "asc" else "asc" + + items = self.get_all( + credentials, + has_permission=has_permission, + limit=1, + offset=index, + order_by=order_by, + sort_order=sort_order, + ).unwrap() + + if len(items) == 0: + raise NotFoundException(f"No item found at index {index}") + return items[0] + + def row_as_obj(self, row: Row) -> StashT: + # TODO make unwrappable serde + return deserialize_json(row.fields) + + @with_session + def get_role( + self, credentials: SyftVerifyKey, session: Session = None + ) -> ServiceRole: + # relative + from ...service.user.user import User + + Base = SQLiteBase if self._is_sqlite() else PostgresBase + + # TODO error handling + if Base.metadata.tables.get("User") is None: + # if User table does not exist, we assume the user is a guest + # this happens when we create stashes in tests + return ServiceRole.GUEST + + try: + query = self.query(User).filter("verify_key", "eq", credentials) + except Exception as e: + print("Error getting role", e) + raise e + + user = query.execute(session).first() + if user is None: + return ServiceRole.GUEST + + return self.row_as_obj(user).role + + def _get_permission_filter_from_permisson( + self, + permission: ActionObjectPermission, + ) -> sa.sql.elements.BinaryExpression: + permission_string = permission.permission_string + compound_permission_string = permission.compound_permission_string + + if self.db.engine.dialect.name == "postgresql": + permission_string = [permission_string] # type: ignore + compound_permission_string = [compound_permission_string] # type: ignore + return sa.or_( + self.table.c.permissions.contains(permission_string), + self.table.c.permissions.contains(compound_permission_string), + ) + + @with_session + def _apply_permission_filter( + self, + stmt: T, + *, + credentials: SyftVerifyKey, + permission: ActionPermission = ActionPermission.READ, + has_permission: bool = False, + session: Session = None, + ) -> T: + if has_permission: + # ignoring permissions + return stmt + role = self.get_role(credentials, session=session) + if role in (ServiceRole.ADMIN, ServiceRole.DATA_OWNER): + # admins and data owners have all permissions + return stmt 
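+
+        # for all other roles, restrict results to rows whose permissions column
+        # contains a matching permission string for these credentials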
+ + action_object_permission = ActionObjectPermission( + uid=UID(), # dummy uid, we just need the permission string + credentials=credentials, + permission=permission, + ) + + stmt = stmt.where( + self._get_permission_filter_from_permisson( + permission=action_object_permission + ) + ) + return stmt + + @as_result(SyftException, StashException) + @with_session + def set( + self, + credentials: SyftVerifyKey, + obj: StashT, + add_permissions: list[ActionObjectPermission] | None = None, + add_storage_permission: bool = True, # TODO: check the default value + ignore_duplicates: bool = False, + session: Session = None, + skip_check_type: bool = False, + ) -> StashT: + if not self.allow_any_type and not skip_check_type: + self.check_type(obj, self.object_type).unwrap() + uid = obj.id + + # check if the object already exists + if self.exists(credentials, uid, session=session) or not self.is_unique( + obj, session=session + ): + if ignore_duplicates: + return obj + unique_fields_str = ", ".join(self.unique_fields) + raise UniqueConstraintException( + public_message=f"Duplication Key Error for {obj}.\n" + f"The fields that should be unique are {unique_fields_str}." + ) + + permissions = self.get_ownership_permissions(uid, credentials) + if add_permissions is not None: + add_permission_strings = [p.permission_string for p in add_permissions] + permissions.extend(add_permission_strings) + + storage_permissions = [] + if add_storage_permission: + storage_permissions.append( + self.server_uid.no_dash, + ) + + fields = serialize_json(obj) + try: + # check if the fields are deserializable + # TODO: Ideally, we want to make sure we don't serialize what we cannot deserialize + # and remove this check. + deserialize_json(fields) + except Exception as e: + raise StashException( + f"Error serializing object: {e}. Some fields are invalid." + ) + # create the object with the permissions + stmt = self.table.insert().values( + id=uid, + fields=fields, + permissions=permissions, + storage_permissions=storage_permissions, + ) + session.execute(stmt) + return self.get_by_uid(credentials, uid, session=session).unwrap() + + @as_result(ValidationError, AttributeError) + def apply_partial_update( + self, original_obj: StashT, update_obj: SyftObject + ) -> StashT: + for key, value in update_obj.__dict__.items(): + if value is Empty: + continue + + if key in original_obj.__dict__: + setattr(original_obj, key, value) + else: + raise AttributeError( + f"{type(update_obj).__name__}.{key} not found in {type(original_obj).__name__}" + ) + + # validate the new fields + self.object_type.model_validate(original_obj) + return original_obj + + @as_result( + StashException, + NotFoundException, + AttributeError, + ValidationError, + UniqueConstraintException, + ) + @with_session + def update( + self, + credentials: SyftVerifyKey, + obj: StashT, + has_permission: bool = False, + session: Session = None, + ) -> StashT: + """ + NOTE: We cannot do partial updates on the database, + because we are using computed fields that are not known to the DB: + - serialize_json will add computed fields to the JSON stored in the database + - If we update a single field in the JSON, the computed fields can get out of sync. + - To fix, we either need db-supported computed fields, or know in our ORM which fields should be re-computed. 
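+
+        Example (illustrative; ``obj`` is a stored object with a ``name`` field):
+            obj.name = "new name"
+            stash.update(credentials=verify_key, obj=obj).unwrap()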
+        """
+
+        if issubclass(type(obj), PartialSyftObject):
+            original_obj = self.get_by_uid(
+                credentials, obj.id, session=session
+            ).unwrap()
+            obj = self.apply_partial_update(
+                original_obj=original_obj, update_obj=obj
+            ).unwrap()
+
+        # TODO has_permission is not used
+        if not self.is_unique(obj, session=session):
+            raise UniqueConstraintException(
+                f"Some fields are not unique for {type(obj).__name__} and unique fields {self.unique_fields}"
+            )
+
+        stmt = self.table.update().where(self._get_field_filter("id", obj.id))
+        stmt = self._apply_permission_filter(
+            stmt,
+            credentials=credentials,
+            permission=ActionPermission.WRITE,
+            has_permission=has_permission,
+            session=session,
+        )
+        fields = serialize_json(obj)
+        try:
+            deserialize_json(fields)
+        except Exception as e:
+            raise StashException(
+                f"Error serializing object: {e}. Some fields are invalid."
+            )
+        stmt = stmt.values(fields=fields)
+        result = session.execute(stmt)
+        if result.rowcount == 0:
+            raise NotFoundException(
+                f"{self.object_type.__name__}: {obj.id} not found or no permission to update."
+            )
+        return self.get_by_uid(credentials, obj.id, session=session).unwrap()
+
+    @as_result(StashException, NotFoundException)
+    @with_session
+    def delete_by_uid(
+        self,
+        credentials: SyftVerifyKey,
+        uid: UID,
+        has_permission: bool = False,
+        session: Session = None,
+    ) -> UID:
+        stmt = self.table.delete().where(self._get_field_filter("id", uid))
+        stmt = self._apply_permission_filter(
+            stmt,
+            credentials=credentials,
+            permission=ActionPermission.WRITE,
+            has_permission=has_permission,
+            session=session,
+        )
+        result = session.execute(stmt)
+        if result.rowcount == 0:
+            raise NotFoundException(
+                f"{self.object_type.__name__}: {uid} not found or no permission to delete."
+            )
+        return uid
+
+    @as_result(StashException)
+    @with_session
+    def get_one(
+        self,
+        credentials: SyftVerifyKey,
+        filters: dict[str, Any] | None = None,
+        has_permission: bool = False,
+        order_by: str | None = None,
+        sort_order: str | None = None,
+        offset: int = 0,
+        session: Session = None,
+    ) -> StashT:
+        """
+        Get the first object from the stash, optionally filtered.
+
+        Args:
+            credentials (SyftVerifyKey): credentials of the user
+            filters (dict[str, Any] | None, optional): dictionary of filters,
+                where the key is the field name and the value is the filter value.
+                Operators other than equals can be used in the key,
+                e.g. {"name": "Bob", "friends__contains": "Alice"}. Defaults to None.
+            has_permission (bool, optional): If True, overrides the permission check.
+                Defaults to False.
+            order_by (str | None, optional): If provided, the results will be ordered by this field.
+                If not provided, the default order and field defined on the SyftObject.__order_by__ are used.
+                Defaults to None.
+            sort_order (str | None, optional): "asc" or "desc". If not defined,
+                the default order defined on the SyftObject.__order_by__ is used.
+                Defaults to None.
+            offset (int, optional): offset the results. Defaults to 0.
+
+        Raises:
+            NotFoundException: If no matching object is found.
+
+        Returns:
+            StashT: The first matching object.
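+
+        Example (illustrative; ``verify_key`` is the caller's SyftVerifyKey):
+            stash.get_one(
+                credentials=verify_key,
+                filters={"name": "Bob", "friends__contains": "Alice"},
+            ).unwrap()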
+ """ + query = self.query() + + if not has_permission: + role = self.get_role(credentials, session=session) + query = query.with_permissions(credentials, role) + + for field_name, operator, field_value in parse_filters(filters): + query = query.filter(field_name, operator, field_value) + + query = query.order_by(order_by, sort_order).offset(offset).limit(1) + result = query.execute(session).first() + if result is None: + raise NotFoundException(f"{self.object_type.__name__}: not found") + + return self.row_as_obj(result) + + @as_result(StashException) + @with_session + def get_all( + self, + credentials: SyftVerifyKey, + filters: dict[str, Any] | None = None, + has_permission: bool = False, + order_by: str | None = None, + sort_order: str | None = None, + limit: int | None = None, + offset: int = 0, + session: Session = None, + ) -> list[StashT]: + """ + Get all objects from the stash, optionally filtered. + + Args: + credentials (SyftVerifyKey): credentials of the user + filters (dict[str, Any] | None, optional): dictionary of filters, + where the key is the field name and the value is the filter value. + Operators other than equals can be used in the key, + e.g. {"name": "Bob", "friends__contains": "Alice"}. Defaults to None. + has_permission (bool, optional): If True, overrides the permission check. + Defaults to False. + order_by (str | None, optional): If provided, the results will be ordered by this field. + If not provided, the default order and field defined on the SyftObject.__order_by__ are used. + Defaults to None. + sort_order (str | None, optional): "asc" or "desc" If not defined, + the default order defined on the SyftObject.__order_by__ is used. + Defaults to None. + limit (int | None, optional): limit the number of results. Defaults to None. + offset (int, optional): offset the results. Defaults to 0. + + Returns: + list[StashT]: list of objects. 
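+
+        Example (illustrative; ``verify_key`` is the caller's SyftVerifyKey):
+            stash.get_all(
+                credentials=verify_key,
+                filters={"name": "Bob"},
+                order_by="name",
+                sort_order="asc",
+                limit=10,
+            ).unwrap()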
+ """ + query = self.query() + + if not has_permission: + role = self.get_role(credentials, session=session) + query = query.with_permissions(credentials, role) + + for field_name, operator, field_value in parse_filters(filters): + query = query.filter(field_name, operator, field_value) + + query = query.order_by(order_by, sort_order).limit(limit).offset(offset) + result = query.execute(session).all() + return [self.row_as_obj(row) for row in result] + + # PERMISSIONS + def get_ownership_permissions( + self, uid: UID, credentials: SyftVerifyKey + ) -> list[str]: + return [ + ActionObjectOWNER(uid=uid, credentials=credentials).permission_string, + ActionObjectWRITE(uid=uid, credentials=credentials).permission_string, + ActionObjectREAD(uid=uid, credentials=credentials).permission_string, + ActionObjectEXECUTE(uid=uid, credentials=credentials).permission_string, + ] + + @as_result(NotFoundException) + @with_session + def add_permission( + self, + permission: ActionObjectPermission, + session: Session = None, + ignore_missing: bool = False, + ) -> None: + try: + existing_permissions = self._get_permissions_for_uid( + permission.uid, session=session + ).unwrap() + except NotFoundException: + if ignore_missing: + return None + raise + + existing_permissions.add(permission.permission_string) + + stmt = self.table.update().where(self.table.c.id == permission.uid) + stmt = stmt.values(permissions=list(existing_permissions)) + session.execute(stmt) + return None + + @as_result(NotFoundException) + @with_session + def add_permissions( + self, + permissions: list[ActionObjectPermission], + ignore_missing: bool = False, + session: Session = None, + ) -> None: + for permission in permissions: + self.add_permission( + permission, session=session, ignore_missing=ignore_missing + ).unwrap() + return None + + @with_session + def remove_permission( + self, permission: ActionObjectPermission, session: Session = None + ) -> None: + # TODO not threadsafe + try: + permissions = self._get_permissions_for_uid(permission.uid).unwrap() + permissions.remove(permission.permission_string) + except (NotFoundException, KeyError): + # TODO add error handling to permissions + return None + + stmt = ( + self.table.update() + .where(self.table.c.id == permission.uid) + .values(permissions=list(permissions)) + ) + session.execute(stmt) + return None + + @with_session + def has_permission( + self, permission: ActionObjectPermission, session: Session = None + ) -> bool: + if self.get_role(permission.credentials, session=session) in ( + ServiceRole.ADMIN, + ServiceRole.DATA_OWNER, + ): + return True + return self.has_permissions([permission], session=session) + + @with_session + def has_permissions( + self, permissions: list[ActionObjectPermission], session: Session = None + ) -> bool: + # TODO: we should use a permissions table to check all permissions at once + + permission_filters = [ + sa.and_( + self._get_field_filter("id", p.uid), + self._get_permission_filter_from_permisson(permission=p), + ) + for p in permissions + ] + + stmt = self.table.select().where( + sa.and_( + *permission_filters, + ), + ) + result = session.execute(stmt).first() + return result is not None + + @as_result(StashException) + @with_session + def _get_permissions_for_uid(self, uid: UID, session: Session = None) -> Set[str]: # noqa: UP006 + stmt = select(self.table.c.permissions).where(self.table.c.id == uid) + result = session.execute(stmt).scalar_one_or_none() + if result is None: + raise NotFoundException(f"No permissions found for uid: {uid}") + 
return set(result) + + @as_result(StashException) + @with_session + def get_all_permissions(self, session: Session = None) -> dict[UID, Set[str]]: # noqa: UP006 + stmt = select(self.table.c.id, self.table.c.permissions) + results = session.execute(stmt).all() + return {UID(row.id): set(row.permissions) for row in results} + + # STORAGE PERMISSIONS + @with_session + def has_storage_permission( + self, permission: StoragePermission, session: Session = None + ) -> bool: + return self.has_storage_permissions([permission], session=session) + + @with_session + def has_storage_permissions( + self, permissions: list[StoragePermission], session: Session = None + ) -> bool: + permission_filters = [ + sa.and_( + self._get_field_filter("id", p.uid), + self.table.c.storage_permissions.contains( + p.server_uid.no_dash + if self._is_sqlite() + else [p.server_uid.no_dash] + ), + ) + for p in permissions + ] + + stmt = self.table.select().where( + sa.and_( + *permission_filters, + ) + ) + result = session.execute(stmt).first() + return result is not None + + @as_result(StashException) + @with_session + def get_all_storage_permissions( + self, session: Session = None + ) -> dict[UID, Set[UID]]: # noqa: UP006 + stmt = select(self.table.c.id, self.table.c.storage_permissions) + results = session.execute(stmt).all() + + return { + UID(row.id): {UID(uid) for uid in row.storage_permissions} + for row in results + } + + @as_result(NotFoundException) + @with_session + def add_storage_permissions( + self, + permissions: list[StoragePermission], + session: Session = None, + ignore_missing: bool = False, + ) -> None: + for permission in permissions: + self.add_storage_permission( + permission, session=session, ignore_missing=ignore_missing + ).unwrap() + + return None + + @as_result(NotFoundException) + @with_session + def add_storage_permission( + self, + permission: StoragePermission, + session: Session = None, + ignore_missing: bool = False, + ) -> None: + try: + existing_permissions = self._get_storage_permissions_for_uid( + permission.uid, session=session + ).unwrap() + except NotFoundException: + if ignore_missing: + return None + raise + + existing_permissions.add(permission.server_uid) + + stmt = ( + self.table.update() + .where(self.table.c.id == permission.uid) + .values(storage_permissions=[str(uid) for uid in existing_permissions]) + ) + + session.execute(stmt) + + @with_session + def remove_storage_permission( + self, permission: StoragePermission, session: Session = None + ) -> None: + try: + permissions = self._get_storage_permissions_for_uid( + permission.uid, session=session + ).unwrap() + permissions.discard(permission.server_uid) + except NotFoundException: + # TODO add error handling to permissions + return None + + stmt = ( + self.table.update() + .where(self.table.c.id == permission.uid) + .values(storage_permissions=[str(uid) for uid in permissions]) + ) + session.execute(stmt) + return None + + @as_result(StashException) + @with_session + def _get_storage_permissions_for_uid( + self, uid: UID, session: Session = None + ) -> Set[UID]: # noqa: UP006 + stmt = select(self.table.c.id, self.table.c.storage_permissions).where( + self.table.c.id == uid + ) + result = session.execute(stmt).first() + if result is None: + raise NotFoundException(f"No storage permissions found for uid: {uid}") + return {UID(uid) for uid in result.storage_permissions} + + @with_session + @as_result(StashException) + def upsert( + self, + credentials: SyftVerifyKey, + obj: StashT, + session: Session = None, + ) -> StashT: + 
"""Insert or update an object in the stash if it already exists. + Atomic operation when using the same session for both operations. + """ + + try: + return self.set( + credentials=credentials, + obj=obj, + session=session, + ).unwrap() + except UniqueConstraintException: + return self.update( + credentials=credentials, obj=obj, session=session + ).unwrap() diff --git a/packages/syft/src/syft/store/dict_document_store.py b/packages/syft/src/syft/store/dict_document_store.py deleted file mode 100644 index d422ca87584..00000000000 --- a/packages/syft/src/syft/store/dict_document_store.py +++ /dev/null @@ -1,107 +0,0 @@ -# future -from __future__ import annotations - -# stdlib -from typing import Any - -# third party -from pydantic import Field - -# relative -from ..node.credentials import SyftVerifyKey -from ..serde.serializable import serializable -from ..types import uid -from .document_store import DocumentStore -from .document_store import StoreConfig -from .kv_document_store import KeyValueBackingStore -from .kv_document_store import KeyValueStorePartition -from .locks import LockingConfig -from .locks import ThreadingLockingConfig - - -@serializable() -class DictBackingStore(dict, KeyValueBackingStore): # type: ignore[misc] - # TODO: fix the mypy issue - """Dictionary-based Store core logic""" - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__() - self._ddtype = kwargs.get("ddtype", None) - - def __getitem__(self, key: Any) -> Any: - try: - value = super().__getitem__(key) - return value - except KeyError as e: - if self._ddtype: - return self._ddtype() - raise e - - -@serializable() -class DictStorePartition(KeyValueStorePartition): - """Dictionary-based StorePartition - - Parameters: - `settings`: PartitionSettings - PySyft specific settings, used for indexing and partitioning - `store_config`: DictStoreConfig - DictStore specific configuration - """ - - def prune(self) -> None: - self.init_store() - - -# the base document store is already a dict but we can change it later -@serializable() -class DictDocumentStore(DocumentStore): - """Dictionary-based Document Store - - Parameters: - `store_config`: DictStoreConfig - Dictionary Store specific configuration, containing the store type and the backing store type - """ - - partition_type = DictStorePartition - - def __init__( - self, - node_uid: uid, - root_verify_key: SyftVerifyKey | None, - store_config: DictStoreConfig | None = None, - ) -> None: - if store_config is None: - store_config = DictStoreConfig() - super().__init__( - node_uid=node_uid, - root_verify_key=root_verify_key, - store_config=store_config, - ) - - def reset(self) -> None: - for _, partition in self.partitions.items(): - partition.prune() - - -@serializable() -class DictStoreConfig(StoreConfig): - __canonical_name__ = "DictStoreConfig" - """Dictionary-based configuration - - Parameters: - `store_type`: Type[DocumentStore] - The Document type used. Default: DictDocumentStore - `backing_store`: Type[KeyValueBackingStore] - The backend type used. Default: DictBackingStore - locking_config: LockingConfig - The config used for store locking. Available options: - * NoLockingConfig: no locking, ideal for single-thread stores. - * ThreadingLockingConfig: threading-based locking, ideal for same-process in-memory stores. - * FileLockingConfig: file based locking, ideal for same-device different-processes/threads stores. - Defaults to ThreadingLockingConfig. 
- """ - - store_type: type[DocumentStore] = DictDocumentStore - backing_store: type[KeyValueBackingStore] = DictBackingStore - locking_config: LockingConfig = Field(default_factory=ThreadingLockingConfig) diff --git a/packages/syft/src/syft/store/document_store.py b/packages/syft/src/syft/store/document_store.py index a3739d3c9c5..24dbd9969b4 100644 --- a/packages/syft/src/syft/store/document_store.py +++ b/packages/syft/src/syft/store/document_store.py @@ -1,58 +1,16 @@ # future from __future__ import annotations -# stdlib -from collections.abc import Callable -import types -import typing -from typing import Any - # third party from pydantic import BaseModel from pydantic import Field -from result import Err -from result import Ok -from result import Result -from typeguard import check_type # relative -from ..node.credentials import SyftSigningKey -from ..node.credentials import SyftVerifyKey from ..serde.serializable import serializable -from ..service.action.action_permissions import ActionObjectPermission -from ..service.context import AuthedServiceContext -from ..service.response import SyftSuccess -from ..types.base import SyftBaseModel -from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from ..types.syft_object import SyftBaseObject -from ..types.syft_object import SyftObject -from ..types.uid import UID -from ..util.telemetry import instrument from .locks import LockingConfig from .locks import NoLockingConfig -from .locks import SyftLock - - -@serializable() -class BasePartitionSettings(SyftBaseModel): - """Basic Partition Settings - - Parameters: - name: str - Identifier to be used as prefix by stores and for partitioning - """ - - name: str - - -def first_or_none(result: Any) -> Ok: - if hasattr(result, "__len__") and len(result) > 0: - return Ok(result[0]) - return Ok(None) - - -def is_generic_alias(t: type) -> bool: - return isinstance(t, types.GenericAlias | typing._GenericAlias) class StoreClientConfig(BaseModel): @@ -61,240 +19,13 @@ class StoreClientConfig(BaseModel): pass -@serializable() -class PartitionKey(BaseModel): - key: str - type_: type | object - - def __eq__(self, other: Any) -> bool: - return ( - type(other) == type(self) - and self.key == other.key - and self.type_ == other.type_ - ) - - def with_obj(self, obj: Any) -> QueryKey: - return QueryKey.from_obj(partition_key=self, obj=obj) - - def extract_list(self, obj: Any) -> list: - # not a list and matches the internal list type of the _GenericAlias - if not isinstance(obj, list): - if not isinstance(obj, typing.get_args(self.type_)): - obj = getattr(obj, self.key) - if isinstance(obj, types.FunctionType | types.MethodType): - obj = obj() - - if not isinstance(obj, list) and isinstance( - obj, typing.get_args(self.type_) - ): - # still not a list but the right type - obj = [obj] - - # is a list type so lets compare directly - check_type(obj, self.type_) - return obj - - @property - def type_list(self) -> bool: - return is_generic_alias(self.type_) and self.type_.__origin__ == list - - -@serializable() -class PartitionKeys(BaseModel): - pks: PartitionKey | tuple[PartitionKey, ...] | list[PartitionKey] - - @property - def all(self) -> tuple[PartitionKey, ...] 
| list[PartitionKey]: - # make sure we always return a list even if there's a single value - return self.pks if isinstance(self.pks, tuple | list) else [self.pks] - - def with_obj(self, obj: Any) -> QueryKeys: - return QueryKeys.from_obj(partition_keys=self, obj=obj) - - def with_tuple(self, *args: Any) -> QueryKeys: - return QueryKeys.from_tuple(partition_keys=self, args=args) - - def add(self, pk: PartitionKey) -> PartitionKeys: - return PartitionKeys(pks=list(self.all) + [pk]) - - @staticmethod - def from_dict(cks_dict: dict[str, type]) -> PartitionKeys: - pks = [] - for k, t in cks_dict.items(): - pks.append(PartitionKey(key=k, type_=t)) - return PartitionKeys(pks=pks) - - -@serializable() -class QueryKey(PartitionKey): - value: Any = None - - def __eq__(self, other: Any) -> bool: - return ( - type(other) == type(self) - and self.key == other.key - and self.type_ == other.type_ - and self.value == other.value - ) - - @property - def partition_key(self) -> PartitionKey: - return PartitionKey(key=self.key, type_=self.type_) - - @staticmethod - def from_obj(partition_key: PartitionKey, obj: Any) -> QueryKey: - pk_key = partition_key.key - pk_type = partition_key.type_ - - # 🟡 TODO: support more advanced types than List[type] - if partition_key.type_list: - pk_value = partition_key.extract_list(obj) - else: - if isinstance(obj, pk_type): - pk_value = obj - else: - pk_value = getattr(obj, pk_key) - # object has a method for getting these types - # we can't use properties because we don't seem to be able to get the - # return types - # TODO: fix the mypy issue - if isinstance(pk_value, types.FunctionType | types.MethodType): # type: ignore[unreachable] - pk_value = pk_value() # type: ignore[unreachable] - - if pk_value and not isinstance(pk_value, pk_type): - raise Exception( - f"PartitionKey {pk_value} of type {type(pk_value)} must be {pk_type}." - ) - return QueryKey(key=pk_key, type_=pk_type, value=pk_value) - - @property - def as_dict(self) -> dict[str, Any]: - return {self.key: self.value} - - @property - def as_dict_mongo(self) -> dict[str, Any]: - key = self.key - if key == "id": - key = "_id" - if self.type_list: - # We want to search inside the list of values - return {key: {"$in": self.value}} - return {key: self.value} - - -@serializable() -class PartitionKeysWithUID(PartitionKeys): - uid_pk: PartitionKey - - @property - def all(self) -> tuple[PartitionKey, ...] | list[PartitionKey]: - all_keys = list(self.pks) if isinstance(self.pks, tuple | list) else [self.pks] - if self.uid_pk not in all_keys: - all_keys.insert(0, self.uid_pk) - return all_keys - - -@serializable() -class QueryKeys(SyftBaseModel): - qks: QueryKey | tuple[QueryKey, ...] | list[QueryKey] - - @property - def all(self) -> tuple[QueryKey, ...] 
| list[QueryKey]: - # make sure we always return a list even if there's a single value - return self.qks if isinstance(self.qks, tuple | list) else [self.qks] - - @staticmethod - def from_obj(partition_keys: PartitionKeys, obj: SyftObject) -> QueryKeys: - qks = [] - for partition_key in partition_keys.all: - pk_key = partition_key.key - pk_type = partition_key.type_ - pk_value = getattr(obj, pk_key) - # object has a method for getting these types - # we can't use properties because we don't seem to be able to get the - # return types - if isinstance(pk_value, types.FunctionType | types.MethodType): - pk_value = pk_value() - if partition_key.type_list: - pk_value = partition_key.extract_list(obj) - else: - if pk_value and not isinstance(pk_value, pk_type): - raise Exception( - f"PartitionKey {pk_value} of type {type(pk_value)} must be {pk_type}." - ) - qk = QueryKey(key=pk_key, type_=pk_type, value=pk_value) - qks.append(qk) - return QueryKeys(qks=qks) - - @staticmethod - def from_tuple(partition_keys: PartitionKeys, args: tuple) -> QueryKeys: - qks = [] - for partition_key, pk_value in zip(partition_keys.all, args): - pk_key = partition_key.key - pk_type = partition_key.type_ - if not isinstance(pk_value, pk_type): - raise Exception( - f"PartitionKey {pk_value} of type {type(pk_value)} must be {pk_type}." - ) - qk = QueryKey(key=pk_key, type_=pk_type, value=pk_value) - qks.append(qk) - return QueryKeys(qks=qks) - - @staticmethod - def from_dict(qks_dict: dict[str, Any]) -> QueryKeys: - qks = [] - for k, v in qks_dict.items(): - qks.append(QueryKey(key=k, type_=type(v), value=v)) - return QueryKeys(qks=qks) - - @property - def as_dict(self) -> dict: - qk_dict = {} - for qk in self.all: - qk_key = qk.key - qk_value = qk.value - qk_dict[qk_key] = qk_value - return qk_dict - - @property - def as_dict_mongo(self) -> dict: - qk_dict = {} - for qk in self.all: - qk_key = qk.key - qk_value = qk.value - if qk_key == "id": - qk_key = "_id" - if qk.type_list: - # We want to search inside the list of values - qk_dict[qk_key] = {"$in": qk_value} - else: - qk_dict[qk_key] = qk_value - return qk_dict - - -UIDPartitionKey = PartitionKey(key="id", type_=UID) - - -@serializable() -class PartitionSettings(BasePartitionSettings): - object_type: type - store_key: PartitionKey = UIDPartitionKey - - @property - def unique_keys(self) -> PartitionKeys: - unique_keys = PartitionKeys.from_dict(self.object_type._syft_unique_keys_dict()) - return unique_keys.add(self.store_key) - - @property - def searchable_keys(self) -> PartitionKeys: - return PartitionKeys.from_dict(self.object_type._syft_searchable_keys_dict()) - - -@instrument -@serializable(attrs=["settings", "store_config", "unique_cks", "searchable_cks"]) +@serializable( + attrs=["settings", "store_config", "unique_cks", "searchable_cks"], + canonical_name="StorePartition", + version=1, +) class StorePartition: """Base StorePartition - Parameters: settings: PartitionSettings PySyft specific settings @@ -302,465 +33,6 @@ class StorePartition: Backend specific configuration """ - def __init__( - self, - node_uid: UID, - root_verify_key: SyftVerifyKey | None, - settings: PartitionSettings, - store_config: StoreConfig, - ) -> None: - if root_verify_key is None: - root_verify_key = SyftSigningKey.generate().verify_key - self.node_uid = node_uid - self.root_verify_key = root_verify_key - self.settings = settings - self.store_config = store_config - self.init_store() - - store_config.locking_config.lock_name = f"StorePartition-{settings.name}" - self.lock = 
SyftLock(store_config.locking_config) - - def init_store(self) -> Result[Ok, Err]: - try: - self.unique_cks = self.settings.unique_keys.all - self.searchable_cks = self.settings.searchable_keys.all - except BaseException as e: - return Err(str(e)) - - return Ok(True) - - def matches_unique_cks(self, partition_key: PartitionKey) -> bool: - return partition_key in self.unique_cks - - def matches_searchable_cks(self, partition_key: PartitionKey) -> bool: - return partition_key in self.searchable_cks - - def store_query_key(self, obj: Any) -> QueryKey: - return self.settings.store_key.with_obj(obj) - - def store_query_keys(self, objs: Any) -> QueryKeys: - return QueryKeys(qks=[self.store_query_key(obj) for obj in objs]) - - # Thread-safe methods - def _thread_safe_cbk(self, cbk: Callable, *args: Any, **kwargs: Any) -> Any | Err: - locked = self.lock.acquire(blocking=True) - if not locked: - print("FAILED TO LOCK") - return Err( - f"Failed to acquire lock for the operation {self.lock.lock_name} ({self.lock._lock})" - ) - - try: - result = cbk(*args, **kwargs) - except BaseException as e: - result = Err(str(e)) - self.lock.release() - - return result - - def set( - self, - credentials: SyftVerifyKey, - obj: SyftObject, - add_permissions: list[ActionObjectPermission] | None = None, - add_storage_permission: bool = True, - ignore_duplicates: bool = False, - ) -> Result[SyftObject, str]: - return self._thread_safe_cbk( - self._set, - credentials=credentials, - obj=obj, - add_permissions=add_permissions, - add_storage_permission=add_storage_permission, - ignore_duplicates=ignore_duplicates, - ) - - def get( - self, - credentials: SyftVerifyKey, - uid: UID, - ) -> Result[SyftObject, str]: - return self._thread_safe_cbk( - self._get, - uid=uid, - credentials=credentials, - ) - - def find_index_or_search_keys( - self, - credentials: SyftVerifyKey, - index_qks: QueryKeys, - search_qks: QueryKeys, - order_by: PartitionKey | None = None, - ) -> Result[list[SyftObject], str]: - return self._thread_safe_cbk( - self._find_index_or_search_keys, - credentials, - index_qks=index_qks, - search_qks=search_qks, - order_by=order_by, - ) - - def remove_keys( - self, - unique_query_keys: QueryKeys, - searchable_query_keys: QueryKeys, - ) -> None: - self._thread_safe_cbk( - self._remove_keys, - unique_query_keys=unique_query_keys, - searchable_query_keys=searchable_query_keys, - ) - - def update( - self, - credentials: SyftVerifyKey, - qk: QueryKey, - obj: SyftObject, - has_permission: bool = False, - ) -> Result[SyftObject, str]: - return self._thread_safe_cbk( - self._update, - credentials=credentials, - qk=qk, - obj=obj, - has_permission=has_permission, - ) - - def get_all_from_store( - self, - credentials: SyftVerifyKey, - qks: QueryKeys, - order_by: PartitionKey | None = None, - ) -> Result[list[SyftObject], str]: - return self._thread_safe_cbk( - self._get_all_from_store, credentials, qks, order_by - ) - - def delete( - self, credentials: SyftVerifyKey, qk: QueryKey, has_permission: bool = False - ) -> Result[SyftSuccess, Err]: - return self._thread_safe_cbk( - self._delete, credentials, qk, has_permission=has_permission - ) - - def all( - self, - credentials: SyftVerifyKey, - order_by: PartitionKey | None = None, - has_permission: bool | None = False, - ) -> Result[list[BaseStash.object_type], str]: - return self._thread_safe_cbk(self._all, credentials, order_by, has_permission) - - def migrate_data( - self, - to_klass: SyftObject, - context: AuthedServiceContext, - has_permission: bool | None = False, - ) -> 
Result[bool, str]: - return self._thread_safe_cbk( - self._migrate_data, to_klass, context, has_permission - ) - - # Potentially thread-unsafe methods. - # CAUTION: - # * Don't use self.lock here. - # * Do not call the public thread-safe methods here(with locking). - # These methods are called from the public thread-safe API, and will hang the process. - def _set( - self, - credentials: SyftVerifyKey, - obj: SyftObject, - add_permissions: list[ActionObjectPermission] | None = None, - add_storage_permission: bool = True, - ignore_duplicates: bool = False, - ) -> Result[SyftObject, str]: - raise NotImplementedError - - def _update( - self, - credentials: SyftVerifyKey, - qk: QueryKey, - obj: SyftObject, - has_permission: bool = False, - overwrite: bool = False, - ) -> Result[SyftObject, str]: - raise NotImplementedError - - def _get_all_from_store( - self, - credentials: SyftVerifyKey, - qks: QueryKeys, - order_by: PartitionKey | None = None, - ) -> Result[list[SyftObject], str]: - raise NotImplementedError - - def _delete( - self, credentials: SyftVerifyKey, qk: QueryKey, has_permission: bool = False - ) -> Result[SyftSuccess, Err]: - raise NotImplementedError - - def _all( - self, - credentials: SyftVerifyKey, - order_by: PartitionKey | None = None, - has_permission: bool | None = False, - ) -> Result[list[BaseStash.object_type], str]: - raise NotImplementedError - - def add_permission(self, permission: ActionObjectPermission) -> None: - raise NotImplementedError - - def add_permissions(self, permissions: list[ActionObjectPermission]) -> None: - raise NotImplementedError - - def remove_permission(self, permission: ActionObjectPermission) -> None: - raise NotImplementedError - - def has_permission(self, permission: ActionObjectPermission) -> bool: - raise NotImplementedError - - def _migrate_data( - self, - to_klass: SyftObject, - context: AuthedServiceContext, - has_permission: bool, - ) -> Result[bool, str]: - raise NotImplementedError - - -@instrument -@serializable() -class DocumentStore: - """Base Document Store - - Parameters: - store_config: StoreConfig - Store specific configuration. 
- """ - - partitions: dict[str, StorePartition] - partition_type: type[StorePartition] - - def __init__( - self, - node_uid: UID, - root_verify_key: SyftVerifyKey | None, - store_config: StoreConfig, - ) -> None: - if store_config is None: - raise Exception("must have store config") - self.partitions = {} - self.store_config = store_config - self.node_uid = node_uid - self.root_verify_key = root_verify_key - - def partition(self, settings: PartitionSettings) -> StorePartition: - if settings.name not in self.partitions: - self.partitions[settings.name] = self.partition_type( - node_uid=self.node_uid, - root_verify_key=self.root_verify_key, - settings=settings, - store_config=self.store_config, - ) - return self.partitions[settings.name] - - -@instrument -class BaseStash: - object_type: type[SyftObject] - settings: PartitionSettings - partition: StorePartition - - def __init__(self, store: DocumentStore) -> None: - self.store = store - self.partition = store.partition(type(self).settings) - - def check_type(self, obj: Any, type_: type) -> Result[Any, str]: - return ( - Ok(obj) - if isinstance(obj, type_) - else Err(f"{type(obj)} does not match required type: {type_}") - ) - - def get_all( - self, - credentials: SyftVerifyKey, - order_by: PartitionKey | None = None, - has_permission: bool = False, - ) -> Result[list[BaseStash.object_type], str]: - return self.partition.all(credentials, order_by, has_permission) - - def add_permissions(self, permissions: list[ActionObjectPermission]) -> None: - self.partition.add_permissions(permissions) - - def add_permission(self, permission: ActionObjectPermission) -> None: - self.partition.add_permission(permission) - - def remove_permission(self, permission: ActionObjectPermission) -> None: - self.partition.remove_permission(permission) - - def has_permission(self, permission: ActionObjectPermission) -> bool: - return self.partition.has_permission(permission=permission) - - def __len__(self) -> int: - return len(self.partition) - - def set( - self, - credentials: SyftVerifyKey, - obj: BaseStash.object_type, - add_permissions: list[ActionObjectPermission] | None = None, - add_storage_permission: bool = True, - ignore_duplicates: bool = False, - ) -> Result[BaseStash.object_type, str]: - return self.partition.set( - credentials=credentials, - obj=obj, - ignore_duplicates=ignore_duplicates, - add_permissions=add_permissions, - add_storage_permission=add_storage_permission, - ) - - def query_all( - self, - credentials: SyftVerifyKey, - qks: QueryKey | QueryKeys, - order_by: PartitionKey | None = None, - ) -> Result[list[BaseStash.object_type], str]: - if isinstance(qks, QueryKey): - qks = QueryKeys(qks=qks) - - unique_keys = [] - searchable_keys = [] - - for qk in qks.all: - pk = qk.partition_key - if self.partition.matches_unique_cks(pk): - unique_keys.append(qk) - elif self.partition.matches_searchable_cks(pk): - searchable_keys.append(qk) - else: - return Err( - f"{qk} not in {type(self.partition)} unique or searchable keys" - ) - - index_qks = QueryKeys(qks=unique_keys) - search_qks = QueryKeys(qks=searchable_keys) - - return self.partition.find_index_or_search_keys( - credentials=credentials, - index_qks=index_qks, - search_qks=search_qks, - order_by=order_by, - ) - - def query_all_kwargs( - self, - credentials: SyftVerifyKey, - **kwargs: dict[str, Any], - ) -> Result[list[BaseStash.object_type], str]: - order_by = kwargs.pop("order_by", None) - qks = QueryKeys.from_dict(kwargs) - return self.query_all(credentials=credentials, qks=qks, order_by=order_by) 
- - def query_one( - self, - credentials: SyftVerifyKey, - qks: QueryKey | QueryKeys, - order_by: PartitionKey | None = None, - ) -> Result[BaseStash.object_type | None, str]: - return self.query_all( - credentials=credentials, qks=qks, order_by=order_by - ).and_then(first_or_none) - - def query_one_kwargs( - self, - credentials: SyftVerifyKey, - **kwargs: dict[str, Any], - ) -> Result[BaseStash.object_type | None, str]: - return self.query_all_kwargs(credentials, **kwargs).and_then(first_or_none) - - def find_all( - self, credentials: SyftVerifyKey, **kwargs: dict[str, Any] - ) -> Result[list[BaseStash.object_type], str]: - return self.query_all_kwargs(credentials=credentials, **kwargs) - - def find_one( - self, credentials: SyftVerifyKey, **kwargs: dict[str, Any] - ) -> Result[BaseStash.object_type | None, str]: - return self.query_one_kwargs(credentials=credentials, **kwargs) - - def find_and_delete( - self, credentials: SyftVerifyKey, **kwargs: dict[str, Any] - ) -> Result[SyftSuccess, Err]: - obj = self.query_one_kwargs(credentials=credentials, **kwargs) - if obj.is_err(): - return obj - else: - obj = obj.ok() - - if not obj: - return Err(f"Object does not exists with kwargs: {kwargs}") - qk = self.partition.store_query_key(obj) - return self.delete(credentials=credentials, qk=qk) - - def delete( - self, credentials: SyftVerifyKey, qk: QueryKey, has_permission: bool = False - ) -> Result[SyftSuccess, Err]: - return self.partition.delete( - credentials=credentials, qk=qk, has_permission=has_permission - ) - - def update( - self, - credentials: SyftVerifyKey, - obj: BaseStash.object_type, - has_permission: bool = False, - ) -> Result[BaseStash.object_type, str]: - qk = self.partition.store_query_key(obj) - return self.partition.update( - credentials=credentials, qk=qk, obj=obj, has_permission=has_permission - ) - - -@instrument -class BaseUIDStoreStash(BaseStash): - def delete_by_uid( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[SyftSuccess, str]: - qk = UIDPartitionKey.with_obj(uid) - result = super().delete(credentials=credentials, qk=qk) - if result.is_ok(): - return Ok(SyftSuccess(message=f"ID: {uid} deleted")) - return result - - def get_by_uid( - self, credentials: SyftVerifyKey, uid: UID - ) -> Result[BaseUIDStoreStash.object_type | None, str]: - qks = QueryKeys(qks=[UIDPartitionKey.with_obj(uid)]) - return self.query_one(credentials=credentials, qks=qks) - - def set( - self, - credentials: SyftVerifyKey, - obj: BaseUIDStoreStash.object_type, - add_permissions: list[ActionObjectPermission] | None = None, - add_storage_permission: bool = True, - ignore_duplicates: bool = False, - ) -> Result[BaseUIDStoreStash.object_type, str]: - res = self.check_type(obj, self.object_type) - # we dont use and_then logic here as it is hard because of the order of the arguments - if res.is_err(): - return res - return super().set( - credentials=credentials, - obj=res.ok(), - ignore_duplicates=ignore_duplicates, - add_permissions=add_permissions, - add_storage_permission=add_storage_permission, - ) - @serializable() class StoreConfig(SyftBaseObject): @@ -775,13 +47,17 @@ class StoreConfig(SyftBaseObject): The config used for store locking. Available options: * NoLockingConfig: no locking, ideal for single-thread stores. * ThreadingLockingConfig: threading-based locking, ideal for same-process in-memory stores. - * FileLockingConfig: file based locking, ideal for same-device different-processes/threads stores. Defaults to NoLockingConfig. 
""" __canonical_name__ = "StoreConfig" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 store_type: type[DocumentStore] client_config: StoreClientConfig | None = None - locking_config: LockingConfig = Field(default_factory=NoLockingConfig) + locking_config: LockingConfig = Field(default_factory=NoLockingConfig) # noqa: F821 + + +@serializable(canonical_name="DocumentStore", version=1) +class DocumentStore: + pass diff --git a/packages/syft/src/syft/store/document_store_errors.py b/packages/syft/src/syft/store/document_store_errors.py new file mode 100644 index 00000000000..04fb6777897 --- /dev/null +++ b/packages/syft/src/syft/store/document_store_errors.py @@ -0,0 +1,26 @@ +# relative +from ..types.errors import SyftException + + +class NotFoundException(SyftException): + public_message = "Item not found." + + +class TooManyItemsFoundException(SyftException): + public_message = "Too many items found." + + +class StashException(SyftException): + public_message = "There was an error retrieving data. Contact your admin." + + +class UniqueConstraintException(StashException): + public_message = "Another item with the same unique constraint already exists." + + +class ObjectCRUDPermissionException(SyftException): + public_message = "You do not have permission to perform this action." + + +class ObjectExecutionPermissionException(SyftException): + public_message = "You do not have permission to execute this action." diff --git a/packages/syft/src/syft/store/kv_document_store.py b/packages/syft/src/syft/store/kv_document_store.py index a4bbc548f55..30104b9a581 100644 --- a/packages/syft/src/syft/store/kv_document_store.py +++ b/packages/syft/src/syft/store/kv_document_store.py @@ -1,94 +1,10 @@ -# future -from __future__ import annotations - -# stdlib -from collections import defaultdict -from enum import Enum -from typing import Any - -# third party -from result import Err -from result import Ok -from result import Result -from typing_extensions import Self - # relative -from ..node.credentials import SyftVerifyKey -from ..serde.serializable import serializable -from ..service.action.action_permissions import ActionObjectEXECUTE -from ..service.action.action_permissions import ActionObjectOWNER -from ..service.action.action_permissions import ActionObjectPermission -from ..service.action.action_permissions import ActionObjectREAD -from ..service.action.action_permissions import ActionObjectWRITE -from ..service.action.action_permissions import ActionPermission -from ..service.action.action_permissions import StoragePermission -from ..service.context import AuthedServiceContext -from ..service.response import SyftSuccess -from ..types.syft_object import SyftObject -from ..types.uid import UID -from .document_store import BaseStash -from .document_store import PartitionKey -from .document_store import QueryKey -from .document_store import QueryKeys -from .document_store import StorePartition - - -@serializable() -class UniqueKeyCheck(Enum): - EMPTY = 0 - MATCHES = 1 - ERROR = 2 - - class KeyValueBackingStore: - """Key-Value store core logic.""" - - def __setitem__(self, key: Any, value: Any) -> None: - raise NotImplementedError - - def __getitem__(self, key: Any) -> Self: - raise NotImplementedError - - def __repr__(self) -> str: - raise NotImplementedError - - def __len__(self) -> int: - raise NotImplementedError - - def __delitem__(self, key: str) -> None: - raise NotImplementedError + pass - def clear(self) -> None: - raise NotImplementedError - def copy(self) -> 
Self: - raise NotImplementedError - - def update(self, *args: Any, **kwargs: Any) -> None: - raise NotImplementedError - - def keys(self) -> Any: - raise NotImplementedError - - def values(self) -> Any: - raise NotImplementedError - - def items(self) -> Any: - raise NotImplementedError - - def pop(self, *args: Any) -> Self: - raise NotImplementedError - - def __contains__(self, item: Any) -> bool: - raise NotImplementedError - - def __iter__(self) -> Any: - raise NotImplementedError - - -class KeyValueStorePartition(StorePartition): +class KeyValueStorePartition: """Key-Value StorePartition - Parameters: `settings`: PartitionSettings PySyft specific settings @@ -96,584 +12,4 @@ class KeyValueStorePartition(StorePartition): Backend specific configuration """ - def init_store(self) -> Result[Ok, Err]: - store_status = super().init_store() - if store_status.is_err(): - return store_status - - try: - self.data = self.store_config.backing_store( - "data", self.settings, self.store_config - ) - self.unique_keys = self.store_config.backing_store( - "unique_keys", self.settings, self.store_config - ) - self.searchable_keys = self.store_config.backing_store( - "searchable_keys", self.settings, self.store_config - ) - # uid -> set['_permission'] - self.permissions: dict[UID, set[str]] = self.store_config.backing_store( - "permissions", self.settings, self.store_config, ddtype=set - ) - - # uid -> set[''] - self.storage_permissions: dict[UID, set[UID]] = ( - self.store_config.backing_store( - "storage_permissions", - self.settings, - self.store_config, - ddtype=set, - ) - ) - - for partition_key in self.unique_cks: - pk_key = partition_key.key - if pk_key not in self.unique_keys: - self.unique_keys[pk_key] = {} - - for partition_key in self.searchable_cks: - pk_key = partition_key.key - if pk_key not in self.searchable_keys: - self.searchable_keys[pk_key] = defaultdict(list) - except BaseException as e: - return Err(str(e)) - - return Ok(True) - - def __len__(self) -> int: - return len(self.data) - - def _get( - self, - uid: UID, - credentials: SyftVerifyKey, - has_permission: bool | None = False, - ) -> Result[SyftObject, str]: - # relative - from ..service.action.action_store import ActionObjectREAD - - # if you get something you need READ permission - read_permission = ActionObjectREAD(uid=uid, credentials=credentials) - - if self.has_permission(read_permission) or has_permission: - syft_object = self.data[uid] - return Ok(syft_object) - return Err(f"Permission: {read_permission} denied") - - # Potentially thread-unsafe methods. - # CAUTION: - # * Don't use self.lock here. - # * Do not call the public thread-safe methods here(with locking). - # These methods are called from the public thread-safe API, and will hang the process. 
- - def _set( - self, - credentials: SyftVerifyKey, - obj: SyftObject, - add_permissions: list[ActionObjectPermission] | None = None, - add_storage_permission: bool = True, - ignore_duplicates: bool = False, - ) -> Result[SyftObject, str]: - try: - # if obj.id is None: - # obj.id = UID() - store_query_key: QueryKey = self.settings.store_key.with_obj(obj) - uid = store_query_key.value - write_permission = ActionObjectWRITE(uid=uid, credentials=credentials) - can_write = self.has_permission(write_permission) - unique_query_keys: QueryKeys = self.settings.unique_keys.with_obj(obj) - store_key_exists = store_query_key.value in self.data - searchable_query_keys = self.settings.searchable_keys.with_obj(obj) - - ck_check = self._check_partition_keys_unique( - unique_query_keys=unique_query_keys - ) - - if not store_key_exists and ck_check == UniqueKeyCheck.EMPTY: - # attempt to claim it for writing - ownership_result = self.take_ownership(uid=uid, credentials=credentials) - can_write = ownership_result.is_ok() - elif not ignore_duplicates: - keys = ", ".join(f"`{key.key}`" for key in unique_query_keys.all) - return Err( - f"Duplication Key Error for {obj}.\n" - f"The fields that should be unique are {keys}." - ) - else: - # we are not throwing an error, because we are ignoring duplicates - # we are also not writing though - return Ok(obj) - - if can_write: - self._set_data_and_keys( - store_query_key=store_query_key, - unique_query_keys=unique_query_keys, - searchable_query_keys=searchable_query_keys, - obj=obj, - ) - self.data[uid] = obj - - # Add default permissions - if uid not in self.permissions: - self.permissions[uid] = set() - self.add_permission(ActionObjectREAD(uid=uid, credentials=credentials)) - if add_permissions is not None: - self.add_permissions(add_permissions) - - if uid not in self.storage_permissions: - self.storage_permissions[uid] = set() - if add_storage_permission: - self.add_storage_permission( - StoragePermission( - uid=uid, - node_uid=self.node_uid, - ) - ) - - return Ok(obj) - else: - return Err(f"Permission: {write_permission} denied") - except Exception as e: - return Err(f"Failed to write obj {obj}. 
{e}") - - def take_ownership( - self, uid: UID, credentials: SyftVerifyKey - ) -> Result[SyftSuccess, str]: - # first person using this UID can claim ownership - if uid not in self.permissions and uid not in self.data: - self.add_permissions( - [ - ActionObjectOWNER(uid=uid, credentials=credentials), - ActionObjectWRITE(uid=uid, credentials=credentials), - ActionObjectREAD(uid=uid, credentials=credentials), - ActionObjectEXECUTE(uid=uid, credentials=credentials), - ] - ) - return Ok(SyftSuccess(message=f"Ownership of ID: {uid} taken.")) - return Err(f"UID: {uid} already owned.") - - def add_permission(self, permission: ActionObjectPermission) -> None: - permissions = self.permissions[permission.uid] - permissions.add(permission.permission_string) - self.permissions[permission.uid] = permissions - - def remove_permission(self, permission: ActionObjectPermission) -> None: - permissions = self.permissions[permission.uid] - permissions.remove(permission.permission_string) - self.permissions[permission.uid] = permissions - - def add_permissions(self, permissions: list[ActionObjectPermission]) -> None: - for permission in permissions: - self.add_permission(permission) - - def has_permission(self, permission: ActionObjectPermission) -> bool: - if not isinstance(permission.permission, ActionPermission): - raise Exception(f"ObjectPermission type: {permission.permission} not valid") - - # TODO: fix for other admins - if ( - permission.credentials - and self.root_verify_key.verify == permission.credentials.verify - ): - return True - - if ( - permission.uid in self.permissions - and permission.permission_string in self.permissions[permission.uid] - ): - return True - - # 🟡 TODO 14: add ALL_READ, ALL_EXECUTE etc - # third party - if permission.permission == ActionPermission.OWNER: - pass - elif ( - permission.permission == ActionPermission.READ - and ActionObjectPermission( - permission.uid, ActionPermission.ALL_READ - ).permission_string - in self.permissions[permission.uid] - ): - return True - elif permission.permission == ActionPermission.WRITE: - pass - elif permission.permission == ActionPermission.EXECUTE: - pass - - return False - - def add_storage_permission(self, permission: StoragePermission) -> None: - permissions = self.storage_permissions[permission.uid] - permissions.add(permission.node_uid) - self.storage_permissions[permission.uid] = permissions - - def add_storage_permissions(self, permissions: list[StoragePermission]) -> None: - for permission in permissions: - self.add_storage_permission(permission) - - def remove_storage_permission(self, permission: StoragePermission) -> None: - permissions = self.storage_permissions[permission.uid] - permissions.remove(permission.node_uid) - self.storage_permissions[permission.uid] = permissions - - def has_storage_permission(self, permission: StoragePermission) -> bool: - if permission.uid in self.storage_permissions: - return permission.node_uid in self.storage_permissions[permission.uid] - return False - - def _all( - self, - credentials: SyftVerifyKey, - order_by: PartitionKey | None = None, - has_permission: bool | None = False, - ) -> Result[list[BaseStash.object_type], str]: - # this checks permissions - res = [self._get(uid, credentials, has_permission) for uid in self.data.keys()] - result = [x.ok() for x in res if x.is_ok()] - if order_by is not None: - result = sorted(result, key=lambda x: getattr(x, order_by.key, "")) - return Ok(result) - - def _remove_keys( - self, - store_key: QueryKey, - unique_query_keys: QueryKeys, - 
searchable_query_keys: QueryKeys, - ) -> None: - uqks = unique_query_keys.all - for qk in uqks: - pk_key, pk_value = qk.key, qk.value - ck_col = self.unique_keys[pk_key] - ck_col.pop(store_key.value, None) - self.unique_keys[pk_key] = ck_col - - sqks = searchable_query_keys.all - for qk in sqks: - pk_key, pk_value = qk.key, qk.value - ck_col = self.searchable_keys[pk_key] - if pk_value in ck_col and (store_key.value in ck_col[pk_value]): - ck_col[pk_value].remove(store_key.value) - self.searchable_keys[pk_key] = ck_col - - def _find_index_or_search_keys( - self, - credentials: SyftVerifyKey, - index_qks: QueryKeys, - search_qks: QueryKeys, - order_by: PartitionKey | None = None, - ) -> Result[list[SyftObject], str]: - ids: set | None = None - errors = [] - # third party - if len(index_qks.all) > 0: - index_results = self._get_keys_index(qks=index_qks) - if index_results.is_ok(): - if ids is None: - ids = index_results.ok() - ids = ids.intersection(index_results.ok()) - else: - errors.append(index_results.err()) - - search_results = None - if len(search_qks.all) > 0: - search_results = self._find_keys_search(qks=search_qks) - - if search_results.is_ok(): - if ids is None: - ids = search_results.ok() - ids = ids.intersection(search_results.ok()) - else: - errors.append(search_results.err()) - - if len(errors) > 0: - return Err(" ".join(errors)) - - if ids is None: - return Ok([]) - - qks: QueryKeys = self.store_query_keys(ids) - return self._get_all_from_store( - credentials=credentials, qks=qks, order_by=order_by - ) - - def _update( - self, - credentials: SyftVerifyKey, - qk: QueryKey, - obj: SyftObject, - has_permission: bool = False, - overwrite: bool = False, - ) -> Result[SyftObject, str]: - try: - if qk.value not in self.data: - return Err(f"No object exists for query key: {qk}") - - if has_permission or self.has_permission( - ActionObjectWRITE(uid=qk.value, credentials=credentials) - ): - _original_obj = self.data[qk.value] - _original_unique_keys = self.settings.unique_keys.with_obj( - _original_obj - ) - _original_searchable_keys = self.settings.searchable_keys.with_obj( - _original_obj - ) - - store_query_key = self.settings.store_key.with_obj(_original_obj) - - # remove old keys - self._remove_keys( - store_key=store_query_key, - unique_query_keys=_original_unique_keys, - searchable_query_keys=_original_searchable_keys, - ) - - # update the object with new data - if overwrite: - # Overwrite existing object and their values - _original_obj = obj - else: - for key, value in obj.to_dict(exclude_empty=True).items(): - if key == "id": - # protected field - continue - setattr(_original_obj, key, value) - - # update data and keys - self._set_data_and_keys( - store_query_key=store_query_key, - unique_query_keys=self.settings.unique_keys.with_obj(_original_obj), - searchable_query_keys=self.settings.searchable_keys.with_obj( - _original_obj - ), - # has been updated - obj=_original_obj, - ) - - # 🟡 TODO 28: Add locking in this transaction - - return Ok(_original_obj) - else: - return Err(f"Failed to update obj {obj}, you have no permission") - - except Exception as e: - return Err(f"Failed to update obj {obj} with error: {e}") - - def _get_all_from_store( - self, - credentials: SyftVerifyKey, - qks: QueryKeys, - order_by: PartitionKey | None = None, - ) -> Result[list[SyftObject], str]: - matches = [] - for qk in qks.all: - if qk.value in self.data: - if self.has_permission( - ActionObjectREAD(uid=qk.value, credentials=credentials) - ): - matches.append(self.data[qk.value]) - if order_by 
is not None: - matches = sorted(matches, key=lambda x: getattr(x, order_by.key, "")) - return Ok(matches) - - def create(self, obj: SyftObject) -> Result[SyftObject, str]: - pass - - def _delete( - self, credentials: SyftVerifyKey, qk: QueryKey, has_permission: bool = False - ) -> Result[SyftSuccess, Err]: - try: - if has_permission or self.has_permission( - ActionObjectWRITE(uid=qk.value, credentials=credentials) - ): - _obj = self.data.pop(qk.value) - self.permissions.pop(qk.value) - self.storage_permissions.pop(qk.value) - self._delete_unique_keys_for(_obj) - self._delete_search_keys_for(_obj) - return Ok(SyftSuccess(message="Deleted")) - else: - return Err( - f"Failed to delete with query key {qk}, you have no permission" - ) - except Exception as e: - return Err(f"Failed to delete with query key {qk} with error: {e}") - - def _delete_unique_keys_for(self, obj: SyftObject) -> Result[SyftSuccess, str]: - for _unique_ck in self.unique_cks: - qk = _unique_ck.with_obj(obj) - unique_keys = self.unique_keys[qk.key] - unique_keys.pop(qk.value, None) - self.unique_keys[qk.key] = unique_keys - return Ok(SyftSuccess(message="Deleted")) - - def _delete_search_keys_for(self, obj: SyftObject) -> Result[SyftSuccess, str]: - for _search_ck in self.searchable_cks: - qk = _search_ck.with_obj(obj) - search_keys = self.searchable_keys[qk.key] - search_keys.pop(qk.value, None) - self.searchable_keys[qk.key] = search_keys - return Ok(SyftSuccess(message="Deleted")) - - def _get_keys_index(self, qks: QueryKeys) -> Result[set[Any], str]: - try: - # match AND - subsets: list = [] - for qk in qks.all: - subset: set = set() - pk_key, pk_value = qk.key, qk.value - if pk_key not in self.unique_keys: - return Err(f"Failed to query index with {qk}") - ck_col = self.unique_keys[pk_key] - if pk_value not in ck_col.keys(): - # must be at least one in all query keys - continue - store_value = ck_col[pk_value] - subsets.append({store_value}) - - if len(subsets) == 0: - return Ok(set()) - # AND - subset = subsets.pop() - for s in subsets: - subset = subset.intersection(s) - - return Ok(subset) - except Exception as e: - return Err(f"Failed to query with {qks}. {e}") - - def _find_keys_search(self, qks: QueryKeys) -> Result[set[QueryKey], str]: - try: - # match AND - subsets = [] - for qk in qks.all: - subset: set = set() - pk_key, pk_value = qk.key, qk.value - if pk_key not in self.searchable_keys: - return Err(f"Failed to search with {qk}") - ck_col = self.searchable_keys[pk_key] - if qk.type_list: - # 🟡 TODO: change this hacky way to do on to many relationships - # this is when you search a QueryKey which is a list of items - # at the moment its mostly just a List[UID] - # match OR against all keys for this col - # the values of the list will be turned into strings in a single key - matches = set() - for item in pk_value: - for col_key in ck_col.keys(): - if str(item) in col_key: - store_values = ck_col[col_key] - for value in store_values: - matches.add(value) - if len(matches): - subsets.append(matches) - else: - # this is the normal path - if pk_value not in ck_col.keys(): - # must be at least one in all query keys - subsets.append(set()) - continue - store_values = ck_col[pk_value] - subsets.append(set(store_values)) - - if len(subsets) == 0: - return Ok(set()) - # AND - subset = subsets.pop() - for s in subsets: - subset = subset.intersection(s) - return Ok(subset) - except Exception as e: - return Err(f"Failed to query with {qks}. 
{e}") - - def _check_partition_keys_unique( - self, unique_query_keys: QueryKeys - ) -> UniqueKeyCheck: - # dont check the store key - qks = [ - x - for x in unique_query_keys.all - if x.partition_key != self.settings.store_key - ] - matches = [] - for qk in qks: - pk_key, pk_value = qk.key, qk.value - if pk_key not in self.unique_keys: - raise Exception( - f"pk_key: {pk_key} not in unique_keys: {self.unique_keys.keys()}" - ) - ck_col = self.unique_keys[pk_key] - if pk_value in ck_col: - matches.append(pk_key) - - if len(matches) == 0: - return UniqueKeyCheck.EMPTY - elif len(matches) == len(qks): - return UniqueKeyCheck.MATCHES - - return UniqueKeyCheck.ERROR - - def _set_data_and_keys( - self, - store_query_key: QueryKey, - unique_query_keys: QueryKeys, - searchable_query_keys: QueryKeys, - obj: SyftObject, - ) -> None: - uqks = unique_query_keys.all - - for qk in uqks: - pk_key, pk_value = qk.key, qk.value - ck_col = self.unique_keys[pk_key] - ck_col[pk_value] = store_query_key.value - self.unique_keys[pk_key] = ck_col - - self.unique_keys[store_query_key.key][store_query_key.value] = ( - store_query_key.value - ) - - sqks = searchable_query_keys.all - for qk in sqks: - pk_key, pk_value = qk.key, qk.value - ck_col = self.searchable_keys[pk_key] - if qk.type_list: - # coerce the list of objects to strings for a single key - pk_value = " ".join([str(obj) for obj in pk_value]) - - # check if key is present, then add to existing key - if pk_value in ck_col: - ck_col[pk_value].append(store_query_key.value) - else: - # else create the key with a list - ck_col[pk_value] = [store_query_key.value] - - self.searchable_keys[pk_key] = ck_col - - self.data[store_query_key.value] = obj - - def _migrate_data( - self, to_klass: SyftObject, context: AuthedServiceContext, has_permission: bool - ) -> Result[bool, str]: - credentials = context.credentials - has_permission = (credentials == self.root_verify_key) or has_permission - if has_permission: - for key, value in self.data.items(): - try: - migrated_value = value.migrate_to(to_klass.__version__, context) - except Exception: - return Err(f"Failed to migrate data to {to_klass} for qk: {key}") - qk = self.settings.store_key.with_obj(key) - result = self._update( - credentials, - qk=qk, - obj=migrated_value, - has_permission=has_permission, - overwrite=True, - ) - - if result.is_err(): - return result.err() - - return Ok(True) - - return Err("You don't have permissions to migrate data.") + pass diff --git a/packages/syft/src/syft/store/linked_obj.py b/packages/syft/src/syft/store/linked_obj.py index 93f63d1f8b4..2343dc0b9a6 100644 --- a/packages/syft/src/syft/store/linked_obj.py +++ b/packages/syft/src/syft/store/linked_obj.py @@ -1,4 +1,5 @@ # stdlib +import logging from typing import Any # third party @@ -8,69 +9,97 @@ from ..serde.serializable import serializable from ..service.context import AuthedServiceContext from ..service.context import ChangeContext -from ..service.context import NodeServiceContext -from ..service.response import SyftError +from ..service.context import ServerServiceContext from ..service.response import SyftSuccess -from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..types.errors import SyftException +from ..types.result import as_result +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from ..types.syft_object import SyftObject from ..types.uid import UID +logger = logging.getLogger(__name__) + @serializable() class LinkedObject(SyftObject): __canonical_name__ = "LinkedObject" - __version__ = 
SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 - node_uid: UID + server_uid: UID service_type: type[Any] object_type: type[SyftObject] object_uid: UID - __exclude_sync_diff_attrs__ = ["node_uid"] + _resolve_cache: SyftObject | None = None + + __exclude_sync_diff_attrs__ = ["server_uid"] def __str__(self) -> str: resolved_obj_type = ( type(self.resolve) if self.object_type is None else self.object_type ) - return f"{resolved_obj_type.__name__}: {self.object_uid} @ Node {self.node_uid}" + return f"{resolved_obj_type.__name__}: {self.object_uid} @ Server {self.server_uid}" @property def resolve(self) -> SyftObject: - # relative - from ..client.api import APIRegistry - - api = APIRegistry.api_for( - node_uid=self.node_uid, - user_verify_key=self.syft_client_verify_key, - ) - if api is None: - raise ValueError(f"api is None. You must login to {self.node_uid}") + return self._resolve() - return api.services.notifications.resolve_object(self) - - def resolve_with_context(self, context: NodeServiceContext) -> Any: - if context.node is None: - raise ValueError(f"context {context}'s node is None") - return context.node.get_service(self.service_type).resolve_link( - context=context, linked_obj=self + def _resolve(self, load_cached: bool = False) -> SyftObject: + api = None + if load_cached and self._resolve_cache is not None: + return self._resolve_cache + try: + # relative + api = self.get_api() # raises + resolve: SyftObject = api.services.notifications.resolve_object(self) + self._resolve_cache = resolve + return resolve + except Exception as e: + logger.error(">>> Failed to resolve object", type(api), e) + raise e + + def resolve_dynamic( + self, context: ServerServiceContext | None, load_cached: bool = False + ) -> SyftObject: + if context is not None: + return self.resolve_with_context(context, load_cached).unwrap() + else: + return self._resolve(load_cached) + + @as_result(SyftException) + def resolve_with_context( + self, context: ServerServiceContext, load_cached: bool = False + ) -> Any: + if load_cached and self._resolve_cache is not None: + return self._resolve_cache + if context.server is None: + raise ValueError(f"context {context}'s server is None") + res = ( + context.server.get_service(self.service_type) + .resolve_link(context=context, linked_obj=self) + .unwrap() ) + self._resolve_cache = res + return res def update_with_context( - self, context: NodeServiceContext | ChangeContext | Any, obj: Any - ) -> SyftSuccess | SyftError: + self, context: ServerServiceContext | ChangeContext | Any, obj: Any + ) -> SyftSuccess: if isinstance(context, AuthedServiceContext): credentials = context.credentials elif isinstance(context, ChangeContext): credentials = context.approving_user_credentials else: - return SyftError(message="wrong context passed") - if context.node is None: - return SyftError(message=f"context {context}'s node is None") - service = context.node.get_service(self.service_type) + raise SyftException(public_message="wrong context passed") + if context.server is None: + raise SyftException(public_message=f"context {context}'s server is None") + service = context.server.get_service(self.service_type) if hasattr(service, "stash"): - result = service.stash.update(credentials, obj) + result = service.stash.update(credentials, obj).unwrap() else: - return SyftError(message=f"service {service} does not have a stash") + raise SyftException( + public_message=f"service {service} does not have a stash" + ) return result @classmethod @@ -78,7 +107,7 @@ def from_obj( cls, obj: 
SyftObject | type[SyftObject], service_type: type[Any] | None = None, - node_uid: UID | None = None, + server_uid: UID | None = None, ) -> Self: if service_type is None: # relative @@ -95,13 +124,13 @@ def from_obj( if object_uid is None: raise Exception(f"{cls} Requires an object UID") - if node_uid is None: - node_uid = getattr(obj, "node_uid", None) - if node_uid is None: + if server_uid is None: + server_uid = getattr(obj, "server_uid", None) + if server_uid is None: raise Exception(f"{cls} Requires an object UID") return LinkedObject( - node_uid=node_uid, + server_uid=server_uid, service_type=service_type, object_type=type(obj), object_uid=object_uid, @@ -112,7 +141,7 @@ def from_obj( def with_context( cls, obj: SyftObject, - context: NodeServiceContext, + context: ServerServiceContext, object_uid: UID | None = None, service_type: type[Any] | None = None, ) -> Self: @@ -127,12 +156,12 @@ def with_context( if object_uid is None: raise Exception(f"{cls} Requires an object UID") - if context.node is None: - raise ValueError(f"context {context}'s node is None") - node_uid = context.node.id + if context.server is None: + raise ValueError(f"context {context}'s server is None") + server_uid = context.server.id return LinkedObject( - node_uid=node_uid, + server_uid=server_uid, service_type=service_type, object_type=type(obj), object_uid=object_uid, @@ -144,10 +173,10 @@ def from_uid( object_uid: UID, object_type: type[SyftObject], service_type: type[Any], - node_uid: UID, + server_uid: UID, ) -> Self: return cls( - node_uid=node_uid, + server_uid=server_uid, service_type=service_type, object_type=object_type, object_uid=object_uid, diff --git a/packages/syft/src/syft/store/locks.py b/packages/syft/src/syft/store/locks.py index 9f1b8e00644..b16b1b7bd9b 100644 --- a/packages/syft/src/syft/store/locks.py +++ b/packages/syft/src/syft/store/locks.py @@ -1,43 +1,12 @@ -# stdlib -from collections import defaultdict -from collections.abc import Callable -import datetime -import json -from pathlib import Path -import threading -import time -from typing import Any -import uuid - # third party from pydantic import BaseModel -from sherlock.lock import BaseLock -from sherlock.lock import FileLock # relative from ..serde.serializable import serializable -THREAD_FILE_LOCKS: dict[int, dict[str, int]] = defaultdict(dict) - -@serializable() +@serializable(canonical_name="LockingConfig", version=1) class LockingConfig(BaseModel): - """ - Locking config - - Args: - lock_name: str - Lock name - namespace: Optional[str] - Namespace to use for setting lock keys in the backend store. - expire: Optional[int] - Lock expiration time in seconds. If explicitly set to `None`, lock will not expire. - timeout: Optional[int] - Timeout to acquire lock(seconds) - retry_interval: float - Retry interval to retry acquiring a lock if previous attempts failed. 
- """ - lock_name: str = "syft_lock" namespace: str | None = None expire: int | None = 60 @@ -45,363 +14,11 @@ class LockingConfig(BaseModel): retry_interval: float = 0.1 -@serializable() +@serializable(canonical_name="NoLockingConfig", version=1) class NoLockingConfig(LockingConfig): - """ - No-locking policy - """ - pass -@serializable() +@serializable(canonical_name="ThreadingLockingConfig", version=1) class ThreadingLockingConfig(LockingConfig): - """ - Threading-based locking policy - """ - pass - - -@serializable() -class FileLockingConfig(LockingConfig): - """File locking policy""" - - client_path: Path | None = None - - -class ThreadingLock(BaseLock): - """ - Threading-based Lock. Used to provide the same API as the rest of the locks. - """ - - def __init__(self, expire: int, **kwargs: Any) -> None: - self.expire = expire - self.locked_timestamp: float = 0.0 - self.lock = threading.Lock() - - @property - def _locked(self) -> bool: - """ - Implementation of method to check if lock has been acquired. Must be - :returns: if the lock is acquired or not - :rtype: bool - """ - locked = self.lock.locked() - if ( - locked - and time.time() - self.locked_timestamp >= self.expire - and self.expire != -1 - ): - self._release() - - return self.lock.locked() - - def _acquire(self) -> bool: - """ - Implementation of acquiring a lock in a non-blocking fashion. - :returns: if the lock was successfully acquired or not - :rtype: bool - """ - locked = self.lock.locked() - if ( - locked - and time.time() - self.locked_timestamp > self.expire - and self.expire != -1 - ): - self._release() - - status = self.lock.acquire( - blocking=False - ) # timeout/retries handle in the `acquire` method - if status: - self.locked_timestamp = time.time() - return status - - def _release(self) -> None: - """ - Implementation of releasing an acquired lock. - """ - - try: - return self.lock.release() - except RuntimeError: # already unlocked - pass - - def _renew(self) -> bool: - """ - Implementation of renewing an acquired lock. - """ - return True - - -class PatchedFileLock(FileLock): - """ - Implementation of lock with the file system as the backend for synchronization. - This version patches for the `FileLock._expiry_time` crash(https://github.com/py-sherlock/sherlock/issues/71) - - `sherlock.FileLock` might not work as expected for Python threads. - It uses re-entrant OS locks, meaning that multiple Python threads could acquire the lock at the same time. - For different processes/OS threads, the file lock will work as expected. - We need to patch the lock to handle Python threads too. - - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - self._lock_file_enabled = True - try: - super().__init__(*args, **kwargs) - except BaseException as e: - print(f"Failed to create a file lock = {e}. 
Using memory-lock only") - self._lock_file_enabled = False - - self._lock_py_thread = ThreadingLock(*args, **kwargs) - - def _expiry_time(self) -> str: - if self.expire is not None: - expiry_time = self._now() + datetime.timedelta(seconds=self.expire) - else: - expiry_time = datetime.datetime.max.replace( - tzinfo=datetime.timezone.utc - ).astimezone(datetime.timezone.utc) - return expiry_time.isoformat() - - def _thread_safe_cbk(self, cbk: Callable) -> bool: - # Acquire lock at Python level(if-needed) - locked = self._lock_py_thread._acquire() - if not locked: - return False - - try: - result = cbk() - except BaseException as e: - print(e) - result = False - - self._lock_py_thread._release() - return result - - def _acquire(self) -> bool: - return self._thread_safe_cbk(self._acquire_file_lock) - - def _release(self) -> bool: - res = self._thread_safe_cbk(self._release_file_lock) - return res - - def _acquire_file_lock(self) -> bool: - if not self._lock_file_enabled: - return True - - owner = str(uuid.uuid4()) - - # Acquire lock at OS level - with self._lock_file: - if self._data_file.exists(): - for _retry in range(10): - try: - data = json.loads(self._data_file.read_text()) - break - except BaseException: - time.sleep(0.1) - if _retry == 9: - pass - - now = self._now() - has_expired = self._has_expired(data, now) - if owner != data["owner"]: - if not has_expired: - # Someone else holds the lock. - return False - else: - # Lock is available for us to take. - data = {"owner": owner, "expiry_time": self._expiry_time()} - else: - # Same owner so do not set or modify Lease. - return False - else: - data = {"owner": owner, "expiry_time": self._expiry_time()} - - # Write new data back to file. - self._data_file.touch() - self._data_file.write_text(json.dumps(data)) - - # We succeeded in writing to the file so we now hold the lock. - self._owner: str | None = owner - - return True - - @property - def _locked(self) -> bool: - if self._lock_py_thread.locked(): - return True - - if not self._lock_file_enabled: - return False - - if not self._data_file.exists(): - # File doesn't exist so can't be locked. - return False - - with self._lock_file: - data = None - for _retry in range(10): - try: - data = json.loads(self._data_file.read_text()) - break - except BaseException: - time.sleep(0.1) - - if data is None: - return False - - if self._has_expired(data, self._now()): - # File exists but has expired. - return False - - # Lease exists and has not expired. - return True - - def _release_file_lock(self) -> None: - if not self._lock_file_enabled: - return - - if self._owner is None: - return - - if not self._data_file.exists(): - return - - with self._lock_file: - data = None - for _retry in range(10): - try: - data = json.loads(self._data_file.read_text()) - break - except BaseException: - time.sleep(0.1) - - if data is None: - return - - if self._owner == data["owner"]: - self._data_file.unlink() - self._owner = None - - -class SyftLock(BaseLock): - """ - Syft Lock implementations. - - Params: - config: Config specific to a locking strategy. 
- """ - - def __init__(self, config: LockingConfig): - self.config = config - - self.lock_name = config.lock_name - self.namespace = config.namespace - self.expire = config.expire - self.timeout = config.timeout - self.retry_interval = config.retry_interval - - self.passthrough = False - - self._lock: BaseLock | None = None - - base_params = { - "lock_name": config.lock_name, - "namespace": config.namespace, - "expire": config.expire, - "timeout": config.timeout, - "retry_interval": config.retry_interval, - } - if isinstance(config, NoLockingConfig): - self.passthrough = True - elif isinstance(config, ThreadingLockingConfig): - self._lock = ThreadingLock(**base_params) - elif isinstance(config, FileLockingConfig): - client: Path | None = config.client_path - self._lock = PatchedFileLock( - **base_params, - client=client, - ) - else: - raise ValueError("Unsupported config type") - - @property - def _locked(self) -> bool: - """ - Implementation of method to check if lock has been acquired. - - :returns: if the lock is acquired or not - :rtype: bool - """ - if self.passthrough: - return False - return self._lock.locked() if self._lock else False - - def acquire(self, blocking: bool = True) -> bool: - """ - Acquire a lock, blocking or non-blocking. - :param bool blocking: acquire a lock in a blocking or non-blocking - fashion. Defaults to True. - :returns: if the lock was successfully acquired or not - :rtype: bool - """ - - if not blocking: - return self._acquire() - - timeout: float = float(self.timeout) - start_time = time.time() - elapsed: float = 0.0 - while timeout >= elapsed: - if not self._acquire(): - time.sleep(self.retry_interval) - elapsed = time.time() - start_time - else: - return True - print( - f"Timeout elapsed after {self.timeout} seconds while trying to acquiring lock." - ) - # third party - return False - - def _acquire(self) -> bool: - """ - Implementation of acquiring a lock in a non-blocking fashion. - `acquire` makes use of this implementation to provide blocking and non-blocking implementations. - - :returns: if the lock was successfully acquired or not - :rtype: bool - """ - if self.passthrough: - return True - - try: - return self._lock._acquire() if self._lock else False - except BaseException: - return False - - def _release(self) -> bool | None: - """ - Implementation of releasing an acquired lock. - """ - if self.passthrough: - return None - if not self._lock: - return None - try: - return self._lock._release() - except BaseException: - return None - - def _renew(self) -> bool: - """ - Implementation of renewing an acquired lock. 
- """ - if self.passthrough: - return True - - return self._lock._renew() if self._lock else False diff --git a/packages/syft/src/syft/store/mongo_client.py b/packages/syft/src/syft/store/mongo_client.py index 7ae46b85950..c8af9b97c75 100644 --- a/packages/syft/src/syft/store/mongo_client.py +++ b/packages/syft/src/syft/store/mongo_client.py @@ -1,241 +1,8 @@ -# stdlib -from threading import Lock -from typing import Any - -# third party -from pymongo.collection import Collection as MongoCollection -from pymongo.database import Database as MongoDatabase -from pymongo.errors import ConnectionFailure -from pymongo.mongo_client import MongoClient as PyMongoClient -from result import Err -from result import Ok -from result import Result - # relative from ..serde.serializable import serializable -from .document_store import PartitionSettings from .document_store import StoreClientConfig -from .document_store import StoreConfig -from .mongo_codecs import SYFT_CODEC_OPTIONS -@serializable() +@serializable(canonical_name="MongoStoreClientConfig", version=1) class MongoStoreClientConfig(StoreClientConfig): - """ - Paramaters: - `hostname`: optional string - hostname or IP address or Unix domain socket path of a single mongod or mongos - instance to connect to, or a mongodb URI, or a list of hostnames (but no more - than one mongodb URI). If `host` is an IPv6 literal it must be enclosed in '[' - and ']' characters following the RFC2732 URL syntax (e.g. '[::1]' for localhost). - Multihomed and round robin DNS addresses are **not** supported. - `port` : optional int - port number on which to connect - `directConnection`: bool - if ``True``, forces this client to connect directly to the specified MongoDB host - as a standalone. If ``false``, the client connects to the entire replica set of which - the given MongoDB host(s) is a part. If this is ``True`` and a mongodb+srv:// URI - or a URI containing multiple seeds is provided, an exception will be raised. - `maxPoolSize`: int. Default 100 - The maximum allowable number of concurrent connections to each connected server. - Requests to a server will block if there are `maxPoolSize` outstanding connections - to the requested server. Defaults to 100. Can be either 0 or None, in which case - there is no limit on the number of concurrent connections. - `minPoolSize` : int. Default 0 - The minimum required number of concurrent connections that the pool will maintain - to each connected server. Default is 0. - `maxIdleTimeMS`: int - The maximum number of milliseconds that a connection can remain idle in the pool - before being removed and replaced. Defaults to `None` (no limit). - `appname`: string - The name of the application that created this MongoClient instance. The server will - log this value upon establishing each connection. It is also recorded in the slow - query log and profile collections. - `maxConnecting`: optional int - The maximum number of connections that each pool can establish concurrently. - Defaults to `2`. - `timeoutMS`: (integer or None) - Controls how long (in milliseconds) the driver will wait when executing an operation - (including retry attempts) before raising a timeout error. ``0`` or ``None`` means - no timeout. - `socketTimeoutMS`: (integer or None) - Controls how long (in milliseconds) the driver will wait for a response after sending - an ordinary (non-monitoring) database operation before concluding that a network error - has occurred. ``0`` or ``None`` means no timeout. Defaults to ``None`` (no timeout). 
- `connectTimeoutMS`: (integer or None) - Controls how long (in milliseconds) the driver will wait during server monitoring when - connecting a new socket to a server before concluding the server is unavailable. - ``0`` or ``None`` means no timeout. Defaults to ``20000`` (20 seconds). - `serverSelectionTimeoutMS`: (integer) - Controls how long (in milliseconds) the driver will wait to find an available, appropriate - server to carry out a database operation; while it is waiting, multiple server monitoring - operations may be carried out, each controlled by `connectTimeoutMS`. - Defaults to ``120000`` (120 seconds). - `waitQueueTimeoutMS`: (integer or None) - How long (in milliseconds) a thread will wait for a socket from the pool if the pool - has no free sockets. Defaults to ``None`` (no timeout). - `heartbeatFrequencyMS`: (optional) - The number of milliseconds between periodic server checks, or None to accept the default - frequency of 10 seconds. - # Auth - username: str - Database username - password: str - Database pass - authSource: str - The database to authenticate on. - Defaults to the database specified in the URI, if provided, or to “admin”. - tls: bool - If True, create the connection to the server using transport layer security. - Defaults to False. - # Testing and connection reuse - client: Optional[PyMongoClient] - If provided, this client is reused. Default = None - - """ - - # Connection - hostname: str | None = "127.0.0.1" - port: int | None = None - directConnection: bool = False - maxPoolSize: int = 200 - minPoolSize: int = 0 - maxIdleTimeMS: int | None = None - maxConnecting: int = 3 - timeoutMS: int = 0 - socketTimeoutMS: int = 0 - connectTimeoutMS: int = 20000 - serverSelectionTimeoutMS: int = 120000 - waitQueueTimeoutMS: int | None = None - heartbeatFrequencyMS: int = 10000 - appname: str = "pysyft" - # Auth - username: str | None = None - password: str | None = None - authSource: str = "admin" - tls: bool | None = False - # Testing and connection reuse - client: Any = None - - # this allows us to have one connection per `Node` object - # in the MongoClientCache - node_obj_python_id: int | None = None - - -class MongoClientCache: - __client_cache__: dict[int, type["MongoClient"] | None] = {} - _lock: Lock = Lock() - - @classmethod - def from_cache(cls, config: MongoStoreClientConfig) -> PyMongoClient | None: - return cls.__client_cache__.get(hash(str(config)), None) - - @classmethod - def set_cache(cls, config: MongoStoreClientConfig, client: PyMongoClient) -> None: - with cls._lock: - cls.__client_cache__[hash(str(config))] = client - - -class MongoClient: - client: PyMongoClient = None - - def __init__(self, config: MongoStoreClientConfig, cache: bool = True) -> None: - self.config = config - if config.client is not None: - self.client = config.client - elif cache: - self.client = MongoClientCache.from_cache(config=config) - - if not cache or self.client is None: - self.connect(config=config) - - def connect(self, config: MongoStoreClientConfig) -> Result[Ok, Err]: - self.client = PyMongoClient( - # Connection - host=config.hostname, - port=config.port, - directConnection=config.directConnection, - maxPoolSize=config.maxPoolSize, - minPoolSize=config.minPoolSize, - maxIdleTimeMS=config.maxIdleTimeMS, - maxConnecting=config.maxConnecting, - timeoutMS=config.timeoutMS, - socketTimeoutMS=config.socketTimeoutMS, - connectTimeoutMS=config.connectTimeoutMS, - serverSelectionTimeoutMS=config.serverSelectionTimeoutMS, - waitQueueTimeoutMS=config.waitQueueTimeoutMS, - 
heartbeatFrequencyMS=config.heartbeatFrequencyMS, - appname=config.appname, - # Auth - username=config.username, - password=config.password, - authSource=config.authSource, - tls=config.tls, - uuidRepresentation="standard", - ) - MongoClientCache.set_cache(config=config, client=self.client) - try: - # Check if mongo connection is still up - self.client.admin.command("ping") - except ConnectionFailure as e: - self.client = None - return Err(str(e)) - - return Ok(True) - - def with_db(self, db_name: str) -> Result[MongoDatabase, Err]: - try: - return Ok(self.client[db_name]) - except BaseException as e: - return Err(str(e)) - - def with_collection( - self, - collection_settings: PartitionSettings, - store_config: StoreConfig, - collection_name: str | None = None, - ) -> Result[MongoCollection, Err]: - res = self.with_db(db_name=store_config.db_name) - if res.is_err(): - return res - db = res.ok() - - try: - collection_name = ( - collection_name - if collection_name is not None - else collection_settings.name - ) - collection = db.get_collection( - name=collection_name, codec_options=SYFT_CODEC_OPTIONS - ) - except BaseException as e: - return Err(str(e)) - - return Ok(collection) - - def with_collection_permissions( - self, collection_settings: PartitionSettings, store_config: StoreConfig - ) -> Result[MongoCollection, Err]: - """ - For each collection, create a corresponding collection - that store the permissions to the data in that collection - """ - res = self.with_db(db_name=store_config.db_name) - if res.is_err(): - return res - db = res.ok() - - try: - collection_permissions_name: str = collection_settings.name + "_permissions" - collection_permissions = db.get_collection( - name=collection_permissions_name, codec_options=SYFT_CODEC_OPTIONS - ) - except BaseException as e: - return Err(str(e)) - - return Ok(collection_permissions) - - def close(self) -> None: - self.client.close() - MongoClientCache.__client_cache__.pop(hash(str(self.config)), None) + pass diff --git a/packages/syft/src/syft/store/mongo_codecs.py b/packages/syft/src/syft/store/mongo_codecs.py deleted file mode 100644 index 08b7fa63562..00000000000 --- a/packages/syft/src/syft/store/mongo_codecs.py +++ /dev/null @@ -1,31 +0,0 @@ -# stdlib -from typing import Any - -# third party -from bson import CodecOptions -from bson.binary import Binary -from bson.binary import USER_DEFINED_SUBTYPE -from bson.codec_options import TypeDecoder -from bson.codec_options import TypeRegistry - -# relative -from ..serde.deserialize import _deserialize -from ..serde.serialize import _serialize - - -def fallback_syft_encoder(value: object) -> Binary: - return Binary(_serialize(value, to_bytes=True), USER_DEFINED_SUBTYPE) - - -class SyftMongoBinaryDecoder(TypeDecoder): - bson_type = Binary - - def transform_bson(self, value: Any) -> Any: - if value.subtype == USER_DEFINED_SUBTYPE: - return _deserialize(value, from_bytes=True) - return value - - -syft_codecs = [SyftMongoBinaryDecoder()] -syft_type_registry = TypeRegistry(syft_codecs, fallback_encoder=fallback_syft_encoder) -SYFT_CODEC_OPTIONS = CodecOptions(type_registry=syft_type_registry) diff --git a/packages/syft/src/syft/store/mongo_document_store.py b/packages/syft/src/syft/store/mongo_document_store.py index cd1f2c1e253..e1c68316894 100644 --- a/packages/syft/src/syft/store/mongo_document_store.py +++ b/packages/syft/src/syft/store/mongo_document_store.py @@ -1,125 +1,34 @@ -# stdlib -from collections.abc import Callable -from typing import Any - # third party from pydantic import 
Field -from pymongo import ASCENDING -from pymongo.collection import Collection as MongoCollection -from result import Err -from result import Ok -from result import Result -from typing_extensions import Self # relative -from ..node.credentials import SyftVerifyKey -from ..serde.deserialize import _deserialize from ..serde.serializable import serializable -from ..serde.serialize import _serialize -from ..service.action.action_permissions import ActionObjectEXECUTE -from ..service.action.action_permissions import ActionObjectOWNER -from ..service.action.action_permissions import ActionObjectPermission -from ..service.action.action_permissions import ActionObjectREAD -from ..service.action.action_permissions import ActionObjectWRITE -from ..service.action.action_permissions import ActionPermission -from ..service.context import AuthedServiceContext -from ..service.response import SyftSuccess -from ..types.syft_object import SYFT_OBJECT_VERSION_2 from ..types.syft_object import StorableObjectType -from ..types.syft_object import SyftBaseObject -from ..types.syft_object import SyftObject -from ..types.transforms import TransformContext -from ..types.transforms import transform -from ..types.transforms import transform_method -from ..types.uid import UID from .document_store import DocumentStore -from .document_store import PartitionKey -from .document_store import PartitionSettings -from .document_store import QueryKey -from .document_store import QueryKeys from .document_store import StoreConfig from .document_store import StorePartition from .kv_document_store import KeyValueBackingStore from .locks import LockingConfig from .locks import NoLockingConfig -from .mongo_client import MongoClient from .mongo_client import MongoStoreClientConfig -@serializable() -class MongoDict(SyftBaseObject): - __canonical_name__ = "MongoDict" - __version__ = SYFT_OBJECT_VERSION_2 - - keys: list[Any] - values: list[Any] - - @property - def dict(self) -> dict[Any, Any]: - return dict(zip(self.keys, self.values)) - - @classmethod - def from_dict(cls, input: dict) -> Self: - return cls(keys=list(input.keys()), values=list(input.values())) - - def __repr__(self) -> str: - return self.dict.__repr__() - - -class MongoBsonObject(StorableObjectType, dict): +class MongoBsonObject(StorableObjectType): pass -def _repr_debug_(value: Any) -> str: - if hasattr(value, "_repr_debug_"): - return value._repr_debug_() - return repr(value) - - -def to_mongo(context: TransformContext) -> TransformContext: - output = {} - if context.obj: - unique_keys_dict = context.obj._syft_unique_keys_dict() - search_keys_dict = context.obj._syft_searchable_keys_dict() - all_dict = unique_keys_dict - all_dict.update(search_keys_dict) - for k in all_dict: - value = getattr(context.obj, k, "") - # if the value is a method, store its value - if callable(value): - output[k] = value() - else: - output[k] = value - - output["__canonical_name__"] = context.obj.__canonical_name__ - output["__version__"] = context.obj.__version__ - output["__blob__"] = _serialize(context.obj, to_bytes=True) - output["__arepr__"] = _repr_debug_(context.obj) # a comes first in alphabet - - if context.output and "id" in context.output: - output["_id"] = context.output["id"] - - context.output = output - - return context - - -@transform(SyftObject, MongoBsonObject) -def syft_obj_to_mongo() -> list[Callable]: - return [to_mongo] - - -@transform_method(MongoBsonObject, SyftObject) -def from_mongo( - storage_obj: dict, context: TransformContext | None = None -) -> SyftObject: - 
return _deserialize(storage_obj["__blob__"], from_bytes=True) +@serializable( + attrs=["index_name", "settings", "store_config"], + canonical_name="MongoBackingStore", + version=1, +) +class MongoBackingStore(KeyValueBackingStore): + pass -@serializable(attrs=["storage_type"]) +@serializable(attrs=["storage_type"], canonical_name="MongoStorePartition", version=1) class MongoStorePartition(StorePartition): """Mongo StorePartition - Parameters: `settings`: PartitionSettings PySyft specific settings, used for partitioning and indexing. @@ -129,502 +38,10 @@ class MongoStorePartition(StorePartition): storage_type: type[StorableObjectType] = MongoBsonObject - def init_store(self) -> Result[Ok, Err]: - store_status = super().init_store() - if store_status.is_err(): - return store_status - - client = MongoClient(config=self.store_config.client_config) - - collection_status = client.with_collection( - collection_settings=self.settings, store_config=self.store_config - ) - if collection_status.is_err(): - return collection_status - - collection_permissions_status = client.with_collection_permissions( - collection_settings=self.settings, store_config=self.store_config - ) - if collection_permissions_status.is_err(): - return collection_permissions_status - - self._collection = collection_status.ok() - self._permissions = collection_permissions_status.ok() - - return self._create_update_index() - - # Potentially thread-unsafe methods. - # - # CAUTION: - # * Don't use self.lock here. - # * Do not call the public thread-safe methods here(with locking). - # These methods are called from the public thread-safe API, and will hang the process. - - def _create_update_index(self) -> Result[Ok, Err]: - """Create or update mongo database indexes""" - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - - def check_index_keys( - current_keys: list[tuple[str, int]], new_index_keys: list[tuple[str, int]] - ) -> bool: - current_keys.sort() - new_index_keys.sort() - return current_keys == new_index_keys - - syft_obj = self.settings.object_type - - unique_attrs = getattr(syft_obj, "__attr_unique__", []) - object_name = syft_obj.__canonical_name__ - - new_index_keys = [(attr, ASCENDING) for attr in unique_attrs] - - try: - current_indexes = collection.index_information() - except BaseException as e: - return Err(str(e)) - index_name = f"{object_name}_index_name" - - current_index_keys = current_indexes.get(index_name, None) - - if current_index_keys is not None: - keys_same = check_index_keys(current_index_keys["key"], new_index_keys) - if keys_same: - return Ok(True) - - # Drop current index, since incompatible with current object - try: - collection.drop_index(index_or_name=index_name) - except Exception: - return Err( - f"Failed to drop index for object: {object_name} with index keys: {current_index_keys}" - ) - - # If no new indexes, then skip index creation - if len(new_index_keys) == 0: - return Ok(True) - - try: - collection.create_index(new_index_keys, unique=True, name=index_name) - except Exception: - return Err( - f"Failed to create index for {object_name} with index keys: {new_index_keys}" - ) - - return Ok(True) - - @property - def collection(self) -> Result[MongoCollection, Err]: - if not hasattr(self, "_collection"): - res = self.init_store() - if res.is_err(): - return res - - return Ok(self._collection) - - @property - def permissions(self) -> Result[MongoCollection, Err]: - if not hasattr(self, 
"_permissions"): - res = self.init_store() - if res.is_err(): - return res - - return Ok(self._permissions) - - def set(self, *args: Any, **kwargs: Any) -> Result[SyftObject, str]: - return self._set(*args, **kwargs) - - def _set( - self, - credentials: SyftVerifyKey, - obj: SyftObject, - add_permissions: list[ActionObjectPermission] | None = None, - add_storage_permission: bool = True, - ignore_duplicates: bool = False, - ) -> Result[SyftObject, str]: - # TODO: Refactor this function since now it's doing both set and - # update at the same time - write_permission = ActionObjectWRITE(uid=obj.id, credentials=credentials) - can_write: bool = self.has_permission(write_permission) - - store_query_key: QueryKey = self.settings.store_key.with_obj(obj) - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - - store_key_exists = ( - collection.find_one(store_query_key.as_dict_mongo) is not None - ) - if (not store_key_exists) and (not self.item_keys_exist(obj, collection)): - # attempt to claim ownership for writing - ownership_result = self.take_ownership(uid=obj.id, credentials=credentials) - can_write = ownership_result.is_ok() - elif not ignore_duplicates: - unique_query_keys: QueryKeys = self.settings.unique_keys.with_obj(obj) - keys = ", ".join(f"`{key.key}`" for key in unique_query_keys.all) - return Err( - f"Duplication Key Error for {obj}.\n" - f"The fields that should be unique are {keys}." - ) - else: - # we are not throwing an error, because we are ignoring duplicates - # we are also not writing though - return Ok(obj) - - if can_write: - storage_obj = obj.to(self.storage_type) - - collection.insert_one(storage_obj) - - # adding permissions - read_permission = ActionObjectPermission( - uid=obj.id, - credentials=credentials, - permission=ActionPermission.READ, - ) - self.add_permission(read_permission) - - if add_permissions is not None: - self.add_permissions(add_permissions) - - if add_storage_permission: - # TODO: add storage permissions to Mongo store - pass - - return Ok(obj) - else: - return Err(f"No permission to write object with id {obj.id}") - - def item_keys_exist(self, obj: SyftObject, collection: MongoCollection) -> bool: - qks: QueryKeys = self.settings.unique_keys.with_obj(obj) - query = {"$or": [{k: v} for k, v in qks.as_dict_mongo.items()]} - res = collection.find_one(query) - return res is not None - - def _update( - self, - credentials: SyftVerifyKey, - qk: QueryKey, - obj: SyftObject, - has_permission: bool = False, - overwrite: bool = False, - ) -> Result[SyftObject, str]: - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - - # TODO: optimize the update. The ID should not be overwritten, - # but the qk doesn't necessarily have to include the `id` field either. 
- - prev_obj_status = self._get_all_from_store(credentials, QueryKeys(qks=[qk])) - if prev_obj_status.is_err(): - return Err(f"No object found with query key: {qk}") - - prev_obj = prev_obj_status.ok() - if len(prev_obj) == 0: - return Err(f"Missing values for query key: {qk}") - - prev_obj = prev_obj[0] - if has_permission or self.has_permission( - ActionObjectWRITE(uid=prev_obj.id, credentials=credentials) - ): - # we don't want to overwrite Mongo's "id_" or Syft's "id" on update - obj_id = obj["id"] - - # Set ID to the updated object value - obj.id = prev_obj["id"] - - # Create the Mongo object - storage_obj = obj.to(self.storage_type) - - # revert the ID - obj.id = obj_id - - try: - collection.update_one( - filter=qk.as_dict_mongo, update={"$set": storage_obj} - ) - except Exception as e: - return Err(f"Failed to update obj: {obj} with qk: {qk}. Error: {e}") - - return Ok(obj) - else: - return Err(f"Failed to update obj {obj}, you have no permission") - - def _find_index_or_search_keys( - self, - credentials: SyftVerifyKey, - index_qks: QueryKeys, - search_qks: QueryKeys, - order_by: PartitionKey | None = None, - ) -> Result[list[SyftObject], str]: - # TODO: pass index as hint to find method - qks = QueryKeys(qks=(list(index_qks.all) + list(search_qks.all))) - return self._get_all_from_store( - credentials=credentials, qks=qks, order_by=order_by - ) - - @property - def data(self) -> dict: - values: list = self._all(credentials=None, has_permission=True).ok() - return {v.id: v for v in values} - - def _get_all_from_store( - self, - credentials: SyftVerifyKey, - qks: QueryKeys, - order_by: PartitionKey | None = None, - has_permission: bool | None = False, - ) -> Result[list[SyftObject], str]: - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - - if order_by is not None: - storage_objs = collection.find(filter=qks.as_dict_mongo).sort(order_by.key) - else: - _default_key = "_id" - storage_objs = collection.find(filter=qks.as_dict_mongo).sort(_default_key) - syft_objs = [] - for storage_obj in storage_objs: - obj = self.storage_type(storage_obj) - transform_context = TransformContext(output={}, obj=obj) - syft_objs.append(obj.to(self.settings.object_type, transform_context)) - - # TODO: maybe do this in loop before this - res = [] - for s in syft_objs: - if has_permission or self.has_permission( - ActionObjectREAD(uid=s.id, credentials=credentials) - ): - res.append(s) - return Ok(res) - - def _delete( - self, credentials: SyftVerifyKey, qk: QueryKey, has_permission: bool = False - ) -> Result[SyftSuccess, Err]: - if not ( - has_permission - or self.has_permission( - ActionObjectWRITE(uid=qk.value, credentials=credentials) - ) - ): - return Err(f"You don't have permission to delete object with qk: {qk}") - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - - collection_permissions_status = self.permissions - if collection_permissions_status.is_err(): - return collection_permissions_status - collection_permissions: MongoCollection = collection_permissions_status.ok() - - qks = QueryKeys(qks=qk) - # delete the object - result = collection.delete_one(filter=qks.as_dict_mongo) - # delete the object's permission - result_permission = collection_permissions.delete_one(filter=qks.as_dict_mongo) - if result.deleted_count == 1 and result_permission.deleted_count == 1: - return 
Ok(SyftSuccess(message="Object and its permission are deleted")) - elif result.deleted_count == 0: - return Err(f"Failed to delete object with qk: {qk}") - else: - return Err( - f"Object with qk: {qk} was deleted, but failed to delete its corresponding permission" - ) - - def has_permission(self, permission: ActionObjectPermission) -> bool: - """Check if the permission is inside the permission collection""" - collection_permissions_status = self.permissions - if collection_permissions_status.is_err(): - return False - collection_permissions: MongoCollection = collection_permissions_status.ok() - - permissions: dict | None = collection_permissions.find_one( - {"_id": permission.uid} - ) - - if permissions is None: - return False - - # TODO: fix for other admins - if ( - permission.credentials - and self.root_verify_key.verify == permission.credentials.verify - ): - return True - - if permission.permission_string in permissions["permissions"]: - return True - - # check ALL_READ permission - if ( - permission.permission == ActionPermission.READ - and ActionObjectPermission( - permission.uid, ActionPermission.ALL_READ - ).permission_string - in permissions["permissions"] - ): - return True - - return False - - def add_permission(self, permission: ActionObjectPermission) -> Result[None, Err]: - collection_permissions_status = self.permissions - if collection_permissions_status.is_err(): - return collection_permissions_status - collection_permissions: MongoCollection = collection_permissions_status.ok() - - # find the permissions for the given permission.uid - # e.g. permissions = {"_id": "7b88fdef6bff42a8991d294c3d66f757", - # "permissions": set(["permission_str_1", "permission_str_2"]}} - permissions: dict | None = collection_permissions.find_one( - {"_id": permission.uid} - ) - if permissions is None: - # Permission doesn't exist, add a new one - collection_permissions.insert_one( - { - "_id": permission.uid, - "permissions": {permission.permission_string}, - } - ) - else: - # update the permissions with the new permission string - permission_strings: set = permissions["permissions"] - permission_strings.add(permission.permission_string) - collection_permissions.update_one( - {"_id": permission.uid}, {"$set": {"permissions": permission_strings}} - ) - - def add_permissions(self, permissions: list[ActionObjectPermission]) -> None: - for permission in permissions: - self.add_permission(permission) - - def remove_permission( - self, permission: ActionObjectPermission - ) -> Result[None, Err]: - collection_permissions_status = self.permissions - if collection_permissions_status.is_err(): - return collection_permissions_status - collection_permissions: MongoCollection = collection_permissions_status.ok() - permissions: dict | None = collection_permissions.find_one( - {"_id": permission.uid} - ) - if permissions is None: - return Err(f"permission with UID {permission.uid} not found!") - permissions_strings: set = permissions["permissions"] - if permission.permission_string in permissions_strings: - permissions_strings.remove(permission.permission_string) - if len(permissions_strings) > 0: - collection_permissions.update_one( - {"_id": permission.uid}, - {"$set": {"permissions": permissions_strings}}, - ) - else: - collection_permissions.delete_one({"_id": permission.uid}) - else: - return Err(f"the permission {permission.permission_string} does not exist!") - - def take_ownership( - self, uid: UID, credentials: SyftVerifyKey - ) -> Result[SyftSuccess, str]: - collection_permissions_status = 
self.permissions - if collection_permissions_status.is_err(): - return collection_permissions_status - collection_permissions: MongoCollection = collection_permissions_status.ok() - - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - - data: list[UID] | None = collection.find_one({"_id": uid}) - permissions: list[UID] | None = collection_permissions.find_one({"_id": uid}) - - # first person using this UID can claim ownership - if permissions is None and data is None: - self.add_permissions( - [ - ActionObjectOWNER(uid=uid, credentials=credentials), - ActionObjectWRITE(uid=uid, credentials=credentials), - ActionObjectREAD(uid=uid, credentials=credentials), - ActionObjectEXECUTE(uid=uid, credentials=credentials), - ] - ) - return Ok(SyftSuccess(message=f"Ownership of ID: {uid} taken.")) - - return Err(f"UID: {uid} already owned.") - - def _all( - self, - credentials: SyftVerifyKey, - order_by: PartitionKey | None = None, - has_permission: bool | None = False, - ) -> Result[list[SyftObject], str]: - qks = QueryKeys(qks=()) - return self._get_all_from_store( - credentials=credentials, - qks=qks, - order_by=order_by, - has_permission=has_permission, - ) - - def __len__(self) -> int: - collection_status = self.collection - if collection_status.is_err(): - return 0 - collection: MongoCollection = collection_status.ok() - return collection.count_documents(filter={}) - - def _migrate_data( - self, to_klass: SyftObject, context: AuthedServiceContext, has_permission: bool - ) -> Result[bool, str]: - credentials = context.credentials - has_permission = (credentials == self.root_verify_key) or has_permission - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - - if has_permission: - storage_objs = collection.find({}) - for storage_obj in storage_objs: - obj = self.storage_type(storage_obj) - transform_context = TransformContext(output={}, obj=obj) - value = obj.to(self.settings.object_type, transform_context) - key = obj.get("_id") - try: - migrated_value = value.migrate_to(to_klass.__version__, context) - except Exception: - return Err(f"Failed to migrate data to {to_klass} for qk: {key}") - qk = self.settings.store_key.with_obj(key) - result = self._update( - credentials, - qk=qk, - obj=migrated_value, - has_permission=has_permission, - ) - - if result.is_err(): - return result.err() - - return Ok(True) - - return Err("You don't have permissions to migrate data.") - - -@serializable() +@serializable(canonical_name="MongoDocumentStore", version=1) class MongoDocumentStore(DocumentStore): """Mongo Document Store - Parameters: `store_config`: MongoStoreConfig Mongo specific configuration, including connection configuration, database name, or client class type. 
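# A rough illustration of what attaching canonical_name/version metadata via a
# class decorator can look like; this is NOT Syft's @serializable implementation,
# and the decorator/class names below are hypothetical.
def with_canonical_name(canonical_name: str, version: int = 1):
    def decorator(cls):
        # Store a stable wire name and schema version on the class so a type
        # registry can look it up independently of its Python import path.
        cls.__canonical_name__ = canonical_name
        cls.__version__ = version
        return cls
    return decorator


@with_canonical_name("MongoDocumentStoreSketch", version=1)
class MongoDocumentStoreSketch:
    pass


assert MongoDocumentStoreSketch.__canonical_name__ == "MongoDocumentStoreSketch"
assert MongoDocumentStoreSketch.__version__ == 1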
@@ -633,226 +50,11 @@ class MongoDocumentStore(DocumentStore): partition_type = MongoStorePartition -@serializable(attrs=["index_name", "settings", "store_config"]) -class MongoBackingStore(KeyValueBackingStore): - """ - Core logic for the MongoDB key-value store - - Parameters: - `index_name`: str - Index name (can be either 'data' or 'permissions') - `settings`: PartitionSettings - Syft specific settings - `store_config`: StoreConfig - Connection Configuration - `ddtype`: Type - Optional and should be None - Used to make a consistent interface with SQLiteBackingStore - """ - - def __init__( - self, - index_name: str, - settings: PartitionSettings, - store_config: StoreConfig, - ddtype: type | None = None, - ) -> None: - self.index_name = index_name - self.settings = settings - self.store_config = store_config - self.client: MongoClient - self.ddtype = ddtype - self.init_client() - - def init_client(self) -> Err | None: - self.client = MongoClient(config=self.store_config.client_config) - - collection_status = self.client.with_collection( - collection_settings=self.settings, - store_config=self.store_config, - collection_name=f"{self.settings.name}_{self.index_name}", - ) - if collection_status.is_err(): - return collection_status - self._collection: MongoCollection = collection_status.ok() - return None - - @property - def collection(self) -> Result[MongoCollection, Err]: - if not hasattr(self, "_collection"): - res = self.init_client() - if res is not None and res.is_err(): - return res - - return Ok(self._collection) - - def _exist(self, key: UID) -> bool: - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - - result: dict | None = collection.find_one({"_id": key}) - if result is not None: - return True - - return False - - def _set(self, key: UID, value: Any) -> None: - if self._exist(key): - self._update(key, value) - else: - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - try: - bson_data = { - "_id": key, - f"{key}": _serialize(value, to_bytes=True), - "_repr_debug_": _repr_debug_(value), - } - collection.insert_one(bson_data) - except Exception as e: - raise ValueError(f"Cannot insert data. Error message: {e}") - - def _update(self, key: UID, value: Any) -> None: - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - try: - collection.update_one( - {"_id": key}, - { - "$set": { - f"{key}": _serialize(value, to_bytes=True), - "_repr_debug_": _repr_debug_(value), - } - }, - ) - except Exception as e: - raise RuntimeError( - f"Failed to update obj: {key} with value: {value}. 
Error: {e}" - ) - - def __setitem__(self, key: Any, value: Any) -> None: - self._set(key, value) - - def _get(self, key: UID) -> Any: - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - - result: dict | None = collection.find_one({"_id": key}) - if result is not None: - return _deserialize(result[f"{key}"], from_bytes=True) - else: - # raise KeyError(f"{key} does not exist") - # return an empty set which is the same with SQLiteBackingStore - return set() - - def __getitem__(self, key: Any) -> Self: - try: - return self._get(key) - except KeyError as e: - raise e - - def _len(self) -> int: - collection_status = self.collection - if collection_status.is_err(): - return 0 - collection: MongoCollection = collection_status.ok() - return collection.count_documents(filter={}) - - def __len__(self) -> int: - return self._len() - - def _delete(self, key: UID) -> Result[SyftSuccess, Err]: - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - result = collection.delete_one({"_id": key}) - if result.deleted_count != 1: - raise KeyError(f"{key} does not exist") - return Ok(SyftSuccess(message="Deleted")) - - def __delitem__(self, key: str) -> None: - self._delete(key) - - def _delete_all(self) -> None: - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - collection.delete_many({}) - - def clear(self) -> None: - self._delete_all() - - def _get_all(self) -> Any: - collection_status = self.collection - if collection_status.is_err(): - return collection_status - collection: MongoCollection = collection_status.ok() - result = collection.find() - keys, values = [], [] - for row in result: - keys.append(row["_id"]) - values.append(_deserialize(row[f"{row['_id']}"], from_bytes=True)) - return dict(zip(keys, values)) - - def keys(self) -> Any: - return self._get_all().keys() - - def values(self) -> Any: - return self._get_all().values() - - def items(self) -> Any: - return self._get_all().items() - - def pop(self, key: Any) -> Self: - value = self._get(key) - self._delete(key) - return value - - def __contains__(self, key: Any) -> bool: - return self._exist(key) - - def __iter__(self) -> Any: - return iter(self.keys()) - - def __repr__(self) -> str: - return repr(self._get_all()) - - def copy(self) -> Self: - # 🟡 TODO - raise NotImplementedError - - def update(self, *args: Any, **kwargs: Any) -> None: - """ - Inserts the specified items to the dictionary. - """ - # 🟡 TODO - raise NotImplementedError - - def __del__(self) -> None: - """ - Close the mongo client connection: - - Cleanup client resources and disconnect from MongoDB - - End all server sessions created by this client - - Close all sockets in the connection pools and stop the monitor threads - """ - self.client.close() - - @serializable() class MongoStoreConfig(StoreConfig): __canonical_name__ = "MongoStoreConfig" - """Mongo Store configuration + """Mongo Store configuration Parameters: `client_config`: MongoStoreClientConfig Mongo connection details: hostname, port, user, password etc. @@ -864,7 +66,6 @@ class MongoStoreConfig(StoreConfig): The config used for store locking. Available options: * NoLockingConfig: no locking, ideal for single-thread stores. * ThreadingLockingConfig: threading-based locking, ideal for same-process in-memory stores. 
- * FileLockingConfig: file based locking, ideal for same-device different-processes/threads stores. Defaults to NoLockingConfig. """ diff --git a/packages/syft/src/syft/store/sqlite_document_store.py b/packages/syft/src/syft/store/sqlite_document_store.py index 078f85f64a8..7e8b980ca14 100644 --- a/packages/syft/src/syft/store/sqlite_document_store.py +++ b/packages/syft/src/syft/store/sqlite_document_store.py @@ -1,365 +1,31 @@ -# future -from __future__ import annotations - -# stdlib -from collections import defaultdict -from copy import deepcopy -from pathlib import Path -import sqlite3 -import tempfile -from typing import Any - # third party from pydantic import Field -from pydantic import field_validator -from result import Err -from result import Ok -from result import Result -from typing_extensions import Self # relative -from ..serde.deserialize import _deserialize from ..serde.serializable import serializable -from ..serde.serialize import _serialize -from ..types.uid import UID -from ..util.util import thread_ident from .document_store import DocumentStore -from .document_store import PartitionSettings from .document_store import StoreClientConfig from .document_store import StoreConfig from .kv_document_store import KeyValueBackingStore from .kv_document_store import KeyValueStorePartition from .locks import LockingConfig from .locks import NoLockingConfig -from .locks import SyftLock - -# here we can create a single connection per cache_key -# since pytest is concurrent processes, we need to isolate each connection -# by its filename and optionally the thread that its running in -# we keep track of each SQLiteBackingStore init in REF_COUNTS -# when it hits 0 we can close the connection and release the file descriptor -SQLITE_CONNECTION_POOL_DB: dict[str, sqlite3.Connection] = {} -SQLITE_CONNECTION_POOL_CUR: dict[str, sqlite3.Cursor] = {} -REF_COUNTS: dict[str, int] = defaultdict(int) - - -def cache_key(db_name: str) -> str: - return f"{db_name}_{thread_ident()}" - - -def _repr_debug_(value: Any) -> str: - if hasattr(value, "_repr_debug_"): - return str(value._repr_debug_()) - return repr(value) - - -def raise_exception(table_name: str, e: Exception) -> None: - if "disk I/O error" in str(e): - message = f"Error usually related to concurrent writes. {str(e)}" - raise Exception(message) - - if "Cannot operate on a closed database" in str(e): - message = ( - "Error usually related to calling self.db.close()" - + f"before last SQLiteBackingStore.__del__ gets called. {str(e)}" - ) - raise Exception(message) - # if its something else other than "table already exists" raise original e - if f"table {table_name} already exists" not in str(e): - raise e - -@serializable(attrs=["index_name", "settings", "store_config"]) +@serializable( + attrs=["index_name", "settings", "store_config"], + canonical_name="SQLiteBackingStore", + version=1, +) class SQLiteBackingStore(KeyValueBackingStore): - """Core Store logic for the SQLite stores. 
- - Parameters: - `index_name`: str - Index name - `settings`: PartitionSettings - Syft specific settings - `store_config`: SQLiteStoreConfig - Connection Configuration - `ddtype`: Type - Class used as fallback on `get` errors - """ - - def __init__( - self, - index_name: str, - settings: PartitionSettings, - store_config: StoreConfig, - ddtype: type | None = None, - ) -> None: - self.index_name = index_name - self.settings = settings - self.store_config = store_config - self._ddtype = ddtype - if self.store_config.client_config: - self.file_path = self.store_config.client_config.file_path - if store_config.client_config: - self.db_filename = store_config.client_config.filename - - self.lock = SyftLock(NoLockingConfig()) - self.create_table() - REF_COUNTS[cache_key(self.db_filename)] += 1 - - @property - def table_name(self) -> str: - return f"{self.settings.name}_{self.index_name}" - - def _connect(self) -> None: - # SQLite is not thread safe by default so we ensure that each connection - # comes from a different thread. In cases of Uvicorn and other AWSGI servers - # there will be many threads handling incoming requests so we need to ensure - # that different connections are used in each thread. By using a dict for the - # _db and _cur we can ensure they are never shared - - path = Path(self.file_path) - if not path.exists(): - path.parent.mkdir(parents=True, exist_ok=True) - - if self.store_config.client_config: - connection = sqlite3.connect( - self.file_path, - timeout=self.store_config.client_config.timeout, - check_same_thread=False, # do we need this if we use the lock? - # check_same_thread=self.store_config.client_config.check_same_thread, - ) - # Set journal mode to WAL. - connection.execute("PRAGMA journal_mode = WAL") - connection.execute("PRAGMA busy_timeout = 5000") - connection.execute("PRAGMA temp_store = 2") - connection.execute("PRAGMA synchronous = 1") - SQLITE_CONNECTION_POOL_DB[cache_key(self.db_filename)] = connection - - def create_table(self) -> None: - try: - with self.lock: - self.cur.execute( - f"create table {self.table_name} (uid VARCHAR(32) NOT NULL PRIMARY KEY, " # nosec - + "repr TEXT NOT NULL, value BLOB NOT NULL, " # nosec - + "sqltime TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL)" # nosec - ) - self.db.commit() - except Exception as e: - raise_exception(self.table_name, e) - - @property - def db(self) -> sqlite3.Connection: - if cache_key(self.db_filename) not in SQLITE_CONNECTION_POOL_DB: - self._connect() - return SQLITE_CONNECTION_POOL_DB[cache_key(self.db_filename)] - - @property - def cur(self) -> sqlite3.Cursor: - if cache_key(self.db_filename) not in SQLITE_CONNECTION_POOL_CUR: - SQLITE_CONNECTION_POOL_CUR[cache_key(self.db_filename)] = self.db.cursor() - - return SQLITE_CONNECTION_POOL_CUR[cache_key(self.db_filename)] - - def _close(self) -> None: - self._commit() - REF_COUNTS[cache_key(self.db_filename)] -= 1 - if REF_COUNTS[cache_key(self.db_filename)] <= 0: - # once you close it seems like other object references can't re-use the - # same connection - self.db.close() - del SQLITE_CONNECTION_POOL_DB[cache_key(self.db_filename)] - else: - # don't close yet because another SQLiteBackingStore is probably still open - pass - - def _commit(self) -> None: - self.db.commit() - - def _execute( - self, sql: str, *args: list[Any] | None - ) -> Result[Ok[sqlite3.Cursor], Err[str]]: - with self.lock: - cursor: sqlite3.Cursor | None = None - # err = None - try: - cursor = self.cur.execute(sql, *args) - except Exception as e: - 
raise_exception(self.table_name, e) - - # TODO: Which exception is safe to rollback on? - # we should map out some more clear exceptions that can be returned - # rather than halting the program like disk I/O error etc - # self.db.rollback() # Roll back all changes if an exception occurs. - # err = Err(str(e)) - self.db.commit() # Commit if everything went ok - - # if err is not None: - # return err - - return Ok(cursor) - - def _set(self, key: UID, value: Any) -> None: - if self._exists(key): - self._update(key, value) - else: - insert_sql = ( - f"insert into {self.table_name} (uid, repr, value) VALUES (?, ?, ?)" # nosec - ) - data = _serialize(value, to_bytes=True) - res = self._execute(insert_sql, [str(key), _repr_debug_(value), data]) - if res.is_err(): - raise ValueError(res.err()) - - def _update(self, key: UID, value: Any) -> None: - insert_sql = ( - f"update {self.table_name} set uid = ?, repr = ?, value = ? where uid = ?" # nosec - ) - data = _serialize(value, to_bytes=True) - res = self._execute(insert_sql, [str(key), _repr_debug_(value), data, str(key)]) - if res.is_err(): - raise ValueError(res.err()) - - def _get(self, key: UID) -> Any: - select_sql = f"select * from {self.table_name} where uid = ? order by sqltime" # nosec - res = self._execute(select_sql, [str(key)]) - if res.is_err(): - raise KeyError(f"Query {select_sql} failed") - cursor = res.ok() - - row = cursor.fetchone() - if row is None or len(row) == 0: - raise KeyError(f"{key} not in {type(self)}") - data = row[2] - return _deserialize(data, from_bytes=True) - - def _exists(self, key: UID) -> bool: - select_sql = f"select uid from {self.table_name} where uid = ?" # nosec - - res = self._execute(select_sql, [str(key)]) - if res.is_err(): - return False - cursor = res.ok() - - row = cursor.fetchone() - if row is None: - return False + """Core Store logic for the SQLite stores.""" - return bool(row) + pass - def _get_all(self) -> Any: - select_sql = f"select * from {self.table_name} order by sqltime" # nosec - keys = [] - data = [] - res = self._execute(select_sql) - if res.is_err(): - return {} - cursor = res.ok() - - rows = cursor.fetchall() - if rows is None: - return {} - - for row in rows: - keys.append(UID(row[0])) - data.append(_deserialize(row[2], from_bytes=True)) - return dict(zip(keys, data)) - - def _get_all_keys(self) -> Any: - select_sql = f"select uid from {self.table_name} order by sqltime" # nosec - keys = [] - - res = self._execute(select_sql) - if res.is_err(): - return [] - cursor = res.ok() - - rows = cursor.fetchall() - if rows is None: - return [] - - for row in rows: - keys.append(UID(row[0])) - return keys - - def _delete(self, key: UID) -> None: - select_sql = f"delete from {self.table_name} where uid = ?" 
# nosec - res = self._execute(select_sql, [str(key)]) - if res.is_err(): - raise ValueError(res.err()) - - def _delete_all(self) -> None: - select_sql = f"delete from {self.table_name}" # nosec - res = self._execute(select_sql) - if res.is_err(): - raise ValueError(res.err()) - - def _len(self) -> int: - select_sql = f"select count(uid) from {self.table_name}" # nosec - res = self._execute(select_sql) - if res.is_err(): - raise ValueError(res.err()) - cursor = res.ok() - cnt = cursor.fetchone()[0] - return cnt - - def __setitem__(self, key: Any, value: Any) -> None: - self._set(key, value) - - def __getitem__(self, key: Any) -> Self: - try: - return self._get(key) - except KeyError as e: - if self._ddtype is not None: - return self._ddtype() - raise e - - def __repr__(self) -> str: - return repr(self._get_all()) - - def __len__(self) -> int: - return self._len() - - def __delitem__(self, key: str) -> None: - self._delete(key) - - def clear(self) -> None: - self._delete_all() - - def copy(self) -> Self: - return deepcopy(self) - - def keys(self) -> Any: - return self._get_all_keys() - - def values(self) -> Any: - return self._get_all().values() - - def items(self) -> Any: - return self._get_all().items() - - def pop(self, key: Any) -> Self: - value = self._get(key) - self._delete(key) - return value - - def __contains__(self, key: Any) -> bool: - return self._exists(key) - - def __iter__(self) -> Any: - return iter(self.keys()) - - def __del__(self) -> None: - try: - self._close() - except BaseException: - print("Could not close connection") - pass - - -@serializable() +@serializable(canonical_name="SQLiteStorePartition", version=1) class SQLiteStorePartition(KeyValueStorePartition): """SQLite StorePartition - Parameters: `settings`: PartitionSettings PySyft specific settings, used for indexing and partitioning @@ -367,34 +33,11 @@ class SQLiteStorePartition(KeyValueStorePartition): SQLite specific configuration """ - def close(self) -> None: - self.lock.acquire() - try: - # I think we don't want these now, because of the REF_COUNT? - # self.data._close() - # self.unique_keys._close() - # self.searchable_keys._close() - pass - except BaseException: - pass - self.lock.release() - - def commit(self) -> None: - self.lock.acquire() - try: - self.data._commit() - self.unique_keys._commit() - self.searchable_keys._commit() - except BaseException: - pass - self.lock.release() - # the base document store is already a dict but we can change it later -@serializable() +@serializable(canonical_name="SQLiteDocumentStore", version=1) class SQLiteDocumentStore(DocumentStore): """SQLite Document Store - Parameters: `store_config`: StoreConfig SQLite specific configuration, including connection details and client class type. @@ -403,50 +46,17 @@ class SQLiteDocumentStore(DocumentStore): partition_type = SQLiteStorePartition -@serializable() +@serializable(canonical_name="SQLiteStoreClientConfig", version=1) class SQLiteStoreClientConfig(StoreClientConfig): - """SQLite connection config + """SQLite connection config""" - Parameters: - `filename` : str - Database name - `path` : Path or str - Database folder - `check_same_thread`: bool - If True (default), ProgrammingError will be raised if the database connection is used - by a thread other than the one that created it. If False, the connection may be accessed - in multiple threads; write operations may need to be serialized by the user to avoid - data corruption. 
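# A small stdlib-only sketch of the connection setup the removed
# SQLiteBackingStore._connect performed (WAL journal, busy timeout, shared
# connections across threads); the file and table names are illustrative.
import sqlite3
import tempfile
from pathlib import Path

db_path = Path(tempfile.gettempdir()) / "syftdb_sketch.sqlite"

conn = sqlite3.connect(
    db_path,
    timeout=5,                # wait up to 5s when the database is locked
    check_same_thread=False,  # allow use from other threads; caller serializes writes
)
# Write-ahead logging plus a busy timeout reduces "database is locked" errors
# when several threads or processes hit the same file.
conn.execute("PRAGMA journal_mode = WAL")
conn.execute("PRAGMA busy_timeout = 5000")
conn.execute("PRAGMA temp_store = 2")
conn.execute("PRAGMA synchronous = 1")

conn.execute(
    "create table if not exists kv_sketch ("
    "uid VARCHAR(32) NOT NULL PRIMARY KEY, repr TEXT NOT NULL, value BLOB NOT NULL, "
    "sqltime TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL)"
)
conn.commit()
conn.close()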
- `timeout`: int - How many seconds the connection should wait before raising an exception, if the database - is locked by another connection. If another connection opens a transaction to modify the - database, it will be locked until that transaction is committed. Default five seconds. - """ - - filename: str = "syftdb.sqlite" - path: str | Path = Field(default_factory=tempfile.gettempdir) - check_same_thread: bool = True - timeout: int = 5 - - # We need this in addition to Field(default_factory=...) - # so users can still do SQLiteStoreClientConfig(path=None) - @field_validator("path", mode="before") - @classmethod - def __default_path(cls, path: str | Path | None) -> str | Path: - if path is None: - return tempfile.gettempdir() - return path - - @property - def file_path(self) -> Path | None: - return Path(self.path) / self.filename + pass @serializable() class SQLiteStoreConfig(StoreConfig): __canonical_name__ = "SQLiteStoreConfig" """SQLite Store config, used by SQLiteStorePartition - Parameters: `client_config`: SQLiteStoreClientConfig SQLite connection configuration @@ -458,8 +68,7 @@ class SQLiteStoreConfig(StoreConfig): The config used for store locking. Available options: * NoLockingConfig: no locking, ideal for single-thread stores. * ThreadingLockingConfig: threading-based locking, ideal for same-process in-memory stores. - * FileLockingConfig: file based locking, ideal for same-device different-processes/threads stores. - Defaults to FileLockingConfig. + Defaults to NoLockingConfig. """ client_config: SQLiteStoreClientConfig diff --git a/packages/syft/src/syft/types/blob_storage.py b/packages/syft/src/syft/types/blob_storage.py index 19a29624c06..1ae755467a5 100644 --- a/packages/syft/src/syft/types/blob_storage.py +++ b/packages/syft/src/syft/types/blob_storage.py @@ -22,23 +22,20 @@ # relative from ..client.api import SyftAPI from ..client.client import SyftClient -from ..node.credentials import SyftVerifyKey from ..serde import serialize from ..serde.serializable import serializable +from ..server.credentials import SyftVerifyKey from ..service.action.action_object import ActionObject from ..service.action.action_object import ActionObjectPointer from ..service.action.action_object import BASE_PASSTHROUGH_ATTRS from ..service.action.action_types import action_types -from ..service.response import SyftError -from ..service.response import SyftException from ..service.service import from_api_or_context -from ..types.grid_url import GridURL +from ..types.errors import SyftException +from ..types.server_url import ServerURL from ..types.transforms import keep from ..types.transforms import transform from .datetime import DateTime -from .syft_object import SYFT_OBJECT_VERSION_2 -from .syft_object import SYFT_OBJECT_VERSION_3 -from .syft_object import SYFT_OBJECT_VERSION_4 +from .syft_object import SYFT_OBJECT_VERSION_1 from .syft_object import SyftObject from .uid import UID @@ -55,7 +52,7 @@ @serializable() class BlobFile(SyftObject): __canonical_name__ = "BlobFile" - __version__ = SYFT_OBJECT_VERSION_4 + __version__ = SYFT_OBJECT_VERSION_1 file_name: str syft_blob_storage_entry_id: UID | None = None @@ -73,7 +70,7 @@ def read( ) -> Any: # get blob retrieval object from api + syft_blob_storage_entry_id read_method = from_api_or_context( - "blob_storage.read", self.syft_node_location, self.syft_client_verify_key + "blob_storage.read", self.syft_server_location, self.syft_client_verify_key ) if read_method is not None: blob_retrieval_object = 
read_method(self.syft_blob_storage_entry_id) @@ -90,29 +87,23 @@ def upload_from_path(cls, path: str | Path, client: SyftClient) -> Any: return sy.ActionObject.from_path(path=path).send(client).syft_action_data - def _upload_to_blobstorage_from_api(self, api: SyftAPI) -> SyftError | None: + def _upload_to_blobstorage_from_api(self, api: SyftAPI) -> None: if self.path is None: raise ValueError("cannot upload BlobFile, no path specified") storage_entry = CreateBlobStorageEntry.from_path(self.path) blob_deposit_object = api.services.blob_storage.allocate(storage_entry) - if isinstance(blob_deposit_object, SyftError): - return blob_deposit_object - with open(self.path, "rb") as f: - result = blob_deposit_object.write(f) - - if isinstance(result, SyftError): - return result + blob_deposit_object.write(f).unwrap() self.syft_blob_storage_entry_id = blob_deposit_object.blob_storage_entry_id self.uploaded = True return None - def upload_to_blobstorage(self, client: SyftClient) -> SyftError | None: - self.syft_node_location = client.id + def upload_to_blobstorage(self, client: SyftClient) -> None: + self.syft_server_location = client.id self.syft_client_verify_key = client.verify_key return self._upload_to_blobstorage_from_api(client.api) @@ -183,10 +174,12 @@ def _coll_repr_(self) -> dict[str, str]: return {"file_name": self.file_name} +@serializable(canonical_name="BlobFileType", version=1) class BlobFileType(type): pass +@serializable(canonical_name="BlobFileObjectPointer", version=1) class BlobFileObjectPointer(ActionObjectPointer): pass @@ -194,7 +187,7 @@ class BlobFileObjectPointer(ActionObjectPointer): @serializable() class BlobFileObject(ActionObject): __canonical_name__ = "BlobFileOBject" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 syft_internal_type: ClassVar[type[Any]] = BlobFile syft_pointer_type: ClassVar[type[ActionObjectPointer]] = BlobFileObjectPointer @@ -204,7 +197,7 @@ class BlobFileObject(ActionObject): @serializable() class SecureFilePathLocation(SyftObject): __canonical_name__ = "SecureFilePathLocation" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID path: str @@ -225,7 +218,7 @@ def generate_url( @serializable() class SeaweedSecureFilePathLocation(SecureFilePathLocation): __canonical_name__ = "SeaweedSecureFilePathLocation" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 upload_id: str | None = None @@ -247,7 +240,7 @@ def generate_url( from ..store.blob_storage import BlobRetrievalByURL return BlobRetrievalByURL( - url=GridURL.from_url(url), file_name=Path(self.path).name, type_=type_ + url=ServerURL.from_url(url), file_name=Path(self.path).name, type_=type_ ) except BotoClientError as e: raise SyftException(e) @@ -256,7 +249,7 @@ def generate_url( @serializable() class AzureSecureFilePathLocation(SecureFilePathLocation): __canonical_name__ = "AzureSecureFilePathLocation" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 # upload_id: str azure_profile_name: str # Used by Seaweedfs to refer to a remote config @@ -289,7 +282,7 @@ def generate_url( @serializable() class BlobStorageEntry(SyftObject): __canonical_name__ = "BlobStorageEntry" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 id: UID location: SecureFilePathLocation | SeaweedSecureFilePathLocation @@ -307,7 +300,7 @@ class BlobStorageEntry(SyftObject): @serializable() class BlobStorageMetadata(SyftObject): __canonical_name__ = "BlobStorageMetadata" - __version__ = 
SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_1 type_: type[SyftObject] | None = None mimetype: str = "bytes" @@ -318,7 +311,7 @@ class BlobStorageMetadata(SyftObject): @serializable() class CreateBlobStorageEntry(SyftObject): __canonical_name__ = "CreateBlobStorageEntry" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID type_: type | None = None @@ -327,8 +320,21 @@ class CreateBlobStorageEntry(SyftObject): extensions: list[str] = [] @classmethod - def from_obj(cls, obj: SyftObject) -> Self: - file_size = sys.getsizeof(serialize._serialize(obj=obj, to_bytes=True)) + def from_blob_storage_entry(cls, entry: BlobStorageEntry) -> Self: + # TODO extensions are not stored in the BlobStorageEntry, + # so a blob entry from path might get a different filename + # after uploading. + return cls( + id=entry.id, + type_=entry.type_, + mimetype=entry.mimetype, + file_size=entry.file_size, + ) + + @classmethod + def from_obj(cls, obj: SyftObject, file_size: int | None = None) -> Self: + if file_size is None: + file_size = sys.getsizeof(serialize._serialize(obj=obj, to_bytes=True)) return cls(file_size=file_size, type_=type(obj)) @classmethod diff --git a/packages/syft/src/syft/types/cache_object.py b/packages/syft/src/syft/types/cache_object.py deleted file mode 100644 index ddee2e32a6d..00000000000 --- a/packages/syft/src/syft/types/cache_object.py +++ /dev/null @@ -1,14 +0,0 @@ -# stdlib -from typing import Any - -# relative -from ..serde.serializable import serializable -from .base import SyftBaseModel - - -@serializable() -class CachedSyftObject(SyftBaseModel): - """This class is used to represent the cached result.""" - - result: Any - error_msg: str | None = None diff --git a/packages/syft/src/syft/types/datetime.py b/packages/syft/src/syft/types/datetime.py index 10a6e04e941..93dd4ffc65e 100644 --- a/packages/syft/src/syft/types/datetime.py +++ b/packages/syft/src/syft/types/datetime.py @@ -1,6 +1,9 @@ # stdlib from datetime import datetime +from datetime import timedelta +from datetime import timezone from functools import total_ordering +import re from typing import Any # third party @@ -8,31 +11,50 @@ # relative from ..serde.serializable import serializable -from .syft_object import SYFT_OBJECT_VERSION_2 +from .syft_object import SYFT_OBJECT_VERSION_1 from .syft_object import SyftObject from .uid import UID +DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S" +DATETIME_REGEX = r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}" + + +def str_is_datetime(str_: str) -> bool: + return bool(re.match(DATETIME_REGEX, str_)) + @serializable() @total_ordering class DateTime(SyftObject): __canonical_name__ = "DateTime" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID | None = None # type: ignore utc_timestamp: float @classmethod def now(cls) -> Self: - return cls(utc_timestamp=datetime.utcnow().timestamp()) + utc_datetime = datetime.now(tz=timezone.utc) + return cls(utc_timestamp=utc_datetime.timestamp()) + + @classmethod + def from_str(cls, datetime_str: str) -> "DateTime": + utc_datetime = datetime.strptime(datetime_str, DATETIME_FORMAT).replace( + tzinfo=timezone.utc + ) + return cls(utc_timestamp=utc_datetime.timestamp()) def __str__(self) -> str: - utc_datetime = datetime.utcfromtimestamp(self.utc_timestamp) - return utc_datetime.strftime("%Y-%m-%d %H:%M:%S") + utc_datetime = datetime.fromtimestamp(self.utc_timestamp, tz=timezone.utc) + return utc_datetime.strftime(DATETIME_FORMAT) def __hash__(self) -> int: return hash(self.utc_timestamp) + def 
__sub__(self, other: "DateTime") -> timedelta: + res = self.timedelta(other) + return res + def __eq__(self, other: Any) -> bool: if other is None: return False @@ -40,3 +62,43 @@ def __eq__(self, other: Any) -> bool: def __lt__(self, other: Self) -> bool: return self.utc_timestamp < other.utc_timestamp + + def timedelta(self, other: "DateTime") -> timedelta: + utc_timestamp_delta = self.utc_timestamp - other.utc_timestamp + return timedelta(seconds=utc_timestamp_delta) + + @classmethod + def from_timestamp(cls, ts: float) -> datetime: + return cls(utc_timestamp=ts) + + @classmethod + def from_datetime(cls, dt: datetime) -> "DateTime": + utc_datetime = dt.astimezone(timezone.utc) + return cls(utc_timestamp=utc_datetime.timestamp()) + + +def format_timedelta(local_timedelta: timedelta) -> str: + total_seconds = int(local_timedelta.total_seconds()) + hours, leftover = divmod(total_seconds, 3600) + minutes, seconds = divmod(leftover, 60) + + hours_string = f"{hours}:" if hours != 0 else "" + minutes_string = f"{minutes}:".zfill(3) + seconds_string = f"{seconds}".zfill(2) + + return f"{hours_string}{minutes_string}{seconds_string}" + + +def format_timedelta_human_readable(local_timedelta: timedelta) -> str: + # Returns a human-readable string representing the timedelta + units = [("day", 86400), ("hour", 3600), ("minute", 60), ("second", 1)] + total_seconds = int(local_timedelta.total_seconds()) + + for unit_name, unit_seconds in units: + unit_value, total_seconds = divmod(total_seconds, unit_seconds) + if unit_value > 0: + if unit_value == 1: + return f"{unit_value} {unit_name}" + else: + return f"{unit_value} {unit_name}s" + return "0 seconds" diff --git a/packages/syft/src/syft/types/dicttuple.py b/packages/syft/src/syft/types/dicttuple.py index 4fe202454f2..f82c23528fe 100644 --- a/packages/syft/src/syft/types/dicttuple.py +++ b/packages/syft/src/syft/types/dicttuple.py @@ -1,4 +1,5 @@ # stdlib +from abc import ABCMeta from collections import OrderedDict from collections import deque from collections.abc import Callable @@ -7,15 +8,22 @@ from collections.abc import KeysView from collections.abc import Mapping from types import MappingProxyType +from typing import Any from typing import Generic from typing import SupportsIndex from typing import TypeVar +from typing import get_args +from typing import get_origin from typing import overload # third party +from pydantic import GetCoreSchemaHandler +from pydantic import ValidatorFunctionWrapHandler +from pydantic_core import CoreSchema +from pydantic_core import core_schema from typing_extensions import Self -_T = TypeVar("_T") +_T = TypeVar("_T", bound="DictTuple") _KT = TypeVar("_KT") _VT = TypeVar("_VT") @@ -40,27 +48,27 @@ # to customize the way __new__ and __init__ work together, by iterating over key_value_pairs # once to extract both keys and values, then passing keys to __new__, values to __init__ # within the same function call. -class _Meta(type): +class _Meta(ABCMeta): @overload - def __call__(cls: type[_T]) -> _T: ... + def __call__(cls: type[_T], /) -> _T: ... # type: ignore[misc] @overload - def __call__(cls: type[_T], __value: Iterable[tuple[_KT, _VT]]) -> _T: ... + def __call__(cls: type[_T], __value: Iterable[tuple[_KT, _VT]], /) -> _T: ... # type: ignore[misc] @overload - def __call__(cls: type[_T], __value: Mapping[_KT, _VT]) -> _T: ... + def __call__(cls: type[_T], __value: Mapping[_KT, _VT], /) -> _T: ... 
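# A short usage sketch of the reworked, timezone-aware DateTime helpers above,
# assuming the syft package built from this diff is importable.
from syft.types.datetime import DateTime, format_timedelta_human_readable

start = DateTime.from_str("2024-01-01 00:00:00")
end = DateTime.from_str("2024-01-01 01:01:40")

delta = end - start  # __sub__ delegates to .timedelta()
print(format_timedelta_human_readable(delta))  # "1 hour"
print(str(end))                                # "2024-01-01 01:01:40" (UTC)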
# type: ignore[misc] @overload - def __call__( - cls: type[_T], __value: Iterable[_VT], __key: Collection[_KT] + def __call__( # type: ignore[misc] + cls: type[_T], __value: Iterable[_VT], __key: Collection[_KT], / ) -> _T: ... @overload - def __call__( - cls: type[_T], __value: Iterable[_VT], __key: Callable[[_VT], _KT] + def __call__( # type: ignore[misc] + cls: type[_T], __value: Iterable[_VT], __key: Callable[[_VT], _KT], / ) -> _T: ... - def __call__( + def __call__( # type: ignore[misc] cls: type[_T], __value: Iterable | None = None, __key: Callable | Collection | None = None, @@ -69,7 +77,7 @@ def __call__( # DictTuple() if __value is None and __key is None: obj = cls.__new__(cls) - obj.__init__() + obj.__init__() # type: ignore[misc] return obj # DictTuple(DictTuple(...)) @@ -79,27 +87,27 @@ def __call__( # DictTuple({"x": 123, "y": 456}) elif isinstance(__value, Mapping) and __key is None: obj = cls.__new__(cls, __value.values()) - obj.__init__(__value.keys()) + obj.__init__(__value.keys()) # type: ignore[misc] return obj # DictTuple(EnhancedDictTuple(...)) # EnhancedDictTuple(DictTuple(...)) # where EnhancedDictTuple subclasses DictTuple - elif hasattr(__value, "items") and callable(__value.items): - return cls.__call__(__value.items()) + elif callable(__value_items := getattr(__value, "items", None)): + return cls.__call__(__value_items()) # DictTuple([("x", 123), ("y", 456)]) elif isinstance(__value, Iterable) and __key is None: keys = OrderedDict() - values = deque() + values: deque = deque() for i, (k, v) in enumerate(__value): keys[k] = i values.append(v) obj = cls.__new__(cls, values) - obj.__init__(keys) + obj.__init__(keys) # type: ignore[misc] return obj @@ -108,15 +116,15 @@ def __call__( keys = OrderedDict((k, i) for i, k in enumerate(__key)) obj = cls.__new__(cls, __value) - obj.__init__(keys) + obj.__init__(keys) # type: ignore[misc] return obj # DictTuple(["abc", "xyz"], lambda x: x[0]) # equivalent to DictTuple({"a": "abc", "x": "xyz"}) elif isinstance(__value, Iterable) and isinstance(__key, Callable): - obj = cls.__new__(cls, __value) - obj.__init__(__key) + obj = cls.__new__(cls, __value) # type: ignore[misc] + obj.__init__(__key) # type: ignore[misc] return obj @@ -165,21 +173,23 @@ class DictTuple(tuple[_VT, ...], Generic[_KT, _VT], metaclass=_Meta): # These overloads are copied from _Meta.__call__ just for IDE hints @overload - def __init__(self) -> None: ... + def __init__(self, /) -> None: ... @overload - def __init__(self, __value: Iterable[tuple[_KT, _VT]]) -> None: ... + def __init__(self, __value: Iterable[tuple[_KT, _VT]], /) -> None: ... @overload - def __init__(self, __value: Mapping[_KT, _VT]) -> None: ... + def __init__(self, __value: Mapping[_KT, _VT], /) -> None: ... @overload - def __init__(self, __value: Iterable[_VT], __key: Collection[_KT]) -> None: ... + def __init__(self, __value: Iterable[_VT], __key: Collection[_KT], /) -> None: ... @overload - def __init__(self, __value: Iterable[_VT], __key: Callable[[_VT], _KT]) -> None: ... + def __init__( + self, __value: Iterable[_VT], __key: Callable[[_VT], _KT], / + ) -> None: ... - def __init__(self, __value=None, /): + def __init__(self, __value: Any = None, /) -> None: # type: ignore[misc] if isinstance(__value, MappingProxyType): self.__mapping = __value elif isinstance(__value, Mapping): @@ -204,16 +214,16 @@ def __init__(self, __value=None, /): "or implement `__index__()`" ) - @overload - def __getitem__(self, __key: _KT) -> _VT: ... 
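# A usage sketch of the DictTuple construction forms described in the comments
# above, assuming the syft package built from this diff is importable.
from syft.types.dicttuple import DictTuple

a = DictTuple({"x": 123, "y": 456})            # from a Mapping
b = DictTuple([("x", 123), ("y", 456)])        # from key/value pairs
c = DictTuple(["abc", "xyz"], lambda s: s[0])  # keys derived from the values

assert a == b == (123, 456)   # still a plain tuple of the values
assert a["x"] == 123          # ...but also indexable by key
assert list(c.keys()) == ["a", "x"]
assert dict(c.items()) == {"a": "abc", "x": "xyz"}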
+ @overload # type: ignore[override] + def __getitem__(self, __key: _KT, /) -> _VT: ... - @overload - def __getitem__(self, __key: slice) -> Self: ... + @overload # type: ignore[overload-overlap] + def __getitem__(self, __key: slice, /) -> Self: ... @overload - def __getitem__(self, __key: SupportsIndex) -> _VT: ... + def __getitem__(self, __key: SupportsIndex, /) -> _VT: ... - def __getitem__(self, __key, /): + def __getitem__(self, __key: _KT | slice | SupportsIndex, /) -> _VT | Self: if isinstance(__key, slice): return self.__class__( super().__getitem__(__key), @@ -228,8 +238,59 @@ def __getitem__(self, __key, /): def __repr__(self) -> str: return f"{self.__class__.__qualname__}{super().__repr__()}" + def _repr_html_(self) -> str: + super_repr_html = getattr(super(), "_repr_html_", None) + if super_repr_html is None: + return repr(self) + if len(self) == 0: # empty DictTuple + return "0 records" + return super_repr_html() + def keys(self) -> KeysView[_KT]: return self.__mapping.keys() def items(self) -> Iterable[tuple[_KT, _VT]]: return zip(self.__mapping.keys(), self) + + # https://docs.pydantic.dev/latest/concepts/types/#handling-custom-generic-classes + # pydantic validator + # this enables annotating a field with DictTuple[K, V] instead of just DictTuple + # inside a pydantic BaseModel, e.g. + # + # class DatasetPageView(BaseModel): + # datasets: DictTuple[str, Dataset] + @classmethod + def __get_pydantic_core_schema__( + cls, source_type: Any, handler: GetCoreSchemaHandler + ) -> CoreSchema: + origin = get_origin(source_type) + if origin is None: + origin = source_type + kt, vt = (Any, Any) + else: + kt, vt, *_ = get_args(source_type) + + k_schema = handler.generate_schema(MappingProxyType[kt, int]) + v_schema = handler.generate_schema(vt) + + def val_k(v: cls, handler: ValidatorFunctionWrapHandler) -> cls: + handler(v.__mapping) + return v + + def val_v(v: cls, handler: ValidatorFunctionWrapHandler) -> cls: + handler(v) + return v + + # pydantic validator for DictTuple[K, V] + # - check that object has type DictTuple + # - check that object is a tuple[V] + # - check that the keys have type K + return core_schema.chain_schema( + [ + core_schema.is_instance_schema(cls), + core_schema.no_info_wrap_validator_function( + val_v, core_schema.tuple_variable_schema(items_schema=v_schema) + ), + core_schema.no_info_wrap_validator_function(val_k, k_schema), + ] + ) diff --git a/packages/syft/src/syft/types/errors.py b/packages/syft/src/syft/types/errors.py new file mode 100644 index 00000000000..d1255921a01 --- /dev/null +++ b/packages/syft/src/syft/types/errors.py @@ -0,0 +1,357 @@ +# stdlib +from collections.abc import Callable +from importlib import import_module +import inspect +import os +import traceback +from types import CodeType +from types import TracebackType +from typing import Any +from typing import TYPE_CHECKING +from typing import TypeVar +import uuid + +# third party +from IPython import get_ipython +from IPython.display import HTML +from IPython.display import display +import psutil +from typing_extensions import Self + +# relative +from ..service.user.user_roles import ServiceRole +from ..util.notebook_ui.components.tabulator_template import jinja_env + +if TYPE_CHECKING: + # relative + from ..service.context import AuthedServiceContext + + +class SyftException(Exception): + """ + A Syft custom exception class with distinct public and private messages. + + Attributes: + private_message (str): Detailed error message intended for administrators. 
+ public_message (str): General error message for end-users. + """ + + public_message = "An error occurred. Contact the admin for more information." + + def __init__( + self, + private_message: str | None = None, + public_message: str | None = None, + server_trace: str | None = None, + *args: Any, + **kwargs: Any, + ) -> None: + if public_message is not None and not isinstance(public_message, str): + raise TypeError("public message should be a string") + if private_message is not None and not isinstance(private_message, str): + raise TypeError("private message should be a string") + + if public_message: + self.public_message = public_message + + self._private_message = private_message or "" + self._server_trace = server_trace or "" + super().__init__(self.public, *args, **kwargs) + + @property + def public(self) -> str: + """ + Returns the public error message. + + Returns: + str: The public error message. + """ + return self.public_message + + def get_message(self, context: "AuthedServiceContext") -> str: + """ + Retrieves the appropriate message based on the user's role, obtained via + `context.role`. + + Args: + context (AuthedServiceContext): The server context. + + Returns: + str: The private or public message based on the role. + """ + if context.role.value >= ServiceRole.DATA_OWNER.value or context.dev_mode: + return self._private_message or self.public + return self.public + + def get_tb( + self, + context: "AuthedServiceContext | None" = None, + overwrite_permission: bool = False, + ) -> str | None: + """ + Returns the error traceback as a string, if the user is able to see it. + + Args: + context (AuthedServiceContext): The authenticated service context which + contains the user's role. + + Returns: + str | None: A string representation of the current stack trace if the + user is a DataOwner or higher, otherwise None. + """ + # stdlib + import traceback + + if ( + overwrite_permission + or (context and context.role.value >= ServiceRole.DATA_OWNER.value) + or (context and context.dev_mode) + ): + return "".join(traceback.format_exception(self)) + return None + + @classmethod + def from_exception( + cls, + exc: BaseException, + public_message: str | None = None, + private_message: str | None = None, + ) -> Self: + """ + Builds an instance of SyftException from an existing exception. It allows + setting a public message for end users or resetting the private message. + If no private_message is provided, the original exception's message is used. + If no public_message is provided, the default public message is used. + + Args: + exception (BaseException): The original exception from which to create + the new instance. The message from this exception will be used as + the base message for the new instance. + + public_message (str, optional): An optional message intended for public + display. This message can provide user-friendly information about + the error. If not provided, the default message is used. + + private_message (str, optional): An optional message intended for private + display. This message should provide more information about the error + to administrators. If not provided, the exception's message is used. + + Returns: + Self: A new instance of the class. The new instance retains the traceback + of the original exception. 
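# A small sketch of the public/private message split described in the docstrings
# above, assuming the syft package built from this diff is importable.
from syft.types.errors import SyftException

lookup: dict = {}
try:
    lookup["missing"]
except KeyError as e:
    # Wrap a low-level error: admins keep str(e) as the private message,
    # end users only ever see the public one.
    err = SyftException.from_exception(e, public_message="Could not load the dataset.")
    print(err.public)  # "Could not load the dataset."
    print(str(err))    # private message (falls back to the KeyError text)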
+ """ + if isinstance(exc, SyftException): + private_message = private_message or exc._private_message + public_message = public_message or exc.public_message + elif isinstance(exc, BaseException): + private_message = private_message or str(exc) + + new_exc = cls(private_message, public_message=public_message) + new_exc.__traceback__ = exc.__traceback__ + new_exc.__cause__ = exc + new_exc = process_traceback(new_exc) + return new_exc + + @property + def _repr_html_class_(self) -> str: + return "alert-danger" + + def __str__(self) -> str: + # this assumes that we show the server side error on the client side without a jupyter notebook + server_trace = self._server_trace + message = self._private_message or self.public + + if server_trace: + message = f"{message}\nserver_trace: {server_trace}" + + return message + + def _repr_html_(self) -> str: + is_dev_mode = os.getenv("DEV_MODE", "false").lower() == "true" + display = "block" if self._server_trace or is_dev_mode else "none" + + exc = process_traceback(self) + _traceback_str_list = traceback.format_exception(exc) + traceback_str = "".join(_traceback_str_list) + + table_template = jinja_env.get_template("syft_exception.jinja2") + table_html = table_template.render( + name=type(self).__name__, + html_id=uuid.uuid4().hex, + server_trace=self._server_trace, + message=self._private_message or self.public, + traceback_str=traceback_str, + display=display, + dev_mode=is_dev_mode, + ) + return table_html + + +class raises: + def __init__(self, expected_exception, show=False): # type: ignore + self.expected_exception = expected_exception + self.show = show + + def __enter__(self): # type: ignore + # Before block of code + pass + + def __exit__(self, exc_type, exc_value, traceback): # type: ignore + message = None + expected_exception_type = self.expected_exception + if not isinstance(expected_exception_type, type): + expected_exception_type = type(self.expected_exception) + if hasattr(self.expected_exception, "public_message"): + message = self.expected_exception.public_message.replace("*", "") + + # After block of code + if exc_type is None: + raise AssertionError( + f"Expected {self.expected_exception} to be raised, " + "but no exception was raised." + ) + if not issubclass(exc_type, expected_exception_type): + raise AssertionError( + f"Expected {expected_exception_type} to be raised, but got {exc_type}." + ) + if message and message not in exc_value.public_message: + raise AssertionError( + f"Expected {expected_exception_type} to be raised, " + f"did not contain {message}." + ) + if self.show: + # keep this print! + print("with sy.raises successfully caught the following exception:") + if hasattr(exc_value, "_repr_html_"): + display(HTML(exc_value._repr_html_())) + else: + print( + f"The following exception was catched\n{exc_value}", + ) + return True # Suppress the exception + + +class ExceptionFilter(tuple): + """ + Filter and store all exception classes from a given module path. This class can be + used in try/except blocks to handle these exceptions as a group (see example). + + Attributes: + module (str): The name of the module from which exceptions are filtered. + + Example: + ``` + from syft.types.errors import ExceptionFilter + + try: + ... + except ExceptionFilter("google.cloud.bigquery") as e: + ... 
+ ``` + """ + + def __init__(self, module: str) -> None: + self.module = module + + def __new__(cls, module: str) -> Self: + """ + Creates a new instance of ExceptionFilter, which gathers all exception classes + from the specified module and stores them as a tuple. + """ + exceptions: tuple[type[BaseException], ...] + + try: + imported_module = import_module(module) + except ModuleNotFoundError: + # TODO: log warning + exceptions = () + else: + exceptions = tuple( + obj + for _, obj in inspect.getmembers(imported_module, inspect.isclass) + if issubclass(obj, BaseException) and not issubclass(obj, Warning) + ) + + instance = super().__new__(cls, exceptions) + + instance.module = module + + return instance + + +_excluded_code_objects: set[CodeType] = set() + +E = TypeVar("E", bound=BaseException) +F = TypeVar("F", bound=Callable[..., object]) + + +def exclude_from_traceback(f: F) -> F: + """ + Decorator to mark a function to be removed from the traceback when an + exception is raised. This is useful for functions that are not relevant to + the error message and would only clutter the traceback. + """ + _excluded_code_objects.add(f.__code__) + return f + + +def process_traceback(exc: E) -> E: + """ + Adjusts the traceback of an exception to remove specific frames related to + the as_result decorator and the unwrap() call, for cleaner and more relevant + error messages. + + Args: + exc (E): The exception whose traceback is to be adjusted. + + Returns: + E: The same exception with an adjusted traceback. + """ + # We want to adjust the traceback so we can remove frames which contain + # a function marked with the _excluded_code_objects decorator, improving + # the stacktrace of the error messages. + tb = exc.__traceback__ + frames: list[TracebackType] = [] + + while tb is not None: + if tb.tb_frame.f_code not in _excluded_code_objects: + frames.append(tb) + tb = tb.tb_next + + # Before being done, we need to adjust the traceback.tb_next so that + # the frames are linked together properly. + for i, tb in enumerate(frames): + if i + 1 == len(frames): + tb.tb_next = None + else: + tb.tb_next = frames[i + 1] + + return exc + + +class CredentialsError(SyftException): + public_message = "Invalid credentials." + + +def syft_exception_handler( + shell: Any, etype: Any, evalue: Any, tb: Any, tb_offset: Any = None +) -> None: + display(HTML(evalue._repr_html_())) + + +runs_in_pytest = False +for pid in psutil.pids(): + try: + if "PYTEST_CURRENT_TEST" in psutil.Process(pid).environ(): + runs_in_pytest = True + except Exception: + pass # nosec + + +# be very careful when changing this. 
pytest (with nbmake) will +# not pick up exceptions if they have a custom exception handler (fail silently) +if not runs_in_pytest: + try: + get_ipython().set_custom_exc((SyftException,), syft_exception_handler) # noqa: F821 + except Exception: + pass # nosec diff --git a/packages/syft/src/syft/types/grid_url.py b/packages/syft/src/syft/types/grid_url.py deleted file mode 100644 index 91cf53e46d7..00000000000 --- a/packages/syft/src/syft/types/grid_url.py +++ /dev/null @@ -1,171 +0,0 @@ -# future -from __future__ import annotations - -# stdlib -import copy -import os -import re -from urllib.parse import urlparse - -# third party -import requests -from typing_extensions import Self - -# relative -from ..serde.serializable import serializable -from ..util.util import verify_tls - - -@serializable(attrs=["protocol", "host_or_ip", "port", "path", "query"]) -class GridURL: - @classmethod - def from_url(cls, url: str | GridURL) -> GridURL: - if isinstance(url, GridURL): - return url - try: - # urlparse doesnt handle no protocol properly - if "://" not in url: - url = "http://" + url - parts = urlparse(url) - host_or_ip_parts = parts.netloc.split(":") - # netloc is host:port - port = 80 - if len(host_or_ip_parts) > 1: - port = int(host_or_ip_parts[1]) - host_or_ip = host_or_ip_parts[0] - if parts.scheme == "https": - port = 443 - return GridURL( - host_or_ip=host_or_ip, - path=parts.path, - port=port, - protocol=parts.scheme, - query=getattr(parts, "query", ""), - ) - except Exception as e: - print(f"Failed to convert url: {url} to GridURL. {e}") - raise e - - def __init__( - self, - protocol: str = "http", - host_or_ip: str = "localhost", - port: int | None = 80, - path: str = "", - query: str = "", - ) -> None: - # in case a preferred port is listed but its not clear if an alternative - # port was included in the supplied host_or_ip:port combo passed in earlier - match_port = re.search(":[0-9]{1,5}", host_or_ip) - if match_port: - sub_grid_url: GridURL = GridURL.from_url(host_or_ip) - host_or_ip = str(sub_grid_url.host_or_ip) # type: ignore - port = int(sub_grid_url.port) # type: ignore - protocol = str(sub_grid_url.protocol) # type: ignore - path = str(sub_grid_url.path) # type: ignore - - prtcl_pattrn = "://" - if prtcl_pattrn in host_or_ip: - protocol = host_or_ip[: host_or_ip.find(prtcl_pattrn)] - start_index = host_or_ip.find(prtcl_pattrn) + len(prtcl_pattrn) - host_or_ip = host_or_ip[start_index:] - - self.host_or_ip = host_or_ip - self.path: str = path - self.port = port - self.protocol = protocol - self.query = query - - def with_path(self, path: str) -> Self: - dupe = copy.copy(self) - dupe.path = path - return dupe - - def as_container_host(self, container_host: str | None = None) -> Self: - if self.host_or_ip not in [ - "localhost", - "host.docker.internal", - "host.k3d.internal", - ]: - return self - - if container_host is None: - # TODO: we could move config.py to syft and then the Settings singleton - # could be importable in all parts of the code - container_host = os.getenv("CONTAINER_HOST", None) - - if container_host: - if container_host == "docker": - hostname = "host.docker.internal" - elif container_host == "podman": - hostname = "host.containers.internal" - else: - hostname = "host.k3d.internal" - else: - # convert it back for non container clients - hostname = "localhost" - - return self.__class__( - protocol=self.protocol, - host_or_ip=hostname, - port=self.port, - path=self.path, - ) - - @property - def query_string(self) -> str: - query_string = "" - if len(self.query) 
> 0: - query_string = f"?{self.query}" - return query_string - - @property - def url(self) -> str: - return f"{self.base_url}{self.path}{self.query_string}" - - @property - def url_no_port(self) -> str: - return f"{self.base_url_no_port}{self.path}{self.query_string}" - - @property - def base_url(self) -> str: - return f"{self.protocol}://{self.host_or_ip}:{self.port}" - - @property - def base_url_no_port(self) -> str: - return f"{self.protocol}://{self.host_or_ip}" - - @property - def url_path(self) -> str: - return f"{self.path}{self.query_string}" - - def to_tls(self) -> GridURL: - if self.protocol == "https": - return self - - # TODO: only ignore ssl in dev mode - r = requests.get( # nosec - self.base_url, verify=verify_tls() - ) # ignore ssl cert if its fake - new_base_url = r.url - if new_base_url.endswith("/"): - new_base_url = new_base_url[0:-1] - return self.__class__.from_url( - url=f"{new_base_url}{self.path}{self.query_string}" - ) - - def __repr__(self) -> str: - return f"<{type(self).__name__} {self.url}>" - - def __str__(self) -> str: - return self.url - - def __hash__(self) -> int: - return hash(self.__str__()) - - def __copy__(self) -> GridURL: - return self.__class__.from_url(self.url) - - def set_port(self, port: int) -> Self: - self.port = port - return self diff --git a/packages/syft/src/syft/types/identity.py b/packages/syft/src/syft/types/identity.py index 52c61ef8c0d..f5c4f1f26ea 100644 --- a/packages/syft/src/syft/types/identity.py +++ b/packages/syft/src/syft/types/identity.py @@ -8,8 +8,8 @@ from typing_extensions import Self # relative -from ..node.credentials import SyftVerifyKey from ..serde.serializable import serializable +from ..server.credentials import SyftVerifyKey from .base import SyftBaseModel from .uid import UID @@ -19,23 +19,23 @@ class Identity(SyftBaseModel): - node_id: UID + server_id: UID verify_key: SyftVerifyKey __repr_attrs__ = ["id", "verify_key"] def __repr__(self) -> str: - return f"{self.__class__.__name__} " + return f"{self.__class__.__name__} " @classmethod def from_client(cls, client: SyftClient) -> Self: if not client.credentials: raise ValueError(f"{client} has no signing key!") - return cls(node_id=client.id, verify_key=client.credentials.verify_key) + return cls(server_id=client.id, verify_key=client.credentials.verify_key) -@serializable() +@serializable(canonical_name="UserIdentity", version=1) class UserIdentity(Identity): - """This class is used to identify the data scientist users of the node""" + """This class is used to identify the data scientist users of the server""" pass diff --git a/packages/syft/src/syft/types/result.py b/packages/syft/src/syft/types/result.py new file mode 100644 index 00000000000..bba6e80777b --- /dev/null +++ b/packages/syft/src/syft/types/result.py @@ -0,0 +1,129 @@ +# stdlib +from collections.abc import Callable +import functools +from typing import Any +from typing import Final +from typing import Generic +from typing import Literal +from typing import NoReturn +from typing import ParamSpec +from typing import TypeAlias +from typing import TypeVar + +# relative +from .errors import SyftException +from .errors import exclude_from_traceback +from .errors import process_traceback + +T = TypeVar("T", covariant=True) +E = TypeVar("E", covariant=True, bound=BaseException) +BE = TypeVar("BE", bound=BaseException) +P = ParamSpec("P") + + +class Ok(Generic[T]): + __slots__ = ("value",) + __match_args__ = ("ok_value",) + + def __init__(self, value: T): + self.value = value + + def __repr__(self) -> str: 
+ return f"Ok({self.value})" + + @property + def ok_value(self) -> T: + return self.value + + def err(self) -> None: + return None + + def ok(self) -> T: + return self.value + + def is_err(self) -> Literal[False]: + return False + + def is_ok(self) -> Literal[True]: + return True + + def unwrap(self, *args: Any, **kwargs: Any) -> T: + return self.value + + +class Err(Generic[E]): + __slots__ = ("value",) + __match_args__ = ("error_value",) + + def __init__(self, value: E): + self.value = value + + def __repr__(self) -> str: + return f"Err({self.value})" + + @property + def error_value(self) -> E: + return self.value + + def err(self) -> E: + return self.value + + def ok(self) -> None: + return None + + def is_err(self) -> Literal[True]: + return True + + def is_ok(self) -> Literal[False]: + return False + + @exclude_from_traceback + def unwrap( + self, public_message: str | None = None, private_message: str | None = None + ) -> NoReturn: + if isinstance(self.value, SyftException): + if public_message is not None: + self.value.public_message = public_message + if private_message is not None: + self.value._private_message = private_message + if isinstance(self.value, BaseException): + raise self.value + raise TypeError("Error is not a BaseException") + + +OkErr: Final = (Ok, Err) +Result: TypeAlias = Ok[T] | Err[E] + + +def as_result( + *exceptions: type[BE], convert_to_syft_exception: bool = False +) -> Callable[[Callable[P, T]], Callable[P, Result[T, BE]]]: + if not exceptions or not all( + issubclass(exception, BaseException) for exception in exceptions + ): + raise TypeError("The as_result() decorator only accepts exceptions") + + class _AsResultError(Exception): ... + + def decorator(func: Callable[P, T]) -> Callable[P, Result[T, BE]]: + @exclude_from_traceback + @functools.wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> Result[T, BE]: + try: + output = func(*args, **kwargs) + if isinstance(output, Ok) or isinstance(output, Err): + raise _AsResultError( + f"Functions decorated with `as_result` should not return Result.\n" + f"Did you forget to unwrap() the result in {func.__name__}?\n" + f"result: {output}" + ) + return Ok(output) + except exceptions as exc: + if convert_to_syft_exception and not isinstance(exc, SyftException): + exc = SyftException.from_exception(exc) # type: ignore + exc = process_traceback(exc) + return Err(exc) + + return wrapper + + return decorator diff --git a/packages/syft/src/syft/types/server_url.py b/packages/syft/src/syft/types/server_url.py new file mode 100644 index 00000000000..c0bbab960a5 --- /dev/null +++ b/packages/syft/src/syft/types/server_url.py @@ -0,0 +1,182 @@ +# future +from __future__ import annotations + +# stdlib +import copy +import logging +import os +import re +from urllib.parse import urlparse + +# third party +import requests +from typing_extensions import Self + +# relative +from ..serde.serializable import serializable +from ..util.util import verify_tls + +logger = logging.getLogger(__name__) + + +@serializable( + attrs=["protocol", "host_or_ip", "port", "path", "query"], + canonical_name="ServerURL", + version=1, +) +class ServerURL: + @classmethod + def from_url(cls, url: str | ServerURL) -> ServerURL: + if isinstance(url, ServerURL): + return url + try: + # urlparse doesnt handle no protocol properly + if "://" not in url: + url = "http://" + url + parts = urlparse(url) + host_or_ip_parts = parts.netloc.split(":") + # netloc is host:port + port = 80 + if len(host_or_ip_parts) > 1: + port = int(host_or_ip_parts[1]) + 
host_or_ip = host_or_ip_parts[0] + if parts.scheme == "https": + port = 443 + return ServerURL( + host_or_ip=host_or_ip, + path=parts.path, + port=port, + protocol=parts.scheme, + query=getattr(parts, "query", ""), + ) + except Exception as e: + logger.error(f"Failed to convert url: {url} to ServerURL. {e}") + raise e + + def __init__( + self, + protocol: str = "http", + host_or_ip: str = "localhost", + port: int | None = 80, + path: str = "", + query: str = "", + ) -> None: + # in case a preferred port is listed but its not clear if an alternative + # port was included in the supplied host_or_ip:port combo passed in earlier + match_port = re.search(":[0-9]{1,5}", host_or_ip) + if match_port: + sub_server_url: ServerURL = ServerURL.from_url(host_or_ip) + host_or_ip = str(sub_server_url.host_or_ip) # type: ignore + port = int(sub_server_url.port) # type: ignore + protocol = str(sub_server_url.protocol) # type: ignore + path = str(sub_server_url.path) # type: ignore + + prtcl_pattrn = "://" + if prtcl_pattrn in host_or_ip: + protocol = host_or_ip[: host_or_ip.find(prtcl_pattrn)] + start_index = host_or_ip.find(prtcl_pattrn) + len(prtcl_pattrn) + host_or_ip = host_or_ip[start_index:] + + self.host_or_ip = host_or_ip + self.path: str = path + self.port = port + self.protocol = protocol + self.query = query + + def with_path(self, path: str) -> Self: + dupe = copy.copy(self) + dupe.path = path + return dupe + + def as_container_host(self, container_host: str | None = None) -> Self: + if self.host_or_ip not in [ + "localhost", + "host.docker.internal", + "host.k3d.internal", + ]: + return self + + if container_host is None: + # TODO: we could move config.py to syft and then the Settings singleton + # could be importable in all parts of the code + container_host = os.getenv("CONTAINER_HOST", None) + + if container_host: + if container_host == "docker": + hostname = "host.docker.internal" + elif container_host == "podman": + hostname = "host.containers.internal" + else: + hostname = "host.k3d.internal" + else: + # convert it back for non container clients + hostname = "localhost" + + return self.__class__( + protocol=self.protocol, + host_or_ip=hostname, + port=self.port, + path=self.path, + ) + + @property + def query_string(self) -> str: + query_string = "" + if len(self.query) > 0: + query_string = f"?{self.query}" + return query_string + + @property + def url(self) -> str: + return f"{self.base_url}{self.path}{self.query_string}" + + @property + def url_no_port(self) -> str: + return f"{self.base_url_no_port}{self.path}{self.query_string}" + + @property + def base_url(self) -> str: + return f"{self.protocol}://{self.host_or_ip}:{self.port}" + + @property + def base_url_no_port(self) -> str: + return f"{self.protocol}://{self.host_or_ip}" + + @property + def url_no_protocol(self) -> str: + return f"{self.host_or_ip}:{self.port}{self.path}" + + @property + def url_path(self) -> str: + return f"{self.path}{self.query_string}" + + def to_tls(self) -> ServerURL: + if self.protocol == "https": + return self + + # TODO: only ignore ssl in dev mode + r = requests.get( # nosec + self.base_url, verify=verify_tls() + ) # ignore ssl cert if its fake + new_base_url = r.url + if new_base_url.endswith("/"): + new_base_url = new_base_url[0:-1] + return self.__class__.from_url( + url=f"{new_base_url}{self.path}{self.query_string}" + ) + + def __repr__(self) -> str: + return f"<{type(self).__name__} {self.url}>" + + def __str__(self) -> str: + return self.url + + def __hash__(self) -> int: + return 
hash(self.__str__()) + + def __copy__(self) -> ServerURL: + return self.__class__.from_url(self.url) + + def set_port(self, port: int) -> Self: + self.port = port + return self diff --git a/packages/syft/src/syft/types/syft_equals.py b/packages/syft/src/syft/types/syft_equals.py new file mode 100644 index 00000000000..6abb589c14a --- /dev/null +++ b/packages/syft/src/syft/types/syft_equals.py @@ -0,0 +1,37 @@ +# stdlib +import math +from typing import Any + + +def _safe_isnan(v: Any) -> bool: + try: + return math.isnan(v) + except TypeError: + return False + + +def _syft_equals(v1: Any, v2: Any) -> bool: + if v1 is None: + return v2 is None + + # handle nan, since nan==nan is False + if _safe_isnan(v1): + return _safe_isnan(v2) + + if isinstance(v1, dict) and isinstance(v2, dict): + return _syft_recursive_dict_equals(v1, v2) + + return v1 == v2 + + +def _syft_recursive_dict_equals(d1: dict, d2: dict) -> bool: + if len(d1) != len(d2): + return False + + for k in d1.keys(): + if k not in d2: + return False + if not _syft_equals(d1[k], d2[k]): + return False + + return True diff --git a/packages/syft/src/syft/types/syft_metaclass.py b/packages/syft/src/syft/types/syft_metaclass.py index dadd8664aa6..a8d18386e2b 100644 --- a/packages/syft/src/syft/types/syft_metaclass.py +++ b/packages/syft/src/syft/types/syft_metaclass.py @@ -1,6 +1,5 @@ # stdlib from typing import Any -from typing import TypeVar from typing import final # third party @@ -10,8 +9,6 @@ # relative from ..serde.serializable import serializable -_T = TypeVar("_T", bound=BaseModel) - class EmptyType(type): def __repr__(self) -> str: @@ -21,19 +18,29 @@ def __bool__(self) -> bool: return False -@serializable() +@serializable(canonical_name="Empty", version=1) @final class Empty(metaclass=EmptyType): pass class PartialModelMetaclass(ModelMetaclass): - def __call__(cls: type[_T], *args: Any, **kwargs: Any) -> _T: - for field_info in cls.model_fields.values(): - if field_info.annotation is not None and field_info.is_required(): - field_info.annotation = field_info.annotation | EmptyType - field_info.default = Empty - - cls.model_rebuild(force=True) - - return super().__call__(*args, **kwargs) # type: ignore[misc] + def __new__( + mcs, + cls_name: str, + bases: tuple[type[Any], ...], + namespace: dict[str, Any], + *args: Any, + **kwargs: Any, + ) -> type: + cls = super().__new__(mcs, cls_name, bases, namespace, *args, **kwargs) + + if issubclass(cls, BaseModel): + for field_info in cls.model_fields.values(): + if field_info.annotation is not None and field_info.is_required(): + field_info.annotation = field_info.annotation | EmptyType + field_info.default = Empty + + cls.model_rebuild(force=True) + + return cls diff --git a/packages/syft/src/syft/types/syft_migration.py b/packages/syft/src/syft/types/syft_migration.py index f3205282194..bc9d8f5c4fb 100644 --- a/packages/syft/src/syft/types/syft_migration.py +++ b/packages/syft/src/syft/types/syft_migration.py @@ -44,7 +44,7 @@ def decorator(function: Callable) -> Callable: klass_from=klass_from, klass_to=klass_to, transforms=transforms ) - SyftMigrationRegistry.register_transform( + SyftMigrationRegistry.register_migration_function( klass_type_str=klass_from_str, version_from=version_from, version_to=version_to, diff --git a/packages/syft/src/syft/types/syft_object.py b/packages/syft/src/syft/types/syft_object.py index cbce6600589..7b30ffaa562 100644 --- a/packages/syft/src/syft/types/syft_object.py +++ b/packages/syft/src/syft/types/syft_object.py @@ -5,15 +5,17 @@ from 
collections.abc import Iterable from collections.abc import KeysView from collections.abc import Mapping -from collections.abc import MutableMapping -from collections.abc import MutableSequence from collections.abc import Sequence from collections.abc import Set +from datetime import datetime +from datetime import timedelta +from datetime import timezone +from functools import cache +from functools import total_ordering from hashlib import sha256 import inspect from inspect import Signature -import re -import traceback +import logging import types from types import NoneType from types import UnionType @@ -22,67 +24,76 @@ from typing import ClassVar from typing import Optional from typing import TYPE_CHECKING +from typing import TypeVar from typing import Union from typing import get_args from typing import get_origin -import warnings # third party -import pandas as pd import pydantic from pydantic import ConfigDict from pydantic import EmailStr from pydantic import Field from pydantic import model_validator from pydantic.fields import PydanticUndefined -from result import OkErr from typeguard import check_type from typing_extensions import Self # relative -from ..node.credentials import SyftVerifyKey -from ..serde.recursive_primitives import recursive_serde_register_type +from ..serde.serializable import serializable from ..serde.serialize import _serialize as serialize +from ..server.credentials import SyftVerifyKey from ..util.autoreload import autoreload_enabled from ..util.markdown import as_markdown_python_code -from ..util.notebook_ui.notebook_addons import create_table_template +from ..util.notebook_ui.components.tabulator_template import build_tabulator_table from ..util.util import aggressive_set_attr from ..util.util import full_name_with_qualname from ..util.util import get_qualname_for -from .dicttuple import DictTuple +from .result import Err +from .result import Ok +from .syft_equals import _syft_equals from .syft_metaclass import Empty from .syft_metaclass import PartialModelMetaclass +from .syft_object_registry import SyftObjectRegistry from .uid import UID +logger = logging.getLogger(__name__) + if TYPE_CHECKING: # relative + from ..client.api import SyftAPI from ..service.sync.diff_state import AttrDiff IntStr = int | str AbstractSetIntStr = Set[IntStr] MappingIntStrAny = Mapping[IntStr, Any] +T = TypeVar("T") SYFT_OBJECT_VERSION_1 = 1 SYFT_OBJECT_VERSION_2 = 2 SYFT_OBJECT_VERSION_3 = 3 SYFT_OBJECT_VERSION_4 = 4 +SYFT_OBJECT_VERSION_5 = 5 +SYFT_OBJECT_VERSION_6 = 6 supported_object_versions = [ SYFT_OBJECT_VERSION_1, SYFT_OBJECT_VERSION_2, SYFT_OBJECT_VERSION_3, SYFT_OBJECT_VERSION_4, + SYFT_OBJECT_VERSION_5, + SYFT_OBJECT_VERSION_6, ] HIGHEST_SYFT_OBJECT_VERSION = max(supported_object_versions) LOWEST_SYFT_OBJECT_VERSION = min(supported_object_versions) -# These attributes are dynamically added based on node/client +# These attributes are dynamically added based on server/client # that is interaction with the SyftObject DYNAMIC_SYFT_ATTRIBUTES = [ - "syft_node_location", + "syft_server_location", "syft_client_verify_key", ] @@ -128,108 +139,59 @@ class SyftBaseObject(pydantic.BaseModel, SyftHashableObject): __canonical_name__: str __version__: int # data is always versioned - syft_node_location: UID | None = Field(default=None, exclude=True) + syft_server_location: UID | None = Field(default=None, exclude=True) syft_client_verify_key: SyftVerifyKey | None = Field(default=None, exclude=True) - def _set_obj_location_(self, node_uid: UID, credentials: SyftVerifyKey) -> 
None: - self.syft_node_location = node_uid + def _set_obj_location_(self, server_uid: UID, credentials: SyftVerifyKey) -> None: + self.syft_server_location = server_uid self.syft_client_verify_key = credentials + def get_api( + self, + server_uid: UID | None = None, + user_verify_key: SyftVerifyKey | None = None, + ) -> "SyftAPI": + if server_uid is None: + server_uid = self.syft_server_location -class Context(SyftBaseObject): - __canonical_name__ = "Context" - __version__ = SYFT_OBJECT_VERSION_2 + if user_verify_key is None: + user_verify_key = self.syft_client_verify_key - pass + # relative + from ..client.api import APIRegistry + return APIRegistry.api_for( + server_uid=server_uid, + user_verify_key=user_verify_key, + ).unwrap( + public_message=f"Can't access Syft API using this object. You must login to {self.syft_server_location}" + ) -class SyftObjectRegistry: - __object_version_registry__: dict[ - str, type["SyftObject"] | type["SyftObjectRegistry"] - ] = {} - __object_transform_registry__: dict[str, Callable] = {} + def get_api_wrapped(self): # type: ignore + # relative + from ..client.api import APIRegistry - def __init_subclass__(cls, **kwargs: Any) -> None: - super().__init_subclass__(**kwargs) - if hasattr(cls, "__canonical_name__") and hasattr(cls, "__version__"): - mapping_string = f"{cls.__canonical_name__}_{cls.__version__}" + return APIRegistry.api_for( + server_uid=self.syft_server_location, + user_verify_key=self.syft_client_verify_key, + ) - if ( - mapping_string in cls.__object_version_registry__ - and not autoreload_enabled() - ): - current_cls = cls.__object_version_registry__[mapping_string] - if cls == current_cls: - # same class so noop - return None - - # user code is reinitialized which means it might have a new address - # in memory so for that we can just skip - if "syft.user" in cls.__module__: - # this happens every time we reload the user code - return None - else: - # this shouldn't happen and is usually a mistake of reusing the - # same __canonical_name__ and __version__ in two classes - raise Exception(f"Duplicate mapping for {mapping_string} and {cls}") - else: - # only if the cls has not been registered do we want to register it - cls.__object_version_registry__[mapping_string] = cls - @classmethod - def versioned_class( - cls, name: str, version: int - ) -> type["SyftObject"] | type["SyftObjectRegistry"] | None: - mapping_string = f"{name}_{version}" - if mapping_string not in cls.__object_version_registry__: - return None - return cls.__object_version_registry__[mapping_string] +class Context(SyftBaseObject): + __canonical_name__ = "Context" + __version__ = SYFT_OBJECT_VERSION_1 - @classmethod - def add_transform( - cls, - klass_from: str, - version_from: int, - klass_to: str, - version_to: int, - method: Callable, - ) -> None: - mapping_string = f"{klass_from}_{version_from}_x_{klass_to}_{version_to}" - cls.__object_transform_registry__[mapping_string] = method + pass - @classmethod - def get_transform( - cls, type_from: type["SyftObject"], type_to: type["SyftObject"] - ) -> Callable: - for type_from_mro in type_from.mro(): - if issubclass(type_from_mro, SyftObject): - klass_from = type_from_mro.__canonical_name__ - version_from = type_from_mro.__version__ - else: - klass_from = type_from_mro.__name__ - version_from = None - for type_to_mro in type_to.mro(): - if issubclass(type_to_mro, SyftBaseObject): - klass_to = type_to_mro.__canonical_name__ - version_to = type_to_mro.__version__ - else: - klass_to = type_to_mro.__name__ - version_to = None - - 
mapping_string = ( - f"{klass_from}_{version_from}_x_{klass_to}_{version_to}" - ) - if mapping_string in cls.__object_transform_registry__: - return cls.__object_transform_registry__[mapping_string] - raise Exception( - f"No mapping found for: {type_from} to {type_to} in" - f"the registry: {cls.__object_transform_registry__.keys()}" - ) + +@cache +def cached_get_type_hints(cls: type) -> dict[str, Any]: + return typing.get_type_hints(cls) class SyftMigrationRegistry: __migration_version_registry__: dict[str, dict[int, str]] = {} - __migration_transform_registry__: dict[str, dict[str, Callable]] = {} + __migration_function_registry__: dict[str, dict[str, Callable]] = {} def __init_subclass__(cls, **kwargs: Any) -> None: """ @@ -266,16 +228,16 @@ def register_version(cls, klass: type) -> None: klass_version: fqn } - @classmethod - def get_versions(cls, canonical_name: str) -> list[int]: - available_versions: dict = cls.__migration_version_registry__.get( - canonical_name, - {}, - ) - return list(available_versions.keys()) + # @classmethod + # def get_versions(cls, canonical_name: str) -> list[int]: + # available_versions: dict = cls.__migration_version_registry__.get( + # canonical_name, + # {}, + # ) + # return list(available_versions.keys()) @classmethod - def register_transform( + def register_migration_function( cls, klass_type_str: str, version_from: int, version_to: int, method: Callable ) -> None: """ @@ -284,7 +246,7 @@ def register_transform( "canonical_name": {"version_from x version_to": } } For example - {'NodeMetadata': {'1x2': , + {'ServerMetadata': {'1x2': , '2x1': }} """ if klass_type_str not in cls.__migration_version_registry__: @@ -298,11 +260,9 @@ def register_transform( if versions_exists: mapping_string = f"{version_from}x{version_to}" - if klass_type_str not in cls.__migration_transform_registry__: - cls.__migration_transform_registry__[klass_type_str] = {} - cls.__migration_transform_registry__[klass_type_str][mapping_string] = ( - method - ) + if klass_type_str not in cls.__migration_function_registry__: + cls.__migration_function_registry__[klass_type_str] = {} + cls.__migration_function_registry__[klass_type_str][mapping_string] = method else: raise Exception( f"Available versions for {klass_type_str} are: {available_versions}." 
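For reference, the two-level layout that `register_migration_function` maintains ( `{"canonical_name": {"<from>x<to>": migration_function}}` ) can be sketched in isolation. The snippet below is only an illustrative stand-in, not Syft's actual registry; the `ServerMetadata` entries and the lambda migrations are hypothetical.

```python
# Minimal sketch of the migration registry layout described in the docstring above.
from collections.abc import Callable

_migration_registry: dict[str, dict[str, Callable]] = {}


def register_migration(
    canonical_name: str, version_from: int, version_to: int, method: Callable
) -> None:
    # keys look like "1x2" for an upgrade and "2x1" for a downgrade
    key = f"{version_from}x{version_to}"
    _migration_registry.setdefault(canonical_name, {})[key] = method


def get_migration(canonical_name: str, version_from: int, version_to: int) -> Callable:
    try:
        return _migration_registry[canonical_name][f"{version_from}x{version_to}"]
    except KeyError:
        raise ValueError(
            f"No migration registered for {canonical_name} {version_from} -> {version_to}"
        ) from None


# register an upgrade and a downgrade for a hypothetical versioned object
register_migration("ServerMetadata", 1, 2, lambda obj: {**obj, "schema": 2})
register_migration(
    "ServerMetadata", 2, 1, lambda obj: {k: v for k, v in obj.items() if k != "schema"}
)

print(get_migration("ServerMetadata", 1, 2)({"name": "test"}))
# {'name': 'test', 'schema': 2}
```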
@@ -333,9 +293,9 @@ def get_migration( mapping_string = f"{version_from}x{version_to}" if ( mapping_string - in cls.__migration_transform_registry__[klass_from] + in cls.__migration_function_registry__[klass_from] ): - return cls.__migration_transform_registry__[klass_from][ + return cls.__migration_function_registry__[klass_from][ mapping_string ] raise ValueError( @@ -360,11 +320,9 @@ def get_migration_for_version( mapping_string = f"{version_from}x{version_to}" if ( mapping_string - in cls.__migration_transform_registry__[ - type_from.__canonical_name__ - ] + in cls.__migration_function_registry__[type_from.__canonical_name__] ): - return cls.__migration_transform_registry__[klass_from][ + return cls.__migration_function_registry__[klass_from][ mapping_string ] @@ -378,14 +336,58 @@ def get_migration_for_version( base_attrs_sync_ignore = [ - "syft_node_location", + "syft_server_location", "syft_client_verify_key", ] -class SyftObject(SyftBaseObject, SyftObjectRegistry, SyftMigrationRegistry): +@serializable() +class SyftObjectVersioned(SyftBaseObject, SyftMigrationRegistry): + __canonical_name__ = "SyftObjectVersioned" + __version__ = SYFT_OBJECT_VERSION_1 + + +@serializable() +@total_ordering +class BaseDateTime(SyftObjectVersioned): + __canonical_name__ = "BaseDateTime" + __version__ = SYFT_OBJECT_VERSION_1 + # id: UID | None = None # type: ignore + utc_timestamp: float + + @classmethod + def now(cls) -> Self: + return cls(utc_timestamp=datetime.now(timezone.utc).timestamp()) + + def __str__(self) -> str: + utc_datetime = datetime.fromtimestamp(self.utc_timestamp, tz=timezone.utc) + return utc_datetime.strftime("%Y-%m-%d %H:%M:%S") + + def __hash__(self) -> int: + return hash(self.utc_timestamp) + + def __sub__(self, other: Self) -> timedelta: + res = timedelta(seconds=self.utc_timestamp - other.utc_timestamp) + return res + + def __eq__(self, other: Any) -> bool: + if other is None: + return False + return self.utc_timestamp == other.utc_timestamp + + def __lt__(self, other: Self) -> bool: + return self.utc_timestamp < other.utc_timestamp + + +EXCLUDED_FROM_SIGNATURE = set( + DYNAMIC_SYFT_ATTRIBUTES + ["created_date", "updated_date", "deleted_date"] +) + + +@serializable() +class SyftObject(SyftObjectVersioned): __canonical_name__ = "SyftObject" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 model_config = ConfigDict( arbitrary_types_allowed=True, @@ -394,6 +396,9 @@ class SyftObject(SyftBaseObject, SyftObjectRegistry, SyftMigrationRegistry): # all objects have a UID id: UID + created_date: BaseDateTime | None = None + updated_date: BaseDateTime | None = None + deleted_date: BaseDateTime | None = None # # move this to transforms @model_validator(mode="before") @@ -405,6 +410,7 @@ def make_id(cls, values: Any) -> Any: values["id"] = id_field.annotation() return values + __order_by__: ClassVar[tuple[str, str]] = ("_created_at", "asc") __attr_searchable__: ClassVar[ list[str] ] = [] # keys which can be searched in the ORM @@ -420,6 +426,19 @@ def make_id(cls, values: Any) -> Any: None # show these in html repr of an object ) __validate_private_attrs__: ClassVar[bool] = True + __table_coll_widths__: ClassVar[list[str] | None] = None + __table_sort_attr__: ClassVar[str | None] = None + + def refresh(self) -> None: + try: + api = self._get_api() + new_object = api.services.migration._get_object( + uid=self.id, object_type=type(self) + ) + if type(new_object) == type(self): + self.__dict__.update(new_object.__dict__) + except Exception as _: + return def 
__syft_get_funcs__(self) -> list[tuple[str, Signature]]: funcs = print_type_cache[type(self)] @@ -532,21 +551,10 @@ def keys(self) -> KeysView[str]: def __getitem__(self, key: str | int) -> Any: return self.__dict__.__getitem__(key) # type: ignore - def _upgrade_version(self, latest: bool = True) -> "SyftObject": - constructor = SyftObjectRegistry.versioned_class( - name=self.__canonical_name__, version=self.__version__ + 1 - ) - if not constructor: - return self - else: - # should we do some kind of recursive upgrades? - upgraded = constructor._from_previous_version(self) - if latest: - upgraded = upgraded._upgrade_version(latest=latest) - return upgraded - # transform from one supported type to another - def to(self, projection: type, context: Context | None = None) -> Any: + def to(self, projection: type[T], context: Context | None = None) -> T: + # relative + # 🟡 TODO 19: Could we do an mro style inheritence conversion? Risky? transform = SyftObjectRegistry.get_transform(type(self), projection) return transform(self, context) @@ -554,25 +562,17 @@ def to(self, projection: type, context: Context | None = None) -> Any: def to_dict( self, exclude_none: bool = False, exclude_empty: bool = False ) -> dict[str, Any]: - warnings.warn( - "`SyftObject.to_dict` is deprecated and will be removed in a future version", - PendingDeprecationWarning, - stacklevel=2, - ) - # 🟡 TODO 18: Remove to_dict and replace usage with transforms etc - if not exclude_none and not exclude_empty: - return self.dict() - else: - new_dict = {} - for k, v in dict(self).items(): - # exclude dynamically added syft attributes - if k in DYNAMIC_SYFT_ATTRIBUTES: - continue - if exclude_empty and v is not Empty: - new_dict[k] = v - if exclude_none and v is not None: - new_dict[k] = v - return new_dict + new_dict = {} + for k, v in dict(self).items(): + # exclude dynamically added syft attributes + if k in DYNAMIC_SYFT_ATTRIBUTES: + continue + if exclude_empty and v is Empty: + continue + if exclude_none and v is None: + continue + new_dict[k] = v + return new_dict def __post_init__(self) -> None: pass @@ -582,7 +582,7 @@ def _syft_set_validate_private_attrs_(self, **kwargs: Any) -> None: return # Validate and set private attributes # https://github.com/pydantic/pydantic/issues/2105 - annotations = typing.get_type_hints(self.__class__, localns=locals()) + annotations = cached_get_type_hints(self.__class__) for attr, decl in self.__private_attributes__.items(): value = kwargs.get(attr, decl.get_default()) var_annotation = annotations.get(attr) @@ -618,8 +618,9 @@ def _syft_keys_types_dict(cls, attr_name: str) -> dict[str, type]: if isinstance(method, types.FunctionType): type_ = method.__annotations__["return"] except Exception as e: - print( - f"Failed to get attribute from key {key} type for {cls} storage. 
{e}" + logger.error( + f"Failed to get attribute from key {key} type for {cls} storage.", + exc_info=e, ) raise e # EmailStr seems to be lost every time the value is set even with a validator @@ -656,6 +657,7 @@ def syft_eq(self, ext_obj: Self | None) -> bool: attrs_to_check = self.__dict__.keys() obj_exclude_attrs = getattr(self, "__exclude_sync_diff_attrs__", []) + obj_exclude_attrs.extend(["created_date", "updated_date", "deleted_date"]) for attr in attrs_to_check: if attr not in base_attrs_sync_ignore and attr not in obj_exclude_attrs: obj_attr = getattr(self, attr) @@ -663,7 +665,7 @@ def syft_eq(self, ext_obj: Self | None) -> bool: if hasattr(obj_attr, "syft_eq") and not inspect.isclass(obj_attr): if not obj_attr.syft_eq(ext_obj=ext_obj_attr): return False - elif obj_attr != ext_obj_attr: + elif not _syft_equals(obj_attr, ext_obj_attr): return False return True @@ -682,11 +684,13 @@ def syft_get_diffs(self, ext_obj: Self) -> list["AttrDiff"]: attrs_to_check = self.__dict__.keys() obj_exclude_attrs = getattr(self, "__exclude_sync_diff_attrs__", []) + obj_exclude_attrs.extend(["created_date", "updated_date", "deleted_date"]) for attr in attrs_to_check: if attr not in base_attrs_sync_ignore and attr not in obj_exclude_attrs: obj_attr = getattr(self, attr) ext_obj_attr = getattr(ext_obj, attr) + # TODO move to _syft_equals if isinstance(obj_attr, list) and isinstance(ext_obj_attr, list): list_diff = ListDiff.from_lists( attr_name=attr, low_list=obj_attr, high_list=ext_obj_attr @@ -694,13 +698,13 @@ def syft_get_diffs(self, ext_obj: Self) -> list["AttrDiff"]: if not list_diff.is_empty: diff_attrs.append(list_diff) - # TODO: to the same check as above for Dicts when we use them else: - cmp = obj_attr.__eq__ if hasattr(obj_attr, "syft_eq"): - cmp = obj_attr.syft_eq + is_equal = obj_attr.syft_eq(ext_obj_attr) + else: + is_equal = _syft_equals(obj_attr, ext_obj_attr) - if not cmp(ext_obj_attr): + if not is_equal: diff_attr = AttrDiff( attr_name=attr, low_attr=obj_attr, @@ -709,6 +713,15 @@ def syft_get_diffs(self, ext_obj: Self) -> list["AttrDiff"]: diff_attrs.append(diff_attr) return diff_attrs + # TODO: Move this away from here + def _get_api(self) -> "SyftAPI": + # relative + from ..client.api import APIRegistry + + return APIRegistry.api_for( + self.syft_server_location, self.syft_client_verify_key + ).unwrap() + ## OVERRIDING pydantic.BaseModel.__getattr__ ## return super().__getattribute__(item) -> return self.__getattribute__(item) ## so that ActionObject.__getattribute__ works properly, @@ -770,179 +783,18 @@ def short_uid(uid: UID | None) -> str | None: return str(uid)[:6] + "..." 
-def get_repr_values_table( - _self: Mapping | Iterable, - is_homogenous: bool, - extra_fields: list | None = None, -) -> dict: - if extra_fields is None: - extra_fields = [] - - cols = defaultdict(list) - for item in iter(_self.items() if isinstance(_self, Mapping) else _self): - # unpack dict - if isinstance(_self, Mapping): - key, item = item - cols["key"].append(key) - - # get id - id_ = getattr(item, "id", None) - if id_ is not None: - cols["id"].append({"value": str(id_), "type": "clipboard"}) - - if type(item) == type: - t = full_name_with_qualname(item) - else: - try: - t = item.__class__.__name__ - except Exception: - t = item.__repr__() - - if not is_homogenous: - cols["type"].append(t) - - # if has _coll_repr_ - if hasattr(item, "_coll_repr_"): - ret_val = item._coll_repr_() - if "id" in ret_val: - del ret_val["id"] - for key in ret_val.keys(): - cols[key].append(ret_val[key]) - else: - for field in extra_fields: - value = item - try: - attrs = field.split(".") - for i, attr in enumerate(attrs): - # find indexing like abc[1] - res = re.search(r"\[[+-]?\d+\]", attr) - has_index = False - if res: - has_index = True - index_str = res.group() - index = int(index_str.replace("[", "").replace("]", "")) - attr = attr.replace(index_str, "") - - value = getattr(value, attr, None) - if isinstance(value, list) and has_index: - value = value[index] - # If the object has a special representation when nested we will use that instead - if ( - hasattr(value, "__repr_syft_nested__") - and i == len(attrs) - 1 - ): - value = value.__repr_syft_nested__() - if ( - isinstance(value, list) - and i == len(attrs) - 1 - and len(value) > 0 - and hasattr(value[0], "__repr_syft_nested__") - ): - value = [ - ( - x.__repr_syft_nested__() - if hasattr(x, "__repr_syft_nested__") - else x - ) - for x in value - ] - if value is None: - value = "n/a" - - except Exception as e: - print(e) - value = None - cols[field].append(str(value)) - - df = pd.DataFrame(cols) - - if "created_at" in df.columns: - df.sort_values(by="created_at", ascending=False, inplace=True) - - return df.to_dict("records") # type: ignore - - -def list_dict_repr_html(self: Mapping | Set | Iterable) -> str: - try: - max_check = 1 - items_checked = 0 - has_syft = False - extra_fields: list = [] - if isinstance(self, Mapping): - values: Any = list(self.values()) - elif isinstance(self, Set): - values = list(self) - else: - values = self - - if len(values) == 0: - return self.__repr__() - - for item in iter(self.values() if isinstance(self, Mapping) else self): - items_checked += 1 - if items_checked > max_check: - break - - if hasattr(type(item), "mro") and type(item) != type: - mro: list | str = type(item).mro() - elif hasattr(item, "mro") and type(item) != type: - mro = item.mro() - else: - mro = str(self) - - if "syft" in str(mro).lower(): - has_syft = True - extra_fields = getattr(item, "__repr_attrs__", []) - break - - if has_syft: - # if custom_repr: - table_icon = None - if hasattr(values[0], "icon"): - table_icon = values[0].icon - # this is a list of dicts - is_homogenous = len({type(x) for x in values}) == 1 - # third party - first_value = values[0] - if is_homogenous: - cls_name = first_value.__class__.__name__ - else: - cls_name = "" - try: - vals = get_repr_values_table( - self, is_homogenous, extra_fields=extra_fields - ) - except Exception: - return str(self) - - return create_table_template( - vals, - f"{cls_name} {self.__class__.__name__.capitalize()}", - table_icon=table_icon, - ) - - except Exception as e: - print( - f"error 
representing {type(self)} of objects. {e}, {traceback.format_exc()}" - ) - pass - - # stdlib - import html - - return html.escape(self.__repr__()) - - # give lists and dicts a _repr_html_ if they contain SyftObject's -aggressive_set_attr(type([]), "_repr_html_", list_dict_repr_html) -aggressive_set_attr(type({}), "_repr_html_", list_dict_repr_html) -aggressive_set_attr(type(set()), "_repr_html_", list_dict_repr_html) -aggressive_set_attr(tuple, "_repr_html_", list_dict_repr_html) +aggressive_set_attr(type([]), "_repr_html_", build_tabulator_table) +aggressive_set_attr(type({}), "_repr_html_", build_tabulator_table) +aggressive_set_attr(type(set()), "_repr_html_", build_tabulator_table) +aggressive_set_attr(tuple, "_repr_html_", build_tabulator_table) class StorableObjectType: def to(self, projection: type, context: Context | None = None) -> Any: # 🟡 TODO 19: Could we do an mro style inheritence conversion? Risky? + # relative + transform = SyftObjectRegistry.get_transform(type(self), projection) return transform(self, context) @@ -953,46 +805,36 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: TupleGenerator = Generator[tuple[str, Any], None, None] +@serializable() class PartialSyftObject(SyftObject, metaclass=PartialModelMetaclass): """Syft Object to which partial arguments can be provided.""" __canonical_name__ = "PartialSyftObject" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 def __iter__(self) -> TupleGenerator: yield from ((k, v) for k, v in super().__iter__() if v is not Empty) + def apply(self, to: SyftObject) -> None: + for k, v in self: + setattr(to, k, v) -recursive_serde_register_type(PartialSyftObject) - - -def attach_attribute_to_syft_object(result: Any, attr_dict: dict[str, Any]) -> Any: - constructor = None - extra_args = [] - single_entity = False +def attach_attribute_to_syft_object(result: Any, attr_dict: dict[str, Any]) -> None: + iterator: Iterable - if isinstance(result, OkErr): - constructor = type(result) - result = result.value - - if isinstance(result, MutableMapping): - iterable_keys: Iterable = result.keys() - elif isinstance(result, MutableSequence): - iterable_keys = range(len(result)) - elif isinstance(result, tuple): - iterable_keys = range(len(result)) - constructor = type(result) - if isinstance(result, DictTuple): - extra_args.append(result.keys()) - result = list(result) + if isinstance(result, Ok): + iterator = (result.ok(),) + elif isinstance(result, Err): + iterator = (result.err(),) + elif isinstance(result, Mapping): + iterator = result.values() + elif isinstance(result, Sequence): + iterator = result else: - iterable_keys = range(1) - result = [result] - single_entity = True + iterator = (result,) - for key in iterable_keys: - _object = result[key] + for _object in iterator: # if object is SyftBaseObject, # then attach the value to the attribute # on the object @@ -1000,13 +842,5 @@ def attach_attribute_to_syft_object(result: Any, attr_dict: dict[str, Any]) -> A for attr_name, attr_value in attr_dict.items(): setattr(_object, attr_name, attr_value) - for field_name, attr in _object.__dict__.items(): - updated_attr = attach_attribute_to_syft_object(attr, attr_dict) - setattr(_object, field_name, updated_attr) - result[key] = _object - - wrapped_result = result[0] if single_entity else result - if constructor is not None: - wrapped_result = constructor(wrapped_result, *extra_args) - - return wrapped_result + for field in _object.model_fields.keys(): + attach_attribute_to_syft_object(getattr(_object, field), 
attr_dict) diff --git a/packages/syft/src/syft/types/syft_object_registry.py b/packages/syft/src/syft/types/syft_object_registry.py new file mode 100644 index 00000000000..9487ae6ece6 --- /dev/null +++ b/packages/syft/src/syft/types/syft_object_registry.py @@ -0,0 +1,173 @@ +# stdlib +from collections.abc import Callable +from typing import Any +from typing import TYPE_CHECKING + +SYFT_086_PROTOCOL_VERSION = "4" + +# third party + +# relative +if TYPE_CHECKING: + # relative + from .syft_object import SyftObject + + +class SyftObjectRegistry: + __object_transform_registry__: dict[str, Callable] = {} + __object_serialization_registry__: dict[str, dict[int, tuple]] = {} + __type_to_canonical_name__: dict[type, tuple[str, int]] = {} + + @classmethod + def register_cls( + cls, canonical_name: str, version: int, serde_attributes: tuple + ) -> None: + if canonical_name not in cls.__object_serialization_registry__: + cls.__object_serialization_registry__[canonical_name] = {} + cls.__object_serialization_registry__[canonical_name][version] = ( + serde_attributes + ) + + cls.__type_to_canonical_name__[serde_attributes[7]] = (canonical_name, version) + + @classmethod + def get_versions(cls, canonical_name: str) -> list[int]: + available_versions: dict = cls.__object_serialization_registry__.get( + canonical_name, + {}, + ) + return list(available_versions.keys()) + + @classmethod + def get_latest_version(cls, canonical_name: str) -> int: + available_versions = cls.get_versions(canonical_name) + if not available_versions: + return 0 + return sorted(available_versions, reverse=True)[0] + + @classmethod + def get_identifier_for_type(cls, obj: Any) -> tuple[str, int]: + """ + This is to create the string in nonrecursiveBlob + """ + return cls.__type_to_canonical_name__[obj] + + @classmethod + def get_canonical_name_version(cls, obj: Any) -> tuple[str, int]: + """ + Retrieves the canonical name for both objects and types. + + This function works for both objects and types, returning the canonical name + as a string. It handles various cases, including built-in types, instances of + classes, and enum members. + + If the object is not registered in the registry, a ValueError is raised. + + Examples: + get_canonical_name_version([1,2,3]) -> "list" + get_canonical_name_version(list) -> "type" + get_canonical_name_version(MyEnum.A) -> "MyEnum" + get_canonical_name_version(MyEnum) -> "type" + + Args: + obj: The object or type for which to get the canonical name. + + Returns: + The canonical name and version of the object or type. 
+ """ + + # for types we return "type" + if isinstance(obj, type): + return cls.__type_to_canonical_name__[type] + + obj_type = type(obj) + if obj_type in cls.__type_to_canonical_name__: + return cls.__type_to_canonical_name__[obj_type] + + raise ValueError( + f"Could not find canonical name for '{obj_type.__module__}.{obj_type.__name__}'" + ) + + @classmethod + def get_serde_properties(cls, canonical_name: str, version: int) -> tuple: + try: + return cls.__object_serialization_registry__[canonical_name][version] + except Exception: + # This is a hack for python 3.10 in which Any is not a type + # if the server uses py>3.10 and the client 3.10 this goes wrong + if canonical_name == "Any_typing._SpecialForm": + return cls.__object_serialization_registry__["Any"][version] + else: + if canonical_name not in cls.__object_serialization_registry__: + raise ValueError(f"Could not find {canonical_name} in registry") + elif ( + version not in cls.__object_serialization_registry__[canonical_name] + ): + raise ValueError( + f"Could not find {canonical_name} version {version} in registry" + ) + else: + raise + + @classmethod + def get_serde_class(cls, canonical_name: str, version: int) -> type["SyftObject"]: + serde_properties = cls.get_serde_properties(canonical_name, version) + return serde_properties[7] + + @classmethod + def has_serde_class(cls, canonical_name: str | None, version: int) -> bool: + # relative + return ( + canonical_name in cls.__object_serialization_registry__ + and version in cls.__object_serialization_registry__[canonical_name] + ) + + @classmethod + def add_transform( + cls, + klass_from: str, + version_from: int, + klass_to: str, + version_to: int, + method: Callable, + ) -> None: + mapping_string = f"{klass_from}_{version_from}_x_{klass_to}_{version_to}" + cls.__object_transform_registry__[mapping_string] = method + + @classmethod + def get_transform( + cls, type_from: type["SyftObject"], type_to: type["SyftObject"] + ) -> Callable: + # relative + from .syft_object import SyftBaseObject + from .syft_object import SyftObject + + for type_from_mro in type_from.mro(): + if issubclass(type_from_mro, SyftObject): + klass_from = type_from_mro.__canonical_name__ + version_from = type_from_mro.__version__ + else: + klass_from = type_from_mro.__name__ + version_from = None + for type_to_mro in type_to.mro(): + if ( + issubclass(type_to_mro, SyftBaseObject) + and type_to_mro != SyftBaseObject + ): + klass_to = type_to_mro.__canonical_name__ + version_to = type_to_mro.__version__ + else: + klass_to = type_to_mro.__name__ + version_to = None + + mapping_string = ( + f"{klass_from}_{version_from}_x_{klass_to}_{version_to}" + ) + if mapping_string in SyftObjectRegistry.__object_transform_registry__: + return SyftObjectRegistry.__object_transform_registry__[ + mapping_string + ] + raise Exception( + f"No mapping found for: {type_from} to {type_to} in" + f"the registry: {SyftObjectRegistry.__object_transform_registry__.keys()}" + ) diff --git a/packages/syft/src/syft/types/syncable_object.py b/packages/syft/src/syft/types/syncable_object.py index f7f6e56c61c..35759fd1aa6 100644 --- a/packages/syft/src/syft/types/syncable_object.py +++ b/packages/syft/src/syft/types/syncable_object.py @@ -7,7 +7,7 @@ from typing_extensions import Self # relative -from ..service.response import SyftError +from ..service.context import AuthedServiceContext from .syft_object import SYFT_OBJECT_VERSION_1 from .syft_object import SyftObject from .uid import UID @@ -29,5 +29,5 @@ def create_shareable_sync_copy(self, 
mock: bool) -> Self: update |= copy.deepcopy(self.__private_sync_attr_mocks__) return self.model_copy(update=update, deep=True) - def get_sync_dependencies(self, api: Any = None) -> list[UID] | SyftError: + def get_sync_dependencies(self, context: AuthedServiceContext) -> list[UID]: return [] diff --git a/packages/syft/src/syft/types/transforms.py b/packages/syft/src/syft/types/transforms.py index 3bd9a224a33..60e9722a029 100644 --- a/packages/syft/src/syft/types/transforms.py +++ b/packages/syft/src/syft/types/transforms.py @@ -7,14 +7,14 @@ from typing_extensions import Self # relative -from ..abstract_node import AbstractNode -from ..node.credentials import SyftVerifyKey +from ..abstract_server import AbstractServer +from ..server.credentials import SyftVerifyKey from ..service.context import AuthedServiceContext -from ..service.context import NodeServiceContext -from .grid_url import GridURL +from ..service.context import ServerServiceContext +from .server_url import ServerURL from .syft_object import Context from .syft_object import SyftBaseObject -from .syft_object import SyftObjectRegistry +from .syft_object_registry import SyftObjectRegistry from .uid import UID @@ -24,7 +24,7 @@ class NotNone: class TransformContext(Context): output: dict[str, Any] | None = None - node: AbstractNode | None = None + server: AbstractServer | None = None credentials: SyftVerifyKey | None = None obj: Any | None = None @@ -40,15 +40,17 @@ def from_context(cls, obj: Any, context: Context | None = None) -> Self: return t_context if hasattr(context, "credentials"): t_context.credentials = context.credentials - if hasattr(context, "node"): - t_context.node = context.node + if hasattr(context, "server"): + t_context.server = context.server return t_context - def to_node_context(self) -> NodeServiceContext: + def to_server_context(self) -> ServerServiceContext: if self.credentials: - return AuthedServiceContext(node=self.node, credentials=self.credentials) - if self.node: - return NodeServiceContext(node=self.node) + return AuthedServiceContext( + server=self.server, credentials=self.credentials + ) + if self.server: + return ServerServiceContext(server=self.server) return Context() @@ -143,9 +145,19 @@ def generate_id(context: TransformContext) -> TransformContext: return context +def generate_action_object_id(context: TransformContext) -> TransformContext: + if context.output is None: + return context + if "action_object_id" not in context.output or not isinstance( + context.output["action_object_id"], UID + ): + context.output["action_object_id"] = UID() + return context + + def validate_url(context: TransformContext) -> TransformContext: if context.output and context.output["url"] is not None: - context.output["url"] = GridURL.from_url(context.output["url"]).url_no_port + context.output["url"] = ServerURL.from_url(context.output["url"]).url_no_port return context @@ -155,11 +167,11 @@ def validate_email(context: TransformContext) -> TransformContext: return context -def str_url_to_grid_url(context: TransformContext) -> TransformContext: +def str_url_to_server_url(context: TransformContext) -> TransformContext: if context.output: url = context.output.get("url", None) if url is not None and isinstance(url, str): - context.output["url"] = GridURL.from_url(str) + context.output["url"] = ServerURL.from_url(str) return context @@ -172,13 +184,13 @@ def add_credentials(context: TransformContext) -> TransformContext: return add_credentials -def add_node_uid_for_key(key: str) -> Callable: - def 
add_node_uid(context: TransformContext) -> TransformContext: - if context.output is not None and context.node is not None: - context.output[key] = context.node.id +def add_server_uid_for_key(key: str) -> Callable: + def add_server_uid(context: TransformContext) -> TransformContext: + if context.output is not None and context.server is not None: + context.output[key] = context.server.id return context - return add_node_uid + return add_server_uid def generate_transform_wrapper( @@ -186,7 +198,7 @@ def generate_transform_wrapper( ) -> Callable: def wrapper( self: klass_from, - context: TransformContext | NodeServiceContext | None = None, + context: TransformContext | ServerServiceContext | None = None, ) -> klass_to: t_context = TransformContext.from_context(obj=self, context=context) for transform in transforms: diff --git a/packages/syft/src/syft/types/twin_object.py b/packages/syft/src/syft/types/twin_object.py index 458c69c0923..2da2cf3a8df 100644 --- a/packages/syft/src/syft/types/twin_object.py +++ b/packages/syft/src/syft/types/twin_object.py @@ -2,6 +2,7 @@ from __future__ import annotations # stdlib +import logging from typing import Any from typing import ClassVar @@ -11,15 +12,21 @@ from typing_extensions import Self # relative +from ..client.client import SyftClient from ..serde.serializable import serializable from ..service.action.action_object import ActionObject from ..service.action.action_object import TwinMode from ..service.action.action_types import action_types -from ..service.response import SyftError -from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..service.response import SyftSuccess +from ..service.response import SyftWarning +from ..types.syft_object import SYFT_OBJECT_VERSION_1 +from .errors import SyftException +from .result import as_result from .syft_object import SyftObject from .uid import UID +logger = logging.getLogger(__name__) + def to_action_object(obj: Any) -> ActionObject: if isinstance(obj, ActionObject): @@ -33,7 +40,7 @@ def to_action_object(obj: Any) -> ActionObject: @serializable() class TwinObject(SyftObject): __canonical_name__ = "TwinObject" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 __attr_searchable__: ClassVar[list[str]] = [] @@ -81,15 +88,29 @@ def mock(self) -> ActionObject: mock.id = twin_id return mock - def _save_to_blob_storage(self) -> SyftError | None: - # Set node location and verify key + @as_result(SyftException) + def _save_to_blob_storage( + self, allow_empty: bool = False + ) -> SyftSuccess | SyftWarning: + # Set server location and verify key self.private_obj._set_obj_location_( - self.syft_node_location, + self.syft_server_location, + self.syft_client_verify_key, + ) + self.mock_obj._set_obj_location_( + self.syft_server_location, self.syft_client_verify_key, ) - # self.mock_obj._set_obj_location_( - # self.syft_node_location, - # self.syft_client_verify_key, - # ) - return self.private_obj._save_to_blob_storage() - # self.mock_obj._save_to_blob_storage() + self.mock_obj._save_to_blob_storage(allow_empty=allow_empty).unwrap() + return self.private_obj._save_to_blob_storage(allow_empty=allow_empty).unwrap() + + def send(self, client: SyftClient, add_storage_permission: bool = True) -> Any: + self._set_obj_location_(client.id, client.verify_key) + blob_store_result = self._save_to_blob_storage().unwrap() + if isinstance(blob_store_result, SyftWarning): + logger.debug(blob_store_result.message) + res = client.api.services.action.set( + self, + 
add_storage_permission=add_storage_permission, + ) + return res diff --git a/packages/syft/src/syft/types/uid.py b/packages/syft/src/syft/types/uid.py index 88c55512b0b..de364d7b10a 100644 --- a/packages/syft/src/syft/types/uid.py +++ b/packages/syft/src/syft/types/uid.py @@ -1,24 +1,30 @@ +# future +from __future__ import annotations + # stdlib from collections.abc import Callable from collections.abc import Sequence import hashlib +import logging from typing import Any -from typing import Union import uuid from uuid import UUID as uuid_type +# third party +from typing_extensions import Self + # relative from ..serde.serializable import serializable -from ..util.logger import critical -from ..util.logger import traceback_and_raise +logger = logging.getLogger(__name__) -@serializable(attrs=["value"]) + +@serializable(attrs=["value"], canonical_name="UID", version=1) class UID: """A unique ID for every Syft object. This object creates a unique ID for every object in the Syft - ecosystem. This ID is guaranteed to be unique for the node on + ecosystem. This ID is guaranteed to be unique for the server on which it is initialized and is very likely to be unique across the whole ecosystem (because it is long and randomly generated). @@ -38,7 +44,7 @@ class UID: __slots__ = "value" value: uuid_type - def __init__(self, value: Union[uuid_type, str, bytes, "UID"] | None = None): + def __init__(self, value: Self | uuid_type | str | bytes | None = None): """Initializes the internal id using the uuid package. This initializes the object. Normal use for this object is @@ -72,16 +78,15 @@ def __init__(self, value: Union[uuid_type, str, bytes, "UID"] | None = None): self.value = uuid.uuid4() if value is None else value @staticmethod - def from_string(value: str) -> "UID": + def from_string(value: str) -> UID: try: return UID(value=uuid.UUID(value)) except ValueError as e: - critical(f"Unable to convert {value} to UUID. {e}") - traceback_and_raise(e) - raise + logger.critical(f"Unable to convert {value} to UUID. {e}") + raise e @staticmethod - def with_seed(value: str) -> "UID": + def with_seed(value: str) -> UID: md5 = hashlib.md5(value.encode("utf-8"), usedforsecurity=False) return UID(md5.hexdigest()) @@ -149,6 +154,10 @@ def is_valid_uuid(value: Any) -> bool: def no_dash(self) -> str: return str(self.value).replace("-", "") + @property + def hex(self) -> str: + return self.value.hex + def __repr__(self) -> str: """Returns a human-readable version of the ID @@ -188,25 +197,25 @@ def short(self) -> str: return str(self.value)[:8] @property - def id(self) -> "UID": + def id(self) -> UID: return self - @staticmethod - def _check_or_convert(value: Union[str, "UID", uuid.UUID]) -> "UID": + @classmethod + def _check_or_convert(cls, value: str | uuid.UUID | UID) -> UID: if isinstance(value, uuid.UUID): return UID(value) elif isinstance(value, str): return UID.from_string(value) - elif isinstance(value, UID): + elif isinstance(value, cls): return value else: # Ask @Madhava , can we check for invalid types , even though type annotation is specified. 
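Typical use of the `UID` type shown above might look like the following sketch. It assumes the `syft` package is importable and only exercises the members visible in this diff (`from_string`, `with_seed`, `no_dash`, `short`, and the new `hex` property); printed values are illustrative.

```python
from syft.types.uid import UID

uid = UID()                           # random, uuid4-backed identifier
again = UID.from_string(uid.no_dash)  # round-trips through its dashless hex form
assert uid.value == again.value

seeded = UID.with_seed("dataset-42")  # deterministic: md5 of the seed string
assert seeded.value == UID.with_seed("dataset-42").value

print(uid.short())  # first 8 characters, handy for logs
print(seeded.hex)   # bare hex digits via the `hex` property
```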
return ValueError( # type: ignore - f"Incorrect value,type:{value,type(value)} for conversion to UID, expected Union[str,UID,UUID]" + f"Incorrect value,type:{value,type(value)} for conversion to UID, expected str | uuid.UUID | Self" ) -@serializable(attrs=["syft_history_hash"]) +@serializable(attrs=["syft_history_hash"], canonical_name="LineageID", version=1) class LineageID(UID): """Extended UID containing a history hash as well, which is used for comparisons.""" @@ -214,7 +223,7 @@ class LineageID(UID): def __init__( self, - value: Union[uuid_type, str, bytes, "LineageID"] | None = None, + value: Self | UID | uuid_type | str | bytes | None = None, syft_history_hash: int | None = None, ): if isinstance(value, LineageID): diff --git a/packages/syft/src/syft/util/__init__.py b/packages/syft/src/syft/util/__init__.py index f6394760c7b..e69de29bb2d 100644 --- a/packages/syft/src/syft/util/__init__.py +++ b/packages/syft/src/syft/util/__init__.py @@ -1,2 +0,0 @@ -# relative -from .schema import generate_json_schemas # noqa: F401 diff --git a/packages/syft/src/syft/util/api_snapshot/api_snapshot.py b/packages/syft/src/syft/util/api_snapshot/api_snapshot.py new file mode 100644 index 00000000000..a802bddd60d --- /dev/null +++ b/packages/syft/src/syft/util/api_snapshot/api_snapshot.py @@ -0,0 +1,243 @@ +# stdlib +from collections import OrderedDict +import hashlib +from inspect import Signature +import json +import os +from pathlib import Path + +# relative +from ...service.service import ServiceConfigRegistry +from ...service.warnings import APIEndpointWarning +from ..util import get_root_data_path +from ..util import str_to_bool +from .json_diff import json_diff + +API_SPEC_JSON_FILENAME = "syft_api_spec.json" +API_DIFF_JSON_FILENAME = "syft_api_diff.json" + + +def api_snapshot_dir() -> Path: + """A helper function to get the path of the API snapshot directory.""" + return Path(os.path.abspath(str(Path(__file__).parent))) + + +class SyftAPISnapshot: + def __init__( + self, + filename: str = API_SPEC_JSON_FILENAME, + stable_release: bool = False, + ) -> None: + """ + Initialize the SyftAPISnapshot object. + + Args: + filename (str): The name of the JSON file to load the API snapshot from. + Defaults to API_SPEC_JSON_FILENAME. + """ + + filename = self.get_filename(filename, stable_release) + self.file_path = api_snapshot_dir() / filename + self.history = self.load_map() + self.state = self.build_map() + + def get_filename(self, filename: str, stable_release: bool) -> str: + """ + Get the modified filename based on the stable_release flag. + + Args: + filename (str): The original filename. + stable_release (bool): Flag indicating if it's a stable release. + + Returns: + str: The modified filename. + """ + if stable_release: + return f"{filename.split('.')[0]}_stable.json" + else: + return f"{filename.split('.')[0]}_beta.json" + + @staticmethod + def extract_service_name(path: str) -> str: + """ + Extract the service name from the given path. + + Args: + path (str): The path of the service. + + Returns: + str: The extracted service name. + """ + return path.split(".")[0].capitalize() + + @staticmethod + def extract_arguments(signature: Signature) -> dict: + """ + Extract the arguments from the given signature. + + Args: + signature (Signature): The signature object. + + Returns: + dict: The extracted arguments as a dictionary. 
+ """ + signature_kwargs = { + f"{v.name}": f"{v.annotation}" for k, v in signature.parameters.items() + } + return OrderedDict(sorted(signature_kwargs.items())) + + @staticmethod + def get_role_level(roles: list) -> str: + """ + Get the role level from the given list of roles. + + Args: + roles (list): The list of roles. + + Returns: + str: The role level. + """ + return sorted(roles)[0].name + "_ROLE_LEVEL" + + @staticmethod + def extract_warning_info(warning: APIEndpointWarning) -> dict | str: + """ + Extract the warning information from the given APIEndpointWarning object. + + Args: + warning (APIEndpointWarning): The APIEndpointWarning object. + + Returns: + dict: The extracted warning information. + """ + if not warning: + return "" + + return { + "name": f"{warning.__class__.__name__}", + "confirmation": warning.confirmation, + "enabled": warning.enabled, + } + + @staticmethod + def generate_hash(api_map: OrderedDict) -> str: + """ + Generate a hash for the given API map. + + Args: + api_map (OrderedDict): The API map. + + Returns: + str: The generated hash. + """ + return hashlib.sha256(json.dumps(api_map).encode()).hexdigest() + + def load_map(self) -> OrderedDict: + """ + Load the API map from the JSON file. + + Returns: + OrderedDict: The loaded API map. + """ + + if not self.file_path.exists(): + return OrderedDict() + + return OrderedDict(json.loads(self.file_path.read_text())) + + def build_map(self) -> OrderedDict: + """ + Build the API map. + + Returns: + OrderedDict: The built API map. + """ + api_details = {} + for ( + _, + service_config, + ) in ServiceConfigRegistry.__service_config_registry__.items(): + service_name = self.extract_service_name(service_config.private_path) + warning = service_config.warning + signature = service_config.signature + role_level = self.get_role_level(service_config.roles) + api_detail = { + "public_path": service_config.public_path, + "RBAC_permission": f"{role_level}", + "signature": self.extract_arguments(service_config.signature), + "return_type": f"{signature.return_annotation}" if signature else "", + "warning": self.extract_warning_info(warning), + # "unwrap_on_success": getattrservice_config.unwrap_on_success, + } + api_detail["hash"] = self.generate_hash(api_detail) + api_details[f"{service_name}.{service_config.public_path}"] = OrderedDict( + api_detail + ) + + api_details_ordered = OrderedDict(sorted(api_details.items())) + return api_details_ordered + + def save_as_json(self) -> None: + """ + Save the API map as a JSON file. + """ + self.file_path.write_text(json.dumps(self.state, indent=2)) + + def calc_diff(self, save: bool = False) -> dict: + """ + Calculate the difference between the current API snapshot and the previous one. + + Args: + save (bool): Whether to save the difference as a JSON file. Defaults to False. + + Returns: + dict: The difference between the API snapshots. + """ + diff = json_diff(self.history, self.state) + if save: + diff_file_path = get_root_data_path() / API_DIFF_JSON_FILENAME + diff_file_path.write_text(json.dumps(diff, indent=2)) + + return diff + + +def get_api_snapshot(stable_release: bool = False) -> SyftAPISnapshot: + """ + Retrieves the API snapshot. + """ + snapshot = SyftAPISnapshot( + filename=API_SPEC_JSON_FILENAME, + stable_release=stable_release, + ) + return snapshot + + +def take_api_snapshot() -> SyftAPISnapshot: + """ + Takes a stable release snapshot of the API and saves it as a JSON file. 
+ """ + + # Get the stable_release flag from the environment variable + stable_release = str_to_bool(os.environ.get("STABLE_RELEASE", "False")) + + snapshot = get_api_snapshot(stable_release=stable_release) + snapshot.save_as_json() + print("API snapshot saved at: ", snapshot.file_path) + return snapshot + + +def show_api_diff() -> None: + """ + Calculates the difference between the current API snapshot and the previous one, + saves it as a JSON file, and returns the difference. + """ + + # Get the stable_release flag from the environment variable + stable_release = str_to_bool(os.environ.get("STABLE_RELEASE", "False")) + + snapshot = get_api_snapshot(stable_release=stable_release) + + # Calculate the difference between the current API snapshot and the previous one + diff = snapshot.calc_diff(save=True) + print(json.dumps(diff, indent=2)) + print("Generated API diff file at: ", get_root_data_path() / API_DIFF_JSON_FILENAME) diff --git a/packages/syft/src/syft/util/api_snapshot/json_diff.py b/packages/syft/src/syft/util/api_snapshot/json_diff.py new file mode 100644 index 00000000000..3c1cb86aefb --- /dev/null +++ b/packages/syft/src/syft/util/api_snapshot/json_diff.py @@ -0,0 +1,77 @@ +# stdlib + + +def json_diff(json1: dict, json2: dict) -> dict: + """ + Calculate the difference between two JSON objects and return the differences as a JSON object. + + Args: + json1 (dict): The first JSON object. + json2 (dict): The second JSON object. + + Returns: + dict: A JSON object representing the differences. + """ + + def compare_dicts(d1: dict, d2: dict) -> dict: + diffs = {} + + # Keys in d1 but not in d2 (deleted) + for key in d1.keys() - d2.keys(): + diffs[key] = {"status": "deleted", "value": d1[key]} + + # Keys in d2 but not in d1 (added) + for key in d2.keys() - d1.keys(): + diffs[key] = {"status": "added", "value": d2[key]} + + # Keys in both, but with different values (updated) + for key in d1.keys() & d2.keys(): + if isinstance(d1[key], dict) and isinstance(d2[key], dict): + # Recursively compare nested dictionaries + nested_diff_dict = compare_dicts(d1[key], d2[key]) + if nested_diff_dict: + diffs[key] = nested_diff_dict + elif isinstance(d1[key], list) and isinstance(d2[key], list): + # Compare lists + nested_diff_list = compare_lists(d1[key], d2[key]) + if nested_diff_list: + diffs[key] = nested_diff_list # type: ignore + elif d1[key] != d2[key]: + diffs[key] = { + "status": "updated", + "old_value": d1[key], + "new_value": d2[key], + } + + return diffs + + def compare_lists(l1: list, l2: list) -> list | None: + diffs = [] + max_len = max(len(l1), len(l2)) + + for i in range(max_len): + if i >= len(l1): + diffs.append({"status": "added", "value": l2[i]}) + elif i >= len(l2): + diffs.append({"status": "deleted", "value": l1[i]}) + elif isinstance(l1[i], dict) and isinstance(l2[i], dict): + # Recursively compare dictionaries in lists + nested_diff_dict = compare_dicts(l1[i], l2[i]) + if nested_diff_dict: + diffs.append(nested_diff_dict) + elif isinstance(l1[i], list) and isinstance(l2[i], list): + # Recursively compare nested lists + nested_diff_list = compare_lists(l1[i], l2[i]) + if nested_diff_list: + diffs.append(nested_diff_list) + elif l1[i] != l2[i]: + diffs.append( + {"status": "updated", "old_value": l1[i], "new_value": l2[i]} + ) + + return diffs if diffs else None + + # Generate the JSON diff + json_diff_result = compare_dicts(json1, json2) + + return json_diff_result diff --git a/packages/syft/src/syft/util/api_snapshot/syft_api_spec_beta.json 
b/packages/syft/src/syft/util/api_snapshot/syft_api_spec_beta.json new file mode 100644 index 00000000000..583041fcc78 --- /dev/null +++ b/packages/syft/src/syft/util/api_snapshot/syft_api_spec_beta.json @@ -0,0 +1,2371 @@ +{ + "Actionservice.action.delete": { + "public_path": "action.delete", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "soft_delete": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "7188dd0b7944ed26955b97a6a6149586fe144db59970d23f61af190aa54497c7" + }, + "Actionservice.action.execute": { + "public_path": "action.execute", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "action": "" + }, + "return_type": "", + "warning": "", + "hash": "71de9ba14014e015a5724978ffdec210b5b844c88dabded615580334bf9b0ef5" + }, + "Actionservice.action.exists": { + "public_path": "action.exists", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "obj_id": "" + }, + "return_type": "", + "warning": "", + "hash": "2daf41389eef09019c1f114f948ccc47c7094f94484b7ae467b06de224efb36c" + }, + "Actionservice.action.get": { + "public_path": "action.get", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "resolve_nested": "", + "twin_mode": "", + "uid": "" + }, + "return_type": "syft.service.action.action_object.ActionObject | syft.types.twin_object.TwinObject", + "warning": "", + "hash": "661a736ced3c33e3312986f31fb7be79410053708eb64a126565274866ff075d" + }, + "Actionservice.action.get_mock": { + "public_path": "action.get_mock", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "002fc053488b5203bc6133344e4c81507e31c66cf6820e60d0a9c07b82036ffa" + }, + "Actionservice.action.get_pointer": { + "public_path": "action.get_pointer", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "874f3ab136390f7b15755f7b466be42228385d865d43fe80ae0f33b161863d47" + }, + "Actionservice.action.has_storage_permission": { + "public_path": "action.has_storage_permission", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "d66a3462cd2aae6361ed8f92607125c8e0814273a6cabce8c1ec801648040f27" + }, + "Actionservice.action.is_resolved": { + "public_path": "action.is_resolved", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "8e980361137993b3f6a343728186ef54ca83f51dcd88a7bd3519ca603e4f130b" + }, + "Actionservice.action.np_array": { + "public_path": "action.np_array", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "data": "typing.Any" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "f0e75b79b4ce7349430bb1e62677c353e33ddcf998b5bf5c26328f246049fbf5" + }, + "Actionservice.action.set": { + "public_path": "action.set", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "action_object": "syft.service.action.action_object.ActionObject | syft.types.twin_object.TwinObject", + "add_storage_permission": "", + "ignore_detached_objs": "" + }, + "return_type": "", + "warning": "", + "hash": "c7d28af8ba385a5ca2369d00c388a1d02348a0e73c0a3e5b92d0e4da07966e33" + }, + "Apiservice.api.add": { + "public_path": "api.add", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "endpoint": "syft.service.api.api.CreateTwinAPIEndpoint | syft.service.api.api.TwinAPIEndpoint" + }, + "return_type": "", + "warning": "", + "hash": 
"792b2fb4d7b28b85d25a276872518122b0c487e362a4091fb0a8d9fbf203d71c" + }, + "Apiservice.api.api_endpoints": { + "public_path": "api.api_endpoints", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.api.api.TwinAPIEndpointView]", + "warning": "", + "hash": "a9691c938047b00ede6b417e37e2c4d6a6fce3513ac1ebf3d5d9cad9119bd9ec" + }, + "Apiservice.api.call": { + "public_path": "api.call", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "log_id": "syft.types.uid.UID | None", + "path": "" + }, + "return_type": "", + "warning": "", + "hash": "8b10d896d67b7074d4fcdf64094b56776adc266225b5c3e3d4ba91099c757ba8" + }, + "Apiservice.api.call_in_jobs": { + "public_path": "api.call_in_jobs", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "path": "" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "d4b8cb0fb3a8427ebf9de1d1dac3b48ef270bc3a33c283e190de6f0fe84f321b" + }, + "Apiservice.api.call_private": { + "public_path": "api.call_private", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "log_id": "syft.types.uid.UID | None", + "path": "" + }, + "return_type": "", + "warning": "", + "hash": "4099aeeb2f1e23fd47843a12defd5a8a54b3706d67b8a26a2d609a8446f97dfb" + }, + "Apiservice.api.call_private_in_jobs": { + "public_path": "api.call_private_in_jobs", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "path": "" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "15c33f832c617790fd54a7c52334778cacb0c56f8b18bad9c6f4ecea37a1766f" + }, + "Apiservice.api.call_public": { + "public_path": "api.call_public", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "log_id": "syft.types.uid.UID | None", + "path": "" + }, + "return_type": "", + "warning": "", + "hash": "b7da0456e21a375389dc1059e1f95ba37f0e0dab184aa673917f6739be6a8182" + }, + "Apiservice.api.call_public_in_jobs": { + "public_path": "api.call_public_in_jobs", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "path": "" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "33730cbb7819450ea8828954cf05bccbabe9811b58909a49b5e00f95efcf5167" + }, + "Apiservice.api.delete": { + "public_path": "api.delete", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "endpoint_path": "" + }, + "return_type": "", + "warning": "", + "hash": "7cbc47eff81bbc9c87e9f784eb45bd23f9d716e6a37475f1ec6322263b636c40" + }, + "Apiservice.api.exists": { + "public_path": "api.exists", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "0a6b260535469d49ff5959783af8ab447addedbdb9acf8df50b95f06c0b3d42e" + }, + "Apiservice.api.get": { + "public_path": "api.get", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "api_path": "" + }, + "return_type": "", + "warning": "", + "hash": "8f37b25ba23d940645d8a5096408c0908dec3b0b43c3aab5230f6773a777b71a" + }, + "Apiservice.api.get_all": { + "public_path": "api.get_all", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.api.api.TwinAPIEndpoint]", + "warning": "", + "hash": "eec93ade9eee93398db1cb855837069d339f5f744f73c3b747de848286cd82c4" + }, + 
"Apiservice.api.get_private_context": { + "public_path": "api.get_private_context", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "path": "" + }, + "return_type": "dict[str, typing.Any]", + "warning": "", + "hash": "131a77e8e1bf9f93f0aa8dee5ebc90e11d225a34fdd5a6c2a69c41e2e71362e5" + }, + "Apiservice.api.get_public_context": { + "public_path": "api.get_public_context", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "path": "" + }, + "return_type": "dict[str, typing.Any]", + "warning": "", + "hash": "8d190d6d2eaf7fdd5dfc3d217d0ca78f9933d28e8e07182a6f3cf62fbb55b811" + }, + "Apiservice.api.set_settings": { + "public_path": "api.set_settings", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "api_path": "", + "both": "", + "mock": "", + "private": "", + "settings": "" + }, + "return_type": "", + "warning": "", + "hash": "2bb16f38b222529833f12992caef1ef4f43de0e8b9aa7be0d332310daf2f7976" + }, + "Apiservice.api.set_state": { + "public_path": "api.set_state", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "api_path": "", + "both": "", + "mock": "", + "private": "", + "state": "" + }, + "return_type": "", + "warning": "", + "hash": "db2b0966fb2e28b775c47834546d98b33f5e10f511eaf5752f953e3f2bcaeed1" + }, + "Apiservice.api.update": { + "public_path": "api.update", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "endpoint_path": "", + "endpoint_timeout": "int | None", + "hide_mock_definition": "bool | None", + "mock_function": "syft.service.api.api.Endpoint | None", + "private_function": "syft.service.api.api.Endpoint | None" + }, + "return_type": "", + "warning": "", + "hash": "7d426096b634f4883d16bb05f5fa4ccc6404915dc834f294467baa1ff4994128" + }, + "Apiservice.api.view": { + "public_path": "api.view", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "path": "" + }, + "return_type": "", + "warning": "", + "hash": "572b69c12966396e98f7f37298c6402caf0813acea66dba95ddd2af016b18dcc" + }, + "Attestationservice.attestation.get_cpu_attestation": { + "public_path": "attestation.get_cpu_attestation", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "raw_token": "" + }, + "return_type": "str | syft.service.response.SyftSuccess", + "warning": "", + "hash": "4246dcc86b4dfdbc13b23a6314f1e8591f2775e64dc52bd6fe2e55602e0a0498" + }, + "Attestationservice.attestation.get_gpu_attestation": { + "public_path": "attestation.get_gpu_attestation", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "raw_token": "" + }, + "return_type": "str | syft.service.response.SyftSuccess", + "warning": "", + "hash": "e0124c33246e750a7b22ad3f626a7eef03f28fb2471d9ba29f8f071ce7629032" + }, + "Blobstorageservice.blob_storage.allocate": { + "public_path": "blob_storage.allocate", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "obj": "" + }, + "return_type": "syft.store.blob_storage.on_disk.OnDiskBlobDeposit | syft.store.blob_storage.seaweedfs.SeaweedFSBlobDeposit", + "warning": "", + "hash": "4a65f796fa149a2335d9763a24c8e64539ea9f609b4e42129ca826152ae2678e" + }, + "Blobstorageservice.blob_storage.allocate_for_user": { + "public_path": "blob_storage.allocate_for_user", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "obj": "", + "uploaded_by": "" + }, + "return_type": "syft.store.blob_storage.on_disk.OnDiskBlobDeposit | syft.store.blob_storage.seaweedfs.SeaweedFSBlobDeposit", + "warning": "", + "hash": "6641df95c2b76ead317755b15928d0baf353e86f1aec1feabc95f9f48f6c032e" + }, + "Blobstorageservice.blob_storage.delete": { 
+ "public_path": "blob_storage.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "4dfb5250b5d8b07c7e5580f4906f2641cdd532c1ea8eb9b82f3f3d649422b696" + }, + "Blobstorageservice.blob_storage.get_all": { + "public_path": "blob_storage.get_all", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.types.blob_storage.BlobStorageEntry]", + "warning": "", + "hash": "e65c241f16475e2d06aa56695c606f1b8238fbe2529e80f4c537ccba27528f61" + }, + "Blobstorageservice.blob_storage.get_by_uid": { + "public_path": "blob_storage.get_by_uid", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "99b2ae3648407efeee2bb70021534af55d2169fa7942b1116aadb1b58044c475" + }, + "Blobstorageservice.blob_storage.get_files_from_bucket": { + "public_path": "blob_storage.get_files_from_bucket", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "bucket_name": "" + }, + "return_type": "", + "warning": "", + "hash": "3ae6052377fc7224a087bd38b28d5dcf4c60f3408c9604b66d91051d6ffe36b2" + }, + "Blobstorageservice.blob_storage.get_metadata": { + "public_path": "blob_storage.get_metadata", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "793989fe94223e81ebce742640a456eb19543800f145c4a4a4b0058b0558c7b3" + }, + "Blobstorageservice.blob_storage.mark_write_complete": { + "public_path": "blob_storage.mark_write_complete", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "etags": "", + "no_lines": "int | None", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "4fdf01eae7e41b52137b3b248cb1826cbaa3246ddd44f32fffa0e9610184e9ec" + }, + "Blobstorageservice.blob_storage.mount_azure": { + "public_path": "blob_storage.mount_azure", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "account_key": "", + "account_name": "", + "bucket_name": "", + "container_name": "", + "use_direct_connections": "" + }, + "return_type": "", + "warning": "", + "hash": "a4efc044cd27e3d8ef96603c1c8ab008ceca8c0dbaae8bc4b12a3295fbe0540e" + }, + "Blobstorageservice.blob_storage.read": { + "public_path": "blob_storage.read", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "d00c264dd04ca50531390748ce0409cda67d32d3c5e85e3335108dd11ec07c98" + }, + "Blobstorageservice.blob_storage.write_to_disk": { + "public_path": "blob_storage.write_to_disk", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "data": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "8384cd726618f43f38e3705ec7f0625711171e51ca4cb9713232c9b69f4ed231" + }, + "Codehistoryservice.code_history.delete": { + "public_path": "code_history.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "77b1ae2eebcbb7b6df33b5c1cfd2692c1865d4558caafb04be042f3e87f69d27" + }, + "Codehistoryservice.code_history.get": { + "public_path": "code_history.get", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "258594ec0e7e2f7475030024ceabf04bd7741a0f80dfb8fe68c86afd6e02ac96" + }, + "Codehistoryservice.code_history.get_all": { + "public_path": "code_history.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + 
"return_type": "list[syft.service.code_history.code_history.CodeHistory]", + "warning": "", + "hash": "7676007066a95921c60339dbb903c8cd9b1419e88baca830e7e87cf739a040a3" + }, + "Codehistoryservice.code_history.get_by_name_and_user_email": { + "public_path": "code_history.get_by_name_and_user_email", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "service_func_name": "", + "user_email": "", + "user_id": "" + }, + "return_type": "list[syft.service.code_history.code_history.CodeHistory]", + "warning": "", + "hash": "fdd6237cbaba33e0a166eaabbfafce3cadef26ac5e811c21520e4d0e3886484d" + }, + "Codehistoryservice.code_history.get_histories": { + "public_path": "code_history.get_histories", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "24ee3c1f7d3ee5ffd283a71f1dbb72782e35fc1f3792b47b27435f3b91aee7f9" + }, + "Codehistoryservice.code_history.get_history": { + "public_path": "code_history.get_history", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "1725971eaeefdf14c09721dc0a523d2122c20d896bfa56e97a2f24ecbf4f1909" + }, + "Codehistoryservice.code_history.get_history_for_user": { + "public_path": "code_history.get_history_for_user", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "email": "" + }, + "return_type": "", + "warning": "", + "hash": "9f9eef0b862420b25bf7e2d41a966f3539f2381afb5b6d28f97e206bf4dfc6ee" + }, + "Codehistoryservice.code_history.submit_version": { + "public_path": "code_history.submit_version", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "code": "syft.service.code.user_code.SubmitUserCode | syft.service.code.user_code.UserCode", + "comment": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "c0ad156201c844651b8ae181eb52e5096884dc1456d12e13ec11e4c590cc12c6" + }, + "Datasetservice.dataset.add": { + "public_path": "dataset.add", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "dataset": "" + }, + "return_type": "", + "warning": "", + "hash": "b3cb7f311fe96a7e3e7a529ae7d8c8ccc73b119215c94b57f7ed901987bee2f2" + }, + "Datasetservice.dataset.delete": { + "public_path": "dataset.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "delete_assets": "", + "uid": "" + }, + "return_type": "", + "warning": { + "name": "HighSideCRUDWarning", + "confirmation": true, + "enabled": true + }, + "hash": "7925e8e36c29cb4aad184418275f74448cb3768de8c0134190e2cde103a655fb" + }, + "Datasetservice.dataset.get_all": { + "public_path": "dataset.get_all", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "page_index": "int | None", + "page_size": "int | None" + }, + "return_type": "syft.service.dataset.dataset.DatasetPageView | syft.types.dicttuple.DictTuple[str, syft.service.dataset.dataset.Dataset]", + "warning": { + "name": "CRUDReminder", + "confirmation": false, + "enabled": true + }, + "hash": "6f7b1f3e8e5b535dc79ce0e6c9346bfc94a39aebaa3d6ce3abbd546f05a34bf5" + }, + "Datasetservice.dataset.get_assets_by_action_id": { + "public_path": "dataset.get_assets_by_action_id", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "list[syft.service.dataset.dataset.Asset]", + "warning": "", + "hash": "1de4213bb01018864816fdd5fafa9c015311288bcebf959ed2e024d1650d3213" + }, + "Datasetservice.dataset.get_by_action_id": { + "public_path": "dataset.get_by_action_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + 
"signature": { + "uid": "" + }, + "return_type": "list[syft.service.dataset.dataset.Dataset]", + "warning": "", + "hash": "63072d0181c8dfe3efe57c2034e0062e0e6bc86af38f95406ddadb9fe1580ac2" + }, + "Datasetservice.dataset.get_by_id": { + "public_path": "dataset.get_by_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "2fe0717a4fcbc376f5f3ebc9f095f8065a063e7ef4b23234679dbade3176ad43" + }, + "Datasetservice.dataset.search": { + "public_path": "dataset.search", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "name": "", + "page_index": "int | None", + "page_size": "int | None" + }, + "return_type": "syft.service.dataset.dataset.DatasetPageView | syft.types.dicttuple.DictTuple[str, syft.service.dataset.dataset.Dataset]", + "warning": "", + "hash": "3568d899e5b4c3fc13b08d14eb10fc5ed92d3c94989f069941b576290c095a19" + }, + "Datasubjectservice.data_subject.add": { + "public_path": "data_subject.add", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "data_subject": "" + }, + "return_type": "", + "warning": "", + "hash": "4ffae9b8c21d8dd399b3b89f2f343f9f6ce6cad505bb0466b2d85c7c1ba2756c" + }, + "Datasubjectservice.data_subject.get_all": { + "public_path": "data_subject.get_all", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.data_subject.data_subject.DataSubject]", + "warning": "", + "hash": "e2bb85406d13d14b0d61fb64b99347325c7964ab2d841cf10f8f5d3c7ec8ab62" + }, + "Datasubjectservice.data_subject.get_by_name": { + "public_path": "data_subject.get_by_name", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "name": "" + }, + "return_type": "", + "warning": "", + "hash": "667ed49796f3c706375dfbc4421d9f462bd9d8986a254866fc0aaca431921f60" + }, + "Datasubjectservice.data_subject.get_members": { + "public_path": "data_subject.get_members", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "data_subject_name": "" + }, + "return_type": "list[syft.service.data_subject.data_subject.DataSubject]", + "warning": "", + "hash": "4b0701f7b1dd23165f4e7229a06a3f5d9ce31fd69b6bf3e819a9dd23c68dc58f" + }, + "Jobservice.job.add_read_permission_job_for_code_owner": { + "public_path": "job.add_read_permission_job_for_code_owner", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "job": "", + "user_code": "" + }, + "return_type": "None", + "warning": "", + "hash": "cdb6b543910b2464a76f49eeae6640f47369e6c1da05b3ecd0868e3245e4ecfe" + }, + "Jobservice.job.add_read_permission_log_for_code_owner": { + "public_path": "job.add_read_permission_log_for_code_owner", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "log_id": "", + "user_code": "" + }, + "return_type": "None", + "warning": "", + "hash": "64fe924db47a478187578b06799ca543e88d2a93da7bd69ecb35c482f162224e" + }, + "Jobservice.job.create_job_for_user_code_id": { + "public_path": "job.create_job_for_user_code_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "add_code_owner_read_permissions": "", + "log_stderr": "", + "log_stdout": "", + "result": "syft.service.action.action_object.ActionObject | None", + "status": "", + "user_code_id": "" + }, + "return_type": "", + "warning": "", + "hash": "2bc0ee6f4a40b648d56a7c7aed852984bd0eae15c677368b9b30a56dfbb84446" + }, + "Jobservice.job.delete": { + "public_path": "job.delete", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + 
"hash": "3024a04ad5e7b2b37672c057aceeb8f8cd45fc5d57a9927bd2f3ba293f1f57cc" + }, + "Jobservice.job.get": { + "public_path": "job.get", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "2e3110b40381c9ffbdb6db828af1cfd19e15b04f24e4fc63971f05796d6e391c" + }, + "Jobservice.job.get_active": { + "public_path": "job.get_active", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.job.job_stash.Job]", + "warning": "", + "hash": "56c9476b920e66985109c1331231b679d9ab17d38b3b2ea8a1a380d18523650d" + }, + "Jobservice.job.get_all": { + "public_path": "job.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.job.job_stash.Job]", + "warning": "", + "hash": "4bf302170ced81d766aec017b45ac04dc56bd0a059744604664961af53f49101" + }, + "Jobservice.job.get_by_result_id": { + "public_path": "job.get_by_result_id", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "result_id": "" + }, + "return_type": "", + "warning": "", + "hash": "301d4c22b206ff7a6ce8c7b48279140570452e791bd4843344b767f37527e82e" + }, + "Jobservice.job.get_by_user_code_id": { + "public_path": "job.get_by_user_code_id", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "user_code_id": "" + }, + "return_type": "list[syft.service.job.job_stash.Job]", + "warning": "", + "hash": "e6496d263e8a15e4dac3da7d73d1e326a94332543f5ad5506729b95c3b4896c5" + }, + "Jobservice.job.get_subjobs": { + "public_path": "job.get_subjobs", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "list[syft.service.job.job_stash.Job]", + "warning": "", + "hash": "6207a08d00ba36bdf676a08f260f167d99dd5ca8f1d4d417459522d812a4edfa" + }, + "Jobservice.job.kill": { + "public_path": "job.kill", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "id": "" + }, + "return_type": "", + "warning": "", + "hash": "05835718234e2001d7af125baa27e974972c658fc842d92513d7a5a98009655c" + }, + "Jobservice.job.restart": { + "public_path": "job.restart", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "760a1deabe7da438c1d160fa6b76c99fcc5519a3d9677f9b2e26ece02d43fcc0" + }, + "Jobservice.job.update": { + "public_path": "job.update", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "job": "" + }, + "return_type": "", + "warning": "", + "hash": "ae79a1a3994120adc1fa2562e29a97a60d92e327404c22226c0e5134f6a6bd3b" + }, + "Logservice.log.add": { + "public_path": "log.add", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "job_id": "", + "stderr": "", + "stdout": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "5089dc491838e74168c8242f6f7335539dba0692f10f96a58d4c62c362ba917c" + }, + "Logservice.log.append": { + "public_path": "log.append", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "new_err": "", + "new_str": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "23374285e90cc403c4aab4fcfb46929aadfab8f3417ea5439fefdf6db437f3c0" + }, + "Logservice.log.delete": { + "public_path": "log.delete", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "eb09c52c47b1929b7083dfef076a9d423bb9ed60725493b217f98c642e3f4bd2" + }, + "Logservice.log.get": { + "public_path": "log.get", + 
"RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "4d6d4c378f2774f71a45d4112f917125ec3d2effbf80a002d0dc488a62388728" + }, + "Logservice.log.get_all": { + "public_path": "log.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.log.log.SyftLog]", + "warning": "", + "hash": "60e6c47439d90d9a8c97954081620d3a29bd170f61247b00820da819f8c01ef3" + }, + "Logservice.log.get_stderr": { + "public_path": "log.get_stderr", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "ec451dfe7e0f68159c07a54eef448d94d2394bffda91910bddf2c9398721d3b4" + }, + "Logservice.log.get_stdout": { + "public_path": "log.get_stdout", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "538f3b033fcd84c18b167522106879aa0a78163c8dc181964bac0a5f324c8f96" + }, + "Logservice.log.has_storage_permission": { + "public_path": "log.has_storage_permission", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "18c5c002e607a77e8f26226c4fc7283e75924095c7bc204328288e69dcb1dc52" + }, + "Logservice.log.restart": { + "public_path": "log.restart", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "0d3a97c22b4f3fd40b352dd692f164524aa2c66c40af20fefd9c6d21ff005667" + }, + "Metadataservice.metadata.get_env": { + "public_path": "metadata.get_env", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "07f51344162f4525f0de2352b3ca114ee9f5513fa17a4f03f5bb159fa4fccec6" + }, + "Metadataservice.metadata.get_metadata": { + "public_path": "metadata.get_metadata", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "09378779f1a3d1fef088897d4f9cbed03d769d162b59d5a872ff9c20f451bc28" + }, + "Migrationservice.migration": { + "public_path": "migration", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "canonical_name": "" + }, + "return_type": "", + "warning": "", + "hash": "14594041323aa75d7d16bfa2b263dd209b8b3fbfbf324197b7bf5f905c5b9847" + }, + "Migrationservice.migration._get_object": { + "public_path": "migration._get_object", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "object_type": "", + "uid": "" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "370048f733d7c8723d3c9ddea11831a914e75d59ce1335f8c8ee03a51f1e3bb1" + }, + "Migrationservice.migration._update_object": { + "public_path": "migration._update_object", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "object": "typing.Any" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "dbe06a09ac0f9abb4a022fe08b58008bd9801ca5178e3ec920214657df99be87" + }, + "Migrationservice.migration.apply_migration_data": { + "public_path": "migration.apply_migration_data", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "migration_data": "" + }, + "return_type": "", + "warning": "", + "hash": "4a1ac9c75d034b1ba6ff182694c9d95dc58c4f71301749d47a9dc35c31e287db" + }, + "Migrationservice.migration.create_migrated_objects": { + "public_path": "migration.create_migrated_objects", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "ignore_existing": "", + "migrated_objects": 
"list[syft.types.syft_object.SyftObject]" + }, + "return_type": "", + "warning": "", + "hash": "fe410f6d5a17ebcdeb4772b235c38f58d8f1fb6822734ffd871662b19a1fad60" + }, + "Migrationservice.migration.get_all_store_metadata": { + "public_path": "migration.get_all_store_metadata", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "document_store_object_types": "list[type[syft.types.syft_object.SyftObject]] | None", + "include_action_store": "" + }, + "return_type": "dict[type[syft.types.syft_object.SyftObject], syft.service.migration.object_migration_state.StoreMetadata]", + "warning": "", + "hash": "f088367a243684313db56ee8a0988b2d904751b20744b011cf27b73883211a87" + }, + "Migrationservice.migration.get_migration_actionobjects": { + "public_path": "migration.get_migration_actionobjects", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "get_all": "" + }, + "return_type": "", + "warning": "", + "hash": "533151db7bdc4cbdd8a5f2fbdd1a66353319bcfb4298cc35caf6d8828579d296" + }, + "Migrationservice.migration.get_migration_data": { + "public_path": "migration.get_migration_data", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "7c773afbe686d21208eb3ea19ba3d7e6d048d0123dc78e5714428a63b816cd90" + }, + "Migrationservice.migration.migrate_data": { + "public_path": "migration.migrate_data", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "document_store_object_types": "list[type[syft.types.syft_object.SyftObject]] | None" + }, + "return_type": "", + "warning": "", + "hash": "919967fd93ea697e4e57f52ed97f5c31bb15073a92f81320646a4aee3f34f508" + }, + "Migrationservice.migration.reset_and_restore": { + "public_path": "migration.reset_and_restore", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "migration_data": "" + }, + "return_type": "syft.service.response.SyftSuccess | syft.service.response.SyftError", + "warning": "", + "hash": "675fb5f3904cb487354ef73e499c70cb01a67b6583541704538359af13de5ca2" + }, + "Networkservice.network.add_peer": { + "public_path": "network.add_peer", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "challenge": "", + "peer": "", + "self_server_route": "", + "verify_key": "" + }, + "return_type": "syft.service.request.request.Request | syft.service.response.SyftSuccess", + "warning": "", + "hash": "248700efa099c1a57c38feffa9f5ee223e9b51e7368e114294059d8fc8924ade" + }, + "Networkservice.network.add_route": { + "public_path": "network.add_route", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "called_by_peer": "", + "peer_verify_key": "", + "route": "" + }, + "return_type": "", + "warning": "", + "hash": "292af4e3d1243d43f1557ade1b24044c74695965debdc04f8aac73a034469b0c" + }, + "Networkservice.network.add_route_on_peer": { + "public_path": "network.add_route_on_peer", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "peer": "", + "route": "" + }, + "return_type": "", + "warning": "", + "hash": "5cd7ea5ca401bc18802628b7948e2bf9acdd0b77eaef1cd8229fe6b732e3cf7b" + }, + "Networkservice.network.check_peer_association": { + "public_path": "network.check_peer_association", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "peer_id": "" + }, + "return_type": "", + "warning": "", + "hash": "ea1b082bd78576b63abe1101574ebd7db05c01091ada31bb809de45ff7f8ca33" + }, + "Networkservice.network.delete_peer_by_id": { + "public_path": "network.delete_peer_by_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + 
"return_type": "", + "warning": "", + "hash": "088d460ae5d9abe719e60696eebcd951e755d172649980ff20d7718d404ce75f" + }, + "Networkservice.network.delete_route": { + "public_path": "network.delete_route", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "called_by_peer": "", + "peer_verify_key": "", + "route": "syft.service.network.routes.ServerRoute | None" + }, + "return_type": "syft.service.response.SyftSuccess | syft.service.response.SyftInfo", + "warning": "", + "hash": "320b21094fee33b6a8e640581140270e100971bab88f701fc8bfd16b0149379e" + }, + "Networkservice.network.delete_route_on_peer": { + "public_path": "network.delete_route_on_peer", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "peer": "", + "route": "" + }, + "return_type": "syft.service.response.SyftSuccess | syft.service.response.SyftInfo", + "warning": "", + "hash": "296b0b18482160ed54af6a8669c62c0e028c5e7f624fa9f385bfb26f5a0fccea" + }, + "Networkservice.network.exchange_credentials_with": { + "public_path": "network.exchange_credentials_with", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "remote_server_route": "", + "remote_server_verify_key": "", + "reverse_tunnel": "", + "self_server_route": "" + }, + "return_type": "syft.service.request.request.Request | syft.service.response.SyftSuccess", + "warning": { + "name": "CRUDWarning", + "confirmation": true, + "enabled": true + }, + "hash": "3975636f5dc68fb30e7afd1f90d2f00e48df9b8b7c11ff223157e2ebfa11b15d" + }, + "Networkservice.network.get_all_peers": { + "public_path": "network.get_all_peers", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.network.server_peer.ServerPeer]", + "warning": "", + "hash": "ade2724f4f0e1f197ee784d4262622ba86f03df5c3a253f9e913c313ebb91398" + }, + "Networkservice.network.get_peer_by_name": { + "public_path": "network.get_peer_by_name", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "name": "" + }, + "return_type": "", + "warning": "", + "hash": "db2f1198940b75c065993dbc9daa34e9c721bc6a77e7ef3432e52e04c337df4c" + }, + "Networkservice.network.get_peers_by_type": { + "public_path": "network.get_peers_by_type", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "server_type": "" + }, + "return_type": "list[syft.service.network.server_peer.ServerPeer]", + "warning": "", + "hash": "bb3ea79f5c7aadab152336dcf4320c5ac9b1be6b18cfa29e8c35668e25b0c0b1" + }, + "Networkservice.network.ping": { + "public_path": "network.ping", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "challenge": "" + }, + "return_type": "", + "warning": "", + "hash": "40b0675248738145eac95b049f20ff81d0390b7cb6a5076875cac3d108c64c46" + }, + "Networkservice.network.update_peer": { + "public_path": "network.update_peer", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "peer_update": "" + }, + "return_type": "", + "warning": "", + "hash": "2b46754fee83322cc434bad2c081d2ce3fd717eea9e458904ea27a246d80bd1c" + }, + "Networkservice.network.update_route_priority": { + "public_path": "network.update_route_priority", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "called_by_peer": "", + "peer_verify_key": "", + "priority": "int | None", + "route": "" + }, + "return_type": "", + "warning": "", + "hash": "ba2fa210de8f17f998ae4d725c5f0f26d2f9f41c9ead790098593977f5df9903" + }, + "Networkservice.network.update_route_priority_on_peer": { + "public_path": "network.update_route_priority_on_peer", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + 
"peer": "", + "priority": "int | None", + "route": "" + }, + "return_type": "", + "warning": "", + "hash": "7601b207dc0989ecd0d4fa0081ab7238cbc09f181b81732f499e68fe75b468c2" + }, + "Notificationservice.notifications.activate": { + "public_path": "notifications.activate", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "dd7ee2f01ea20424d16876e200505ae611aaebb1a4741a6ee33ac13528236e9a" + }, + "Notificationservice.notifications.clear": { + "public_path": "notifications.clear", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "5bd81b8705c00214e84925d08b2e6ced1cab811961939d2a6cf0f64bc113aa7c" + }, + "Notificationservice.notifications.deactivate": { + "public_path": "notifications.deactivate", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "de7645699ea1be064486950eb987accdb95a5583847c15f51c23f15ac386adcf" + }, + "Notificationservice.notifications.get_all": { + "public_path": "notifications.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.notification.notifications.Notification]", + "warning": "", + "hash": "ac013b589ff061b229cebd2ebb4af8f265cbfd526d5ce0957a056ee1b163b88a" + }, + "Notificationservice.notifications.get_all_read": { + "public_path": "notifications.get_all_read", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.notification.notifications.Notification]", + "warning": "", + "hash": "671c5d779c7cbdc8b4bf28f8909f1eee1e8e5dfd2361d82cd5a021c270399056" + }, + "Notificationservice.notifications.get_all_sent": { + "public_path": "notifications.get_all_sent", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.notification.notifications.Notification]", + "warning": "", + "hash": "51ad7c8a0d0a880eed4f9ace9ea788cb169b5f499288acc619df8f7f36dd4572" + }, + "Notificationservice.notifications.get_all_unread": { + "public_path": "notifications.get_all_unread", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.notification.notifications.Notification]", + "warning": "", + "hash": "ba9cfd8725d4d204edf1dd6a467331b89b68bbe06f4358561724f60fe6658e11" + }, + "Notificationservice.notifications.mark_as_read": { + "public_path": "notifications.mark_as_read", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "9c3db05496d63b05d556a1fb9b4a72e7f85b7101fc0c031a0a5ff64042974e40" + }, + "Notificationservice.notifications.mark_as_unread": { + "public_path": "notifications.mark_as_unread", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "7092e713add3f74cebba68755ff947c3cac2e2d774b39bf438db8d119803aecf" + }, + "Notificationservice.notifications.reply": { + "public_path": "notifications.reply", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "reply": "" + }, + "return_type": "", + "warning": "", + "hash": "6cf926abd11ef14b25afeb8bd292d7232cc10b06c5c4ed856885a9ebb82a82eb" + }, + "Notificationservice.notifications.resolve_object": { + "public_path": "notifications.resolve_object", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "linked_obj": "" + }, + "return_type": "", + "warning": "", + "hash": 
"4018a9e1d50166f20d1e73fa8448c765eedfa417980883da7cddbb3199569467" + }, + "Notificationservice.notifications.send": { + "public_path": "notifications.send", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "notification": "" + }, + "return_type": "", + "warning": "", + "hash": "cd6d675329c403a45803f36caae26657c18ede175ed28657a561bb1ab48b2d19" + }, + "Notificationservice.notifications.settings": { + "public_path": "notifications.settings", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "8a3250a191cb17af16fa98bef8a30e5edacda83c2445f202b05369c475fa72d0" + }, + "Notificationservice.notifications.user_settings": { + "public_path": "notifications.user_settings", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "06b41cf3485dbb1c638e5f75d20c8c6c023bacb4701247b0e6a92f574ce375f3" + }, + "Outputservice.output.create": { + "public_path": "output.create", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "executing_user_verify_key": "", + "input_ids": "dict[str, syft.types.uid.UID] | None", + "job_id": "syft.types.uid.UID | None", + "output_ids": "syft.types.uid.UID | list[syft.types.uid.UID] | dict[str, syft.types.uid.UID]", + "output_policy_id": "syft.types.uid.UID | None", + "user_code_id": "" + }, + "return_type": "", + "warning": "", + "hash": "dc4ea34317e12bf98edf01c4bc3cf44fb1a297e59c06a72fc26aeaa47929d57a" + }, + "Outputservice.output.get": { + "public_path": "output.get", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "id": "" + }, + "return_type": "", + "warning": "", + "hash": "2986bfb8974ece73066bc347e5b44bcb0d3dd5c7d6116db76bde18f486c0b241" + }, + "Outputservice.output.get_all": { + "public_path": "output.get_all", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.output.output_service.ExecutionOutput]", + "warning": "", + "hash": "1522501915d85c7f5f7fa9efc82a40cd5fdb1c10f733af8356be4884e34a89cd" + }, + "Outputservice.output.get_by_job_id": { + "public_path": "output.get_by_job_id", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "job_id": "" + }, + "return_type": "", + "warning": "", + "hash": "d7bc1d31fd46e20b7c294c07f10a1003389a421e50f6cacc2184e06e8a06c9e4" + }, + "Outputservice.output.get_by_output_policy_id": { + "public_path": "output.get_by_output_policy_id", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "output_policy_id": "" + }, + "return_type": "list[syft.service.output.output_service.ExecutionOutput]", + "warning": "", + "hash": "8e2c24f55fcb1c84fb2206bcb81e92ae73e45527d8d59f99a565420bf3a4ca61" + }, + "Outputservice.output.get_by_user_code_id": { + "public_path": "output.get_by_user_code_id", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "user_code_id": "" + }, + "return_type": "list[syft.service.output.output_service.ExecutionOutput]", + "warning": "", + "hash": "a7d95b8196de70c5f6e97fde7f0bf8e7a526b3a1de904205507e64dc8c118b90" + }, + "Outputservice.output.has_output_read_permissions": { + "public_path": "output.has_output_read_permissions", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "user_code_id": "", + "user_verify_key": "" + }, + "return_type": "", + "warning": "", + "hash": "e5f95987d84cb73a11dd6b92992f6fb93476710fad37a9c5d15c4a0d03acefd0" + }, + "Policyservice.policy.add": { + "public_path": "policy.add", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "policy_code": 
"syft.service.policy.policy.SubmitUserPolicy | syft.service.policy.policy.UserPolicy" + }, + "return_type": "", + "warning": "", + "hash": "630387c7d8672e922c3ca5c91b094cf472303abc254ec2be8b33a77461e66def" + }, + "Policyservice.policy.get_all": { + "public_path": "policy.get_all", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.policy.policy.UserPolicy]", + "warning": "", + "hash": "40467db5c219bf970d4b9a720f4ba9630ea7637c2b6f67abbd4cfcf1422ff455" + }, + "Policyservice.policy.get_by_uid": { + "public_path": "policy.get_by_uid", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "5ad41d5c3493fd66bc7036d58b4b8b22146f17d1db5b090abae3069e70586056" + }, + "Projectservice.project.add_event": { + "public_path": "project.add_event", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "project_event": "" + }, + "return_type": "", + "warning": "", + "hash": "34bcd382b8c043cda5743a1876f9020601267b65e37ea74495cb16d78983876e" + }, + "Projectservice.project.broadcast_event": { + "public_path": "project.broadcast_event", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "project_event": "" + }, + "return_type": "", + "warning": "", + "hash": "aef73e447c92a4c7b6443ffd4d47e4150c317717de70be49540164c5bc02cd9b" + }, + "Projectservice.project.can_create_project": { + "public_path": "project.can_create_project", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "3c106d238a89f35cc4b14e987678e61135afc089a8c53be303859d2d924f0880" + }, + "Projectservice.project.create_project": { + "public_path": "project.create_project", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "project": "" + }, + "return_type": "", + "warning": "", + "hash": "d844d0aa785d9f4414d229467a3f61c2529c85dcddfcc5cf34feb45663692bb9" + }, + "Projectservice.project.get_all": { + "public_path": "project.get_all", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.project.project.Project]", + "warning": "", + "hash": "a82c5b56ca9e2ef198e8fad21540d53cd7f32ab10efaf5ebac15e8d5f855c334" + }, + "Projectservice.project.get_by_name": { + "public_path": "project.get_by_name", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "name": "" + }, + "return_type": "", + "warning": "", + "hash": "2c72148209f032bf48c2d225ef49bd555f1647c4e3121692dad4b5d028d81340" + }, + "Projectservice.project.get_by_uid": { + "public_path": "project.get_by_uid", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "ef71eb772a4783d09377a11ce50c6f3a4978aeac5b2afb6d9cfe7febca0efeae" + }, + "Projectservice.project.sync": { + "public_path": "project.sync", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "project_id": "", + "seq_no": "" + }, + "return_type": "list[syft.service.project.project.ProjectEvent]", + "warning": "", + "hash": "ed217bbf1aeb9c262985d36397f314f37a2b05c7f32c0f71d76b1b3045d2d3ba" + }, + "Requestservice.request.add_changes": { + "public_path": "request.add_changes", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "changes": "list[syft.service.request.request.Change]", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "d4d149f5f3742cfd0e022a6032b4e80b742540506b8465a3ab9c7fd85aa750c4" + }, + "Requestservice.request.apply": { + "public_path": "request.apply", + 
"RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "kwargs": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "dd7dc406eb3061dd5c55f97634cfc94815aed8226dc4ed87cdba43ae1e7a3f76" + }, + "Requestservice.request.delete_by_uid": { + "public_path": "request.delete_by_uid", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "2adfc43d40f40cf00e8a20d4e2b74002c05635b2d5410ff3a361b99ef269bd5f" + }, + "Requestservice.request.filter_all_info": { + "public_path": "request.filter_all_info", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "page_index": "int | None", + "page_size": "int | None", + "request_filter": "" + }, + "return_type": "list[syft.service.request.request.RequestInfo]", + "warning": "", + "hash": "e310c82d8b725a00f7ebb673123b1d6da38a2eb520eca2432cf15ef2e3214398" + }, + "Requestservice.request.get_all": { + "public_path": "request.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.request.request.Request]", + "warning": "", + "hash": "cf90b8a370cd33abc37908bb50c09a4fa3b79fe1e374b2557ab6e753deceec79" + }, + "Requestservice.request.get_all_approved": { + "public_path": "request.get_all_approved", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.request.request.Request]", + "warning": "", + "hash": "3e2a003fdd238554a545497578e1e0786edc1bf5b794c579b0bb0a61fa3f6605" + }, + "Requestservice.request.get_all_info": { + "public_path": "request.get_all_info", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "page_index": "int | None", + "page_size": "int | None" + }, + "return_type": "list[list[syft.service.request.request.RequestInfo]] | list[syft.service.request.request.RequestInfo]", + "warning": "", + "hash": "b8687d0df70b62635798c6bc818cf548b7fb4887ec0938c89e396194d14d100f" + }, + "Requestservice.request.get_all_pending": { + "public_path": "request.get_all_pending", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.request.request.Request]", + "warning": "", + "hash": "a0c16eec47701f9639fac41c1871fb0956e44b84aac76dfb7845bacdfcd132bb" + }, + "Requestservice.request.get_all_rejected": { + "public_path": "request.get_all_rejected", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.request.request.Request]", + "warning": "", + "hash": "59195c8267af462cc61a2e9251ce653288327c34428c43606a5cef6044a76d4f" + }, + "Requestservice.request.get_by_uid": { + "public_path": "request.get_by_uid", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "a949e549fb2cbe37293689809476c62956d8991d323dce97dcfbf1c5cb2394c1" + }, + "Requestservice.request.get_by_usercode_id": { + "public_path": "request.get_by_usercode_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "usercode_id": "" + }, + "return_type": "list[syft.service.request.request.Request]", + "warning": "", + "hash": "929038ac093c8b90cf1d17ac4acbe65903a18efe91295e0b5504238577ead5ea" + }, + "Requestservice.request.set_tags": { + "public_path": "request.set_tags", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "request": "", + "tags": "list[str]" + }, + "return_type": "", + "warning": "", + "hash": "30a233cf90353daa9fb15e6e7d7aaea211a77fca6ac31354eea3fc20a4706e86" + }, + 
"Requestservice.request.submit": { + "public_path": "request.submit", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "reason": "str | None", + "request": "", + "send_message": "" + }, + "return_type": "", + "warning": "", + "hash": "80c620b37cc1d5ef564d4ec0a9df143584090bf94a7bf1fc0ce92f92180b9dd3" + }, + "Requestservice.request.undo": { + "public_path": "request.undo", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "reason": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "e3ad64b4d02b659c16cda7773176ce7f368ec66543961c6a6c8452080a667fce" + }, + "Settingsservice.settings.allow_association_request_auto_approval": { + "public_path": "settings.allow_association_request_auto_approval", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "enable": "" + }, + "return_type": "", + "warning": "", + "hash": "915181662bbb92b3c6101d8797be56b2d2cf58fe43bce2707f265655536cde21" + }, + "Settingsservice.settings.allow_guest_signup": { + "public_path": "settings.allow_guest_signup", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "enable": "" + }, + "return_type": "", + "warning": { + "name": "HighSideCRUDWarning", + "confirmation": true, + "enabled": true + }, + "hash": "3a8f017c0779b5e56b85b8bf485487fc5ed95ea14bd404c55247fafa69c8c0a1" + }, + "Settingsservice.settings.batch_notifications": { + "public_path": "settings.batch_notifications", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "email_type": "", + "frequency": "", + "start_time": "" + }, + "return_type": "", + "warning": "", + "hash": "c74ca80e0efc8fd75de9e80e93fbe636bfc095708c0987224a44c8f8558d1535" + }, + "Settingsservice.settings.disable_notifications": { + "public_path": "settings.disable_notifications", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "5aab76e9ee5aaef9aa2e5569159625ec79552acedb300512a97d9b8d804eb185" + }, + "Settingsservice.settings.enable_notifications": { + "public_path": "settings.enable_notifications", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "email_password": "str | None", + "email_port": "str | None", + "email_sender": "str | None", + "email_server": "str | None", + "email_username": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "2ed74d0c4a4fe4d45e5bb0156e88b35912e72b799465608425a669b668d57bcb" + }, + "Settingsservice.settings.get": { + "public_path": "settings.get", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "6f690cf4d02b0b5dd378565e04a1fccc48dd595a28f7d7ffd1e10e92572aebfb" + }, + "Settingsservice.settings.get_server_config": { + "public_path": "settings.get_server_config", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "dict[str, typing.Any]", + "warning": "", + "hash": "b25a48efc5a3256ca7441aed9ffdcb347e9db4e80bb5408e0325b577cccebba1" + }, + "Settingsservice.settings.set": { + "public_path": "settings.set", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "settings": "" + }, + "return_type": "", + "warning": "", + "hash": "14e50764b0c3a2baa3a1c6b3678d8686aa63a7fe3eb2ef70e960c8e33ac06aae" + }, + "Settingsservice.settings.set_email_rate_limit": { + "public_path": "settings.set_email_rate_limit", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "daily_limit": "", + "email_type": "" + }, + "return_type": "", + "warning": "", + "hash": "c0eee38460af71b53cd9d9b74c1b49bbee2d2a0c1651c14f07812a6cf52d1a96" + 
}, + "Settingsservice.settings.set_server_side_type_dangerous": { + "public_path": "settings.set_server_side_type_dangerous", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "server_side_type": "" + }, + "return_type": "", + "warning": "", + "hash": "df16ae6c26ba4d0cf369ee31d486c81081396b477aea544913121475f514d0ae" + }, + "Settingsservice.settings.update": { + "public_path": "settings.update", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "admin_email": "str | syft.types.syft_metaclass.EmptyType", + "allow_guest_sessions": "bool | syft.types.syft_metaclass.EmptyType", + "association_request_auto_approval": "bool | syft.types.syft_metaclass.EmptyType", + "description": "str | syft.types.syft_metaclass.EmptyType", + "eager_execution_enabled": "bool | syft.types.syft_metaclass.EmptyType", + "id": "syft.types.uid.UID | syft.types.syft_metaclass.EmptyType", + "name": "str | syft.types.syft_metaclass.EmptyType", + "notifications_enabled": "bool | syft.types.syft_metaclass.EmptyType", + "on_board": "bool | syft.types.syft_metaclass.EmptyType", + "organization": "str | syft.types.syft_metaclass.EmptyType", + "pwd_token_config": "syft.service.settings.settings.PwdTokenResetConfig | syft.types.syft_metaclass.EmptyType", + "signup_enabled": "bool | syft.types.syft_metaclass.EmptyType", + "welcome_markdown": "syft.util.misc_objs.HTMLObject | syft.util.misc_objs.MarkdownDescription | syft.types.syft_metaclass.EmptyType" + }, + "return_type": "", + "warning": "", + "hash": "72b89a87128736c8d058fc8b52673aec3362d3395d909c8f4486da2733c266a2" + }, + "Settingsservice.settings.welcome_customize": { + "public_path": "settings.welcome_customize", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "html": "", + "markdown": "" + }, + "return_type": "", + "warning": "", + "hash": "76b58fcf910ce249ab50eeda43ae572afe0090799ae7093aea35c4cf0ccfcc59" + }, + "Settingsservice.settings.welcome_preview": { + "public_path": "settings.welcome_preview", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "html": "", + "markdown": "" + }, + "return_type": "syft.util.misc_objs.MarkdownDescription | syft.util.misc_objs.HTMLObject", + "warning": "", + "hash": "5a43c8e7b827a694e58201057b9eeee0652c374414956e6051441ded200188c7" + }, + "Settingsservice.settings.welcome_show": { + "public_path": "settings.welcome_show", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "syft.util.misc_objs.HTMLObject | syft.util.misc_objs.MarkdownDescription", + "warning": "", + "hash": "10bce24c8bdf650a18f126f8907d098767a4502a9579afbc18abdf2e97cb06ed" + }, + "Syftimageregistryservice.image_registry.add": { + "public_path": "image_registry.add", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "url": "" + }, + "return_type": "", + "warning": "", + "hash": "64a4280e61983f88002373c751eaea8929699ece69a394634c682c3f77d6a88a" + }, + "Syftimageregistryservice.image_registry.delete": { + "public_path": "image_registry.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "syft.types.uid.UID | None", + "url": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "1b8d83ba5e1db162e8a1f6b3dc35ef8567590b0d6feedcf0715cb7440b58be18" + }, + "Syftimageregistryservice.image_registry.get_all": { + "public_path": "image_registry.get_all", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.worker.image_registry.SyftImageRegistry]", + "warning": "", + "hash": 
"b26f9e22130c342a4ac03fb91fb3237581532c909dd481b419f2ff26dc4ebb9e" + }, + "Syftimageregistryservice.image_registry.get_by_id": { + "public_path": "image_registry.get_by_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "516c5eacb7f2b0ece6402e40078a7834fa3ed82f5a4b1a607872e4dc972afe75" + }, + "Syftworkerimageservice.worker_image.build": { + "public_path": "worker_image.build", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "force_build": "", + "image_uid": "", + "pull_image": "", + "registry_uid": "syft.types.uid.UID | None", + "tag": "" + }, + "return_type": "", + "warning": "", + "hash": "74a4b94b50c84507019dc90db213599e3ecba320b76eac12f40a0c54a2f20ab7" + }, + "Syftworkerimageservice.worker_image.get_all": { + "public_path": "worker_image.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "syft.types.dicttuple.DictTuple[str, syft.service.worker.worker_image.SyftWorkerImage]", + "warning": "", + "hash": "2fc718a2dc2f739472afe8b59dd9ed9b4610c8ea6fa1fe154fb4957362bad695" + }, + "Syftworkerimageservice.worker_image.get_by_config": { + "public_path": "worker_image.get_by_config", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "worker_config": "" + }, + "return_type": "", + "warning": "", + "hash": "14ed82d9ac2ff31ee2116f59ed5d84e7ef371d5d4610c35b579b817486a29793" + }, + "Syftworkerimageservice.worker_image.get_by_uid": { + "public_path": "worker_image.get_by_uid", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "06965ad2051fb7bd27236f91f407fcac114487e2563aeff38eb6967d20450819" + }, + "Syftworkerimageservice.worker_image.push": { + "public_path": "worker_image.push", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "image_uid": "", + "password": "str | None", + "username": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "31e41d284837c038e63535aa24a7a80821600240d8dd61dcfeea993789c4102d" + }, + "Syftworkerimageservice.worker_image.remove": { + "public_path": "worker_image.remove", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "251c2f98944c6e26e3ac0a034537f99bccdcbf80958c209ec4ab3e1355592d70" + }, + "Syftworkerimageservice.worker_image.submit": { + "public_path": "worker_image.submit", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "worker_config": "" + }, + "return_type": "", + "warning": "", + "hash": "6110f27a7eae93bcf75af03ec2a8b9ce75abc613320c9e7ae7d73c5492f3e047" + }, + "Syftworkerpoolservice.worker_pool.add_workers": { + "public_path": "worker_pool.add_workers", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "number": "", + "pool_id": "syft.types.uid.UID | None", + "pool_name": "str | None", + "registry_password": "str | None", + "registry_username": "str | None" + }, + "return_type": "list[syft.service.worker.worker_pool.ContainerSpawnStatus]", + "warning": "", + "hash": "06f251d4c0e1396ed12891b8f9f944589fa25ded053543c4f2959f2316a14156" + }, + "Syftworkerpoolservice.worker_pool.create_image_and_pool_request": { + "public_path": "worker_pool.create_image_and_pool_request", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "config": "", + "num_workers": "", + "pod_annotations": "dict[str, str] | None", + "pod_labels": "dict[str, str] | None", + "pool_name": "", + "pull_image": 
"", + "reason": "str | None", + "registry_uid": "syft.types.uid.UID | None", + "tag": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "c06d361c083a000443e74197dd59a3d9001b18f5078876689dc057f4a2edc44d" + }, + "Syftworkerpoolservice.worker_pool.create_pool_request": { + "public_path": "worker_pool.create_pool_request", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "image_uid": "", + "num_workers": "", + "pod_annotations": "dict[str, str] | None", + "pod_labels": "dict[str, str] | None", + "pool_name": "", + "reason": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "b4817014698bf833808f0f563233605f513c0f22446610a191e52e9f703fe980" + }, + "Syftworkerpoolservice.worker_pool.delete": { + "public_path": "worker_pool.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "pool_id": "syft.types.uid.UID | None", + "pool_name": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "bab446b49cf97c5750f38f6c38b49646d1db02e06b0a41fcd82691b46d46ef2e" + }, + "Syftworkerpoolservice.worker_pool.filter_by_image_id": { + "public_path": "worker_pool.filter_by_image_id", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "image_uid": "" + }, + "return_type": "list[syft.service.worker.worker_pool.WorkerPool]", + "warning": "", + "hash": "888db009c94350a86d9d284549e2d56a4928614419a81ac201af63a8a7c38ff8" + }, + "Syftworkerpoolservice.worker_pool.get_all": { + "public_path": "worker_pool.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "syft.types.dicttuple.DictTuple[str, syft.service.worker.worker_pool.WorkerPool]", + "warning": "", + "hash": "d21402f0343206f9f7b8449a3a55931d90c44d21e8142cb28588dc9fd8b468f5" + }, + "Syftworkerpoolservice.worker_pool.get_by_name": { + "public_path": "worker_pool.get_by_name", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "pool_name": "" + }, + "return_type": "list[syft.service.worker.worker_pool.WorkerPool]", + "warning": "", + "hash": "b7badddcb3f3c127377d649c939e7c0c3ffa4cd753ff7939efbaa706f1f3e82e" + }, + "Syftworkerpoolservice.worker_pool.launch": { + "public_path": "worker_pool.launch", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "image_uid": "syft.types.uid.UID | None", + "num_workers": "", + "pod_annotations": "dict[str, str] | None", + "pod_labels": "dict[str, str] | None", + "pool_name": "", + "registry_password": "str | None", + "registry_username": "str | None" + }, + "return_type": "list[syft.service.worker.worker_pool.ContainerSpawnStatus]", + "warning": "", + "hash": "451ffdf11b26b7d0041f88d523dd9a668507d2a99f5d2b5a38815c0413b9359d" + }, + "Syftworkerpoolservice.worker_pool.purge_workers": { + "public_path": "worker_pool.purge_workers", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "pool_id": "syft.types.uid.UID | None", + "pool_name": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "20ed1708fb676dde51cab7a220b5264dc6b864881fd911d5696a4fe860167bfd" + }, + "Syftworkerpoolservice.worker_pool.scale": { + "public_path": "worker_pool.scale", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "number": "", + "pool_id": "syft.types.uid.UID | None", + "pool_name": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "f934aa52a48943f34e6bcb0c782d878a0642a083d45d65ff3df68bf33ccb4043" + }, + "Syftworkerpoolservice.worker_pool.sync_pool_from_request": { + "public_path": "worker_pool.sync_pool_from_request", + 
"RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "request": "" + }, + "return_type": "", + "warning": "", + "hash": "77f33550b908f73926f203b3eaa71d9394bab3176591113e94af8cb1b0664e80" + }, + "Syncservice.sync._get_state": { + "public_path": "sync._get_state", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "d41943b8063064a2e843a418a6eb5de3c11c626d7d30d65507381f7095eb56ac" + }, + "Syncservice.sync.get_permissions": { + "public_path": "sync.get_permissions", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "items": "list[syft.types.syncable_object.SyncableSyftObject]" + }, + "return_type": "tuple[dict[syft.types.uid.UID, set[str]], dict[syft.types.uid.UID, set[syft.types.uid.UID]]]", + "warning": "", + "hash": "dcd880281d2fa43c3ac75b9ec8644e5ee3a9c0741ed4401f0fa873d63cd7a8b5" + }, + "Syncservice.sync.sync_items": { + "public_path": "sync.sync_items", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "ignored_batches": "dict[syft.types.uid.UID, int]", + "items": "list[syft.types.syncable_object.SyncableSyftObject]", + "permissions": "dict[type, list[syft.service.action.action_permissions.ActionObjectPermission]]", + "storage_permissions": "list[syft.service.action.action_permissions.StoragePermission]", + "unignored_batches": "set[syft.types.uid.UID]" + }, + "return_type": "", + "warning": "", + "hash": "7561cf4ace912bd7235ad03fc706c85fb13623cbab2af0c40596b5b9c1f01582" + }, + "Usercodeservice.code.call": { + "public_path": "code.call", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "kwargs": "typing.Any", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "a7963f36d2a6ae35a3fbe9ba5362cbad9fc4ccadd21d21fe667f3d996f16ba63" + }, + "Usercodeservice.code.delete": { + "public_path": "code.delete", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "143b5c8c2444eb952c11a187a0cc4265eae8cfd854790885a0b0ef586ff34f2e" + }, + "Usercodeservice.code.get_all": { + "public_path": "code.get_all", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.code.user_code.UserCode]", + "warning": "", + "hash": "0e98e7ab505e5b54e17f41816d7978c424d4d9c5130e78ccceb053d3e4ade501" + }, + "Usercodeservice.code.get_all_for_user": { + "public_path": "code.get_all_for_user", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.code.user_code.UserCode]", + "warning": "", + "hash": "96ddc9ca6b46e107fcf2fed249d1266dc31370585bf5fb1a877f2dda6a2f7f0c" + }, + "Usercodeservice.code.get_by_id": { + "public_path": "code.get_by_id", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "48dcebf5d98a1f5a14a825729ff9bd46e96defc9b262f337db090f2f3a47e21c" + }, + "Usercodeservice.code.get_by_service_func_name": { + "public_path": "code.get_by_service_func_name", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "service_func_name": "" + }, + "return_type": "list[syft.service.code.user_code.UserCode]", + "warning": "", + "hash": "f1dc8817a0dc8f0c1ee80a8a86d38ed75eda76d5f55f47a8836980ca106c21fb" + }, + "Usercodeservice.code.request_code_execution": { + "public_path": "code.request_code_execution", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "code": "syft.service.code.user_code.SubmitUserCode | syft.service.code.user_code.UserCode", + "reason": "str | None" 
+ }, + "return_type": "", + "warning": "", + "hash": "e69c925579c700ab97cb02bcf930d777a726843f58daaae425815da16bbee0a3" + }, + "Usercodeservice.code.store_execution_output": { + "public_path": "code.store_execution_output", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "input_ids": "dict[str, syft.types.uid.UID] | None", + "job_id": "syft.types.uid.UID | None", + "outputs": "typing.Any", + "user_code_id": "" + }, + "return_type": "", + "warning": "", + "hash": "1952677c98c0e59fd4dcfec5d554fdbb533c88fe2a90381590e9194d03233c59" + }, + "Usercodeservice.code.submit": { + "public_path": "code.submit", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "code": "" + }, + "return_type": "", + "warning": "", + "hash": "10ea470830a5c833d84585682d6d602669504773917ff9b3d38ecd02f7bc9c58" + }, + "Usercodeservice.code.update": { + "public_path": "code.update", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "id": "syft.types.uid.UID | syft.types.syft_metaclass.EmptyType", + "l0_deny_reason": "str | None | syft.types.syft_metaclass.EmptyType" + }, + "return_type": "", + "warning": "", + "hash": "d824a955105b9c4febd06536f2b2752973d6b2b25c4b73bc05ed8cad5641f252" + }, + "Usercodestatusservice.code_status.create": { + "public_path": "code_status.create", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "status": "" + }, + "return_type": "", + "warning": "", + "hash": "e7e813f1933e93fad8024cb1a469b71d9223482803e6c31aafe5765b6f3ed578" + }, + "Usercodestatusservice.code_status.get_all": { + "public_path": "code_status.get_all", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.code.user_code.UserCodeStatusCollection]", + "warning": "", + "hash": "9feae68b9c5a79873d3573e17b20374ae74f2d11a97227b1d8b3a3d44f8c1d37" + }, + "Usercodestatusservice.code_status.get_by_uid": { + "public_path": "code_status.get_by_uid", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "105347de75f51d1e38b34ed1686e2a6a96aa394c090c831c278e1a3ca8d3116f" + }, + "Usercodestatusservice.code_status.remove": { + "public_path": "code_status.remove", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "38d79b611e9cd5911635072294b325a9a77a5c66f6c8b3007cea6fd6116d0f2a" + }, + "Usercodestatusservice.code_status.update": { + "public_path": "code_status.update", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "decision": "syft.service.code.user_code.ApprovalDecision | syft.types.syft_metaclass.EmptyType", + "id": "syft.types.uid.UID | syft.types.syft_metaclass.EmptyType" + }, + "return_type": "", + "warning": "", + "hash": "960b4f65972823d62ef88e83089d616b4176f8c0a5560e27f623791bfcfebc39" + }, + "Userservice.user.create": { + "public_path": "user.create", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "created_by": "syft.server.credentials.SyftSigningKey | None", + "email": "", + "id": "", + "institution": "str | None", + "mock_execution_permission": "", + "name": "", + "password": "", + "password_verify": "str | None", + "role": "syft.service.user.user_roles.ServiceRole | None", + "verify_key": "syft.server.credentials.SyftVerifyKey | None", + "website": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "562536dd46bf07735ea56135a6a8e547e6f23707673103c86b7887bf39507d42" + }, + "Userservice.user.delete": { + "public_path": "user.delete", + "RBAC_permission": "GUEST_ROLE_LEVEL", 
+ "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "8b1a55fba9717e5a15206f838c767c20566fdc7a706caef0e1aafe69039fcfed" + }, + "Userservice.user.get_all": { + "public_path": "user.get_all", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "order_by": "str | None", + "page_index": "int | None", + "page_size": "int | None", + "sort_order": "str | None" + }, + "return_type": "list[syft.service.user.user.UserView]", + "warning": "", + "hash": "db30734fa2ba8cde141a7c0058572c7371897a63dcced42f0438dc3e80b1a86e" + }, + "Userservice.user.get_by_verify_key": { + "public_path": "user.get_by_verify_key", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "verify_key": "" + }, + "return_type": "", + "warning": "", + "hash": "697555ff4d40a26ef9328c26a68ef63518d3241284f0e5a612ae4741cd9fca8d" + }, + "Userservice.user.get_current_user": { + "public_path": "user.get_current_user", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "08424242302088be04173bfd09309243a7e3511838546af15e1322e4cca10ce0" + }, + "Userservice.user.get_index": { + "public_path": "user.get_index", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "index": "" + }, + "return_type": "", + "warning": "", + "hash": "43f9af96488f20804b6d6d5e3154499edd97e8ddfb0dbb6e0829f36b3e317e1c" + }, + "Userservice.user.request_password_reset": { + "public_path": "user.request_password_reset", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "031ffc151087bc11cbd3809de3a885b6656b232ef766586d14bd0accb81502bb" + }, + "Userservice.user.search": { + "public_path": "user.search", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "email": "pydantic.networks.EmailStr | syft.types.syft_metaclass.EmptyType", + "id": "syft.types.uid.UID | syft.types.syft_metaclass.EmptyType", + "name": "str | syft.types.syft_metaclass.EmptyType", + "page_index": "int | None", + "page_size": "int | None", + "verify_key": "syft.server.credentials.SyftVerifyKey | syft.types.syft_metaclass.EmptyType" + }, + "return_type": "list[syft.service.user.user.UserView]", + "warning": "", + "hash": "3d7f1b56e2b2d1e5580887742fb825f244041846b64de3a72beff02178fc3ac7" + }, + "Userservice.user.update": { + "public_path": "user.update", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "email": "pydantic.networks.EmailStr | syft.types.syft_metaclass.EmptyType", + "id": "syft.types.uid.UID | syft.types.syft_metaclass.EmptyType", + "institution": "str | syft.types.syft_metaclass.EmptyType", + "mock_execution_permission": "bool | syft.types.syft_metaclass.EmptyType", + "name": "str | syft.types.syft_metaclass.EmptyType", + "password": "str | syft.types.syft_metaclass.EmptyType", + "password_verify": "str | syft.types.syft_metaclass.EmptyType", + "role": "syft.service.user.user_roles.ServiceRole | syft.types.syft_metaclass.EmptyType", + "uid": "", + "verify_key": "syft.server.credentials.SyftVerifyKey | syft.types.syft_metaclass.EmptyType", + "website": "str | syft.types.syft_metaclass.EmptyType" + }, + "return_type": "", + "warning": "", + "hash": "a3be1d1c772bfbafb237d5d7e01c986cdea9cb23374e075e28e6d5c12419c302" + }, + "Userservice.user.view": { + "public_path": "user.view", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "b21973538da6cd08387e3e40eb11d88d31393accd0de76dd7ad6fd51123aad51" + }, + 
"Workerservice.worker.delete": { + "public_path": "worker.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "force": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "82681e3a435dffb544f131c3fd2b067a0b1a9f9989754da645d5f4bcaf6f0615" + }, + "Workerservice.worker.get": { + "public_path": "worker.get", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "6a5140d7730b39024a81c9a93b4204d1a7cce505b5db2b4eed30292950daf376" + }, + "Workerservice.worker.get_all": { + "public_path": "worker.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.worker.worker_pool.SyftWorker]", + "warning": "", + "hash": "0bfc387f28e4ed5c95a6c2db4cdf89d5778bdc2a8dfa3b57e6767ae764341353" + }, + "Workerservice.worker.logs": { + "public_path": "worker.logs", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "raw": "", + "uid": "" + }, + "return_type": "bytes | str", + "warning": "", + "hash": "dfa9c826fd8269b8f942685555f127ea2a056d60e3f288c7c6d0ed4d680b21c2" + }, + "Workerservice.worker.start_workers": { + "public_path": "worker.start_workers", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "n": "" + }, + "return_type": "list[syft.service.worker.worker_pool.ContainerSpawnStatus]", + "warning": "", + "hash": "2c5937bb99db09b1a6eeca68a877b6120e3d7b776bc36b2b0414d96849e2ea9c" + }, + "Workerservice.worker.status": { + "public_path": "worker.status", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "tuple[syft.service.worker.worker_pool.WorkerStatus, syft.service.worker.worker_pool.WorkerHealth | None]", + "warning": "", + "hash": "b761b2b2b7bb865d6559838aea133398fac8a6c74532e817a220d26e85af2eb3" + } +} diff --git a/packages/syft/src/syft/util/api_snapshot/syft_api_spec_stable.json b/packages/syft/src/syft/util/api_snapshot/syft_api_spec_stable.json new file mode 100644 index 00000000000..583041fcc78 --- /dev/null +++ b/packages/syft/src/syft/util/api_snapshot/syft_api_spec_stable.json @@ -0,0 +1,2371 @@ +{ + "Actionservice.action.delete": { + "public_path": "action.delete", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "soft_delete": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "7188dd0b7944ed26955b97a6a6149586fe144db59970d23f61af190aa54497c7" + }, + "Actionservice.action.execute": { + "public_path": "action.execute", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "action": "" + }, + "return_type": "", + "warning": "", + "hash": "71de9ba14014e015a5724978ffdec210b5b844c88dabded615580334bf9b0ef5" + }, + "Actionservice.action.exists": { + "public_path": "action.exists", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "obj_id": "" + }, + "return_type": "", + "warning": "", + "hash": "2daf41389eef09019c1f114f948ccc47c7094f94484b7ae467b06de224efb36c" + }, + "Actionservice.action.get": { + "public_path": "action.get", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "resolve_nested": "", + "twin_mode": "", + "uid": "" + }, + "return_type": "syft.service.action.action_object.ActionObject | syft.types.twin_object.TwinObject", + "warning": "", + "hash": "661a736ced3c33e3312986f31fb7be79410053708eb64a126565274866ff075d" + }, + "Actionservice.action.get_mock": { + "public_path": "action.get_mock", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + 
"warning": "", + "hash": "002fc053488b5203bc6133344e4c81507e31c66cf6820e60d0a9c07b82036ffa" + }, + "Actionservice.action.get_pointer": { + "public_path": "action.get_pointer", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "874f3ab136390f7b15755f7b466be42228385d865d43fe80ae0f33b161863d47" + }, + "Actionservice.action.has_storage_permission": { + "public_path": "action.has_storage_permission", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "d66a3462cd2aae6361ed8f92607125c8e0814273a6cabce8c1ec801648040f27" + }, + "Actionservice.action.is_resolved": { + "public_path": "action.is_resolved", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "8e980361137993b3f6a343728186ef54ca83f51dcd88a7bd3519ca603e4f130b" + }, + "Actionservice.action.np_array": { + "public_path": "action.np_array", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "data": "typing.Any" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "f0e75b79b4ce7349430bb1e62677c353e33ddcf998b5bf5c26328f246049fbf5" + }, + "Actionservice.action.set": { + "public_path": "action.set", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "action_object": "syft.service.action.action_object.ActionObject | syft.types.twin_object.TwinObject", + "add_storage_permission": "", + "ignore_detached_objs": "" + }, + "return_type": "", + "warning": "", + "hash": "c7d28af8ba385a5ca2369d00c388a1d02348a0e73c0a3e5b92d0e4da07966e33" + }, + "Apiservice.api.add": { + "public_path": "api.add", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "endpoint": "syft.service.api.api.CreateTwinAPIEndpoint | syft.service.api.api.TwinAPIEndpoint" + }, + "return_type": "", + "warning": "", + "hash": "792b2fb4d7b28b85d25a276872518122b0c487e362a4091fb0a8d9fbf203d71c" + }, + "Apiservice.api.api_endpoints": { + "public_path": "api.api_endpoints", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.api.api.TwinAPIEndpointView]", + "warning": "", + "hash": "a9691c938047b00ede6b417e37e2c4d6a6fce3513ac1ebf3d5d9cad9119bd9ec" + }, + "Apiservice.api.call": { + "public_path": "api.call", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "log_id": "syft.types.uid.UID | None", + "path": "" + }, + "return_type": "", + "warning": "", + "hash": "8b10d896d67b7074d4fcdf64094b56776adc266225b5c3e3d4ba91099c757ba8" + }, + "Apiservice.api.call_in_jobs": { + "public_path": "api.call_in_jobs", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "path": "" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "d4b8cb0fb3a8427ebf9de1d1dac3b48ef270bc3a33c283e190de6f0fe84f321b" + }, + "Apiservice.api.call_private": { + "public_path": "api.call_private", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "log_id": "syft.types.uid.UID | None", + "path": "" + }, + "return_type": "", + "warning": "", + "hash": "4099aeeb2f1e23fd47843a12defd5a8a54b3706d67b8a26a2d609a8446f97dfb" + }, + "Apiservice.api.call_private_in_jobs": { + "public_path": "api.call_private_in_jobs", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "path": "" + }, + "return_type": 
"typing.Any", + "warning": "", + "hash": "15c33f832c617790fd54a7c52334778cacb0c56f8b18bad9c6f4ecea37a1766f" + }, + "Apiservice.api.call_public": { + "public_path": "api.call_public", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "log_id": "syft.types.uid.UID | None", + "path": "" + }, + "return_type": "", + "warning": "", + "hash": "b7da0456e21a375389dc1059e1f95ba37f0e0dab184aa673917f6739be6a8182" + }, + "Apiservice.api.call_public_in_jobs": { + "public_path": "api.call_public_in_jobs", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "args": "typing.Any", + "kwargs": "typing.Any", + "path": "" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "33730cbb7819450ea8828954cf05bccbabe9811b58909a49b5e00f95efcf5167" + }, + "Apiservice.api.delete": { + "public_path": "api.delete", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "endpoint_path": "" + }, + "return_type": "", + "warning": "", + "hash": "7cbc47eff81bbc9c87e9f784eb45bd23f9d716e6a37475f1ec6322263b636c40" + }, + "Apiservice.api.exists": { + "public_path": "api.exists", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "0a6b260535469d49ff5959783af8ab447addedbdb9acf8df50b95f06c0b3d42e" + }, + "Apiservice.api.get": { + "public_path": "api.get", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "api_path": "" + }, + "return_type": "", + "warning": "", + "hash": "8f37b25ba23d940645d8a5096408c0908dec3b0b43c3aab5230f6773a777b71a" + }, + "Apiservice.api.get_all": { + "public_path": "api.get_all", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.api.api.TwinAPIEndpoint]", + "warning": "", + "hash": "eec93ade9eee93398db1cb855837069d339f5f744f73c3b747de848286cd82c4" + }, + "Apiservice.api.get_private_context": { + "public_path": "api.get_private_context", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "path": "" + }, + "return_type": "dict[str, typing.Any]", + "warning": "", + "hash": "131a77e8e1bf9f93f0aa8dee5ebc90e11d225a34fdd5a6c2a69c41e2e71362e5" + }, + "Apiservice.api.get_public_context": { + "public_path": "api.get_public_context", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "path": "" + }, + "return_type": "dict[str, typing.Any]", + "warning": "", + "hash": "8d190d6d2eaf7fdd5dfc3d217d0ca78f9933d28e8e07182a6f3cf62fbb55b811" + }, + "Apiservice.api.set_settings": { + "public_path": "api.set_settings", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "api_path": "", + "both": "", + "mock": "", + "private": "", + "settings": "" + }, + "return_type": "", + "warning": "", + "hash": "2bb16f38b222529833f12992caef1ef4f43de0e8b9aa7be0d332310daf2f7976" + }, + "Apiservice.api.set_state": { + "public_path": "api.set_state", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "api_path": "", + "both": "", + "mock": "", + "private": "", + "state": "" + }, + "return_type": "", + "warning": "", + "hash": "db2b0966fb2e28b775c47834546d98b33f5e10f511eaf5752f953e3f2bcaeed1" + }, + "Apiservice.api.update": { + "public_path": "api.update", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "endpoint_path": "", + "endpoint_timeout": "int | None", + "hide_mock_definition": "bool | None", + "mock_function": "syft.service.api.api.Endpoint | None", + "private_function": "syft.service.api.api.Endpoint | None" + }, + "return_type": "", + "warning": "", + "hash": 
"7d426096b634f4883d16bb05f5fa4ccc6404915dc834f294467baa1ff4994128" + }, + "Apiservice.api.view": { + "public_path": "api.view", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "path": "" + }, + "return_type": "", + "warning": "", + "hash": "572b69c12966396e98f7f37298c6402caf0813acea66dba95ddd2af016b18dcc" + }, + "Attestationservice.attestation.get_cpu_attestation": { + "public_path": "attestation.get_cpu_attestation", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "raw_token": "" + }, + "return_type": "str | syft.service.response.SyftSuccess", + "warning": "", + "hash": "4246dcc86b4dfdbc13b23a6314f1e8591f2775e64dc52bd6fe2e55602e0a0498" + }, + "Attestationservice.attestation.get_gpu_attestation": { + "public_path": "attestation.get_gpu_attestation", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "raw_token": "" + }, + "return_type": "str | syft.service.response.SyftSuccess", + "warning": "", + "hash": "e0124c33246e750a7b22ad3f626a7eef03f28fb2471d9ba29f8f071ce7629032" + }, + "Blobstorageservice.blob_storage.allocate": { + "public_path": "blob_storage.allocate", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "obj": "" + }, + "return_type": "syft.store.blob_storage.on_disk.OnDiskBlobDeposit | syft.store.blob_storage.seaweedfs.SeaweedFSBlobDeposit", + "warning": "", + "hash": "4a65f796fa149a2335d9763a24c8e64539ea9f609b4e42129ca826152ae2678e" + }, + "Blobstorageservice.blob_storage.allocate_for_user": { + "public_path": "blob_storage.allocate_for_user", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "obj": "", + "uploaded_by": "" + }, + "return_type": "syft.store.blob_storage.on_disk.OnDiskBlobDeposit | syft.store.blob_storage.seaweedfs.SeaweedFSBlobDeposit", + "warning": "", + "hash": "6641df95c2b76ead317755b15928d0baf353e86f1aec1feabc95f9f48f6c032e" + }, + "Blobstorageservice.blob_storage.delete": { + "public_path": "blob_storage.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "4dfb5250b5d8b07c7e5580f4906f2641cdd532c1ea8eb9b82f3f3d649422b696" + }, + "Blobstorageservice.blob_storage.get_all": { + "public_path": "blob_storage.get_all", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.types.blob_storage.BlobStorageEntry]", + "warning": "", + "hash": "e65c241f16475e2d06aa56695c606f1b8238fbe2529e80f4c537ccba27528f61" + }, + "Blobstorageservice.blob_storage.get_by_uid": { + "public_path": "blob_storage.get_by_uid", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "99b2ae3648407efeee2bb70021534af55d2169fa7942b1116aadb1b58044c475" + }, + "Blobstorageservice.blob_storage.get_files_from_bucket": { + "public_path": "blob_storage.get_files_from_bucket", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "bucket_name": "" + }, + "return_type": "", + "warning": "", + "hash": "3ae6052377fc7224a087bd38b28d5dcf4c60f3408c9604b66d91051d6ffe36b2" + }, + "Blobstorageservice.blob_storage.get_metadata": { + "public_path": "blob_storage.get_metadata", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "793989fe94223e81ebce742640a456eb19543800f145c4a4a4b0058b0558c7b3" + }, + "Blobstorageservice.blob_storage.mark_write_complete": { + "public_path": "blob_storage.mark_write_complete", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + 
"etags": "", + "no_lines": "int | None", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "4fdf01eae7e41b52137b3b248cb1826cbaa3246ddd44f32fffa0e9610184e9ec" + }, + "Blobstorageservice.blob_storage.mount_azure": { + "public_path": "blob_storage.mount_azure", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "account_key": "", + "account_name": "", + "bucket_name": "", + "container_name": "", + "use_direct_connections": "" + }, + "return_type": "", + "warning": "", + "hash": "a4efc044cd27e3d8ef96603c1c8ab008ceca8c0dbaae8bc4b12a3295fbe0540e" + }, + "Blobstorageservice.blob_storage.read": { + "public_path": "blob_storage.read", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "d00c264dd04ca50531390748ce0409cda67d32d3c5e85e3335108dd11ec07c98" + }, + "Blobstorageservice.blob_storage.write_to_disk": { + "public_path": "blob_storage.write_to_disk", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "data": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "8384cd726618f43f38e3705ec7f0625711171e51ca4cb9713232c9b69f4ed231" + }, + "Codehistoryservice.code_history.delete": { + "public_path": "code_history.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "77b1ae2eebcbb7b6df33b5c1cfd2692c1865d4558caafb04be042f3e87f69d27" + }, + "Codehistoryservice.code_history.get": { + "public_path": "code_history.get", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "258594ec0e7e2f7475030024ceabf04bd7741a0f80dfb8fe68c86afd6e02ac96" + }, + "Codehistoryservice.code_history.get_all": { + "public_path": "code_history.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.code_history.code_history.CodeHistory]", + "warning": "", + "hash": "7676007066a95921c60339dbb903c8cd9b1419e88baca830e7e87cf739a040a3" + }, + "Codehistoryservice.code_history.get_by_name_and_user_email": { + "public_path": "code_history.get_by_name_and_user_email", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "service_func_name": "", + "user_email": "", + "user_id": "" + }, + "return_type": "list[syft.service.code_history.code_history.CodeHistory]", + "warning": "", + "hash": "fdd6237cbaba33e0a166eaabbfafce3cadef26ac5e811c21520e4d0e3886484d" + }, + "Codehistoryservice.code_history.get_histories": { + "public_path": "code_history.get_histories", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "24ee3c1f7d3ee5ffd283a71f1dbb72782e35fc1f3792b47b27435f3b91aee7f9" + }, + "Codehistoryservice.code_history.get_history": { + "public_path": "code_history.get_history", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "1725971eaeefdf14c09721dc0a523d2122c20d896bfa56e97a2f24ecbf4f1909" + }, + "Codehistoryservice.code_history.get_history_for_user": { + "public_path": "code_history.get_history_for_user", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "email": "" + }, + "return_type": "", + "warning": "", + "hash": "9f9eef0b862420b25bf7e2d41a966f3539f2381afb5b6d28f97e206bf4dfc6ee" + }, + "Codehistoryservice.code_history.submit_version": { + "public_path": "code_history.submit_version", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + 
"signature": { + "code": "syft.service.code.user_code.SubmitUserCode | syft.service.code.user_code.UserCode", + "comment": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "c0ad156201c844651b8ae181eb52e5096884dc1456d12e13ec11e4c590cc12c6" + }, + "Datasetservice.dataset.add": { + "public_path": "dataset.add", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "dataset": "" + }, + "return_type": "", + "warning": "", + "hash": "b3cb7f311fe96a7e3e7a529ae7d8c8ccc73b119215c94b57f7ed901987bee2f2" + }, + "Datasetservice.dataset.delete": { + "public_path": "dataset.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "delete_assets": "", + "uid": "" + }, + "return_type": "", + "warning": { + "name": "HighSideCRUDWarning", + "confirmation": true, + "enabled": true + }, + "hash": "7925e8e36c29cb4aad184418275f74448cb3768de8c0134190e2cde103a655fb" + }, + "Datasetservice.dataset.get_all": { + "public_path": "dataset.get_all", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "page_index": "int | None", + "page_size": "int | None" + }, + "return_type": "syft.service.dataset.dataset.DatasetPageView | syft.types.dicttuple.DictTuple[str, syft.service.dataset.dataset.Dataset]", + "warning": { + "name": "CRUDReminder", + "confirmation": false, + "enabled": true + }, + "hash": "6f7b1f3e8e5b535dc79ce0e6c9346bfc94a39aebaa3d6ce3abbd546f05a34bf5" + }, + "Datasetservice.dataset.get_assets_by_action_id": { + "public_path": "dataset.get_assets_by_action_id", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "list[syft.service.dataset.dataset.Asset]", + "warning": "", + "hash": "1de4213bb01018864816fdd5fafa9c015311288bcebf959ed2e024d1650d3213" + }, + "Datasetservice.dataset.get_by_action_id": { + "public_path": "dataset.get_by_action_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "list[syft.service.dataset.dataset.Dataset]", + "warning": "", + "hash": "63072d0181c8dfe3efe57c2034e0062e0e6bc86af38f95406ddadb9fe1580ac2" + }, + "Datasetservice.dataset.get_by_id": { + "public_path": "dataset.get_by_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "2fe0717a4fcbc376f5f3ebc9f095f8065a063e7ef4b23234679dbade3176ad43" + }, + "Datasetservice.dataset.search": { + "public_path": "dataset.search", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "name": "", + "page_index": "int | None", + "page_size": "int | None" + }, + "return_type": "syft.service.dataset.dataset.DatasetPageView | syft.types.dicttuple.DictTuple[str, syft.service.dataset.dataset.Dataset]", + "warning": "", + "hash": "3568d899e5b4c3fc13b08d14eb10fc5ed92d3c94989f069941b576290c095a19" + }, + "Datasubjectservice.data_subject.add": { + "public_path": "data_subject.add", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "data_subject": "" + }, + "return_type": "", + "warning": "", + "hash": "4ffae9b8c21d8dd399b3b89f2f343f9f6ce6cad505bb0466b2d85c7c1ba2756c" + }, + "Datasubjectservice.data_subject.get_all": { + "public_path": "data_subject.get_all", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.data_subject.data_subject.DataSubject]", + "warning": "", + "hash": "e2bb85406d13d14b0d61fb64b99347325c7964ab2d841cf10f8f5d3c7ec8ab62" + }, + "Datasubjectservice.data_subject.get_by_name": { + "public_path": "data_subject.get_by_name", + "RBAC_permission": 
"DATA_OWNER_ROLE_LEVEL", + "signature": { + "name": "" + }, + "return_type": "", + "warning": "", + "hash": "667ed49796f3c706375dfbc4421d9f462bd9d8986a254866fc0aaca431921f60" + }, + "Datasubjectservice.data_subject.get_members": { + "public_path": "data_subject.get_members", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "data_subject_name": "" + }, + "return_type": "list[syft.service.data_subject.data_subject.DataSubject]", + "warning": "", + "hash": "4b0701f7b1dd23165f4e7229a06a3f5d9ce31fd69b6bf3e819a9dd23c68dc58f" + }, + "Jobservice.job.add_read_permission_job_for_code_owner": { + "public_path": "job.add_read_permission_job_for_code_owner", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "job": "", + "user_code": "" + }, + "return_type": "None", + "warning": "", + "hash": "cdb6b543910b2464a76f49eeae6640f47369e6c1da05b3ecd0868e3245e4ecfe" + }, + "Jobservice.job.add_read_permission_log_for_code_owner": { + "public_path": "job.add_read_permission_log_for_code_owner", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "log_id": "", + "user_code": "" + }, + "return_type": "None", + "warning": "", + "hash": "64fe924db47a478187578b06799ca543e88d2a93da7bd69ecb35c482f162224e" + }, + "Jobservice.job.create_job_for_user_code_id": { + "public_path": "job.create_job_for_user_code_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "add_code_owner_read_permissions": "", + "log_stderr": "", + "log_stdout": "", + "result": "syft.service.action.action_object.ActionObject | None", + "status": "", + "user_code_id": "" + }, + "return_type": "", + "warning": "", + "hash": "2bc0ee6f4a40b648d56a7c7aed852984bd0eae15c677368b9b30a56dfbb84446" + }, + "Jobservice.job.delete": { + "public_path": "job.delete", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "3024a04ad5e7b2b37672c057aceeb8f8cd45fc5d57a9927bd2f3ba293f1f57cc" + }, + "Jobservice.job.get": { + "public_path": "job.get", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "2e3110b40381c9ffbdb6db828af1cfd19e15b04f24e4fc63971f05796d6e391c" + }, + "Jobservice.job.get_active": { + "public_path": "job.get_active", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.job.job_stash.Job]", + "warning": "", + "hash": "56c9476b920e66985109c1331231b679d9ab17d38b3b2ea8a1a380d18523650d" + }, + "Jobservice.job.get_all": { + "public_path": "job.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.job.job_stash.Job]", + "warning": "", + "hash": "4bf302170ced81d766aec017b45ac04dc56bd0a059744604664961af53f49101" + }, + "Jobservice.job.get_by_result_id": { + "public_path": "job.get_by_result_id", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "result_id": "" + }, + "return_type": "", + "warning": "", + "hash": "301d4c22b206ff7a6ce8c7b48279140570452e791bd4843344b767f37527e82e" + }, + "Jobservice.job.get_by_user_code_id": { + "public_path": "job.get_by_user_code_id", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "user_code_id": "" + }, + "return_type": "list[syft.service.job.job_stash.Job]", + "warning": "", + "hash": "e6496d263e8a15e4dac3da7d73d1e326a94332543f5ad5506729b95c3b4896c5" + }, + "Jobservice.job.get_subjobs": { + "public_path": "job.get_subjobs", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": 
{ + "uid": "" + }, + "return_type": "list[syft.service.job.job_stash.Job]", + "warning": "", + "hash": "6207a08d00ba36bdf676a08f260f167d99dd5ca8f1d4d417459522d812a4edfa" + }, + "Jobservice.job.kill": { + "public_path": "job.kill", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "id": "" + }, + "return_type": "", + "warning": "", + "hash": "05835718234e2001d7af125baa27e974972c658fc842d92513d7a5a98009655c" + }, + "Jobservice.job.restart": { + "public_path": "job.restart", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "760a1deabe7da438c1d160fa6b76c99fcc5519a3d9677f9b2e26ece02d43fcc0" + }, + "Jobservice.job.update": { + "public_path": "job.update", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "job": "" + }, + "return_type": "", + "warning": "", + "hash": "ae79a1a3994120adc1fa2562e29a97a60d92e327404c22226c0e5134f6a6bd3b" + }, + "Logservice.log.add": { + "public_path": "log.add", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "job_id": "", + "stderr": "", + "stdout": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "5089dc491838e74168c8242f6f7335539dba0692f10f96a58d4c62c362ba917c" + }, + "Logservice.log.append": { + "public_path": "log.append", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "new_err": "", + "new_str": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "23374285e90cc403c4aab4fcfb46929aadfab8f3417ea5439fefdf6db437f3c0" + }, + "Logservice.log.delete": { + "public_path": "log.delete", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "eb09c52c47b1929b7083dfef076a9d423bb9ed60725493b217f98c642e3f4bd2" + }, + "Logservice.log.get": { + "public_path": "log.get", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "4d6d4c378f2774f71a45d4112f917125ec3d2effbf80a002d0dc488a62388728" + }, + "Logservice.log.get_all": { + "public_path": "log.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.log.log.SyftLog]", + "warning": "", + "hash": "60e6c47439d90d9a8c97954081620d3a29bd170f61247b00820da819f8c01ef3" + }, + "Logservice.log.get_stderr": { + "public_path": "log.get_stderr", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "ec451dfe7e0f68159c07a54eef448d94d2394bffda91910bddf2c9398721d3b4" + }, + "Logservice.log.get_stdout": { + "public_path": "log.get_stdout", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "538f3b033fcd84c18b167522106879aa0a78163c8dc181964bac0a5f324c8f96" + }, + "Logservice.log.has_storage_permission": { + "public_path": "log.has_storage_permission", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "18c5c002e607a77e8f26226c4fc7283e75924095c7bc204328288e69dcb1dc52" + }, + "Logservice.log.restart": { + "public_path": "log.restart", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "0d3a97c22b4f3fd40b352dd692f164524aa2c66c40af20fefd9c6d21ff005667" + }, + "Metadataservice.metadata.get_env": { + "public_path": "metadata.get_env", + 
"RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "07f51344162f4525f0de2352b3ca114ee9f5513fa17a4f03f5bb159fa4fccec6" + }, + "Metadataservice.metadata.get_metadata": { + "public_path": "metadata.get_metadata", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "09378779f1a3d1fef088897d4f9cbed03d769d162b59d5a872ff9c20f451bc28" + }, + "Migrationservice.migration": { + "public_path": "migration", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "canonical_name": "" + }, + "return_type": "", + "warning": "", + "hash": "14594041323aa75d7d16bfa2b263dd209b8b3fbfbf324197b7bf5f905c5b9847" + }, + "Migrationservice.migration._get_object": { + "public_path": "migration._get_object", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "object_type": "", + "uid": "" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "370048f733d7c8723d3c9ddea11831a914e75d59ce1335f8c8ee03a51f1e3bb1" + }, + "Migrationservice.migration._update_object": { + "public_path": "migration._update_object", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "object": "typing.Any" + }, + "return_type": "typing.Any", + "warning": "", + "hash": "dbe06a09ac0f9abb4a022fe08b58008bd9801ca5178e3ec920214657df99be87" + }, + "Migrationservice.migration.apply_migration_data": { + "public_path": "migration.apply_migration_data", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "migration_data": "" + }, + "return_type": "", + "warning": "", + "hash": "4a1ac9c75d034b1ba6ff182694c9d95dc58c4f71301749d47a9dc35c31e287db" + }, + "Migrationservice.migration.create_migrated_objects": { + "public_path": "migration.create_migrated_objects", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "ignore_existing": "", + "migrated_objects": "list[syft.types.syft_object.SyftObject]" + }, + "return_type": "", + "warning": "", + "hash": "fe410f6d5a17ebcdeb4772b235c38f58d8f1fb6822734ffd871662b19a1fad60" + }, + "Migrationservice.migration.get_all_store_metadata": { + "public_path": "migration.get_all_store_metadata", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "document_store_object_types": "list[type[syft.types.syft_object.SyftObject]] | None", + "include_action_store": "" + }, + "return_type": "dict[type[syft.types.syft_object.SyftObject], syft.service.migration.object_migration_state.StoreMetadata]", + "warning": "", + "hash": "f088367a243684313db56ee8a0988b2d904751b20744b011cf27b73883211a87" + }, + "Migrationservice.migration.get_migration_actionobjects": { + "public_path": "migration.get_migration_actionobjects", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "get_all": "" + }, + "return_type": "", + "warning": "", + "hash": "533151db7bdc4cbdd8a5f2fbdd1a66353319bcfb4298cc35caf6d8828579d296" + }, + "Migrationservice.migration.get_migration_data": { + "public_path": "migration.get_migration_data", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "7c773afbe686d21208eb3ea19ba3d7e6d048d0123dc78e5714428a63b816cd90" + }, + "Migrationservice.migration.migrate_data": { + "public_path": "migration.migrate_data", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "document_store_object_types": "list[type[syft.types.syft_object.SyftObject]] | None" + }, + "return_type": "", + "warning": "", + "hash": "919967fd93ea697e4e57f52ed97f5c31bb15073a92f81320646a4aee3f34f508" + }, + 
"Migrationservice.migration.reset_and_restore": { + "public_path": "migration.reset_and_restore", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "migration_data": "" + }, + "return_type": "syft.service.response.SyftSuccess | syft.service.response.SyftError", + "warning": "", + "hash": "675fb5f3904cb487354ef73e499c70cb01a67b6583541704538359af13de5ca2" + }, + "Networkservice.network.add_peer": { + "public_path": "network.add_peer", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "challenge": "", + "peer": "", + "self_server_route": "", + "verify_key": "" + }, + "return_type": "syft.service.request.request.Request | syft.service.response.SyftSuccess", + "warning": "", + "hash": "248700efa099c1a57c38feffa9f5ee223e9b51e7368e114294059d8fc8924ade" + }, + "Networkservice.network.add_route": { + "public_path": "network.add_route", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "called_by_peer": "", + "peer_verify_key": "", + "route": "" + }, + "return_type": "", + "warning": "", + "hash": "292af4e3d1243d43f1557ade1b24044c74695965debdc04f8aac73a034469b0c" + }, + "Networkservice.network.add_route_on_peer": { + "public_path": "network.add_route_on_peer", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "peer": "", + "route": "" + }, + "return_type": "", + "warning": "", + "hash": "5cd7ea5ca401bc18802628b7948e2bf9acdd0b77eaef1cd8229fe6b732e3cf7b" + }, + "Networkservice.network.check_peer_association": { + "public_path": "network.check_peer_association", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "peer_id": "" + }, + "return_type": "", + "warning": "", + "hash": "ea1b082bd78576b63abe1101574ebd7db05c01091ada31bb809de45ff7f8ca33" + }, + "Networkservice.network.delete_peer_by_id": { + "public_path": "network.delete_peer_by_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "088d460ae5d9abe719e60696eebcd951e755d172649980ff20d7718d404ce75f" + }, + "Networkservice.network.delete_route": { + "public_path": "network.delete_route", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "called_by_peer": "", + "peer_verify_key": "", + "route": "syft.service.network.routes.ServerRoute | None" + }, + "return_type": "syft.service.response.SyftSuccess | syft.service.response.SyftInfo", + "warning": "", + "hash": "320b21094fee33b6a8e640581140270e100971bab88f701fc8bfd16b0149379e" + }, + "Networkservice.network.delete_route_on_peer": { + "public_path": "network.delete_route_on_peer", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "peer": "", + "route": "" + }, + "return_type": "syft.service.response.SyftSuccess | syft.service.response.SyftInfo", + "warning": "", + "hash": "296b0b18482160ed54af6a8669c62c0e028c5e7f624fa9f385bfb26f5a0fccea" + }, + "Networkservice.network.exchange_credentials_with": { + "public_path": "network.exchange_credentials_with", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "remote_server_route": "", + "remote_server_verify_key": "", + "reverse_tunnel": "", + "self_server_route": "" + }, + "return_type": "syft.service.request.request.Request | syft.service.response.SyftSuccess", + "warning": { + "name": "CRUDWarning", + "confirmation": true, + "enabled": true + }, + "hash": "3975636f5dc68fb30e7afd1f90d2f00e48df9b8b7c11ff223157e2ebfa11b15d" + }, + "Networkservice.network.get_all_peers": { + "public_path": "network.get_all_peers", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": 
"list[syft.service.network.server_peer.ServerPeer]", + "warning": "", + "hash": "ade2724f4f0e1f197ee784d4262622ba86f03df5c3a253f9e913c313ebb91398" + }, + "Networkservice.network.get_peer_by_name": { + "public_path": "network.get_peer_by_name", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "name": "" + }, + "return_type": "", + "warning": "", + "hash": "db2f1198940b75c065993dbc9daa34e9c721bc6a77e7ef3432e52e04c337df4c" + }, + "Networkservice.network.get_peers_by_type": { + "public_path": "network.get_peers_by_type", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "server_type": "" + }, + "return_type": "list[syft.service.network.server_peer.ServerPeer]", + "warning": "", + "hash": "bb3ea79f5c7aadab152336dcf4320c5ac9b1be6b18cfa29e8c35668e25b0c0b1" + }, + "Networkservice.network.ping": { + "public_path": "network.ping", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "challenge": "" + }, + "return_type": "", + "warning": "", + "hash": "40b0675248738145eac95b049f20ff81d0390b7cb6a5076875cac3d108c64c46" + }, + "Networkservice.network.update_peer": { + "public_path": "network.update_peer", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "peer_update": "" + }, + "return_type": "", + "warning": "", + "hash": "2b46754fee83322cc434bad2c081d2ce3fd717eea9e458904ea27a246d80bd1c" + }, + "Networkservice.network.update_route_priority": { + "public_path": "network.update_route_priority", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "called_by_peer": "", + "peer_verify_key": "", + "priority": "int | None", + "route": "" + }, + "return_type": "", + "warning": "", + "hash": "ba2fa210de8f17f998ae4d725c5f0f26d2f9f41c9ead790098593977f5df9903" + }, + "Networkservice.network.update_route_priority_on_peer": { + "public_path": "network.update_route_priority_on_peer", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "peer": "", + "priority": "int | None", + "route": "" + }, + "return_type": "", + "warning": "", + "hash": "7601b207dc0989ecd0d4fa0081ab7238cbc09f181b81732f499e68fe75b468c2" + }, + "Notificationservice.notifications.activate": { + "public_path": "notifications.activate", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "dd7ee2f01ea20424d16876e200505ae611aaebb1a4741a6ee33ac13528236e9a" + }, + "Notificationservice.notifications.clear": { + "public_path": "notifications.clear", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "5bd81b8705c00214e84925d08b2e6ced1cab811961939d2a6cf0f64bc113aa7c" + }, + "Notificationservice.notifications.deactivate": { + "public_path": "notifications.deactivate", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "de7645699ea1be064486950eb987accdb95a5583847c15f51c23f15ac386adcf" + }, + "Notificationservice.notifications.get_all": { + "public_path": "notifications.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.notification.notifications.Notification]", + "warning": "", + "hash": "ac013b589ff061b229cebd2ebb4af8f265cbfd526d5ce0957a056ee1b163b88a" + }, + "Notificationservice.notifications.get_all_read": { + "public_path": "notifications.get_all_read", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.notification.notifications.Notification]", + "warning": "", + "hash": 
"671c5d779c7cbdc8b4bf28f8909f1eee1e8e5dfd2361d82cd5a021c270399056" + }, + "Notificationservice.notifications.get_all_sent": { + "public_path": "notifications.get_all_sent", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.notification.notifications.Notification]", + "warning": "", + "hash": "51ad7c8a0d0a880eed4f9ace9ea788cb169b5f499288acc619df8f7f36dd4572" + }, + "Notificationservice.notifications.get_all_unread": { + "public_path": "notifications.get_all_unread", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.notification.notifications.Notification]", + "warning": "", + "hash": "ba9cfd8725d4d204edf1dd6a467331b89b68bbe06f4358561724f60fe6658e11" + }, + "Notificationservice.notifications.mark_as_read": { + "public_path": "notifications.mark_as_read", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "9c3db05496d63b05d556a1fb9b4a72e7f85b7101fc0c031a0a5ff64042974e40" + }, + "Notificationservice.notifications.mark_as_unread": { + "public_path": "notifications.mark_as_unread", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "7092e713add3f74cebba68755ff947c3cac2e2d774b39bf438db8d119803aecf" + }, + "Notificationservice.notifications.reply": { + "public_path": "notifications.reply", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "reply": "" + }, + "return_type": "", + "warning": "", + "hash": "6cf926abd11ef14b25afeb8bd292d7232cc10b06c5c4ed856885a9ebb82a82eb" + }, + "Notificationservice.notifications.resolve_object": { + "public_path": "notifications.resolve_object", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "linked_obj": "" + }, + "return_type": "", + "warning": "", + "hash": "4018a9e1d50166f20d1e73fa8448c765eedfa417980883da7cddbb3199569467" + }, + "Notificationservice.notifications.send": { + "public_path": "notifications.send", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "notification": "" + }, + "return_type": "", + "warning": "", + "hash": "cd6d675329c403a45803f36caae26657c18ede175ed28657a561bb1ab48b2d19" + }, + "Notificationservice.notifications.settings": { + "public_path": "notifications.settings", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "8a3250a191cb17af16fa98bef8a30e5edacda83c2445f202b05369c475fa72d0" + }, + "Notificationservice.notifications.user_settings": { + "public_path": "notifications.user_settings", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "06b41cf3485dbb1c638e5f75d20c8c6c023bacb4701247b0e6a92f574ce375f3" + }, + "Outputservice.output.create": { + "public_path": "output.create", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "executing_user_verify_key": "", + "input_ids": "dict[str, syft.types.uid.UID] | None", + "job_id": "syft.types.uid.UID | None", + "output_ids": "syft.types.uid.UID | list[syft.types.uid.UID] | dict[str, syft.types.uid.UID]", + "output_policy_id": "syft.types.uid.UID | None", + "user_code_id": "" + }, + "return_type": "", + "warning": "", + "hash": "dc4ea34317e12bf98edf01c4bc3cf44fb1a297e59c06a72fc26aeaa47929d57a" + }, + "Outputservice.output.get": { + "public_path": "output.get", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "id": "" + }, + "return_type": "", + "warning": "", + 
"hash": "2986bfb8974ece73066bc347e5b44bcb0d3dd5c7d6116db76bde18f486c0b241" + }, + "Outputservice.output.get_all": { + "public_path": "output.get_all", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.output.output_service.ExecutionOutput]", + "warning": "", + "hash": "1522501915d85c7f5f7fa9efc82a40cd5fdb1c10f733af8356be4884e34a89cd" + }, + "Outputservice.output.get_by_job_id": { + "public_path": "output.get_by_job_id", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "job_id": "" + }, + "return_type": "", + "warning": "", + "hash": "d7bc1d31fd46e20b7c294c07f10a1003389a421e50f6cacc2184e06e8a06c9e4" + }, + "Outputservice.output.get_by_output_policy_id": { + "public_path": "output.get_by_output_policy_id", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "output_policy_id": "" + }, + "return_type": "list[syft.service.output.output_service.ExecutionOutput]", + "warning": "", + "hash": "8e2c24f55fcb1c84fb2206bcb81e92ae73e45527d8d59f99a565420bf3a4ca61" + }, + "Outputservice.output.get_by_user_code_id": { + "public_path": "output.get_by_user_code_id", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "user_code_id": "" + }, + "return_type": "list[syft.service.output.output_service.ExecutionOutput]", + "warning": "", + "hash": "a7d95b8196de70c5f6e97fde7f0bf8e7a526b3a1de904205507e64dc8c118b90" + }, + "Outputservice.output.has_output_read_permissions": { + "public_path": "output.has_output_read_permissions", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "user_code_id": "", + "user_verify_key": "" + }, + "return_type": "", + "warning": "", + "hash": "e5f95987d84cb73a11dd6b92992f6fb93476710fad37a9c5d15c4a0d03acefd0" + }, + "Policyservice.policy.add": { + "public_path": "policy.add", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "policy_code": "syft.service.policy.policy.SubmitUserPolicy | syft.service.policy.policy.UserPolicy" + }, + "return_type": "", + "warning": "", + "hash": "630387c7d8672e922c3ca5c91b094cf472303abc254ec2be8b33a77461e66def" + }, + "Policyservice.policy.get_all": { + "public_path": "policy.get_all", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.policy.policy.UserPolicy]", + "warning": "", + "hash": "40467db5c219bf970d4b9a720f4ba9630ea7637c2b6f67abbd4cfcf1422ff455" + }, + "Policyservice.policy.get_by_uid": { + "public_path": "policy.get_by_uid", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "5ad41d5c3493fd66bc7036d58b4b8b22146f17d1db5b090abae3069e70586056" + }, + "Projectservice.project.add_event": { + "public_path": "project.add_event", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "project_event": "" + }, + "return_type": "", + "warning": "", + "hash": "34bcd382b8c043cda5743a1876f9020601267b65e37ea74495cb16d78983876e" + }, + "Projectservice.project.broadcast_event": { + "public_path": "project.broadcast_event", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "project_event": "" + }, + "return_type": "", + "warning": "", + "hash": "aef73e447c92a4c7b6443ffd4d47e4150c317717de70be49540164c5bc02cd9b" + }, + "Projectservice.project.can_create_project": { + "public_path": "project.can_create_project", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "3c106d238a89f35cc4b14e987678e61135afc089a8c53be303859d2d924f0880" + }, + 
"Projectservice.project.create_project": { + "public_path": "project.create_project", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "project": "" + }, + "return_type": "", + "warning": "", + "hash": "d844d0aa785d9f4414d229467a3f61c2529c85dcddfcc5cf34feb45663692bb9" + }, + "Projectservice.project.get_all": { + "public_path": "project.get_all", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.project.project.Project]", + "warning": "", + "hash": "a82c5b56ca9e2ef198e8fad21540d53cd7f32ab10efaf5ebac15e8d5f855c334" + }, + "Projectservice.project.get_by_name": { + "public_path": "project.get_by_name", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "name": "" + }, + "return_type": "", + "warning": "", + "hash": "2c72148209f032bf48c2d225ef49bd555f1647c4e3121692dad4b5d028d81340" + }, + "Projectservice.project.get_by_uid": { + "public_path": "project.get_by_uid", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "ef71eb772a4783d09377a11ce50c6f3a4978aeac5b2afb6d9cfe7febca0efeae" + }, + "Projectservice.project.sync": { + "public_path": "project.sync", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "project_id": "", + "seq_no": "" + }, + "return_type": "list[syft.service.project.project.ProjectEvent]", + "warning": "", + "hash": "ed217bbf1aeb9c262985d36397f314f37a2b05c7f32c0f71d76b1b3045d2d3ba" + }, + "Requestservice.request.add_changes": { + "public_path": "request.add_changes", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "changes": "list[syft.service.request.request.Change]", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "d4d149f5f3742cfd0e022a6032b4e80b742540506b8465a3ab9c7fd85aa750c4" + }, + "Requestservice.request.apply": { + "public_path": "request.apply", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "kwargs": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "dd7dc406eb3061dd5c55f97634cfc94815aed8226dc4ed87cdba43ae1e7a3f76" + }, + "Requestservice.request.delete_by_uid": { + "public_path": "request.delete_by_uid", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "2adfc43d40f40cf00e8a20d4e2b74002c05635b2d5410ff3a361b99ef269bd5f" + }, + "Requestservice.request.filter_all_info": { + "public_path": "request.filter_all_info", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "page_index": "int | None", + "page_size": "int | None", + "request_filter": "" + }, + "return_type": "list[syft.service.request.request.RequestInfo]", + "warning": "", + "hash": "e310c82d8b725a00f7ebb673123b1d6da38a2eb520eca2432cf15ef2e3214398" + }, + "Requestservice.request.get_all": { + "public_path": "request.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.request.request.Request]", + "warning": "", + "hash": "cf90b8a370cd33abc37908bb50c09a4fa3b79fe1e374b2557ab6e753deceec79" + }, + "Requestservice.request.get_all_approved": { + "public_path": "request.get_all_approved", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.request.request.Request]", + "warning": "", + "hash": "3e2a003fdd238554a545497578e1e0786edc1bf5b794c579b0bb0a61fa3f6605" + }, + "Requestservice.request.get_all_info": { + "public_path": "request.get_all_info", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + 
"signature": { + "page_index": "int | None", + "page_size": "int | None" + }, + "return_type": "list[list[syft.service.request.request.RequestInfo]] | list[syft.service.request.request.RequestInfo]", + "warning": "", + "hash": "b8687d0df70b62635798c6bc818cf548b7fb4887ec0938c89e396194d14d100f" + }, + "Requestservice.request.get_all_pending": { + "public_path": "request.get_all_pending", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.request.request.Request]", + "warning": "", + "hash": "a0c16eec47701f9639fac41c1871fb0956e44b84aac76dfb7845bacdfcd132bb" + }, + "Requestservice.request.get_all_rejected": { + "public_path": "request.get_all_rejected", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.request.request.Request]", + "warning": "", + "hash": "59195c8267af462cc61a2e9251ce653288327c34428c43606a5cef6044a76d4f" + }, + "Requestservice.request.get_by_uid": { + "public_path": "request.get_by_uid", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "a949e549fb2cbe37293689809476c62956d8991d323dce97dcfbf1c5cb2394c1" + }, + "Requestservice.request.get_by_usercode_id": { + "public_path": "request.get_by_usercode_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "usercode_id": "" + }, + "return_type": "list[syft.service.request.request.Request]", + "warning": "", + "hash": "929038ac093c8b90cf1d17ac4acbe65903a18efe91295e0b5504238577ead5ea" + }, + "Requestservice.request.set_tags": { + "public_path": "request.set_tags", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "request": "", + "tags": "list[str]" + }, + "return_type": "", + "warning": "", + "hash": "30a233cf90353daa9fb15e6e7d7aaea211a77fca6ac31354eea3fc20a4706e86" + }, + "Requestservice.request.submit": { + "public_path": "request.submit", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "reason": "str | None", + "request": "", + "send_message": "" + }, + "return_type": "", + "warning": "", + "hash": "80c620b37cc1d5ef564d4ec0a9df143584090bf94a7bf1fc0ce92f92180b9dd3" + }, + "Requestservice.request.undo": { + "public_path": "request.undo", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "reason": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "e3ad64b4d02b659c16cda7773176ce7f368ec66543961c6a6c8452080a667fce" + }, + "Settingsservice.settings.allow_association_request_auto_approval": { + "public_path": "settings.allow_association_request_auto_approval", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "enable": "" + }, + "return_type": "", + "warning": "", + "hash": "915181662bbb92b3c6101d8797be56b2d2cf58fe43bce2707f265655536cde21" + }, + "Settingsservice.settings.allow_guest_signup": { + "public_path": "settings.allow_guest_signup", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "enable": "" + }, + "return_type": "", + "warning": { + "name": "HighSideCRUDWarning", + "confirmation": true, + "enabled": true + }, + "hash": "3a8f017c0779b5e56b85b8bf485487fc5ed95ea14bd404c55247fafa69c8c0a1" + }, + "Settingsservice.settings.batch_notifications": { + "public_path": "settings.batch_notifications", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "email_type": "", + "frequency": "", + "start_time": "" + }, + "return_type": "", + "warning": "", + "hash": "c74ca80e0efc8fd75de9e80e93fbe636bfc095708c0987224a44c8f8558d1535" + }, + 
"Settingsservice.settings.disable_notifications": { + "public_path": "settings.disable_notifications", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "5aab76e9ee5aaef9aa2e5569159625ec79552acedb300512a97d9b8d804eb185" + }, + "Settingsservice.settings.enable_notifications": { + "public_path": "settings.enable_notifications", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "email_password": "str | None", + "email_port": "str | None", + "email_sender": "str | None", + "email_server": "str | None", + "email_username": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "2ed74d0c4a4fe4d45e5bb0156e88b35912e72b799465608425a669b668d57bcb" + }, + "Settingsservice.settings.get": { + "public_path": "settings.get", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "6f690cf4d02b0b5dd378565e04a1fccc48dd595a28f7d7ffd1e10e92572aebfb" + }, + "Settingsservice.settings.get_server_config": { + "public_path": "settings.get_server_config", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "dict[str, typing.Any]", + "warning": "", + "hash": "b25a48efc5a3256ca7441aed9ffdcb347e9db4e80bb5408e0325b577cccebba1" + }, + "Settingsservice.settings.set": { + "public_path": "settings.set", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "settings": "" + }, + "return_type": "", + "warning": "", + "hash": "14e50764b0c3a2baa3a1c6b3678d8686aa63a7fe3eb2ef70e960c8e33ac06aae" + }, + "Settingsservice.settings.set_email_rate_limit": { + "public_path": "settings.set_email_rate_limit", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "daily_limit": "", + "email_type": "" + }, + "return_type": "", + "warning": "", + "hash": "c0eee38460af71b53cd9d9b74c1b49bbee2d2a0c1651c14f07812a6cf52d1a96" + }, + "Settingsservice.settings.set_server_side_type_dangerous": { + "public_path": "settings.set_server_side_type_dangerous", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "server_side_type": "" + }, + "return_type": "", + "warning": "", + "hash": "df16ae6c26ba4d0cf369ee31d486c81081396b477aea544913121475f514d0ae" + }, + "Settingsservice.settings.update": { + "public_path": "settings.update", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "admin_email": "str | syft.types.syft_metaclass.EmptyType", + "allow_guest_sessions": "bool | syft.types.syft_metaclass.EmptyType", + "association_request_auto_approval": "bool | syft.types.syft_metaclass.EmptyType", + "description": "str | syft.types.syft_metaclass.EmptyType", + "eager_execution_enabled": "bool | syft.types.syft_metaclass.EmptyType", + "id": "syft.types.uid.UID | syft.types.syft_metaclass.EmptyType", + "name": "str | syft.types.syft_metaclass.EmptyType", + "notifications_enabled": "bool | syft.types.syft_metaclass.EmptyType", + "on_board": "bool | syft.types.syft_metaclass.EmptyType", + "organization": "str | syft.types.syft_metaclass.EmptyType", + "pwd_token_config": "syft.service.settings.settings.PwdTokenResetConfig | syft.types.syft_metaclass.EmptyType", + "signup_enabled": "bool | syft.types.syft_metaclass.EmptyType", + "welcome_markdown": "syft.util.misc_objs.HTMLObject | syft.util.misc_objs.MarkdownDescription | syft.types.syft_metaclass.EmptyType" + }, + "return_type": "", + "warning": "", + "hash": "72b89a87128736c8d058fc8b52673aec3362d3395d909c8f4486da2733c266a2" + }, + "Settingsservice.settings.welcome_customize": { + "public_path": 
"settings.welcome_customize", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "html": "", + "markdown": "" + }, + "return_type": "", + "warning": "", + "hash": "76b58fcf910ce249ab50eeda43ae572afe0090799ae7093aea35c4cf0ccfcc59" + }, + "Settingsservice.settings.welcome_preview": { + "public_path": "settings.welcome_preview", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "html": "", + "markdown": "" + }, + "return_type": "syft.util.misc_objs.MarkdownDescription | syft.util.misc_objs.HTMLObject", + "warning": "", + "hash": "5a43c8e7b827a694e58201057b9eeee0652c374414956e6051441ded200188c7" + }, + "Settingsservice.settings.welcome_show": { + "public_path": "settings.welcome_show", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "syft.util.misc_objs.HTMLObject | syft.util.misc_objs.MarkdownDescription", + "warning": "", + "hash": "10bce24c8bdf650a18f126f8907d098767a4502a9579afbc18abdf2e97cb06ed" + }, + "Syftimageregistryservice.image_registry.add": { + "public_path": "image_registry.add", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "url": "" + }, + "return_type": "", + "warning": "", + "hash": "64a4280e61983f88002373c751eaea8929699ece69a394634c682c3f77d6a88a" + }, + "Syftimageregistryservice.image_registry.delete": { + "public_path": "image_registry.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "syft.types.uid.UID | None", + "url": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "1b8d83ba5e1db162e8a1f6b3dc35ef8567590b0d6feedcf0715cb7440b58be18" + }, + "Syftimageregistryservice.image_registry.get_all": { + "public_path": "image_registry.get_all", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.worker.image_registry.SyftImageRegistry]", + "warning": "", + "hash": "b26f9e22130c342a4ac03fb91fb3237581532c909dd481b419f2ff26dc4ebb9e" + }, + "Syftimageregistryservice.image_registry.get_by_id": { + "public_path": "image_registry.get_by_id", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "516c5eacb7f2b0ece6402e40078a7834fa3ed82f5a4b1a607872e4dc972afe75" + }, + "Syftworkerimageservice.worker_image.build": { + "public_path": "worker_image.build", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "force_build": "", + "image_uid": "", + "pull_image": "", + "registry_uid": "syft.types.uid.UID | None", + "tag": "" + }, + "return_type": "", + "warning": "", + "hash": "74a4b94b50c84507019dc90db213599e3ecba320b76eac12f40a0c54a2f20ab7" + }, + "Syftworkerimageservice.worker_image.get_all": { + "public_path": "worker_image.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "syft.types.dicttuple.DictTuple[str, syft.service.worker.worker_image.SyftWorkerImage]", + "warning": "", + "hash": "2fc718a2dc2f739472afe8b59dd9ed9b4610c8ea6fa1fe154fb4957362bad695" + }, + "Syftworkerimageservice.worker_image.get_by_config": { + "public_path": "worker_image.get_by_config", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "worker_config": "" + }, + "return_type": "", + "warning": "", + "hash": "14ed82d9ac2ff31ee2116f59ed5d84e7ef371d5d4610c35b579b817486a29793" + }, + "Syftworkerimageservice.worker_image.get_by_uid": { + "public_path": "worker_image.get_by_uid", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + 
"hash": "06965ad2051fb7bd27236f91f407fcac114487e2563aeff38eb6967d20450819" + }, + "Syftworkerimageservice.worker_image.push": { + "public_path": "worker_image.push", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "image_uid": "", + "password": "str | None", + "username": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "31e41d284837c038e63535aa24a7a80821600240d8dd61dcfeea993789c4102d" + }, + "Syftworkerimageservice.worker_image.remove": { + "public_path": "worker_image.remove", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "251c2f98944c6e26e3ac0a034537f99bccdcbf80958c209ec4ab3e1355592d70" + }, + "Syftworkerimageservice.worker_image.submit": { + "public_path": "worker_image.submit", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "worker_config": "" + }, + "return_type": "", + "warning": "", + "hash": "6110f27a7eae93bcf75af03ec2a8b9ce75abc613320c9e7ae7d73c5492f3e047" + }, + "Syftworkerpoolservice.worker_pool.add_workers": { + "public_path": "worker_pool.add_workers", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "number": "", + "pool_id": "syft.types.uid.UID | None", + "pool_name": "str | None", + "registry_password": "str | None", + "registry_username": "str | None" + }, + "return_type": "list[syft.service.worker.worker_pool.ContainerSpawnStatus]", + "warning": "", + "hash": "06f251d4c0e1396ed12891b8f9f944589fa25ded053543c4f2959f2316a14156" + }, + "Syftworkerpoolservice.worker_pool.create_image_and_pool_request": { + "public_path": "worker_pool.create_image_and_pool_request", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "config": "", + "num_workers": "", + "pod_annotations": "dict[str, str] | None", + "pod_labels": "dict[str, str] | None", + "pool_name": "", + "pull_image": "", + "reason": "str | None", + "registry_uid": "syft.types.uid.UID | None", + "tag": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "c06d361c083a000443e74197dd59a3d9001b18f5078876689dc057f4a2edc44d" + }, + "Syftworkerpoolservice.worker_pool.create_pool_request": { + "public_path": "worker_pool.create_pool_request", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "image_uid": "", + "num_workers": "", + "pod_annotations": "dict[str, str] | None", + "pod_labels": "dict[str, str] | None", + "pool_name": "", + "reason": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "b4817014698bf833808f0f563233605f513c0f22446610a191e52e9f703fe980" + }, + "Syftworkerpoolservice.worker_pool.delete": { + "public_path": "worker_pool.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "pool_id": "syft.types.uid.UID | None", + "pool_name": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "bab446b49cf97c5750f38f6c38b49646d1db02e06b0a41fcd82691b46d46ef2e" + }, + "Syftworkerpoolservice.worker_pool.filter_by_image_id": { + "public_path": "worker_pool.filter_by_image_id", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "image_uid": "" + }, + "return_type": "list[syft.service.worker.worker_pool.WorkerPool]", + "warning": "", + "hash": "888db009c94350a86d9d284549e2d56a4928614419a81ac201af63a8a7c38ff8" + }, + "Syftworkerpoolservice.worker_pool.get_all": { + "public_path": "worker_pool.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "syft.types.dicttuple.DictTuple[str, syft.service.worker.worker_pool.WorkerPool]", + 
"warning": "", + "hash": "d21402f0343206f9f7b8449a3a55931d90c44d21e8142cb28588dc9fd8b468f5" + }, + "Syftworkerpoolservice.worker_pool.get_by_name": { + "public_path": "worker_pool.get_by_name", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "pool_name": "" + }, + "return_type": "list[syft.service.worker.worker_pool.WorkerPool]", + "warning": "", + "hash": "b7badddcb3f3c127377d649c939e7c0c3ffa4cd753ff7939efbaa706f1f3e82e" + }, + "Syftworkerpoolservice.worker_pool.launch": { + "public_path": "worker_pool.launch", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "image_uid": "syft.types.uid.UID | None", + "num_workers": "", + "pod_annotations": "dict[str, str] | None", + "pod_labels": "dict[str, str] | None", + "pool_name": "", + "registry_password": "str | None", + "registry_username": "str | None" + }, + "return_type": "list[syft.service.worker.worker_pool.ContainerSpawnStatus]", + "warning": "", + "hash": "451ffdf11b26b7d0041f88d523dd9a668507d2a99f5d2b5a38815c0413b9359d" + }, + "Syftworkerpoolservice.worker_pool.purge_workers": { + "public_path": "worker_pool.purge_workers", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "pool_id": "syft.types.uid.UID | None", + "pool_name": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "20ed1708fb676dde51cab7a220b5264dc6b864881fd911d5696a4fe860167bfd" + }, + "Syftworkerpoolservice.worker_pool.scale": { + "public_path": "worker_pool.scale", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "number": "", + "pool_id": "syft.types.uid.UID | None", + "pool_name": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "f934aa52a48943f34e6bcb0c782d878a0642a083d45d65ff3df68bf33ccb4043" + }, + "Syftworkerpoolservice.worker_pool.sync_pool_from_request": { + "public_path": "worker_pool.sync_pool_from_request", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "request": "" + }, + "return_type": "", + "warning": "", + "hash": "77f33550b908f73926f203b3eaa71d9394bab3176591113e94af8cb1b0664e80" + }, + "Syncservice.sync._get_state": { + "public_path": "sync._get_state", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "d41943b8063064a2e843a418a6eb5de3c11c626d7d30d65507381f7095eb56ac" + }, + "Syncservice.sync.get_permissions": { + "public_path": "sync.get_permissions", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "items": "list[syft.types.syncable_object.SyncableSyftObject]" + }, + "return_type": "tuple[dict[syft.types.uid.UID, set[str]], dict[syft.types.uid.UID, set[syft.types.uid.UID]]]", + "warning": "", + "hash": "dcd880281d2fa43c3ac75b9ec8644e5ee3a9c0741ed4401f0fa873d63cd7a8b5" + }, + "Syncservice.sync.sync_items": { + "public_path": "sync.sync_items", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "ignored_batches": "dict[syft.types.uid.UID, int]", + "items": "list[syft.types.syncable_object.SyncableSyftObject]", + "permissions": "dict[type, list[syft.service.action.action_permissions.ActionObjectPermission]]", + "storage_permissions": "list[syft.service.action.action_permissions.StoragePermission]", + "unignored_batches": "set[syft.types.uid.UID]" + }, + "return_type": "", + "warning": "", + "hash": "7561cf4ace912bd7235ad03fc706c85fb13623cbab2af0c40596b5b9c1f01582" + }, + "Usercodeservice.code.call": { + "public_path": "code.call", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "kwargs": "typing.Any", + "uid": "" + }, + "return_type": "", + 
"warning": "", + "hash": "a7963f36d2a6ae35a3fbe9ba5362cbad9fc4ccadd21d21fe667f3d996f16ba63" + }, + "Usercodeservice.code.delete": { + "public_path": "code.delete", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "143b5c8c2444eb952c11a187a0cc4265eae8cfd854790885a0b0ef586ff34f2e" + }, + "Usercodeservice.code.get_all": { + "public_path": "code.get_all", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.code.user_code.UserCode]", + "warning": "", + "hash": "0e98e7ab505e5b54e17f41816d7978c424d4d9c5130e78ccceb053d3e4ade501" + }, + "Usercodeservice.code.get_all_for_user": { + "public_path": "code.get_all_for_user", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.code.user_code.UserCode]", + "warning": "", + "hash": "96ddc9ca6b46e107fcf2fed249d1266dc31370585bf5fb1a877f2dda6a2f7f0c" + }, + "Usercodeservice.code.get_by_id": { + "public_path": "code.get_by_id", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "48dcebf5d98a1f5a14a825729ff9bd46e96defc9b262f337db090f2f3a47e21c" + }, + "Usercodeservice.code.get_by_service_func_name": { + "public_path": "code.get_by_service_func_name", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "service_func_name": "" + }, + "return_type": "list[syft.service.code.user_code.UserCode]", + "warning": "", + "hash": "f1dc8817a0dc8f0c1ee80a8a86d38ed75eda76d5f55f47a8836980ca106c21fb" + }, + "Usercodeservice.code.request_code_execution": { + "public_path": "code.request_code_execution", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "code": "syft.service.code.user_code.SubmitUserCode | syft.service.code.user_code.UserCode", + "reason": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "e69c925579c700ab97cb02bcf930d777a726843f58daaae425815da16bbee0a3" + }, + "Usercodeservice.code.store_execution_output": { + "public_path": "code.store_execution_output", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "input_ids": "dict[str, syft.types.uid.UID] | None", + "job_id": "syft.types.uid.UID | None", + "outputs": "typing.Any", + "user_code_id": "" + }, + "return_type": "", + "warning": "", + "hash": "1952677c98c0e59fd4dcfec5d554fdbb533c88fe2a90381590e9194d03233c59" + }, + "Usercodeservice.code.submit": { + "public_path": "code.submit", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "code": "" + }, + "return_type": "", + "warning": "", + "hash": "10ea470830a5c833d84585682d6d602669504773917ff9b3d38ecd02f7bc9c58" + }, + "Usercodeservice.code.update": { + "public_path": "code.update", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "id": "syft.types.uid.UID | syft.types.syft_metaclass.EmptyType", + "l0_deny_reason": "str | None | syft.types.syft_metaclass.EmptyType" + }, + "return_type": "", + "warning": "", + "hash": "d824a955105b9c4febd06536f2b2752973d6b2b25c4b73bc05ed8cad5641f252" + }, + "Usercodestatusservice.code_status.create": { + "public_path": "code_status.create", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "status": "" + }, + "return_type": "", + "warning": "", + "hash": "e7e813f1933e93fad8024cb1a469b71d9223482803e6c31aafe5765b6f3ed578" + }, + "Usercodestatusservice.code_status.get_all": { + "public_path": "code_status.get_all", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": {}, + "return_type": 
"list[syft.service.code.user_code.UserCodeStatusCollection]", + "warning": "", + "hash": "9feae68b9c5a79873d3573e17b20374ae74f2d11a97227b1d8b3a3d44f8c1d37" + }, + "Usercodestatusservice.code_status.get_by_uid": { + "public_path": "code_status.get_by_uid", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "105347de75f51d1e38b34ed1686e2a6a96aa394c090c831c278e1a3ca8d3116f" + }, + "Usercodestatusservice.code_status.remove": { + "public_path": "code_status.remove", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "38d79b611e9cd5911635072294b325a9a77a5c66f6c8b3007cea6fd6116d0f2a" + }, + "Usercodestatusservice.code_status.update": { + "public_path": "code_status.update", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "decision": "syft.service.code.user_code.ApprovalDecision | syft.types.syft_metaclass.EmptyType", + "id": "syft.types.uid.UID | syft.types.syft_metaclass.EmptyType" + }, + "return_type": "", + "warning": "", + "hash": "960b4f65972823d62ef88e83089d616b4176f8c0a5560e27f623791bfcfebc39" + }, + "Userservice.user.create": { + "public_path": "user.create", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "created_by": "syft.server.credentials.SyftSigningKey | None", + "email": "", + "id": "", + "institution": "str | None", + "mock_execution_permission": "", + "name": "", + "password": "", + "password_verify": "str | None", + "role": "syft.service.user.user_roles.ServiceRole | None", + "verify_key": "syft.server.credentials.SyftVerifyKey | None", + "website": "str | None" + }, + "return_type": "", + "warning": "", + "hash": "562536dd46bf07735ea56135a6a8e547e6f23707673103c86b7887bf39507d42" + }, + "Userservice.user.delete": { + "public_path": "user.delete", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "8b1a55fba9717e5a15206f838c767c20566fdc7a706caef0e1aafe69039fcfed" + }, + "Userservice.user.get_all": { + "public_path": "user.get_all", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "order_by": "str | None", + "page_index": "int | None", + "page_size": "int | None", + "sort_order": "str | None" + }, + "return_type": "list[syft.service.user.user.UserView]", + "warning": "", + "hash": "db30734fa2ba8cde141a7c0058572c7371897a63dcced42f0438dc3e80b1a86e" + }, + "Userservice.user.get_by_verify_key": { + "public_path": "user.get_by_verify_key", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "verify_key": "" + }, + "return_type": "", + "warning": "", + "hash": "697555ff4d40a26ef9328c26a68ef63518d3241284f0e5a612ae4741cd9fca8d" + }, + "Userservice.user.get_current_user": { + "public_path": "user.get_current_user", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": {}, + "return_type": "", + "warning": "", + "hash": "08424242302088be04173bfd09309243a7e3511838546af15e1322e4cca10ce0" + }, + "Userservice.user.get_index": { + "public_path": "user.get_index", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "index": "" + }, + "return_type": "", + "warning": "", + "hash": "43f9af96488f20804b6d6d5e3154499edd97e8ddfb0dbb6e0829f36b3e317e1c" + }, + "Userservice.user.request_password_reset": { + "public_path": "user.request_password_reset", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": 
"031ffc151087bc11cbd3809de3a885b6656b232ef766586d14bd0accb81502bb" + }, + "Userservice.user.search": { + "public_path": "user.search", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "email": "pydantic.networks.EmailStr | syft.types.syft_metaclass.EmptyType", + "id": "syft.types.uid.UID | syft.types.syft_metaclass.EmptyType", + "name": "str | syft.types.syft_metaclass.EmptyType", + "page_index": "int | None", + "page_size": "int | None", + "verify_key": "syft.server.credentials.SyftVerifyKey | syft.types.syft_metaclass.EmptyType" + }, + "return_type": "list[syft.service.user.user.UserView]", + "warning": "", + "hash": "3d7f1b56e2b2d1e5580887742fb825f244041846b64de3a72beff02178fc3ac7" + }, + "Userservice.user.update": { + "public_path": "user.update", + "RBAC_permission": "GUEST_ROLE_LEVEL", + "signature": { + "email": "pydantic.networks.EmailStr | syft.types.syft_metaclass.EmptyType", + "id": "syft.types.uid.UID | syft.types.syft_metaclass.EmptyType", + "institution": "str | syft.types.syft_metaclass.EmptyType", + "mock_execution_permission": "bool | syft.types.syft_metaclass.EmptyType", + "name": "str | syft.types.syft_metaclass.EmptyType", + "password": "str | syft.types.syft_metaclass.EmptyType", + "password_verify": "str | syft.types.syft_metaclass.EmptyType", + "role": "syft.service.user.user_roles.ServiceRole | syft.types.syft_metaclass.EmptyType", + "uid": "", + "verify_key": "syft.server.credentials.SyftVerifyKey | syft.types.syft_metaclass.EmptyType", + "website": "str | syft.types.syft_metaclass.EmptyType" + }, + "return_type": "", + "warning": "", + "hash": "a3be1d1c772bfbafb237d5d7e01c986cdea9cb23374e075e28e6d5c12419c302" + }, + "Userservice.user.view": { + "public_path": "user.view", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "b21973538da6cd08387e3e40eb11d88d31393accd0de76dd7ad6fd51123aad51" + }, + "Workerservice.worker.delete": { + "public_path": "worker.delete", + "RBAC_permission": "DATA_OWNER_ROLE_LEVEL", + "signature": { + "force": "", + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "82681e3a435dffb544f131c3fd2b067a0b1a9f9989754da645d5f4bcaf6f0615" + }, + "Workerservice.worker.get": { + "public_path": "worker.get", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "uid": "" + }, + "return_type": "", + "warning": "", + "hash": "6a5140d7730b39024a81c9a93b4204d1a7cce505b5db2b4eed30292950daf376" + }, + "Workerservice.worker.get_all": { + "public_path": "worker.get_all", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": {}, + "return_type": "list[syft.service.worker.worker_pool.SyftWorker]", + "warning": "", + "hash": "0bfc387f28e4ed5c95a6c2db4cdf89d5778bdc2a8dfa3b57e6767ae764341353" + }, + "Workerservice.worker.logs": { + "public_path": "worker.logs", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + "signature": { + "raw": "", + "uid": "" + }, + "return_type": "bytes | str", + "warning": "", + "hash": "dfa9c826fd8269b8f942685555f127ea2a056d60e3f288c7c6d0ed4d680b21c2" + }, + "Workerservice.worker.start_workers": { + "public_path": "worker.start_workers", + "RBAC_permission": "ADMIN_ROLE_LEVEL", + "signature": { + "n": "" + }, + "return_type": "list[syft.service.worker.worker_pool.ContainerSpawnStatus]", + "warning": "", + "hash": "2c5937bb99db09b1a6eeca68a877b6120e3d7b776bc36b2b0414d96849e2ea9c" + }, + "Workerservice.worker.status": { + "public_path": "worker.status", + "RBAC_permission": "DATA_SCIENTIST_ROLE_LEVEL", + 
"signature": { + "uid": "" + }, + "return_type": "tuple[syft.service.worker.worker_pool.WorkerStatus, syft.service.worker.worker_pool.WorkerHealth | None]", + "warning": "", + "hash": "b761b2b2b7bb865d6559838aea133398fac8a6c74532e817a220d26e85af2eb3" + } +} diff --git a/packages/syft/src/syft/util/assets.py b/packages/syft/src/syft/util/assets.py new file mode 100644 index 00000000000..b3467f15ab8 --- /dev/null +++ b/packages/syft/src/syft/util/assets.py @@ -0,0 +1,32 @@ +# stdlib +import base64 +from functools import lru_cache +import importlib.resources + +IMAGE_ASSETS = "syft.assets.img" +SVG_ASSETS = "syft.assets.svg" +CSS_ASSETS = "syft.assets.css" +JS_ASSETS = "syft.assets.js" + + +@lru_cache(maxsize=32) +def load_svg(fname: str) -> str: + # TODO add resize support + return importlib.resources.read_text(SVG_ASSETS, fname) + + +@lru_cache(maxsize=32) +def load_png_base64(fname: str) -> str: + b = importlib.resources.read_binary(IMAGE_ASSETS, fname) + res = base64.b64encode(b) + return f"data:image/png;base64,{res.decode('utf-8')}" + + +@lru_cache(maxsize=32) +def load_css(fname: str) -> str: + return importlib.resources.read_text(CSS_ASSETS, fname) + + +@lru_cache(maxsize=32) +def load_js(fname: str) -> str: + return importlib.resources.read_text(JS_ASSETS, fname) diff --git a/packages/syft/src/syft/util/autoreload.py b/packages/syft/src/syft/util/autoreload.py index e1f68e45555..b3230c6dc6d 100644 --- a/packages/syft/src/syft/util/autoreload.py +++ b/packages/syft/src/syft/util/autoreload.py @@ -8,8 +8,9 @@ def enable_autoreload() -> None: from IPython import get_ipython ipython = get_ipython() # noqa: F821 - ipython.run_line_magic("load_ext", "autoreload") - ipython.run_line_magic("autoreload", "2") + if hasattr(ipython, "run_line_magic"): + ipython.run_line_magic("load_ext", "autoreload") + ipython.run_line_magic("autoreload", "2") AUTORELOAD_ENABLED = True print("Autoreload enabled") except Exception as e: @@ -24,7 +25,8 @@ def disable_autoreload() -> None: from IPython import get_ipython ipython = get_ipython() # noqa: F821 - ipython.run_line_magic("autoreload", "0") + if hasattr(ipython, "run_line_magic"): + ipython.run_line_magic("autoreload", "0") AUTORELOAD_ENABLED = False print("Autoreload disabled.") except Exception as e: diff --git a/packages/syft/src/syft/util/colors.py b/packages/syft/src/syft/util/colors.py deleted file mode 100644 index a7b6ce06e49..00000000000 --- a/packages/syft/src/syft/util/colors.py +++ /dev/null @@ -1,9 +0,0 @@ -SURFACE_DARK_BRIGHT = "#464158" -SURFACE_SURFACE_DARK = "#2E2B3B" -DK_ON_SURFACE_HIGHEST = "#534F64" - -ON_SURFACE_HIGHEST = {"light": "#534F64", "dark": "#ffffff"} - -SURFACE_SURFACE = {"light": "#2E2B3B", "dark": "#ffffff"} - -SURFACE = {"light": "#464158", "dark": "#ffffff"} diff --git a/packages/syft/src/syft/util/commit.py b/packages/syft/src/syft/util/commit.py new file mode 100644 index 00000000000..b65cdae6b33 --- /dev/null +++ b/packages/syft/src/syft/util/commit.py @@ -0,0 +1,3 @@ +# Used to store the commit hash of the syft package +# Currently only used for experimental releases +__commit__ = "GIT_COMMIT_HASH" diff --git a/packages/syft/src/syft/util/decorators.py b/packages/syft/src/syft/util/decorators.py index 1262099d1c6..a149115346c 100644 --- a/packages/syft/src/syft/util/decorators.py +++ b/packages/syft/src/syft/util/decorators.py @@ -2,6 +2,10 @@ from collections.abc import Callable import functools from typing import Any +import warnings + +# relative +from ..types.errors import SyftException def singleton(cls: Any) 
-> Callable: @@ -46,3 +50,25 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: return previous_instances[cls].get("instance") return wrapper + + +def deprecated( + reason: str = "This function is deprecated and may be removed in the future.", + return_syfterror: bool = False, +) -> Callable: + def decorator(func: Callable) -> Callable: + @functools.wraps(func) + def wrapper(*args: list, **kwargs: dict) -> Any: + message = f"{func.__qualname__} is deprecated: {reason}" + if return_syfterror: + raise SyftException(public_message=message) + warnings.warn( + message, + category=DeprecationWarning, + stacklevel=2, + ) + return func(*args, **kwargs) + + return wrapper + + return decorator diff --git a/packages/syft/src/syft/util/env.py b/packages/syft/src/syft/util/env.py index d1553fb40ce..de04d8a2bef 100644 --- a/packages/syft/src/syft/util/env.py +++ b/packages/syft/src/syft/util/env.py @@ -2,13 +2,13 @@ import venv # relative -from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from ..types.syft_object import SyftObject class Env(SyftObject): __canonical_name__ = "Env" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 packages_dict: dict[str, str] @property diff --git a/packages/syft/src/syft/util/fonts.py b/packages/syft/src/syft/util/fonts.py deleted file mode 100644 index cf5fcc98a86..00000000000 --- a/packages/syft/src/syft/util/fonts.py +++ /dev/null @@ -1,77 +0,0 @@ -# ruff: noqa -fonts_css = """ -/* cyrillic-ext */ -@font-face { - font-family: 'Open Sans'; - font-style: normal; - font-weight: 300 800; - font-stretch: 100%; - src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSKmu0SC55K5gw.woff2) format('woff2'); - unicode-range: U+0460-052F, U+1C80-1C88, U+20B4, U+2DE0-2DFF, U+A640-A69F, U+FE2E-FE2F; -} -/* cyrillic */ -@font-face { - font-family: 'Open Sans'; - font-style: normal; - font-weight: 300 800; - font-stretch: 100%; - src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSumu0SC55K5gw.woff2) format('woff2'); - unicode-range: U+0301, U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116; -} -/* greek-ext */ -@font-face { - font-family: 'Open Sans'; - font-style: normal; - font-weight: 300 800; - font-stretch: 100%; - src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSOmu0SC55K5gw.woff2) format('woff2'); - unicode-range: U+1F00-1FFF; -} -/* greek */ -@font-face { - font-family: 'Open Sans'; - font-style: normal; - font-weight: 300 800; - font-stretch: 100%; - src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSymu0SC55K5gw.woff2) format('woff2'); - unicode-range: U+0370-03FF; -} -/* hebrew */ -@font-face { - font-family: 'Open Sans'; - font-style: normal; - font-weight: 300 800; - font-stretch: 100%; - src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTS2mu0SC55K5gw.woff2) format('woff2'); - unicode-range: U+0590-05FF, U+200C-2010, U+20AA, U+25CC, U+FB1D-FB4F; -} -/* vietnamese */ -@font-face { - font-family: 'Open Sans'; - font-style: normal; - font-weight: 300 800; - font-stretch: 100%; - src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSCmu0SC55K5gw.woff2) format('woff2'); - unicode-range: U+0102-0103, U+0110-0111, U+0128-0129, U+0168-0169, U+01A0-01A1, U+01AF-01B0, U+0300-0301, U+0303-0304, U+0308-0309, U+0323, U+0329, U+1EA0-1EF9, U+20AB; -} -/* latin-ext */ -@font-face { - 
font-family: 'Open Sans'; - font-style: normal; - font-weight: 300 800; - font-stretch: 100%; - src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTSGmu0SC55K5gw.woff2) format('woff2'); - unicode-range: U+0100-02AF, U+0304, U+0308, U+0329, U+1E00-1E9F, U+1EF2-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; -} -/* latin */ -@font-face { - font-family: 'Open Sans'; - font-style: normal; - font-weight: 300 800; - font-stretch: 100%; - src: url(https://fonts.gstatic.com/s/opensans/v35/memvYaGs126MiZpBA-UvWbX2vVnXBbObj2OVTS-mu0SC55I.woff2) format('woff2'); - unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+0304, U+0308, U+0329, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; -} -""" - -ITABLES_CSS = ".itables {font-family: 'Consolas', monospace, sans-serif;}" diff --git a/packages/syft/src/syft/util/jax_settings.py b/packages/syft/src/syft/util/jax_settings.py deleted file mode 100644 index 5f9b4236dd8..00000000000 --- a/packages/syft/src/syft/util/jax_settings.py +++ /dev/null @@ -1,5 +0,0 @@ -# third party -from jax.config import config - -# this ensures that jax_enable_x64 is set before we import and use it -config.update("jax_enable_x64", True) diff --git a/packages/syft/src/syft/util/logger.py b/packages/syft/src/syft/util/logger.py deleted file mode 100644 index d9f0611a6c6..00000000000 --- a/packages/syft/src/syft/util/logger.py +++ /dev/null @@ -1,134 +0,0 @@ -# stdlib -from collections.abc import Callable -import logging -import os -import sys -from typing import Any -from typing import NoReturn -from typing import TextIO - -# third party -from loguru import logger - -LOG_FORMAT = "[{time}][{level}][{module}]][{process.id}] {message}" - -logger.remove() -DEFAULT_SINK = "syft_{time}.log" - - -def remove() -> None: - logger.remove() - - -def add( - sink: None | str | os.PathLike | TextIO | logging.Handler = None, - level: str = "ERROR", -) -> None: - sink = DEFAULT_SINK if sink is None else sink - try: - logger.add( - sink=sink, - format=LOG_FORMAT, - enqueue=True, - colorize=False, - diagnose=True, - backtrace=True, - rotation="10 MB", - retention="1 day", - level=level, - ) - except BaseException: - logger.add( - sink=sink, - format=LOG_FORMAT, - colorize=False, - diagnose=True, - backtrace=True, - level=level, - ) - - -def start() -> None: - add(sink=sys.stderr, level="CRITICAL") - - -def stop() -> None: - logger.stop() - - -def traceback_and_raise(e: Any, verbose: bool = False) -> NoReturn: - try: - if verbose: - logger.opt(lazy=True).exception(e) - else: - logger.opt(lazy=True).critical(e) - except BaseException as ex: - logger.debug("failed to print exception", ex) - if not issubclass(type(e), Exception): - e = Exception(e) - raise e - - -def create_log_and_print_function(level: str) -> Callable: - def log_and_print(*args: Any, **kwargs: Any) -> None: - try: - method = getattr(logger.opt(lazy=True), level, None) - if "print" in kwargs and kwargs["print"] is True: - del kwargs["print"] - print(*args, **kwargs) - if "end" in kwargs: - # clean up extra end for printinga - del kwargs["end"] - - if method is not None: - method(*args, **kwargs) - else: - raise Exception(f"no method {level} on logger") - except BaseException as e: - msg = f"failed to log exception. {e}" - try: - logger.debug(msg) - - except Exception as e: - print(f"{msg}. 
{e}") - - return log_and_print - - -def traceback(*args: Any, **kwargs: Any) -> None: - # caller = inspect.getframeinfo(inspect.stack()[1][0]) - # print(f"traceback:{caller.filename}:{caller.function}:{caller.lineno}") - return create_log_and_print_function(level="exception")(*args, **kwargs) - - -def critical(*args: Any, **kwargs: Any) -> None: - # caller = inspect.getframeinfo(inspect.stack()[1][0]) - # print(f"critical:{caller.filename}:{caller.function}:{caller.lineno}:{args}") - return create_log_and_print_function(level="critical")(*args, **kwargs) - - -def error(*args: Any, **kwargs: Any) -> None: - # caller = inspect.getframeinfo(inspect.stack()[1][0]) - # print(f"error:{caller.filename}:{caller.function}:{caller.lineno}") - return create_log_and_print_function(level="error")(*args, **kwargs) - - -def warning(*args: Any, **kwargs: Any) -> None: - return create_log_and_print_function(level="warning")(*args, **kwargs) - - -def info(*args: Any, **kwargs: Any) -> None: - return create_log_and_print_function(level="info")(*args, **kwargs) - - -def debug(*args: Any) -> None: - debug_msg = " ".join([str(a) for a in args]) - return logger.debug(debug_msg) - - -def _debug(*args: Any, **kwargs: Any) -> None: - return create_log_and_print_function(level="debug")(*args, **kwargs) - - -def trace(*args: Any, **kwargs: Any) -> None: - return create_log_and_print_function(level="trace")(*args, **kwargs) diff --git a/packages/syft/src/syft/util/misc_objs.py b/packages/syft/src/syft/util/misc_objs.py new file mode 100644 index 00000000000..221396b8bb3 --- /dev/null +++ b/packages/syft/src/syft/util/misc_objs.py @@ -0,0 +1,45 @@ +# third party +from IPython.display import HTML +from IPython.display import display + +# relative +from ..serde.serializable import serializable +from ..types.syft_object import SYFT_OBJECT_VERSION_1 +from ..types.syft_object import SyftObject + + +@serializable() +class MarkdownDescription(SyftObject): + # version + __canonical_name__ = "MarkdownDescription" + __version__ = SYFT_OBJECT_VERSION_1 + + text: str + + def _repr_markdown_(self, wrap_as_python: bool = True, indent: int = 0) -> str: + style = """ + + """ + display(HTML(style)) + return self.text + + +@serializable() +class HTMLObject(SyftObject): + # version + __canonical_name__ = "HTMLObject" + __version__ = SYFT_OBJECT_VERSION_1 + + text: str + + def _repr_html_(self) -> str: + return self.text diff --git a/packages/syft/src/syft/util/notebook_ui/__init__.py b/packages/syft/src/syft/util/notebook_ui/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft/src/syft/util/notebook_ui/components/__init__.py b/packages/syft/src/syft/util/notebook_ui/components/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft/src/syft/util/notebook_ui/components/base.py b/packages/syft/src/syft/util/notebook_ui/components/base.py new file mode 100644 index 00000000000..6fd09c458b4 --- /dev/null +++ b/packages/syft/src/syft/util/notebook_ui/components/base.py @@ -0,0 +1,20 @@ +# third party +import ipywidgets as widgets + +# relative +from ....types.syft_object import SYFT_OBJECT_VERSION_1 +from ....types.syft_object import SyftBaseObject + + +class HTMLComponentBase(SyftBaseObject): + __canonical_name__ = "HTMLComponentBase" + __version__ = SYFT_OBJECT_VERSION_1 + + def to_html(self) -> str: + raise NotImplementedError() + + def to_widget(self) -> widgets.Widget: + return widgets.HTML(value=self.to_html()) + + def _repr_html_(self) -> str: + return 
self.to_html() diff --git a/packages/syft/src/syft/util/notebook_ui/components/sync.py b/packages/syft/src/syft/util/notebook_ui/components/sync.py new file mode 100644 index 00000000000..693d9549367 --- /dev/null +++ b/packages/syft/src/syft/util/notebook_ui/components/sync.py @@ -0,0 +1,342 @@ +# stdlib +import datetime +from typing import Any + +# third party +from pydantic import model_validator + +# relative +from ....client.sync_decision import SyncDirection +from ....service.code.user_code import UserCode +from ....service.job.job_stash import Job +from ....service.request.request import Request +from ....service.user.user import UserView +from ....types.datetime import DateTime +from ....types.datetime import format_timedelta_human_readable +from ....types.errors import SyftException +from ....types.syft_object import SYFT_OBJECT_VERSION_1 +from ....types.syft_object import SyftObject +from ..icons import Icon +from ..styles import CSS_CODE +from .base import HTMLComponentBase + +COPY_CSS = """ +.copy-container { + cursor: pointer; + border-radius: 3px; + padding: 0px 3px; + display: inline-block; + transition: background-color 0.3s; + user-select: none; + color: #B4B0BF; + overflow: hidden; + white-space: nowrap; + vertical-align: middle; +} + +.copy-container:hover { + background-color: #f5f5f5; +} + +.copy-container:active { + background-color: #ebebeb; +} + +.copy-text-display { + display: inline-block; + max-width: 50px; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + vertical-align: bottom; +} +""" + + +class CopyButton(HTMLComponentBase): + __canonical_name__ = "CopyButton" + __version__ = SYFT_OBJECT_VERSION_1 + copy_text: str + max_width: int = 50 + + def format_copy_text(self, copy_text: str) -> str: + return copy_text + + def to_html(self) -> str: + copy_js = f"event.stopPropagation(); navigator.clipboard.writeText('{self.copy_text}');" + text_formatted = self.format_copy_text(self.copy_text) + button_html = f""" + +

    + + {text_formatted} + + {Icon.COPY.svg} +
    + """ + return button_html + + +class CopyIDButton(CopyButton): + __canonical_name__ = "CopyIDButton" + __version__ = SYFT_OBJECT_VERSION_1 + + def format_copy_text(self, copy_text: str) -> str: + return f"#{copy_text}" + + +class SyncTableObject(HTMLComponentBase): + __canonical_name__ = "SyncTableObject" + __version__ = SYFT_OBJECT_VERSION_1 + + object: SyftObject + + def get_status_str(self) -> str: + if isinstance(self.object, UserCode): + return "" + elif isinstance(self.object, Job): # type: ignore + return f"Status: {self.object.status.value}" + elif isinstance(self.object, Request): + code = self.object.code + approval_decisions = list(code.status.status_dict.values()) + if len(approval_decisions) != 1: + raise ValueError("Request code should have exactly one status") + return approval_decisions[0].status.value + return "" # type: ignore + + def get_updated_by(self) -> str: + # TODO replace with centralized SyftObject created/updated by attribute + if isinstance(self.object, Request): + email = self.object.requesting_user_email + if email is not None: + return f"Requested by {email}" + + user_view: UserView | None = None + if isinstance(self.object, UserCode): + try: + user_view = self.object.user + except SyftException: + pass # nosec + + if isinstance(user_view, UserView): + return f"Created by {user_view.email}" + return "" + + def get_updated_delta_str(self) -> str: + # TODO replace with centralized SyftObject created/updated by attribute + if isinstance(self.object, Job): + # NOTE Job is not using DateTime for creation_time, so we need to handle it separately + time_str = self.object.creation_time + if time_str is not None: + t = datetime.datetime.fromisoformat(time_str) + delta = datetime.datetime.now(datetime.timezone.utc) - t + return f"{format_timedelta_human_readable(delta)} ago" + + dt: DateTime | None = None + if isinstance(self.object, Request): + dt = self.object.request_time + if isinstance(self.object, UserCode): + dt = self.object.submit_time + if dt is not None: + delta = DateTime.now().timedelta(dt) + delta_str = format_timedelta_human_readable(delta) + return f"{delta_str} ago" + + return "" + + def to_html(self) -> str: + type_html = TypeLabel(object=self.object).to_html() + + type_html = TypeLabel(object=self.object).to_html() + description_html = MainDescription(object=self.object).to_html() + copy_id_button = CopyIDButton( + copy_text=str(self.object.id.id), max_width=60 + ).to_html() + + updated_delta_str = self.get_updated_delta_str() + updated_by = self.get_updated_by() + status_str = self.get_status_str() + status_row = " • ".join( + s for s in [status_str, updated_by, updated_delta_str] if s + ) + summary_html = f""" +
    +
    + {type_html} {description_html} +
    + {copy_id_button} +
    +
    + + {status_row} + +
    + """ # noqa: E501 + summary_html = summary_html.replace("\n", "").replace(" ", "") + return summary_html + + +ALERT_CSS = """ +.syft-alert-container { + padding: 4px; + display: flex; + justify-content: center; +} + +.syft-alert-info { + display: flex; + align-items: center; + width: 100%; + padding: 8px 10px; + gap: 8px; + border-radius: 4px; + background: #C2DEF0; + color: #1F567A; + line-height: 1.4; + font-size: 12px; + font-family: 'Open Sans'; +} +""" + + +class Alert(HTMLComponentBase): + __canonical_name__ = "Alert" + __version__ = SYFT_OBJECT_VERSION_1 + message: str + + def to_html(self) -> str: + full_message = f"{Icon.INFO.svg} {self.message}" + return f""" + +
    +
    + {full_message} +
    +
    + """ + + +class Badge(HTMLComponentBase): + __canonical_name__ = "Badge" + __version__ = SYFT_OBJECT_VERSION_1 + value: str + badge_class: str + + def to_html(self) -> str: + value = str(self.value).upper() + return f'{value}' + + +class Label(HTMLComponentBase): + __canonical_name__ = "Label" + __version__ = SYFT_OBJECT_VERSION_1 + value: str + label_class: str + + def to_html(self) -> str: + value = str(self.value).upper() + return f'{value}' + + +class TypeLabel(Label): + __canonical_name__ = "TypeLabel" + __version__ = SYFT_OBJECT_VERSION_1 + object: SyftObject + + @model_validator(mode="before") + @classmethod + def validate_label(cls, data: dict) -> dict: + obj = data["object"] + data["label_class"] = cls.type_label_class(obj) + data["value"] = type(obj).__name__.upper() + return data + + @staticmethod + def type_label_class(obj: Any) -> str: + if isinstance(obj, UserCode): + return "label-light-blue" + elif isinstance(obj, Job): # type: ignore + return "label-light-blue" + elif isinstance(obj, Request): # type: ignore + # TODO: handle other requests + return "label-light-purple" + return "label-light-blue" # type: ignore + + +class MainDescription(HTMLComponentBase): + __canonical_name__ = "CopyButton" + __version__ = SYFT_OBJECT_VERSION_1 + object: SyftObject + + def main_object_description_str(self) -> str: + if isinstance(self.object, UserCode): + return self.object.service_func_name + elif isinstance(self.object, Job): # type: ignore + return self.object.user_code_name or "" + elif isinstance(self.object, Request): # type: ignore + # TODO: handle other requests + return f"Execute {self.object.code.service_func_name}" + # SyftLog + # ExecutionOutput + # ActionObject + # UserCodeStatusCollection + + return "" # type: ignore + + def to_html(self) -> str: + return f'{self.main_object_description_str()}' + + +class SyncWidgetHeader(SyncTableObject): + diff_batch: Any + + @model_validator(mode="before") + @classmethod + def add_object(cls, values: dict) -> dict: + if "diff_batch" not in values: + raise ValueError("diff_batch is required") + diff_batch = values["diff_batch"] + values["object"] = diff_batch.root_diff.non_empty_object + return values + + def to_html(self) -> str: + # CSS Styles + style = CSS_CODE + + first_line_html = "Syncing changes on" + + type_html = TypeLabel(object=self.object).to_html() + description_html = MainDescription(object=self.object).to_html() + copy_id_button = CopyIDButton( + copy_text=str(self.object.id.id), max_width=60 + ).to_html() + + second_line_html = f""" +
    +
    + {type_html} {description_html} +
    + {copy_id_button} +
    + """ # noqa: E501 + + num_diffs = len(self.diff_batch.get_dependencies(include_roots=True)) + if self.diff_batch.sync_direction == SyncDirection.HIGH_TO_LOW: + source_side = "High" + target_side = "Low" + else: + source_side = "Low" + target_side = "High" + + # Third line HTML + third_line_html = f"This would sync {num_diffs} changes from {source_side} Server to {target_side} Server" # noqa: E501 + + header_html = f""" + {style} + {first_line_html} + {second_line_html} + {third_line_html} +
    + """ + + return header_html diff --git a/packages/syft/src/syft/util/notebook_ui/components/tabulator_template.py b/packages/syft/src/syft/util/notebook_ui/components/tabulator_template.py new file mode 100644 index 00000000000..538614b4cb8 --- /dev/null +++ b/packages/syft/src/syft/util/notebook_ui/components/tabulator_template.py @@ -0,0 +1,251 @@ +# stdlib +import json +import logging +import re +import secrets +from typing import Any + +# third party +from IPython.display import HTML +from IPython.display import display +import jinja2 + +# relative +from ....types.uid import UID +from ...assets import load_css +from ...assets import load_js +from ...table import TABLE_INDEX_KEY +from ...table import prepare_table_data +from ...util import sanitize_html +from ..icons import Icon + +logger = logging.getLogger(__name__) + + +def make_links(text: str) -> str: + file_pattern = re.compile(r"([\w/.-]+\.py)\", line (\d+)") + return file_pattern.sub(r'\1, line \2', text) + + +DEFAULT_ID_WIDTH = 110 +jinja_env = jinja2.Environment(loader=jinja2.PackageLoader("syft", "assets/jinja")) # nosec +jinja_env.filters["make_links"] = make_links + + +def create_tabulator_columns( + column_names: list[str], + column_widths: dict | None = None, + header_sort: bool = True, +) -> tuple[list[dict], dict | None]: + """Returns tuple of (columns, row_header) for tabulator table""" + if column_widths is None: + column_widths = {} + + columns = [] + row_header = {} + if TABLE_INDEX_KEY in column_names: + row_header = { + "field": TABLE_INDEX_KEY, + "frozen": True, + "widthGrow": 0.3, + "minWidth": 60, + "headerSort": header_sort, + } + + for colname in column_names: + if colname != TABLE_INDEX_KEY: + column = { + "title": colname, + "field": colname, + "formatter": "html", + "resizable": True, + "minWidth": 60, + "maxInitialWidth": 500, + "headerSort": header_sort, + } + if colname in column_widths: + column["widthGrow"] = column_widths[colname] + columns.append(column) + + return columns, row_header + + +def format_dict(data: Any) -> str: + # relative + from .sync import Badge + from .sync import CopyButton + from .sync import Label + + if not isinstance(data, dict): + return data + + if set(data.keys()) != {"type", "value"}: + return sanitize_html(str(data)) + if "badge" in data["type"]: + return Badge(value=data["value"], badge_class=data["type"]).to_html() + elif "label" in data["type"]: + return Label(value=data["value"], label_class=data["type"]).to_html() + if "clipboard" in data["type"]: + return CopyButton(copy_text=data["value"]).to_html() + + return sanitize_html(str(data)) + + +def format_uid(uid: UID) -> str: + # relative + from .sync import CopyButton + + return CopyButton(copy_text=uid.no_dash).to_html() + + +def format_table_data(table_data: list[dict[str, Any]]) -> list[dict[str, str]]: + formatted: list[dict[str, str]] = [] + for row in table_data: + row_formatted: dict[str, str] = {} + for k, v in row.items(): + if isinstance(v, str): + row_formatted[k] = sanitize_html(v.replace("\n", "
    ")) + continue + # make UID copyable and trimmed + if isinstance(v, UID): + v_formatted = format_uid(v) + else: + v_formatted = format_dict(v) + row_formatted[k] = v_formatted + formatted.append(row_formatted) + return formatted + + +def _render_tabulator_table( + uid: str, + table_data: list[dict], + table_metadata: dict, + max_height: int | None, + pagination: bool, + header_sort: bool, +) -> str: + table_template = jinja_env.get_template("table.jinja2") + tabulator_js = load_js("tabulator.min.js") + tabulator_css = load_css("tabulator_pysyft.min.css") + js = load_js("table.js") + css = load_css("style.css") + + # Add tabulator as a named module for VSCode compatibility + tabulator_js = tabulator_js.replace( + "define(t)", "define('tabulator-tables', [], t)" + ) + + icon = table_metadata.get("icon", None) + if icon is None: + icon = Icon.TABLE.svg + + column_data, row_header = create_tabulator_columns( + table_metadata["columns"], header_sort=header_sort + ) + table_data = format_table_data(table_data) + table_html = table_template.render( + uid=uid, + columns=json.dumps(column_data), + row_header=json.dumps(row_header), + data=json.dumps(table_data), + css=css, + js=js, + index_field_name=TABLE_INDEX_KEY, + icon=icon, + name=table_metadata["name"], + tabulator_js=tabulator_js, + tabulator_css=tabulator_css, + max_height=json.dumps(max_height), + pagination=json.dumps(pagination), + header_sort=json.dumps(header_sort), + ) + + return table_html + + +def build_tabulator_table_with_data( + table_data: list[dict], + table_metadata: dict, + uid: str | None = None, + max_height: int | None = None, + pagination: bool = True, + header_sort: bool = True, +) -> str: + """ + Builds a Tabulator table for the provided data and metadata. + + Args: + table_data (list[dict]): The data to populate the table. + table_metadata (dict): The metadata for the table. + uid (str, optional): The unique identifier for the table. Defaults to None. + max_height (int, optional): The maximum height of the table. Defaults to None. + pagination (bool, optional): Whether to enable pagination. Defaults to True. + header_sort (bool, optional): Whether to enable header sorting. Defaults to True. + + Returns: + str: The HTML representation of the Tabulator table. + + """ + uid = uid if uid is not None else secrets.token_hex(4) + return _render_tabulator_table( + uid, table_data, table_metadata, max_height, pagination, header_sort + ) + + +def build_tabulator_table( + obj: Any, + uid: str | None = None, + max_height: int | None = None, + pagination: bool = True, + header_sort: bool = True, +) -> str | None: + """ + Builds a Tabulator table from the given object if possible. + + If the object cannot be represented as a table, returns None. + + Args: + obj (Any): The object to build the table from. + uid (str, optional): The unique identifier for the table. Defaults to None. + max_height (int, optional): The maximum height of the table. Defaults to None. + pagination (bool, optional): Whether to enable pagination. Defaults to True. + header_sort (bool, optional): Whether to enable header sorting. Defaults to True. 
+ + Returns: + str | None: The HTML representation of the Tabulator table or None + + """ + table_data, table_metadata = prepare_table_data(obj) + if len(table_data) == 0: + if hasattr(obj, "__len__") and len(obj) == 0: + return obj.__repr__() + else: + return None + + return build_tabulator_table_with_data( + table_data, table_metadata, uid, max_height, pagination, header_sort + ) + + +def show_table(obj: Any) -> None: + table = build_tabulator_table(obj) + if table is not None: + display(HTML(table)) + + +def highlight_single_row( + table_uid: str, + index: int | str | None = None, + jump_to_row: bool = True, +) -> None: + js_code = f"" + display(HTML(js_code)) + + +def update_table_cell(uid: str, index: int, field: str, value: str) -> None: + js_code = f""" + + """ + display(HTML(js_code)) diff --git a/packages/syft/src/syft/util/notebook_ui/icons.py b/packages/syft/src/syft/util/notebook_ui/icons.py new file mode 100644 index 00000000000..a06c0a80258 --- /dev/null +++ b/packages/syft/src/syft/util/notebook_ui/icons.py @@ -0,0 +1,29 @@ +# stdlib +import enum +import json + +# relative +from ..assets import load_svg + + +class Icon(enum.Enum): + SEARCH = "search.svg" + CLIPBOARD = "clipboard.svg" + TABLE = "table.svg" + FOLDER = "folder.svg" + REQUEST = "request.svg" + ARROW = "arrow.svg" + COPY = "copy.svg" + INFO = "info.svg" + + @property + def svg(self) -> str: + return load_svg(self.value) + + @property + def js_escaped_svg(self) -> str: + """ + Required for in-line SVGs in JS + TODO remove after refactoring table + templating + """ + return json.dumps(self.svg) diff --git a/packages/syft/src/syft/util/notebook_ui/notebook_addons.py b/packages/syft/src/syft/util/notebook_ui/notebook_addons.py deleted file mode 100644 index fd72e302490..00000000000 --- a/packages/syft/src/syft/util/notebook_ui/notebook_addons.py +++ /dev/null @@ -1,719 +0,0 @@ -# stdlib -from collections.abc import Sequence -import json -from string import Template -from typing import Any - -# relative -from ...types.uid import UID - -CSS_CODE = """ - - -""" - -SEARCH_ICON = ( - '' -) -CLIPBOARD_ICON = ( - "" -) -TABLE_ICON = ( - ' ' -) -FOLDER_ICON = ( - '' -) -REQUEST_ICON = ( - '' -) - - -custom_code = """ -
    -
    -
    ${icon}
    -

    ${list_name}

    -
    - -
    -
    -
    -
    -
    - -
    - -
    - -
    - -

    0

    -
    -
    - -
    -
    - -
    -
    -""" - - -def create_table_template( - items: Sequence, list_name: Any, rows: int = 5, table_icon: Any = None -) -> str: - if not table_icon: - table_icon = TABLE_ICON - - items_dict = json.dumps(items) - code = CSS_CODE + custom_code - template = Template(code) - rows = min(len(items), rows) - if len(items) == 0: - cols = 0 - else: - cols = (len(items[0].keys())) * 4 - return template.substitute( - uid=str(UID()), - element=items_dict, - list_name=list_name, - cols=cols, - rows=rows, - icon=table_icon, - searchIcon=SEARCH_ICON, - clipboardIcon=CLIPBOARD_ICON, - ) diff --git a/packages/syft/src/syft/util/notebook_ui/styles.py b/packages/syft/src/syft/util/notebook_ui/styles.py new file mode 100644 index 00000000000..591f99ad96f --- /dev/null +++ b/packages/syft/src/syft/util/notebook_ui/styles.py @@ -0,0 +1,32 @@ +# relative +from ..assets import load_css + +FONT_CSS = load_css("fonts.css") +STYLESHEET_URLS = [ + "https://fonts.googleapis.com/css2?family=Karla:ital,wght@0,200;0,300;0,400;0,500;0,600;0,700;0,800;1,200;1,300;1,400;1,500;1,600;1,700;1,800&family=Open+Sans:ital,wght@0,300..800;1,300..800&display=swap", + "https://fonts.cdnfonts.com/css/dejavu-sans-mono", +] +STYLESHEET_JS_CALLS = "\n".join([f'addStyleSheet("{s}")' for s in STYLESHEET_URLS]) + +JS_DOWNLOAD_FONTS = f""" + +""" + +CSS_CODE = f""" + +""" diff --git a/packages/syft/src/syft/util/options.py b/packages/syft/src/syft/util/options.py deleted file mode 100644 index 7702b14823d..00000000000 --- a/packages/syft/src/syft/util/options.py +++ /dev/null @@ -1,3 +0,0 @@ -# Global variables - -color_theme = "light" diff --git a/packages/syft/src/syft/util/patch_ipython.py b/packages/syft/src/syft/util/patch_ipython.py new file mode 100644 index 00000000000..eb821900c26 --- /dev/null +++ b/packages/syft/src/syft/util/patch_ipython.py @@ -0,0 +1,217 @@ +# stdlib +import html +import re +from types import MethodType +from typing import Any + +# relative +from ..service.response import SyftResponseMessage +from ..types.dicttuple import DictTuple +from ..types.syft_object import SyftObject +from .table import render_itable_template +from .util import sanitize_html + + +def _patch_ipython_sanitization() -> None: + try: + # third party + from IPython import get_ipython + except ImportError: + return + + ip = get_ipython() + if ip is None: + return + + # stdlib + from importlib import resources + + # relative + from .assets import load_css + from .assets import load_js + from .notebook_ui.components.sync import ALERT_CSS + from .notebook_ui.components.sync import COPY_CSS + from .notebook_ui.styles import CSS_CODE + from .notebook_ui.styles import FONT_CSS + from .notebook_ui.styles import JS_DOWNLOAD_FONTS + + tabulator_js = load_js("tabulator.min.js") + tabulator_js = tabulator_js.replace( + "define(t)", "define('tabulator-tables', [], t)" + ) + + SKIP_SANITIZE = [ + FONT_CSS, + CSS_CODE, + JS_DOWNLOAD_FONTS, + tabulator_js, + load_css("tabulator_pysyft.min.css"), + load_js("table.js"), + ] + + css_reinsert = f""" + +{JS_DOWNLOAD_FONTS} +{CSS_CODE} + + +""" + + escaped_js_css = re.compile( + "|".join(re.escape(substr) for substr in SKIP_SANITIZE), + re.IGNORECASE | re.MULTILINE, + ) + + table_template = ( + resources.files("syft.assets.jinja").joinpath("table.jinja2").read_text() + ) + table_template = table_template.strip() + table_template = re.sub(r"\\{\\{.*?\\}\\}", ".*?", re.escape(table_template)) + escaped_template = re.compile(table_template, re.DOTALL | re.VERBOSE) + + jobs_repr_template = ( + r"(.*?)" + ) + jobs_pattern 
= re.compile(jobs_repr_template, re.DOTALL) + + itable_template = ( + r"\s*(.*?)\s*" + ) + escaped_itable_template = re.compile(itable_template, re.DOTALL) + + def display_sanitized_html(obj: SyftObject | DictTuple) -> str | None: + if callable(obj_repr_html_ := getattr(obj, "_repr_html_", None)): + html_str = obj_repr_html_() + if html_str is not None: + # find matching table and jobs + matching_table = escaped_template.findall(html_str) + matching_jobs = jobs_pattern.findall(html_str) + matching_itables = escaped_itable_template.findall(html_str) + template = "\n".join(matching_table + matching_jobs) + + # remove escaped tables from sanitized html + sanitized_str = escaped_template.sub("", html_str) + # remove escaped js/css from sanitized html + sanitized_str = escaped_js_css.sub("", sanitized_str) + + # remove jobs from sanitized html + sanitized_str = jobs_pattern.sub("", sanitized_str) + + # remove escaped itables from sanitized html + sanitized_str = escaped_itable_template.sub( + "SYFT_PLACEHOLDER_ITABLE", sanitized_str + ) + sanitized_str = sanitize_html(sanitized_str) + + # add back css / js that skips sanitization + + for matching_itable in matching_itables: + sanitized_str = sanitized_str.replace( + "SYFT_PLACEHOLDER_ITABLE", + render_itable_template(matching_itable), + 1, + ) + return f"{css_reinsert} {sanitized_str} {template}" + return None + + def display_sanitized_md(obj: SyftObject) -> str | None: + if callable(getattr(obj, "_repr_markdown_", None)): + md = obj._repr_markdown_() + if md is not None: + md_sanitized = sanitize_html(md) + return html.unescape(md_sanitized) + return None + + ip.display_formatter.formatters["text/html"].for_type( + SyftObject, display_sanitized_html + ) + ip.display_formatter.formatters["text/html"].for_type( + DictTuple, display_sanitized_html + ) + ip.display_formatter.formatters["text/markdown"].for_type( + SyftObject, display_sanitized_md + ) + ip.display_formatter.formatters["text/html"].for_type( + SyftResponseMessage, display_sanitized_html + ) + + +def _patch_ipython_autocompletion() -> None: + try: + # third party + from IPython import get_ipython + from IPython.core.guarded_eval import EVALUATION_POLICIES + except ImportError: + return + + ipython = get_ipython() + if ipython is None: + return + + try: + # this allows property getters to be used in nested autocomplete + ipython.Completer.evaluation = "limited" + ipython.Completer.use_jedi = False + policy = EVALUATION_POLICIES["limited"] + + policy.allowed_getattr_external.update( + [ + ("syft.client.api", "APIModule"), + ("syft.client.api", "SyftAPI"), + ] + ) + original_can_get_attr = policy.can_get_attr + + def patched_can_get_attr(value: Any, attr: str) -> bool: + attr_name = "__syft_allow_autocomplete__" + # first check if exist to prevent side effects + if hasattr(value, attr_name) and attr in getattr(value, attr_name, []): + if attr in dir(value): + return True + else: + return False + else: + return original_can_get_attr(value, attr) + + policy.can_get_attr = patched_can_get_attr + except Exception: + print("Failed to patch ipython autocompletion for syft property getters") + + try: + # this constraints the completions for autocomplete. 
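+        # (by default, Completer.attr_matches proposes every name that dir() reports on the evaluated object)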
+ # if __syft_dir__ is defined we only autocomplete those properties + original_attr_matches = ipython.Completer.attr_matches + + def patched_attr_matches(self, text: str) -> list[str]: # type: ignore + res = original_attr_matches(text) + m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer) + if not m2: + return res + expr, _ = m2.group(1, 2) + obj = self._evaluate_expr(expr) + if isinstance(obj, SyftObject) and hasattr(obj, "__syft_dir__"): + # here we filter all autocomplete results to only contain those + # defined in __syft_dir__, however the original autocomplete prefixes + # have the full path, while __syft_dir__ only defines the attr + attrs = set(obj.__syft_dir__()) + new_res = [] + for r in res: + splitted = r.split(".") + if len(splitted) > 1: + attr_name = splitted[-1] + if attr_name in attrs: + new_res.append(r) + return new_res + else: + return res + + ipython.Completer.attr_matches = MethodType( + patched_attr_matches, ipython.Completer + ) + except Exception: + print("Failed to patch syft autocompletion for __syft_dir__") + + +def patch_ipython() -> None: + _patch_ipython_sanitization() + _patch_ipython_autocompletion() diff --git a/packages/syft/src/syft/util/reset_server.py b/packages/syft/src/syft/util/reset_server.py new file mode 100644 index 00000000000..da9f28d7f57 --- /dev/null +++ b/packages/syft/src/syft/util/reset_server.py @@ -0,0 +1,34 @@ +# stdlib +from distutils.dir_util import copy_tree +from pathlib import Path + +# relative +from ..orchestra import ServerHandle + +COPY_SUFFIX = "-copy" + + +def make_copy_path(path: Path) -> str: + return f"{path.parent}/{path.stem}{COPY_SUFFIX}{path.suffix}" + + +def make_original_path(copy_path: Path) -> str: + return f"{str(copy_path)[:-len(COPY_SUFFIX)]}" + + +def make_copy(server: ServerHandle) -> None: + if not server.python_server: + print("server does not have python server, no copy made") + return + cfg = server.python_server.db_config + original_dir = str(cfg.path.resolve()) + copy_dir = f"{original_dir}{COPY_SUFFIX}" + copy_tree(original_dir, copy_dir) + print(f"copied\n{original_dir}\nto\n{copy_dir}\n") + + +def restore_copy(copy_dir: str) -> None: + copy_dir_path = Path(copy_dir) + original_dir = make_original_path(copy_dir_path) + copy_tree(copy_dir_path, original_dir) + print(f"copied\n{copy_dir}\nto\n{original_dir}\n") diff --git a/packages/syft/src/syft/util/schema.py b/packages/syft/src/syft/util/schema.py index 8ab54cbdea2..fd284fa6e87 100644 --- a/packages/syft/src/syft/util/schema.py +++ b/packages/syft/src/syft/util/schema.py @@ -8,9 +8,76 @@ # syft absolute import syft as sy +# relative +from .decorators import deprecated + RELATIVE_PATH_TO_FRONTEND = "/../../../../grid/frontend/" SCHEMA_FOLDER = "schema" +GUEST_COMMANDS = """ +
  • <your_client>.datasets - list datasets
  • +
  • <your_client>.code - list code
  • +
  • <your_client>.login - log in to the datasite
  • +""" + +DS_COMMANDS = """ +
  • <your_client>.datasets - list datasets
  • +
  • <your_client>.code - list code
  • +
  • <your_client>.projects - list projects
  • +""" + +DO_COMMANDS = """ +
  • <your_client>.projects - list projects
  • +
  • <your_client>.requests - list requests
  • +
  • <your_client>.users - list users
  • +""" + +DEFAULT_WELCOME_MSG = """ + +
    + Logo +

    Welcome to $datasite_name

    +
    + URL: $server_url
    + Server Description: $description
    + Server Type: $server_type
    + Server Side Type: $server_side_type
    + Syft Version: $server_version
    + +
    +
    + ⓘ  + This datasite is run by the library PySyft to learn more about how it works visit + github.com/OpenMined/PySyft. +
    +

    Commands to Get Started

    + $command_list +

    + """ + # json schema primitive types primitive_mapping = { list: "array", @@ -116,7 +183,7 @@ def process_type_bank(type_bank: dict[str, tuple[Any, ...]]) -> dict[str, dict]: def resolve_references(json_mappings: dict[str, dict]) -> dict[str, dict]: # track second pass generated types new_types = {} - for _, json_schema in json_mappings.items(): + for json_schema in json_mappings.values(): replace_types = {} for attribute, config in json_schema["properties"].items(): if "type" in config: @@ -146,7 +213,11 @@ def resolve_references(json_mappings: dict[str, dict]) -> dict[str, dict]: return json_mappings +@deprecated( + reason="generate_json_schemas is outdated, #1603 for more info", +) def generate_json_schemas(output_path: str | None = None) -> None: + # TODO: should we also replace this with the SyftObjectRegistry? json_mappings = process_type_bank(sy.serde.recursive.TYPE_BANK) json_mappings = resolve_references(json_mappings) if not output_path: diff --git a/packages/syft/src/syft/util/table.py b/packages/syft/src/syft/util/table.py new file mode 100644 index 00000000000..25db3fb4552 --- /dev/null +++ b/packages/syft/src/syft/util/table.py @@ -0,0 +1,342 @@ +# stdlib +from collections import defaultdict +from collections.abc import Iterable +from collections.abc import Mapping +from collections.abc import Set +import json +import logging +import re +from typing import Any + +# third party +import itables +import pandas as pd + +# relative +from .util import full_name_with_qualname +from .util import sanitize_html + +TABLE_INDEX_KEY = "_table_repr_index" + +logger = logging.getLogger(__name__) + + +def _syft_in_mro(self: Any, item: Any) -> bool: + if hasattr(type(item), "mro") and type(item) != type: + # if unbound method, supply self + if hasattr(type(item).mro, "__self__"): + mro = type(item).mro() + else: + mro = type(item).mro(type(item)) # type: ignore + + elif hasattr(item, "mro") and type(item) != type: + mro = item.mro() + else: + mro = str(self) # type: ignore + + return "syft" in str(mro).lower() + + +def _get_values_for_table_repr(obj: Any) -> Any: + if isinstance(obj, Mapping): + values = list(obj.values()) + elif isinstance(obj, Set): + values = list(obj) + else: + values = obj + + return values + + +def _get_grid_template_columns(first_value: Any) -> tuple[str | None, str | None]: + grid_template_cols = getattr(first_value, "__table_coll_widths__", None) + if isinstance(grid_template_cols, list): + grid_template_columns = " ".join(grid_template_cols) + grid_template_cell_columns = "unset" + else: + grid_template_columns = None + grid_template_cell_columns = None + return grid_template_columns, grid_template_cell_columns + + +def _create_table_rows( + _self: Mapping | Iterable, + is_homogenous: bool, + extra_fields: list | None = None, + add_index: bool = True, +) -> list[dict[str, Any]]: + """ + Creates row data for a table based on input object obj. + + If valid table data cannot be created, an empty list is returned. + + Args: + _self (Mapping | Iterable): The input data as a Mapping or Iterable. + is_homogenous (bool): A boolean indicating whether the data is homogenous. + extra_fields (list | None, optional): Additional fields to include in the table. Defaults to None. + add_index (bool, optional): Whether to add an index column. Defaults to True. + + Returns: + list[dict[str, Any]]: A list of dictionaries where each dictionary represents a row in the table. 
+ + """ + + if extra_fields is None: + extra_fields = [] + + cols = defaultdict(list) + + for item in iter(_self.items() if isinstance(_self, Mapping) else _self): + # unpack dict + if isinstance(_self, Mapping): + key, item = item + cols["key"].append(key) + + # get id + id_ = getattr(item, "id", None) + include_id = getattr(item, "__syft_include_id_coll_repr__", True) + if id_ is not None and include_id: + cols["id"].append({"value": str(id_), "type": "clipboard"}) + + if type(item) == type: + t = full_name_with_qualname(item) + else: + try: + t = item.__class__.__name__ + except Exception: + t = item.__repr__() + + if not is_homogenous: + cols["type"].append(t) + + # if has _coll_repr_ + + if hasattr(item, "_coll_repr_"): + ret_val = item._coll_repr_() + if "id" in ret_val: + del ret_val["id"] + for key in ret_val.keys(): + cols[key].append(ret_val[key]) + else: + for field in extra_fields: + value = item + try: + attrs = field.split(".") + for i, attr in enumerate(attrs): + # find indexing like abc[1] + res = re.search(r"\[[+-]?\d+\]", attr) + has_index = False + if res: + has_index = True + index_str = res.group() + index = int(index_str.replace("[", "").replace("]", "")) + attr = attr.replace(index_str, "") + + value = getattr(value, attr, None) + if isinstance(value, list) and has_index: + value = value[index] + # If the object has a special representation when nested we will use that instead + if ( + hasattr(value, "__repr_syft_nested__") + and i == len(attrs) - 1 + ): + value = value.__repr_syft_nested__() + if ( + isinstance(value, list) + and i == len(attrs) - 1 + and len(value) > 0 + and hasattr(value[0], "__repr_syft_nested__") + ): + value = [ + ( + x.__repr_syft_nested__() + if hasattr(x, "__repr_syft_nested__") + else x + ) + for x in value + ] + if value is None: + value = "n/a" + + except Exception as e: + print(e) + value = None + cols[field].append(sanitize_html(str(value))) + + col_lengths = {len(cols[col]) for col in cols.keys()} + if len(col_lengths) != 1: + logger.debug("Cannot create table for items with different number of fields.") + return [] + + num_rows = col_lengths.pop() + if add_index and TABLE_INDEX_KEY not in cols: + cols[TABLE_INDEX_KEY] = list(range(num_rows)) + + # NOTE cannot use Pandas, not all values can be in a DataFrame (dict/list/...) + rows = [] + for i in range(num_rows): + row = {} + for col in cols.keys(): + row[col] = cols[col][i] + rows.append(row) + + return rows + + +def _sort_table_rows(rows: list[dict[str, Any]], sort_key: str) -> list[dict[str, Any]]: + try: + sort_values = [row[sort_key] for row in rows] + except KeyError: + # Not all rows have the sort_key, do not sort + return rows + + # relative + from ..types.datetime import DateTime + from ..types.datetime import str_is_datetime + + if all(isinstance(v, str) and str_is_datetime(v) for v in sort_values): + sort_values = [DateTime.from_str(v) for v in sort_values] + + reverse_sort = False + if isinstance(sort_values[0], DateTime): + sort_values = [d.utc_timestamp for d in sort_values] + reverse_sort = True + + rows_sorted = [ + row + for _, row in sorted( + zip(sort_values, rows), + reverse=reverse_sort, + key=lambda pair: pair[0], + ) + ] + + return rows_sorted + + +def prepare_table_data( + obj: Any, + add_index: bool = True, +) -> tuple[list[dict], dict]: + """ + Creates table data and metadata for a given object. + + If a tabular representation cannot be created, an empty list and empty dict are returned instead. 
+ + Args: + obj (Any): The input object for which table data is prepared. + add_index (bool, optional): Whether to add an index column to the table. Defaults to True. + + Returns: + tuple: A tuple (table_data, table_metadata) where table_data is a list of dictionaries + where each dictionary represents a row in the table and table_metadata is a dictionary + containing metadata about the table such as name, icon, etc. + + """ + + values = _get_values_for_table_repr(obj) + if len(values) == 0: + return [], {} + + # check first value and obj itself to see if syft in mro. If not, don't create table + first_value = values[0] + if not _syft_in_mro(obj, first_value): + return [], {} + + extra_fields = getattr(first_value, "__repr_attrs__", []) + is_homogenous = len({type(x) for x in values}) == 1 + if is_homogenous: + sort_key = getattr(first_value, "__table_sort_attr__", None) or "created_at" + cls_name = first_value.__class__.__name__ + grid_template_columns, grid_template_cell_columns = _get_grid_template_columns( + first_value + ) + else: + sort_key = "created_at" + cls_name = "" + grid_template_columns = None + grid_template_cell_columns = None + + table_data = _create_table_rows( + _self=obj, + is_homogenous=is_homogenous, + extra_fields=extra_fields, + add_index=add_index, + ) + # if empty result, collection objects have no table representation + if not table_data: + return [], {} + + table_data = _sort_table_rows(table_data, sort_key) + + table_metadata = { + "name": f"{cls_name} {obj.__class__.__name__.capitalize()}", + "columns": list(table_data[0].keys()), + "icon": getattr(first_value, "icon", None), + "grid_template_columns": grid_template_columns, + "grid_template_cell_columns": grid_template_cell_columns, + } + + return table_data, table_metadata + + +def itable_template_from_df(df: pd.DataFrame, itable_css: str | None = None) -> str: + """ + Generate an itable template from a pandas DataFrame. + + The itable template contains a JSON string that can be used to render an itable downstream + by the patched ipython. + + Args: + df (pd.DataFrame): The DataFrame to generate the template from. + itable_css (str | None, optional): The CSS styles to apply to the itable template. Defaults to None. + + Returns: + str: The generated itable template as a string. + + """ + itable_template = f""" + {json.dumps({"columns": df.columns.tolist(), + "data": df.values.tolist(), + "itable_css": itable_css})} + """ + return itable_template + + +def render_itable_template(itable_str: str) -> str: + """ + Renders an itable template string into an HTML table using itables.to_html_datatable. + + Args: + itable_str (str): The itable template string to render. + + Returns: + str: The rendered HTML table. + + """ + + df, itable_css = _extract_df_from_itable_template(itable_str) + if itable_css: + return itables.to_html_datatable(df=df, css=itable_css) + else: + return itables.to_html_datatable(df=df) + + +def _extract_df_from_itable_template( + itable_str: str, +) -> tuple[pd.DataFrame, str | None]: + """ + Extracts a DataFrame and CSS styles from an itable template string. + + Args: + itable_str (str): The itable template string to extract from. + + Returns: + tuple[pd.DataFrame, str | None]: A tuple containing the extracted DataFrame and the CSS styles. + - The DataFrame is created using the columns and data from the itable template. + - The CSS styles are extracted from the itable template, or None if not present. 
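+
+    Example template payload (illustrative values only):
+        '{"columns": ["name"], "data": [["a"], ["b"]], "itable_css": null}'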
+ + """ + json_data = json.loads(itable_str) + extracted_df = pd.DataFrame(columns=json_data["columns"], data=json_data["data"]) + return extracted_df, json_data.get("itable_css", None) diff --git a/packages/syft/src/syft/util/telemetry.py b/packages/syft/src/syft/util/telemetry.py index 32a57dd0534..e0449dad92c 100644 --- a/packages/syft/src/syft/util/telemetry.py +++ b/packages/syft/src/syft/util/telemetry.py @@ -1,79 +1,139 @@ # stdlib from collections.abc import Callable +import logging import os from typing import Any +from typing import TYPE_CHECKING from typing import TypeVar +# relative +from .. import __version__ +from .util import str_to_bool -def str_to_bool(bool_str: str | None) -> bool: - result = False - bool_str = str(bool_str).lower() - if bool_str == "true" or bool_str == "1": - result = True - return result +__all__ = [ + "TRACING_ENABLED", + "instrument", + "instrument_fastapi", + "instrument_botocore", +] +TRACING_ENABLED = str_to_bool(os.environ.get("TRACING", "False")) +logger = logging.getLogger(__name__) -TRACE_MODE = str_to_bool(os.environ.get("TRACE", "False")) +T = TypeVar("T", bound=Callable | type) -T = TypeVar("T", bound=Callable | type) +def no_instrument(__func_or_class: T | None = None, /, *args: Any, **kwargs: Any) -> T: + def noop_wrapper(__func_or_class: T) -> T: + return __func_or_class + if __func_or_class is None: + return noop_wrapper # type: ignore + else: + return __func_or_class -def noop(__func_or_class: T, /, *args: Any, **kwargs: Any) -> T: - return __func_or_class +def setup_instrumenter() -> Any: + if not TRACING_ENABLED: + return no_instrument -if not TRACE_MODE: - instrument = noop -else: try: - print("OpenTelemetry Tracing enabled") - service_name = os.environ.get("SERVICE_NAME", "client") - jaeger_host = os.environ.get("JAEGER_HOST", "localhost") - jaeger_port = int(os.environ.get("JAEGER_PORT", "14268")) - # third party from opentelemetry import trace - from opentelemetry.exporter.jaeger.thrift import JaegerExporter + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( + OTLPSpanExporter, + ) + from opentelemetry.sdk.resources import OTELResourceDetector + from opentelemetry.sdk.resources import ProcessResourceDetector from opentelemetry.sdk.resources import Resource - from opentelemetry.sdk.resources import SERVICE_NAME from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor - trace.set_tracer_provider( - TracerProvider(resource=Resource.create({SERVICE_NAME: service_name})) - ) - jaeger_exporter = JaegerExporter( - # agent_host_name=jaeger_host, - # agent_port=jaeger_port, - collector_endpoint=f"http://{jaeger_host}:{jaeger_port}/api/traces?format=jaeger.thrift", - # udp_split_oversized_batches=True, - ) + # relative + from .trace_decorator import instrument - trace.get_tracer_provider().add_span_processor( - BatchSpanProcessor(jaeger_exporter) - ) + # create a resource + resource = Resource({"syft.version": __version__}) + resource = resource.merge(OTELResourceDetector().detect()) + resource = resource.merge(ProcessResourceDetector().detect()) + logger.debug(f"OTEL resource : {resource.__dict__}") - # from opentelemetry.sdk.trace.export import ConsoleSpanExporter - # console_exporter = ConsoleSpanExporter() - # span_processor = BatchSpanProcessor(console_exporter) - # trace.get_tracer_provider().add_span_processor(span_processor) + # create a trace provider from the resource + provider = TracerProvider(resource=resource) + # create a span processor + 
otlp_exporter = OTLPSpanExporter() + span_processor = BatchSpanProcessor(otlp_exporter) + provider.add_span_processor(span_processor) + + # set the global trace provider + trace.set_tracer_provider(provider) + + logger.info("Added TracerProvider with BatchSpanProcessor") + return instrument + except Exception as e: + logger.error("Failed to import opentelemetry", exc_info=e) + return no_instrument + + +def instrument_fastapi(app: Any) -> None: + if not TRACING_ENABLED: + return + + try: # third party - import opentelemetry.instrumentation.requests + from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor - opentelemetry.instrumentation.requests.RequestsInstrumentor().instrument() + FastAPIInstrumentor().instrument_app(app) + logger.info("Added OTEL FastAPIInstrumentor") + except Exception as e: + logger.error(f"Failed to load FastAPIInstrumentor. {e}") - # relative - # from opentelemetry.instrumentation.digma.trace_decorator import ( - # instrument as _instrument, - # ) - # - # until this is merged: - # https://github.com/digma-ai/opentelemetry-instrumentation-digma/pull/41 - from .trace_decorator import instrument as _instrument - - instrument = _instrument - except Exception: # nosec - print("Failed to import opentelemetry") - instrument = noop + +def instrument_botocore() -> None: + if not TRACING_ENABLED: + return + + try: + # third party + from opentelemetry.instrumentation.botocore import BotocoreInstrumentor + + BotocoreInstrumentor().instrument() + logger.info("Added OTEL BotocoreInstrumentor") + except Exception as e: + logger.error(f"Failed to load BotocoreInstrumentor. {e}") + + +def instrument_threads() -> None: + if not TRACING_ENABLED: + return + + try: + # third party + from opentelemetry.instrumentation.threading import ThreadingInstrumentor + + ThreadingInstrumentor().instrument() + logger.info("Added OTEL ThreadingInstrumentor") + except Exception as e: + logger.error(f"Failed to load ThreadingInstrumentor. {e}") + + +def instrument_sqlalchemny() -> None: + if not TRACING_ENABLED: + return + + try: + # third party + from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor + + SQLAlchemyInstrumentor().instrument(enable_commenter=True, commenter_options={}) + logger.info("Added OTEL SQLAlchemyInstrumentor") + except Exception as e: + logger.error(f"Failed to load SQLAlchemyInstrumentor. 
{e}") + + +if TYPE_CHECKING: + # To let static type checker know the returntype of instrument decorators + instrument = no_instrument +else: + instrument = setup_instrumenter() diff --git a/packages/syft/src/syft/util/test_helpers/__init__.py b/packages/syft/src/syft/util/test_helpers/__init__.py new file mode 100644 index 00000000000..daa55d0a327 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/__init__.py @@ -0,0 +1,16 @@ +def make_nb_logger(name="notebook"): + # stdlib + import logging + + logger = logging.getLogger() + file_logging = logging.FileHandler(f"{name}.log") + file_logging.setFormatter( + logging.Formatter( + "%(asctime)s - pid-%(process)d tid-%(thread)d - %(levelname)s - %(name)s - %(message)s" + ) + ) + file_logging.setLevel(logging.INFO) + + logger.addHandler(file_logging) + logger.setLevel(logging.INFO) + return logger diff --git a/packages/syft/src/syft/util/test_helpers/apis/__init__.py b/packages/syft/src/syft/util/test_helpers/apis/__init__.py new file mode 100644 index 00000000000..e8221857fba --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/apis/__init__.py @@ -0,0 +1,21 @@ +# stdlib +import os + +# relative +from ...util import str_to_bool +from .submit_query import make_submit_query + +env_var = "TEST_BIGQUERY_APIS_LIVE" +use_live = str_to_bool(str(os.environ.get(env_var, "False"))) +env_name = "Live" if use_live else "Mock" +print(f"Using {env_name} API Code, this will query BigQuery. ${env_var}=={use_live}") + + +if use_live: + # relative + from .live.schema import make_schema + from .live.test_query import make_test_query +else: + # relative + from .mock.schema import make_schema + from .mock.test_query import make_test_query diff --git a/packages/syft/src/syft/util/test_helpers/apis/live/__init__.py b/packages/syft/src/syft/util/test_helpers/apis/live/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft/src/syft/util/test_helpers/apis/live/schema.py b/packages/syft/src/syft/util/test_helpers/apis/live/schema.py new file mode 100644 index 00000000000..2cae94b21c6 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/apis/live/schema.py @@ -0,0 +1,108 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy + +# relative +from ..... 
import test_settings +from ..rate_limiter import is_within_rate_limit + + +def make_schema(settings: dict, worker_pool_name: str) -> Callable: + updated_settings = { + "calls_per_min": 5, + "rate_limiter_enabled": True, + "credentials": test_settings.gce_service_account.to_dict(), + "region": test_settings.gce_region, + "project_id": test_settings.gce_project_id, + "dataset_1": test_settings.dataset_1, + "table_1": test_settings.table_1, + "table_2": test_settings.table_2, + } | settings + + @sy.api_endpoint( + path="bigquery.schema", + description="This endpoint allows for visualising the metadata of tables available in BigQuery.", + settings=updated_settings, + helper_functions=[ + is_within_rate_limit + ], # Adds ratelimit as this is also a method available to data scientists + worker_pool_name=worker_pool_name, + ) + def live_schema( + context, + ) -> str: + # stdlib + import datetime + + # third party + from google.cloud import bigquery # noqa: F811 + from google.oauth2 import service_account + import pandas as pd + + # syft absolute + from syft import SyftException + + # Auth for Bigquer based on the workload identity + credentials = service_account.Credentials.from_service_account_info( + context.settings["credentials"] + ) + scoped_credentials = credentials.with_scopes( + ["https://www.googleapis.com/auth/cloud-platform"] + ) + + client = bigquery.Client( + credentials=scoped_credentials, + location=context.settings["region"], + ) + + # Store a dict with the calltimes for each user, via the email. + if context.settings["rate_limiter_enabled"]: + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." + ) + context.state[context.user.email].append(datetime.datetime.now()) + + try: + # Formats the data schema in a data frame format + # Warning: the only supported format types are primitives, np.ndarrays and pd.DataFrames + + data_schema = [] + for table_id in [ + f"{context.settings['dataset_1']}.{context.settings['table_1']}", + f"{context.settings['dataset_1']}.{context.settings['table_2']}", + ]: + table = client.get_table(table_id) + for schema in table.schema: + data_schema.append( + { + "project": str(table.project), + "dataset_id": str(table.dataset_id), + "table_id": str(table.table_id), + "schema_name": str(schema.name), + "schema_field": str(schema.field_type), + "description": str(table.description), + "num_rows": str(table.num_rows), + } + ) + return pd.DataFrame(data_schema) + + except Exception as e: + # not a bigquery exception + if not hasattr(e, "_errors"): + output = f"got exception e: {type(e)} {str(e)}" + raise SyftException( + public_message=f"An error occured executing the API call {output}" + ) + + # Should add appropriate error handling for what should be exposed to the data scientists. + raise SyftException( + public_message="An error occured executing the API call, please contact the domain owner." + ) + + return live_schema diff --git a/packages/syft/src/syft/util/test_helpers/apis/live/test_query.py b/packages/syft/src/syft/util/test_helpers/apis/live/test_query.py new file mode 100644 index 00000000000..cca61eae533 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/apis/live/test_query.py @@ -0,0 +1,113 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy + +# relative +from ..... 
import test_settings +from ..rate_limiter import is_within_rate_limit + + +def make_test_query(settings) -> Callable: + updated_settings = { + "calls_per_min": 10, + "rate_limiter_enabled": True, + "credentials": test_settings.gce_service_account.to_dict(), + "region": test_settings.gce_region, + "project_id": test_settings.gce_project_id, + } | settings + + # these are the same if you allow the rate limiter to be turned on and off + @sy.api_endpoint_method( + settings=updated_settings, + helper_functions=[is_within_rate_limit], + ) + def live_test_query( + context, + sql_query: str, + ) -> str: + # stdlib + import datetime + + # third party + from google.cloud import bigquery # noqa: F811 + from google.oauth2 import service_account + + # syft absolute + from syft import SyftException + + # Auth for Bigquer based on the workload identity + credentials = service_account.Credentials.from_service_account_info( + context.settings["credentials"] + ) + scoped_credentials = credentials.with_scopes( + ["https://www.googleapis.com/auth/cloud-platform"] + ) + + client = bigquery.Client( + credentials=scoped_credentials, + location=context.settings["region"], + ) + + # Store a dict with the calltimes for each user, via the email. + if context.settings["rate_limiter_enabled"]: + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." + ) + context.state[context.user.email].append(datetime.datetime.now()) + + try: + rows = client.query_and_wait( + sql_query, + project=context.settings["project_id"], + ) + + if rows.total_rows > 1_000_000: + raise SyftException( + public_message="Please only write queries that gather aggregate statistics" + ) + + return rows.to_dataframe() + + except Exception as e: + # not a bigquery exception + if not hasattr(e, "_errors"): + output = f"got exception e: {type(e)} {str(e)}" + raise SyftException( + public_message=f"An error occured executing the API call {output}" + ) + + # Treat all errors that we would like to be forwarded to the data scientists + # By default, any exception is only visible to the data owner. + + if e._errors[0]["reason"] in [ + "badRequest", + "blocked", + "duplicate", + "invalidQuery", + "invalid", + "jobBackendError", + "jobInternalError", + "notFound", + "notImplemented", + "rateLimitExceeded", + "resourceInUse", + "resourcesExceeded", + "tableUnavailable", + "timeout", + ]: + raise SyftException( + public_message="Error occured during the call: " + + e._errors[0]["message"] + ) + else: + raise SyftException( + public_message="An error occured executing the API call, please contact the domain owner." 
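+                # reasons outside the whitelist above are not echoed back; the data scientist only sees this generic message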
+ ) + + return live_test_query diff --git a/packages/syft/src/syft/util/test_helpers/apis/mock/__init__.py b/packages/syft/src/syft/util/test_helpers/apis/mock/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syft/src/syft/util/test_helpers/apis/mock/data.py b/packages/syft/src/syft/util/test_helpers/apis/mock/data.py new file mode 100644 index 00000000000..82262bf7a01 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/apis/mock/data.py @@ -0,0 +1,268 @@ +# stdlib +from math import nan + +schema_dict = { + "project": { + 0: "example-project", + 1: "example-project", + 2: "example-project", + 3: "example-project", + 4: "example-project", + 5: "example-project", + 6: "example-project", + 7: "example-project", + 8: "example-project", + 9: "example-project", + 10: "example-project", + 11: "example-project", + 12: "example-project", + 13: "example-project", + 14: "example-project", + 15: "example-project", + 16: "example-project", + 17: "example-project", + 18: "example-project", + 19: "example-project", + 20: "example-project", + 21: "example-project", + 22: "example-project", + }, + "dataset_id": { + 0: "test_1gb", + 1: "test_1gb", + 2: "test_1gb", + 3: "test_1gb", + 4: "test_1gb", + 5: "test_1gb", + 6: "test_1gb", + 7: "test_1gb", + 8: "test_1gb", + 9: "test_1gb", + 10: "test_1gb", + 11: "test_1gb", + 12: "test_1gb", + 13: "test_1gb", + 14: "test_1gb", + 15: "test_1gb", + 16: "test_1gb", + 17: "test_1gb", + 18: "test_1gb", + 19: "test_1gb", + 20: "test_1gb", + 21: "test_1gb", + 22: "test_1gb", + }, + "table_id": { + 0: "posts", + 1: "posts", + 2: "posts", + 3: "posts", + 4: "posts", + 5: "posts", + 6: "posts", + 7: "comments", + 8: "comments", + 9: "comments", + 10: "comments", + 11: "comments", + 12: "comments", + 13: "comments", + 14: "comments", + 15: "comments", + 16: "comments", + 17: "comments", + 18: "comments", + 19: "comments", + 20: "comments", + 21: "comments", + 22: "comments", + }, + "schema_name": { + 0: "int64_field_0", + 1: "id", + 2: "name", + 3: "subscribers_count", + 4: "permalink", + 5: "nsfw", + 6: "spam", + 7: "int64_field_0", + 8: "id", + 9: "body", + 10: "parent_id", + 11: "created_at", + 12: "last_modified_at", + 13: "gilded", + 14: "permalink", + 15: "score", + 16: "comment_id", + 17: "post_id", + 18: "author_id", + 19: "spam", + 20: "deleted", + 21: "upvote_raio", + 22: "collapsed_in_crowd_control", + }, + "schema_field": { + 0: "INTEGER", + 1: "STRING", + 2: "STRING", + 3: "INTEGER", + 4: "STRING", + 5: "FLOAT", + 6: "BOOLEAN", + 7: "INTEGER", + 8: "STRING", + 9: "STRING", + 10: "STRING", + 11: "INTEGER", + 12: "INTEGER", + 13: "BOOLEAN", + 14: "STRING", + 15: "INTEGER", + 16: "STRING", + 17: "STRING", + 18: "STRING", + 19: "BOOLEAN", + 20: "BOOLEAN", + 21: "FLOAT", + 22: "BOOLEAN", + }, + "description": { + 0: "None", + 1: "None", + 2: "None", + 3: "None", + 4: "None", + 5: "None", + 6: "None", + 7: "None", + 8: "None", + 9: "None", + 10: "None", + 11: "None", + 12: "None", + 13: "None", + 14: "None", + 15: "None", + 16: "None", + 17: "None", + 18: "None", + 19: "None", + 20: "None", + 21: "None", + 22: "None", + }, + "num_rows": { + 0: "2000000", + 1: "2000000", + 2: "2000000", + 3: "2000000", + 4: "2000000", + 5: "2000000", + 6: "2000000", + 7: "2000000", + 8: "2000000", + 9: "2000000", + 10: "2000000", + 11: "2000000", + 12: "2000000", + 13: "2000000", + 14: "2000000", + 15: "2000000", + 16: "2000000", + 17: "2000000", + 18: "2000000", + 19: "2000000", + 20: "2000000", + 21: "2000000", + 22: "2000000", + }, +} + + 
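+# Sample result rows for the mock bigquery.test_query endpoint; adjust_dataframe_rows repeats or truncates them to match the query's LIMIT.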
+query_dict = { + "int64_field_0": { + 0: 4, + 1: 5, + 2: 10, + 3: 16, + 4: 17, + 5: 23, + 6: 24, + 7: 25, + 8: 27, + 9: 40, + }, + "id": { + 0: "t5_via1x", + 1: "t5_cv9gn", + 2: "t5_8p2tq", + 3: "t5_8fcro", + 4: "t5_td5of", + 5: "t5_z01fv", + 6: "t5_hmqjk", + 7: "t5_1flyj", + 8: "t5_5rwej", + 9: "t5_uurcv", + }, + "name": { + 0: "/channel/mylittlepony", + 1: "/channel/polyamory", + 2: "/channel/Catholicism", + 3: "/channel/cordcutters", + 4: "/channel/stevenuniverse", + 5: "/channel/entitledbitch", + 6: "/channel/engineering", + 7: "/channel/nottheonion", + 8: "/channel/FoodPorn", + 9: "/channel/puppysmiles", + }, + "subscribers_count": { + 0: 4323081, + 1: 2425929, + 2: 4062607, + 3: 7543226, + 4: 2692168, + 5: 2709080, + 6: 8766144, + 7: 2580984, + 8: 7784809, + 9: 3715991, + }, + "permalink": { + 0: "/channel//channel/mylittlepony", + 1: "/channel//channel/polyamory", + 2: "/channel//channel/Catholicism", + 3: "/channel//channel/cordcutters", + 4: "/channel//channel/stevenuniverse", + 5: "/channel//channel/entitledbitch", + 6: "/channel//channel/engineering", + 7: "/channel//channel/nottheonion", + 8: "/channel//channel/FoodPorn", + 9: "/channel//channel/puppysmiles", + }, + "nsfw": { + 0: nan, + 1: nan, + 2: nan, + 3: nan, + 4: nan, + 5: nan, + 6: nan, + 7: nan, + 8: nan, + 9: nan, + }, + "spam": { + 0: False, + 1: False, + 2: False, + 3: False, + 4: False, + 5: False, + 6: False, + 7: False, + 8: False, + 9: False, + }, +} diff --git a/packages/syft/src/syft/util/test_helpers/apis/mock/schema.py b/packages/syft/src/syft/util/test_helpers/apis/mock/schema.py new file mode 100644 index 00000000000..f79281d2dc2 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/apis/mock/schema.py @@ -0,0 +1,56 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy + +# relative +from ..rate_limiter import is_within_rate_limit +from .data import schema_dict + + +def make_schema( + settings, + worker_pool_name, + path="bigquery.schema", +) -> Callable: + updated_settings = { + "calls_per_min": 5, + "rate_limiter_enabled": True, + "schema_dict": schema_dict, + } | settings + + @sy.api_endpoint( + path=path, + description="This endpoint allows for visualising the metadata of tables available in BigQuery.", + settings=updated_settings, + helper_functions=[is_within_rate_limit], + worker_pool_name=worker_pool_name, + ) + def mock_schema( + context, + ) -> str: + # syft absolute + from syft import SyftException + + # Store a dict with the calltimes for each user, via the email. + if context.settings["rate_limiter_enabled"]: + # stdlib + import datetime + + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." 
+ ) + context.state[context.user.email].append(datetime.datetime.now()) + + # third party + import pandas as pd + + df = pd.DataFrame(context.settings["schema_dict"]) + return df + + return mock_schema diff --git a/packages/syft/src/syft/util/test_helpers/apis/mock/test_query.py b/packages/syft/src/syft/util/test_helpers/apis/mock/test_query.py new file mode 100644 index 00000000000..ae028a8cf36 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/apis/mock/test_query.py @@ -0,0 +1,138 @@ +# stdlib +from collections.abc import Callable + +# syft absolute +import syft as sy + +# relative +from ..rate_limiter import is_within_rate_limit +from .data import query_dict + + +def extract_limit_value(sql_query: str) -> int: + # stdlib + import re + + limit_pattern = re.compile(r"\bLIMIT\s+(\d+)\b", re.IGNORECASE) + match = limit_pattern.search(sql_query) + if match: + return int(match.group(1)) + return None + + +def is_valid_sql(query: str) -> bool: + # stdlib + import sqlite3 + + # Prepare an in-memory SQLite database + conn = sqlite3.connect(":memory:") + cursor = conn.cursor() + + try: + # Use the EXPLAIN QUERY PLAN command to get the query plan + cursor.execute(f"EXPLAIN QUERY PLAN {query}") + except sqlite3.Error as e: + if "no such table" in str(e).lower(): + return True + return False + finally: + conn.close() + + +def adjust_dataframe_rows(df, target_rows: int): + # third party + import pandas as pd + + current_rows = len(df) + + if target_rows > current_rows: + # Repeat rows to match target_rows + repeat_times = (target_rows + current_rows - 1) // current_rows + df_expanded = pd.concat([df] * repeat_times, ignore_index=True).head( + target_rows + ) + else: + # Truncate rows to match target_rows + df_expanded = df.head(target_rows) + + return df_expanded + + +def make_test_query(settings: dict) -> Callable: + updated_settings = { + "calls_per_min": 10, + "rate_limiter_enabled": True, + "query_dict": query_dict, + } | settings + + # these are the same if you allow the rate limiter to be turned on and off + @sy.api_endpoint_method( + settings=updated_settings, + helper_functions=[ + is_within_rate_limit, + extract_limit_value, + is_valid_sql, + adjust_dataframe_rows, + ], + ) + def mock_test_query( + context, + sql_query: str, + ) -> str: + # stdlib + import datetime + + # third party + from google.api_core.exceptions import BadRequest + + # syft absolute + from syft import SyftException + + # Store a dict with the calltimes for each user, via the email. + if context.settings["rate_limiter_enabled"]: + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + raise SyftException( + public_message="Rate limit of calls per minute has been reached." + ) + context.state[context.user.email].append(datetime.datetime.now()) + + bad_table = "invalid_table" + bad_post = ( + "BadRequest: 400 POST " + "https://bigquery.googleapis.com/bigquery/v2/projects/project-id/" + "queries?prettyPrint=false: " + ) + if bad_table in sql_query: + try: + raise BadRequest( + f'{bad_post} Table "{bad_table}" must be qualified ' + "with a dataset (e.g. dataset.table)." + ) + except Exception as e: + raise SyftException( + public_message=f"*must be qualified with a dataset*. 
{e}" + ) + + if not context.code.is_valid_sql(sql_query): + raise BadRequest( + f'{bad_post} Syntax error: Unexpected identifier "{sql_query}" at [1:1]' + ) + + # third party + import pandas as pd + + limit = context.code.extract_limit_value(sql_query) + if limit > 1_000_000: + raise SyftException( + public_message="Please only write queries that gather aggregate statistics" + ) + + base_df = pd.DataFrame(context.settings["query_dict"]) + + df = context.code.adjust_dataframe_rows(base_df, limit) + return df + + return mock_test_query diff --git a/packages/syft/src/syft/util/test_helpers/apis/rate_limiter.py b/packages/syft/src/syft/util/test_helpers/apis/rate_limiter.py new file mode 100644 index 00000000000..8ce319b61f4 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/apis/rate_limiter.py @@ -0,0 +1,16 @@ +def is_within_rate_limit(context) -> bool: + """Rate limiter for custom API calls made by users.""" + # stdlib + import datetime + + state = context.state + settings = context.settings + email = context.user.email + + current_time = datetime.datetime.now() + calls_last_min = [ + 1 if (current_time - call_time).seconds < 60 else 0 + for call_time in state[email] + ] + + return sum(calls_last_min) < settings.get("calls_per_min", 5) diff --git a/packages/syft/src/syft/util/test_helpers/apis/submit_query.py b/packages/syft/src/syft/util/test_helpers/apis/submit_query.py new file mode 100644 index 00000000000..ee53cd7e0b6 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/apis/submit_query.py @@ -0,0 +1,42 @@ +# syft absolute +import syft as sy + + +def make_submit_query(settings, worker_pool_name): + updated_settings = {"user_code_worker": worker_pool_name} | settings + + @sy.api_endpoint( + path="bigquery.submit_query", + description="API endpoint that allows you to submit SQL queries to run on the private data.", + worker_pool_name=worker_pool_name, + settings=updated_settings, + ) + def submit_query( + context, + func_name: str, + query: str, + ) -> str: + # syft absolute + import syft as sy + + @sy.syft_function( + name=func_name, + input_policy=sy.MixedInputPolicy( + endpoint=sy.Constant( + val=context.user_client.api.services.bigquery.test_query + ), + query=sy.Constant(val=query), + client=context.user_client, + ), + worker_pool_name=context.settings["user_code_worker"], + ) + def execute_query(query: str, endpoint): + res = endpoint(sql_query=query) + return res + + request = context.user_client.code.request_code_execution(execute_query) + context.admin_client.requests.set_tags(request, ["autosync"]) + + return f"Query submitted {request}. 
Use `client.code.{func_name}()` to run your query" + + return submit_query diff --git a/packages/syft/src/syft/util/test_helpers/checkpoint.py b/packages/syft/src/syft/util/test_helpers/checkpoint.py new file mode 100644 index 00000000000..599e1d62c43 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/checkpoint.py @@ -0,0 +1,202 @@ +# stdlib +import os +from pathlib import Path +import tempfile +import zipfile + +# syft absolute +from syft import SyftError +from syft import SyftException +from syft.client.client import SyftClient +from syft.service.user.user_roles import ServiceRole +from syft.util.util import get_root_data_path + +# relative +from ...server.env import get_default_root_email +from ...server.env import get_default_root_password +from .worker_helpers import build_and_push_image + +CHECKPOINT_ROOT = "checkpoints" +CHECKPOINT_DIR_PREFIX = "chkpt" +DEFAULT_CHECKPOINT_DIR = get_root_data_path() / CHECKPOINT_ROOT + +try: + # Ensure the default checkpoint path exists always + DEFAULT_CHECKPOINT_DIR.mkdir(parents=True, exist_ok=True) +except Exception as e: + print(f"Error creating default checkpoint directory: {e}") + + +def is_admin(client: SyftClient) -> bool: + return client._SyftClient__user_role == ServiceRole.ADMIN + + +def is_valid_dir(path: Path | str) -> Path: + if isinstance(path, str): + path = Path(path) + if not path.is_dir(): + raise SyftException(f"Path {path} is not a directory.") + return path + + +def is_valid_file(path: Path | str) -> Path: + if isinstance(path, str): + path = Path(path) + if not path.is_file(): + raise SyftException(f"Path {path} is not a file.") + return path + + +def create_checkpoint( + name: str, # Name of the checkpoint + client: SyftClient, + chkpt_dir: Path | str = DEFAULT_CHECKPOINT_DIR, + root_email: str | None = None, + root_pwd: str | None = None, +) -> None: + """Save a checkpoint for the database.""" + + is_valid_dir(chkpt_dir) + + if root_email is None: + root_email = get_default_root_email() + + if root_pwd is None: + root_pwd = get_default_root_password() + + root_client = ( + client + if is_admin(client) + else client.login(email=root_email, password=root_pwd) + ) + migration_data = root_client.get_migration_data(include_blobs=True) + + if isinstance(migration_data, SyftError): + raise SyftException(message=migration_data.message) + + checkpoint_path = chkpt_dir / f"{name}.zip" + + # get a temporary directory to save the checkpoint + temp_dir = Path(tempfile.mkdtemp()) + checkpoint_blob = temp_dir / "checkpoint.blob" + checkpoint_yaml = temp_dir / "checkpoint.yaml" + migration_data.save( + path=checkpoint_blob, + yaml_path=checkpoint_yaml, + ) + + # Combine the files into a single zip file to checkpoint_path + with zipfile.ZipFile(checkpoint_path, "w") as zipf: + zipf.write(checkpoint_blob, "checkpoint.blob") + zipf.write(checkpoint_yaml, "checkpoint.yaml") + + print(f"Checkpoint saved at: \n {checkpoint_path}") + + +def get_checkpoint_for( + path: Path | str | None = None, chkpt_name: str | None = None +) -> Path | None: + # Path takes precedence over name + if path: + return is_valid_file(path) + + if chkpt_name: + return is_valid_file(DEFAULT_CHECKPOINT_DIR / f"{chkpt_name}.zip") + + +def get_registry_credentials() -> tuple[str, str]: + return os.environ.get("REGISTRY_USERNAME", ""), os.environ.get( + "REGISTRY_PASSWORD", "" + ) + + +def load_from_checkpoint( + client: SyftClient, + name: str | None = None, + path: Path | str | None = None, + root_email: str | None = None, + root_password: str | None = None, + 
registry_username: str | None = None, + registry_password: str | None = None, +) -> None: + """Load the last saved checkpoint for the given checkpoint state.""" + + root_email = "info@openmined.org" if root_email is None else root_email + root_password = "changethis" if root_password is None else root_password + + root_client = ( + client + if is_admin(client) + else client.login(email=root_email, password=root_password) + ) + if name is None and path is None: + raise SyftException("Please provide either a checkpoint name or a path.") + + checkpoint_zip_path = get_checkpoint_for(path=path, chkpt_name=name) + + if checkpoint_zip_path is None: + print(f"No last checkpoint found for : {name} or {path}") + return + + # Unzip the checkpoint zip file + with zipfile.ZipFile(checkpoint_zip_path, "r") as zipf: + checkpoint_temp_dir = Path(tempfile.mkdtemp()) + zipf.extract("checkpoint.blob", checkpoint_temp_dir) + zipf.extract("checkpoint.yaml", checkpoint_temp_dir) + + checkpoint_blob = checkpoint_temp_dir / "checkpoint.blob" + + print(f"Loading from checkpoint: {checkpoint_zip_path}") + result = root_client.load_migration_data( + path_or_data=checkpoint_blob, + include_worker_pools=True, + with_reset_db=True, + ) + + if isinstance(result, SyftError): + raise SyftException(message=result.message) + + print("Successfully loaded data from checkpoint.") + + # Step 1: Build and push the worker images + + print("Recreating worker images from checkpoint.") + worker_image_list = ( + [] if root_client.images.get_all() is None else root_client.images.get_all() + ) + for worker_image in worker_image_list: + if worker_image.is_prebuilt: + continue + + registry = worker_image.image_identifier.registry + + build_and_push_image( + root_client, + worker_image, + registry_uid=registry.id if registry else None, + tag=worker_image.image_identifier.repo_with_tag, + reg_password=registry_username, + reg_username=registry_password, + force_build=True, + ) + + print("Successfully Built worker image data from checkpoint.") + + # Step 2: Recreate the worker pools + print("Recreating worker pools from checkpoint.") + worker_pool_list = ( + [] if root_client.worker_pools is None else root_client.worker_pools + ) + for worker_pool in worker_pool_list: + previous_worker_cnt = worker_pool.max_count + purge_res = root_client.worker_pools.purge_workers(pool_id=worker_pool.id) + print(purge_res) + add_res = root_client.worker_pools.add_workers( + number=previous_worker_cnt, + pool_id=worker_pool.id, + registry_username=registry_username, + registry_password=registry_password, + ) + print(add_res) + + print("Successfully loaded worker pool data from checkpoint.") diff --git a/packages/syft/src/syft/util/test_helpers/email_helpers.py b/packages/syft/src/syft/util/test_helpers/email_helpers.py new file mode 100644 index 00000000000..950ced27375 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/email_helpers.py @@ -0,0 +1,342 @@ +# stdlib +from dataclasses import dataclass +from dataclasses import field +import json +import re +import time +from typing import Any + +# third party +from aiosmtpd.controller import Controller +from faker import Faker +from filelock import FileLock + +# relative +from ...service.user.user_roles import ServiceRole + +fake = Faker() + + +@dataclass +class Email: + email_from: str + email_to: str + email_content: str + + def to_dict(self) -> dict: + output = {} + for k, v in self.__dict__.items(): + output[k] = v + return output + + def __iter__(self): + yield from self.to_dict().items() + + def 
__getitem__(self, key): + return self.to_dict()[key] + + def __repr__(self) -> str: + return f"{self.email_to}\n{self.email_from}\n\n{self.email_content}" + + +class EmailServer: + def __init__(self, filepath="emails.json"): + self.filepath = filepath + lockpath = self.filepath + ".lock" + self._lock = FileLock(lock_file=lockpath) + self._emails: dict[str, list[Email]] = self.load_emails() + + def load_emails(self) -> dict[str, list[Email]]: + try: + with ( + self._lock as _, + open(self.filepath) as f, + ): + data = json.load(f) + return {k: [Email(**email) for email in v] for k, v in data.items()} + except Exception as e: + print("Issues reading email file. Using empty email dict.", e) + return {} + + def save_emails(self) -> None: + with ( + self._lock as _, + open(self.filepath, "w") as f, + ): + data = { + k: [email.to_dict() for email in v] for k, v in self._emails.items() + } + f.write(json.dumps(data)) + + def add_email_for_user(self, user_email: str, email: Email) -> None: + with self._lock: + if user_email not in self._emails: + self._emails[user_email] = [] + self._emails[user_email].append(email) + self.save_emails() + + def get_emails_for_user(self, user_email: str) -> list[Email]: + self._emails: dict[str, list[Email]] = self.load_emails() + return self._emails.get(user_email, []) + + def reset_emails(self) -> None: + with self._lock: + self._emails = {} + self.save_emails() + + +SENDER = "noreply@openmined.org" + + +def get_token(email) -> str: + # stdlib + import re + + pattern = r"syft_client\.reset_password\(token='(.*?)', new_password=.*?\)" + try: + token = re.search(pattern, email.email_content).group(1) + except Exception: + raise Exception(f"No token found in email: {email.email_content}") + return token + + +@dataclass +class TestUser: + name: str + email: str + password: str + role: ServiceRole + new_password: str | None = None + email_disabled: bool = False + reset_password: bool = False + reset_token: str | None = None + _client_cache: Any | None = field(default=None, repr=False, init=False) + _email_server: EmailServer | None = None + + @property + def latest_password(self) -> str: + if self.new_password: + return self.new_password + return self.password + + def make_new_password(self) -> str: + self.new_password = fake.password() + return self.new_password + + @property + def client(self): + return self._client_cache + + def relogin(self) -> None: + self.client = self.client + + @client.setter + def client(self, client): + this_client = client.login(email=self.email, password=self.latest_password) + self._client_cache = this_client + + def to_dict(self) -> dict: + output = {} + for k, v in self.__dict__.items(): + if k.startswith("_"): + continue + if k == "role": + v = str(v) + output[k] = v + return output + + def __iter__(self): + for key, val in self.to_dict().items(): + if not key.startswith("_"): + yield key, val + + def __getitem__(self, key): + if key.startswith("_"): + return None + return self.to_dict()[key] + + def update_password(self): + self.password = self.new_password + self.new_password = None + + @property + def emails(self) -> list[Email]: + if not self._email_server: + print("Not connected to email server object") + return [] + return self._email_server.get_emails_for_user(self.email) + + def get_token(self) -> str: + for email in reversed(self.emails): + token = None + try: + token = get_token(email) + break + except Exception: # nosec + pass + self.reset_token = token + return token + + +def save_users(users): + user_dicts = [] + for user 
in users: + user_dicts.append(user.to_dict()) + print(user_dicts) + with open("./users.json", "w") as f: + f.write(json.dumps(user_dicts)) + + +def load_users(high_client: None, path="./users.json"): + users = [] + with open(path) as f: + data = f.read() + user_dicts = json.loads(data) + for user in user_dicts: + test_user = TestUser(**user) + if high_client: + test_user.client = high_client + users.append(test_user) + return users + + +def make_user( + name: str | None = None, + email: str | None = None, + password: str | None = None, + role: ServiceRole = ServiceRole.DATA_SCIENTIST, +): + fake = Faker() + if name is None: + name = fake.name() + if email is None: + ascii_string = re.sub(r"[^a-zA-Z\s]", "", name).lower() + dashed_string = ascii_string.replace(" ", "-") + email = f"{dashed_string}-fake@openmined.org" + if password is None: + password = fake.password() + + return TestUser(name=name, email=email, password=password, role=role) + + +def user_exists(root_client, email: str) -> bool: + users = root_client.api.services.user + for user in users: + if user.email == email: + return True + return False + + +class SMTPTestServer: + def __init__(self, email_server, port=9025, ready_timeout=5): + self.port = port + self.hostname = "0.0.0.0" # nosec: B104 + self.controller = None + + # Simple email handler class + class SimpleHandler: + async def handle_DATA(self, server, session, envelope): + try: + print(f"> SMTPTestServer got an email for {envelope.rcpt_tos}") + email = Email( + email_from=envelope.mail_from, + email_to=envelope.rcpt_tos, + email_content=envelope.content.decode( + "utf-8", errors="replace" + ), + ) + email_server.add_email_for_user(envelope.rcpt_tos[0], email) + email_server.save_emails() + return "250 Message accepted for delivery" + except Exception as e: + print(f"> Error handling email: {e}") + return "550 Internal Server Error" + + try: + self.handler = SimpleHandler() + self.controller = Controller( + self.handler, + hostname=self.hostname, + port=self.port, + ready_timeout=ready_timeout, + ) + except Exception as e: + print(f"> Error initializing SMTPTestServer Controller: {e}") + + def start(self): + self.controller.start() + + def stop(self): + self.controller.stop() + + def __del__(self): + if self.controller: + self.stop() + + +class TimeoutError(Exception): + pass + + +class Timeout: + def __init__(self, timeout_duration): + if timeout_duration > 60: + raise ValueError("Timeout duration cannot exceed 60 seconds.") + self.timeout_duration = timeout_duration + + def run_with_timeout(self, condition_func, *args, **kwargs): + start_time = time.time() + result = None + + while True: + elapsed_time = time.time() - start_time + if elapsed_time > self.timeout_duration: + raise TimeoutError( + f"Function execution exceeded {self.timeout_duration} seconds." + ) + + # Check if the condition is met + try: + if condition_func(): + print("Condition met, exiting early.") + break + except Exception as e: + print(f"Exception in target function: {e}") + break # Exit the loop if an exception occurs in the function + time.sleep(1) + + return result + + +def get_email_server(reset=False, port=9025): + email_server = EmailServer() + if reset: + email_server.reset_emails() + for _ in range(5): + try: + smtp_server = SMTPTestServer(email_server, port=port) + smtp_server.start() + return email_server, smtp_server + + except TimeoutError: + del smtp_server + print("SMTP server timed out. 
Retrying...") + continue + except Exception as e: + print(f"> Error starting SMTP server: {e}") + raise Exception("Failed to start SMTP server in 5 attempts.") + + +def create_user(root_client, test_user): + if not user_exists(root_client, test_user.email): + fake = Faker() + root_client.register( + name=test_user.name, + email=test_user.email, + password=test_user.password, + password_verify=test_user.password, + institution=fake.company(), + website=fake.url(), + ) + else: + print("User already exists", test_user) diff --git a/packages/syft/src/syft/util/test_helpers/job_helpers.py b/packages/syft/src/syft/util/test_helpers/job_helpers.py new file mode 100644 index 00000000000..bac08bad5d6 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/job_helpers.py @@ -0,0 +1,398 @@ +# stdlib +from collections import defaultdict +from collections.abc import Callable +from dataclasses import dataclass +from dataclasses import field +import json +import random +import re +import secrets +import textwrap +from typing import Any + +# relative +from ... import test_settings +from .email_helpers import TestUser + +from ...client.client import SyftClient # noqa + +dataset_1 = test_settings.get("dataset_1", default="dataset_1") +dataset_2 = test_settings.get("dataset_2", default="dataset_2") +table_1 = test_settings.get("table_1", default="table_1") +table_2 = test_settings.get("table_2", default="table_2") +table_1_col_id = test_settings.get("table_1_col_id", default="table_id") +table_1_col_score = test_settings.get("table_1_col_score", default="colname") +table_2_col_id = test_settings.get("table_2_col_id", default="table_id") +table_2_col_score = test_settings.get("table_2_col_score", default="colname") + + +@dataclass +class TestJob: + user_email: str + func_name: str + query: str + job_type: str + settings: dict # make a type so we can rely on attributes + should_succeed: bool + should_submit: bool = True + code_path: str | None = field(default=None) + admin_reviewed: bool = False + result_as_expected: bool | None = None + + _client_cache: SyftClient | None = field(default=None, repr=False, init=False) + + @property + def is_submitted(self) -> bool: + return self.code_path is not None + + @property + def client(self): + return self._client_cache + + @client.setter + def client(self, client): + self._client_cache = client + + def to_dict(self) -> dict: + output = {} + for k, v in self.__dict__.items(): + if k.startswith("_"): + continue + output[k] = v + return output + + def __iter__(self): + for key, val in self.to_dict().items(): + if key.startswith("_"): + yield key, val + + def __getitem__(self, key): + if key.startswith("_"): + return None + return self.to_dict()[key] + + @property + def code_method(self) -> None | Callable: + try: + return getattr(self.client.code, self.func_name, None) + except Exception as e: + print(f"Cant find code method. 
{e}") + return None + + +def make_query(settings: dict) -> str: + query = f""" + SELECT {settings['groupby_col']}, AVG({settings['score_col']}) AS average_score + FROM {settings['dataset']}.{settings['table']} + GROUP BY {settings['groupby_col']} + LIMIT {settings['limit']}""".strip() # nosec: B608 + + return textwrap.dedent(query) + + +def create_simple_query_job(user: TestUser) -> TestJob: + job_type = "simple_query" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + dataset = random.choice([dataset_1, dataset_2]) # nosec: B311 + table, groupby_col, score_col = random.choice( # nosec: B311 + [ + (table_1, table_1_col_id, table_1_col_score), + (table_2, table_2_col_id, table_2_col_score), + ] + ) + limit = random.randint(1, 1_000_000) # nosec: B311 + + settings = { + "dataset": dataset, + "table": table, + "groupby_col": groupby_col, + "score_col": score_col, + "limit": limit, + } + query = make_query(settings) + + result = TestJob( + user_email=user.email, + func_name=func_name, + query=query, + job_type=job_type, + settings=settings, + should_succeed=True, + ) + + result.client = user.client + return result + + +def create_wrong_asset_query(user: TestUser) -> TestJob: + job_type = "wrong_asset_query" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + valid_job = create_simple_query_job(user) + settings = valid_job.settings + corrupted_asset = random.choice(["dataset", "table"]) # nosec: B311 + settings[corrupted_asset] = "wrong_asset" + query = make_query(settings) + + result = TestJob( + user_email=user.email, + func_name=func_name, + query=query, + job_type=job_type, + settings=settings, + should_succeed=False, + ) + + result.client = user.client + return result + + +def create_wrong_syntax_query(user: TestUser) -> TestJob: + job_type = "wrong_syntax_query" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + query = "SELECT * FROM table INCORRECT SYNTAX" + + result = TestJob( + user_email=user.email, + func_name=func_name, + query=query, + job_type=job_type, + settings={}, + should_succeed=False, + ) + + result.client = user.client + return result + + +def create_long_query_job(user: TestUser) -> TestJob: + job_type = "job_too_much_text" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + query = "a" * 1_000 + + result = TestJob( + user_email=user.email, + func_name=func_name, + query=query, + job_type=job_type, + settings={}, + should_succeed=False, + ) + + result.client = user.client + return result + + +def create_query_long_name(user: TestUser) -> TestJob: + job_type = "job_long_name" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + job = create_simple_query_job(user) + + job.job_type = job_type + job.func_name = func_name + "a" * 1_000 + + return job + + +def create_job_funcname_xss(user: TestUser) -> TestJob: + job_type = "job_funcname_xss" + func_name = f"{job_type}_{secrets.token_hex(3)}" + func_name += "" + + job = create_simple_query_job(user) + job.job_type = job_type + job.func_name = func_name + job.should_submit = False + return job + + +def get_request_for_job_info(requests, job): + job_requests = [r for r in requests if r.code.service_func_name == job.func_name] + if len(job_requests) != 1: + raise Exception(f"Too many or too few requests: {job} in requests: {requests}") + return job_requests[0] + + +def create_job_query_xss(user: TestUser) -> TestJob: + job_type = "job_query_xss" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + job = create_simple_query_job(user) + job.job_type = job_type + job.func_name = func_name + job.query += "" + 
job.should_succeed = False + + return job + + +def create_job_many_columns(user: TestUser) -> TestJob: + job_type = "job_many_columns" + func_name = f"{job_type}_{secrets.token_hex(3)}" + + job = create_simple_query_job(user) + job.job_type = job_type + job.func_name = func_name + settings = job.settings + job.settings["num_extra_cols"] = random.randint(100, 1000) # nosec: B311 + + new_columns_string = ", ".join( + f"{settings['score_col']} as col_{i}" for i in range(settings["num_extra_cols"]) + ) + + job.query = f""" + SELECT {settings['groupby_col']}, AVG({settings['score_col']}) AS average_score, {new_columns_string} + FROM {settings['dataset']}.{settings['table']} + GROUP BY {settings['groupby_col']} + LIMIT {settings['limit']}""".strip() # nosec: B608 + + return job + + +def create_random_job(user: TestUser) -> TestJob: + job_func = random.choice(create_job_functions) # nosec: B311 + return job_func(user) + + +def create_jobs(users: list[TestUser], total_jobs: int = 10) -> list[TestJob]: + jobs = [] + num_users = len(users) + user_index = 0 + each_count = 0 + # keep making jobs until we have enough + while len(jobs) < total_jobs: + # if we havent used each job type yet keep getting the next one + if each_count < len(create_job_functions): + job_func = create_job_functions[each_count] + each_count += 1 + else: + # otherwise lets get a random one + job_func = create_random_job + # use the current index of user + jobs.append(job_func(users[user_index])) + + # only go as high as the last user index + if user_index < num_users - 1: + user_index += 1 + else: + # reset back to the first user + user_index = 0 + + # in case we stuffed up + if len(jobs) > total_jobs: + jobs = jobs[:total_jobs] + return jobs + + +def submit_job(job: TestJob) -> tuple[Any, str]: + client = job.client + response = client.api.services.bigquery.submit_query( + func_name=job.func_name, query=job.query + ) + job.code_path = extract_code_path(response) + return response + + +def extract_code_path(response) -> str | None: + pattern = r"client\.code\.(\w+)\(\)" + match = re.search(pattern, str(response)) + if match: + extracted_code = match.group(1) + return extracted_code + return None + + +def approve_by_running(request): + job = request.code(blocking=False) + result = job.wait() + print("got result of type", type(result), "bool", bool(result)) + # got result of type bool False + # assert result won't work unless we know what type is coming back + job_info = job.info(result=True) + # need force when running multiple times + # todo check and dont run if its already done + response = request.deposit_result(job_info, approve=True, force=True) + return response + + +def get_job_emails(jobs, client, email_server): + all_requests = client.requests + res = {} + for job in jobs: + request = get_request_for_job_info(all_requests, job) + emails = email_server.get_emails_for_user(request.requesting_user_email) + res[request.requesting_user_email] = emails + return res + + +def resolve_request(request): + service_func_name = request.code.service_func_name + if service_func_name.startswith("simple_query"): + request.approve() # approve because it is good + if service_func_name.startswith("wrong_asset_query"): + request.approve() # approve because it is bad + if service_func_name.startswith("wrong_syntax_query"): + request.approve() # approve because it is bad + if service_func_name.startswith("job_too_much_text"): + request.deny(reason="too long, boring!") # deny because it is bad + if 
service_func_name.startswith("job_long_name"): + request.approve() + if service_func_name.startswith("job_funcname_xss"): + request.deny(reason="too long, boring!") # never reach doesnt matter + if service_func_name.startswith("job_query_xss"): + request.approve() # approve because it is bad + if service_func_name.startswith("job_many_columns"): + request.approve() # approve because it is bad + + return (request.id, request.status) + + +create_job_functions = [ + create_simple_query_job, # quick way to increase the odds + create_simple_query_job, + create_simple_query_job, + create_simple_query_job, + create_simple_query_job, + create_simple_query_job, + create_wrong_syntax_query, + create_long_query_job, + create_query_long_name, + create_job_funcname_xss, + create_job_query_xss, + create_job_many_columns, +] + + +def save_jobs(jobs, filepath="./jobs.json"): + user_jobs = defaultdict(list) + for job in jobs: + user_jobs[job.user_email].append(job.to_dict()) + with open(filepath, "w") as f: + f.write(json.dumps(user_jobs)) + + +def load_jobs(users, high_client, filepath="./jobs.json"): + data = {} + try: + with open(filepath) as f: + data = json.loads(f.read()) + except Exception as e: + print(f"cant read file: {filepath}: {e}") + data = {} + jobs_list = [] + for user in users: + if user.email not in data: + print(f"{user.email} missing from jobs") + continue + user_jobs = data[user.email] + for user_job in user_jobs: + test_job = TestJob(**user_job) + if user._client_cache is None: + user.client = high_client + test_job.client = user.client + jobs_list.append(test_job) + return jobs_list diff --git a/packages/syft/src/syft/util/test_helpers/sync_helpers.py b/packages/syft/src/syft/util/test_helpers/sync_helpers.py new file mode 100644 index 00000000000..7252b896ea2 --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/sync_helpers.py @@ -0,0 +1,192 @@ +# third party +from tqdm import tqdm + +# syft absolute +import syft as sy + +# relative +from ...client.datasite_client import DatasiteClient +from ...client.syncing import compare_clients +from ...service.code.user_code import UserCode +from ...service.job.job_stash import Job +from ...service.job.job_stash import JobStatus +from ...service.request.request import Request +from ...service.request.request import RequestStatus +from ...service.sync.diff_state import ObjectDiffBatch +from ...types.result import Err + + +def deny_requests_without_autosync_tag(client_low: DatasiteClient): + # Deny all requests that are not autosync + requests = client_low.requests.get_all() + if isinstance(requests, sy.SyftError): + print(requests) + return + + denied_requests = [] + for request in tqdm(requests): + if request.status != RequestStatus.PENDING: + continue + if "autosync" not in request.tags: + request.deny( + reason="This request has been denied automatically. " + "Please use the designated API to submit your request." 
+ ) + denied_requests.append(request.id) + print(f"Denied {len(denied_requests)} requests without autosync tag") + + +def is_request_to_sync(batch: ObjectDiffBatch) -> bool: + # True if this is a new low-side request + # TODO add condition for sql requests/usercodes + low_request = batch.root.low_obj + return ( + isinstance(low_request, Request) + and batch.status == "NEW" + and "autosync" in low_request.tags + ) + + +def is_job_to_sync(batch: ObjectDiffBatch): + # True if this is a new high-side job that is either COMPLETED or ERRORED + if batch.status != "NEW": + return False + if not isinstance(batch.root.high_obj, Job): + return False + job = batch.root.high_obj + return job.status in (JobStatus.ERRORED, JobStatus.COMPLETED) + + +def execute_requests( + client_high: DatasiteClient, request_ids: list[sy.UID] +) -> dict[sy.UID, Job]: + jobs_by_request_id = {} + for request_id in request_ids: + request = client_high.requests.get_by_uid(request_id) + if not isinstance(request, Request): + continue + + code = request.code + if not isinstance(code, UserCode): + continue + + func_name = request.code.service_func_name + api_func = getattr(client_high.code, func_name, None) + if api_func is None: + continue + + job = api_func(blocking=False) + jobs_by_request_id[request_id] = job + + return jobs_by_request_id + + +def deny_failed_jobs( + client_low: DatasiteClient, + jobs: list[Job], +) -> None: + # NOTE no syncing is needed, requests are denied on the low side + denied_requests = [] + + for job in jobs: + if job.status != JobStatus.ERRORED: + continue + + error_result = job.result + if isinstance(error_result, Err): + error_msg = error_result.err_value + else: + error_msg = "An unknown error occurred, please check the Job logs for more information." + + code_id = job.user_code_id + if code_id is None: + continue + requests = client_low.requests.get_by_usercode_id(code_id) + if isinstance(requests, list) and len(requests) > 0: + request = requests[0] + request.deny(reason=f"Execution failed: {error_msg}") + denied_requests.append(request.id) + else: + print(f"Failed to deny request for job {job.id}") + + print(f"Denied {len(denied_requests)} failed requests") + + +def sync_finished_jobs( + client_low: DatasiteClient, + client_high: DatasiteClient, +) -> dict[sy.UID, sy.SyftError | sy.SyftSuccess] | sy.SyftError: + sync_job_results = {} + synced_jobs = [] + diff = compare_clients( + from_client=client_high, to_client=client_low, include_types=["job"] + ) + if isinstance(diff, sy.SyftError): + print(diff) + return diff + + for batch in diff.batches: + if is_job_to_sync(batch): + job = batch.root.high_obj + + w = batch.resolve(build_state=False) + share_result = w.click_share_all_private_data() + if isinstance(share_result, sy.SyftError): + sync_job_results[job.id] = share_result + continue + sync_result = w.click_sync() + + synced_jobs.append(job) + sync_job_results[job.id] = sync_result + + print(f"Sharing {len(sync_job_results)} new results") + deny_failed_jobs(client_low, synced_jobs) + return sync_job_results + + +def sync_new_requests( + client_low: DatasiteClient, + client_high: DatasiteClient, +) -> dict[sy.UID, sy.SyftSuccess | sy.SyftError] | sy.SyftError: + sync_request_results = {} + diff = compare_clients( + from_client=client_low, to_client=client_high, include_types=["request"] + ) + if isinstance(diff, sy.SyftError): + print(diff) + return sync_request_results + print(f"{len(diff.batches)} request batches found") + for batch in tqdm(diff.batches): + if is_request_to_sync(batch): + 
request_id = batch.root.low_obj.id + w = batch.resolve(build_state=False) + result = w.click_sync() + sync_request_results[request_id] = result + return sync_request_results + + +def sync_and_execute_new_requests( + client_low: DatasiteClient, client_high: DatasiteClient +) -> None: + sync_results = sync_new_requests(client_low, client_high) + if isinstance(sync_results, sy.SyftError): + print(sync_results) + return + + request_ids = [ + uid for uid, res in sync_results.items() if isinstance(res, sy.SyftSuccess) + ] + print(f"Synced {len(request_ids)} new requests") + + jobs_by_request = execute_requests(client_high, request_ids) + print(f"Started {len(jobs_by_request)} new jobs") + + +def auto_sync(client_low: DatasiteClient, client_high: DatasiteClient) -> None: + print("Starting auto sync") + print("Denying non tagged jobs") + deny_requests_without_autosync_tag(client_low) + print("Syncing and executing") + sync_and_execute_new_requests(client_low, client_high) + sync_finished_jobs(client_low, client_high) + print("Finished auto sync") diff --git a/packages/syft/src/syft/util/test_helpers/worker_helpers.py b/packages/syft/src/syft/util/test_helpers/worker_helpers.py new file mode 100644 index 00000000000..f4a3c3a41df --- /dev/null +++ b/packages/syft/src/syft/util/test_helpers/worker_helpers.py @@ -0,0 +1,131 @@ +# syft absolute +import syft as sy + +# relative +from ...client.client import SyftClient +from ...service.response import SyftSuccess +from ...service.worker.worker_image import SyftWorkerImage +from ...types.uid import UID + + +def build_and_launch_worker_pool_from_docker_str( + environment: str, + client: sy.DatasiteClient, + worker_pool_name: str, + custom_pool_pod_annotations: dict, + custom_pool_pod_labels: dict, + worker_dockerfile: str, + external_registry: str, + docker_tag: str, + scale_to: int, +): + result = client.api.services.image_registry.add(external_registry) + assert "success" in result.message # nosec: B101 + + # For some reason, when using k9s, result.value is empty so can't use the below line + # local_registry = result.value + local_registry = client.api.services.image_registry[0] + + docker_config = sy.DockerWorkerConfig(dockerfile=worker_dockerfile) + assert docker_config.dockerfile == worker_dockerfile # nosec: B101 + submit_result = client.api.services.worker_image.submit(worker_config=docker_config) + print(submit_result.message) + assert "success" in submit_result.message # nosec: B101 + + worker_image = submit_result.value + + if environment == "remote": + docker_build_result = client.api.services.worker_image.build( + image_uid=worker_image.id, + tag=docker_tag, + registry_uid=local_registry.id, + ) + print(docker_build_result) + + if environment == "remote": + push_result = client.api.services.worker_image.push(worker_image.id) + print(push_result) + + result = client.api.services.worker_pool.launch( + pool_name=worker_pool_name, + image_uid=worker_image.id, + num_workers=1, + pod_annotations=custom_pool_pod_annotations, + pod_labels=custom_pool_pod_labels, + ) + print(result) + # assert 'success' in str(result.message) + + # scale_to > 1 is valid for scale up + # scale_to = 0 is valid for removing all pods + # scale_to < 0 should return error from server + if environment == "remote" and scale_to != 1: + result = client.worker_pools.scale(number=scale_to, pool_name=worker_pool_name) + print(result) + + +def launch_worker_pool_from_docker_tag_and_registry( + environment: str, + client: sy.DatasiteClient, + worker_pool_name: str, + 
custom_pool_pod_annotations: dict, + custom_pool_pod_labels: dict, + docker_tag: str, + external_registry: str, + scale_to: int = 1, +): + res = client.api.services.image_registry.add(external_registry) + assert "success" in res.message # nosec: B101 + docker_config = sy.PrebuiltWorkerConfig(tag=docker_tag) + image_result = client.api.services.worker_image.submit(worker_config=docker_config) + assert "success" in res.message # nosec: B101 + worker_image = image_result.value + + launch_result = client.api.services.worker_pool.launch( + pool_name=worker_pool_name, + image_uid=worker_image.id, + num_workers=1, + pod_annotations=custom_pool_pod_annotations, + pod_labels=custom_pool_pod_labels, + ) + if environment == "remote" and scale_to > 1: + result = client.worker_pools.scale(number=scale_to, pool_name=worker_pool_name) + print(result) + + return launch_result + + +def prune_worker_pool_and_images(client: SyftClient) -> None: + for pool in client.worker_pools.get_all(): + client.worker_pools.delete(pool.id) + + for image in client.images.get_all(): + client.images.remove(image.id) + + +def build_and_push_image( + client: SyftClient, + image: SyftWorkerImage, + tag: str, + registry_uid: UID | None = None, + reg_username: str | None = None, + reg_password: str | None = None, + force_build: bool = False, +) -> None: + """Build and push the image to the given registry.""" + if image.is_prebuilt: + return + + build_result = client.api.services.worker_image.build( + image_uid=image.id, registry_uid=registry_uid, tag=tag, force_build=force_build + ) + print(build_result.message) + + if isinstance(build_result, SyftSuccess): + push_result = client.api.services.worker_image.push( + image.id, + username=reg_username, + password=reg_password, + ) + assert isinstance(push_result, SyftSuccess) # nosec: B101 + print(push_result.message) diff --git a/packages/syft/src/syft/util/trace_decorator.py b/packages/syft/src/syft/util/trace_decorator.py index 87486b0cda4..eaa259330cd 100644 --- a/packages/syft/src/syft/util/trace_decorator.py +++ b/packages/syft/src/syft/util/trace_decorator.py @@ -6,10 +6,10 @@ from collections.abc import Callable from functools import wraps import inspect +import threading from typing import Any from typing import ClassVar from typing import TypeVar -from typing import cast # third party from opentelemetry import trace @@ -17,6 +17,10 @@ from opentelemetry.trace import Tracer from opentelemetry.trace.span import Span +__all__ = ["instrument"] + +T = TypeVar("T", bound=Callable | type) + class TracingDecoratorOptions: class NamingSchemes: @@ -40,11 +44,8 @@ def set_default_attributes(cls, attributes: dict[str, str] | None = None) -> Non cls.default_attributes[att] = attributes[att] -T = TypeVar("T", bound=Callable | type) - - def instrument( - _func_or_class: T, + _func_or_class: T | None = None, /, *, span_name: str = "", @@ -99,16 +100,12 @@ def decorate_class(cls: T) -> T: return cls - # Check if this is a span or class decorator - if inspect.isclass(_func_or_class): - return decorate_class(_func_or_class) - def span_decorator(func_or_class: T) -> T: - if inspect.isclass(func_or_class): + if ignore: + return func_or_class + elif inspect.isclass(func_or_class): return decorate_class(func_or_class) - # sig = inspect.signature(func_or_class) - # Check if already decorated (happens if both class and function # decorated). 
If so, we keep the function decorator settings only undecorated_func = getattr(func_or_class, "__tracing_unwrapped__", None) @@ -121,10 +118,13 @@ def span_decorator(func_or_class: T) -> T: tracer = existing_tracer or trace.get_tracer(func_or_class.__module__) def _set_semantic_attributes(span: Span, func: Callable) -> None: + thread = threading.current_thread() span.set_attribute(SpanAttributes.CODE_NAMESPACE, func.__module__) span.set_attribute(SpanAttributes.CODE_FUNCTION, func.__qualname__) span.set_attribute(SpanAttributes.CODE_FILEPATH, func.__code__.co_filename) span.set_attribute(SpanAttributes.CODE_LINENO, func.__code__.co_firstlineno) + span.set_attribute(SpanAttributes.THREAD_ID, thread.ident) + span.set_attribute(SpanAttributes.THREAD_NAME, thread.name) def _set_attributes( span: Span, attributes_dict: dict[str, str] | None = None @@ -155,16 +155,20 @@ async def wrap_with_span_async(*args: Any, **kwargs: Any) -> Callable: _set_attributes(span, attributes) return await func_or_class(*args, **kwargs) - if ignore: - return func_or_class - - wrapper = ( + span_wrapper = ( wrap_with_span_async if asyncio.iscoroutinefunction(func_or_class) else wrap_with_span_sync ) - wrapper.__signature__ = inspect.signature(func_or_class) + span_wrapper.__signature__ = inspect.signature(func_or_class) - return cast(T, wrapper) + return span_wrapper # type: ignore - return span_decorator(_func_or_class) + # decorator factory on a class or func + # @instrument or @instrument(span_name="my_span", ...) + if _func_or_class and inspect.isclass(_func_or_class): + return decorate_class(_func_or_class) + elif _func_or_class: + return span_decorator(_func_or_class) + else: + return span_decorator # type: ignore diff --git a/packages/syft/src/syft/util/update_commit.py b/packages/syft/src/syft/util/update_commit.py new file mode 100644 index 00000000000..359120c37c2 --- /dev/null +++ b/packages/syft/src/syft/util/update_commit.py @@ -0,0 +1,48 @@ +# stdlib +import os +import subprocess # nosec +import sys + + +def get_commit_hash() -> str: + cwd = os.path.dirname(os.path.abspath(__file__)) + try: + output = subprocess.check_output( + "git rev-parse --short HEAD".split(" "), + cwd=cwd, # nosec + ) + return output.strip().decode("ascii") + except subprocess.CalledProcessError as e: + print(f"Error getting commit hash: {e}") + sys.exit(1) + + +def update_commit_variable(file_path: str, commit_hash: str) -> None: + """Replace the __commit__ variable with the actual commit hash.""" + try: + with open(file_path) as file: + lines = file.readlines() + + with open(file_path, "w") as file: + updated = False + for line in lines: + if "__commit__ = " in line: + file.write(f'__commit__ = "{commit_hash}"\n') + updated = True + else: + file.write(line) + if not updated: + print("No __commit__ variable found in the file.") + except OSError as e: + print(f"Error reading or writing file: {e}") + sys.exit(1) + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python update_commit.py ") + sys.exit(1) + + file_path = sys.argv[1] + commit_hash = get_commit_hash() + update_commit_variable(file_path, commit_hash) diff --git a/packages/syft/src/syft/util/util.py b/packages/syft/src/syft/util/util.py index 82dda0b9c08..fa20c3fc2c2 100644 --- a/packages/syft/src/syft/util/util.py +++ b/packages/syft/src/syft/util/util.py @@ -1,15 +1,22 @@ # stdlib import asyncio from asyncio.selector_events import BaseSelectorEventLoop +from collections import deque from collections.abc import Callable from collections.abc import 
Iterator from collections.abc import Sequence from concurrent.futures import ProcessPoolExecutor from concurrent.futures import ThreadPoolExecutor from contextlib import contextmanager +from copy import deepcopy +from datetime import datetime import functools import hashlib +import inspect +from itertools import chain from itertools import repeat +import json +import logging import multiprocessing import multiprocessing as mp from multiprocessing import set_start_method @@ -19,10 +26,14 @@ import os from pathlib import Path import platform +import random import re +import reprlib +import secrets from secrets import randbelow import socket import sys +from sys import getsizeof import threading import time import types @@ -34,13 +45,13 @@ from forbiddenfruit import curse from nacl.signing import SigningKey from nacl.signing import VerifyKey +import nh3 import requests # relative -from .logger import critical -from .logger import debug -from .logger import error -from .logger import traceback_and_raise +from ..serde.serialize import _serialize as serialize + +logger = logging.getLogger(__name__) DATASETS_URL = "https://raw.githubusercontent.com/OpenMined/datasets/main" PANDAS_DATA = f"{DATASETS_URL}/pandas_cookbook" @@ -56,9 +67,9 @@ def full_name_with_qualname(klass: type) -> str: if not hasattr(klass, "__module__"): return f"builtins.{get_qualname_for(klass)}" return f"{klass.__module__}.{get_qualname_for(klass)}" - except Exception: + except Exception as e: # try name as backup - print("Failed to get FQN for:", klass, type(klass)) + logger.error(f"Failed to get FQN for: {klass} {type(klass)}", exc_info=e) return full_name_with_name(klass=klass) @@ -69,7 +80,7 @@ def full_name_with_name(klass: type) -> str: return f"builtins.{get_name_for(klass)}" return f"{klass.__module__}.{get_name_for(klass)}" except Exception as e: - print("Failed to get FQN for:", klass, type(klass)) + logger.error(f"Failed to get FQN for: {klass} {type(klass)}", exc_info=e) raise e @@ -87,8 +98,78 @@ def get_name_for(klass: type) -> str: return klass_name -def get_mb_size(data: Any) -> float: - return sys.getsizeof(data) / (1024 * 1024) +def get_mb_size(data: Any, handlers: dict | None = None) -> float: + """Returns the approximate memory footprint an object and all of its contents. + + Automatically finds the contents of the following builtin containers and + their subclasses: tuple, list, deque, dict, set and frozenset. + Otherwise, tries to read from the __slots__ or __dict__ of the object. + To search other containers, add handlers to iterate over their contents: + + handlers = {SomeContainerClass: iter, + OtherContainerClass: OtherContainerClass.get_elements} + + Lightly modified from + https://code.activestate.com/recipes/577504-compute-memory-footprint-of-an-object-and-its-cont/ + which is referenced in official sys.getsizeof documentation + https://docs.python.org/3/library/sys.html#sys.getsizeof. 
+ + """ + + def dict_handler(d: dict[Any, Any]) -> Iterator[Any]: + return chain.from_iterable(d.items()) + + all_handlers = { + tuple: iter, + list: iter, + deque: iter, + dict: dict_handler, + set: iter, + frozenset: iter, + } + if handlers: + all_handlers.update(handlers) # user handlers take precedence + seen = set() # track which object id's have already been seen + default_size = getsizeof(0) # estimate sizeof object without __sizeof__ + + def sizeof(o: Any) -> int: + if id(o) in seen: # do not double count the same object + return 0 + seen.add(id(o)) + s = getsizeof(o, default_size) + + for typ, handler in all_handlers.items(): + if isinstance(o, typ): + s += sum(map(sizeof, handler(o))) # type: ignore + break + else: + # no __slots__ *usually* means a __dict__, but some special builtin classes + # (such as `type(None)`) have neither else, `o` has no attributes at all, + # so sys.getsizeof() actually returned the correct value + if not hasattr(o.__class__, "__slots__"): + if hasattr(o, "__dict__"): + s += sizeof(o.__dict__) + else: + s += sum( + sizeof(getattr(o, x)) + for x in o.__class__.__slots__ + if hasattr(o, x) + ) + return s + + return sizeof(data) / (1024.0 * 1024.0) + + +def get_mb_serialized_size(data: Any) -> float: + try: + serialized_data = serialize(data, to_bytes=True) + return sys.getsizeof(serialized_data) / (1024 * 1024) + except Exception as e: + data_type = type(data) + raise TypeError( + f"Failed to serialize data of type '{data_type.__module__}.{data_type.__name__}'." + f" Data type not supported. Detailed error: {e}" + ) def extract_name(klass: type) -> str: @@ -106,7 +187,7 @@ def extract_name(klass: type) -> str: return fqn.split(".")[-1] return fqn except Exception as e: - print(f"Failed to get klass name {klass}") + logger.error(f"Failed to get klass name {klass}", exc_info=e) raise e else: raise ValueError(f"Failed to match regex for klass {klass}") @@ -116,9 +197,7 @@ def validate_type(_object: object, _type: type, optional: bool = False) -> Any: if isinstance(_object, _type) or (optional and (_object is None)): return _object - traceback_and_raise( - f"Object {_object} should've been of type {_type}, not {_object}." - ) + raise Exception(f"Object {_object} should've been of type {_type}, not {_object}.") def validate_field(_object: object, _field: str) -> Any: @@ -127,7 +206,7 @@ def validate_field(_object: object, _field: str) -> Any: if object is not None: return object - traceback_and_raise(f"Object {_object} has no {_field} field set.") + raise Exception(f"Object {_object} has no {_field} field set.") def get_fully_qualified_name(obj: object) -> str: @@ -149,7 +228,7 @@ def get_fully_qualified_name(obj: object) -> str: try: fqn += "." 
+ obj.__class__.__name__ except Exception as e: - error(f"Failed to get FQN: {e}") + logger.error(f"Failed to get FQN: {e}") return fqn @@ -174,7 +253,7 @@ def key_emoji(key: object) -> str: hex_chars = bytes(key).hex()[-8:] return char_emoji(hex_chars=hex_chars) except Exception as e: - error(f"Fail to get key emoji: {e}") + logger.error(f"Fail to get key emoji: {e}") pass return "ALL" @@ -309,7 +388,11 @@ def print_dynamic_log( return (finish, success) -def find_available_port(host: str, port: int, search: bool = False) -> int: +def find_available_port( + host: str, port: int | None = None, search: bool = False +) -> int: + if port is None: + port = random.randint(1500, 65000) # nosec port_available = False while not port_available: try: @@ -324,9 +407,10 @@ def find_available_port(host: str, port: int, search: bool = False) -> int: port += 1 else: break + sock.close() except Exception as e: - print(f"Failed to check port {port}. {e}") + logger.error(f"Failed to check port {port}. {e}") sock.close() if search is False and port_available is False: @@ -338,6 +422,18 @@ def find_available_port(host: str, port: int, search: bool = False) -> int: return port +def get_random_available_port() -> int: + """Retrieve a random available port number from the host OS. + + Returns + ------- + int: Available port number. + """ + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as soc: + soc.bind(("localhost", 0)) + return soc.getsockname()[1] + + def get_loaded_syft() -> ModuleType: return sys.modules[__name__.split(".")[0]] @@ -428,7 +524,7 @@ def obj2pointer_type(obj: object | None = None, fqn: str | None = None) -> type: except Exception as e: # sometimes the object doesn't have a __module__ so you need to use the type # like: collections.OrderedDict - debug( + logger.debug( f"Unable to get get_fully_qualified_name of {type(obj)} trying type. {e}" ) fqn = get_fully_qualified_name(obj=type(obj)) @@ -439,10 +535,8 @@ def obj2pointer_type(obj: object | None = None, fqn: str | None = None) -> type: try: ref = get_loaded_syft().lib_ast.query(fqn, obj_type=type(obj)) - except Exception as e: - log = f"Cannot find {type(obj)} {fqn} in lib_ast. {e}" - critical(log) - raise Exception(log) + except Exception: + raise Exception(f"Cannot find {type(obj)} {fqn} in lib_ast.") return ref.pointer_type @@ -459,7 +553,7 @@ def prompt_warning_message(message: str, confirm: bool = False) -> bool: if response == "y": return True elif response == "n": - display("Aborted !!") + print("Aborted.") return False else: print("Invalid response. 
Please enter Y or N.") @@ -888,19 +982,6 @@ def set_klass_module_to_syft(klass: type, module_name: str) -> None: sys.modules["syft"].__dict__[module_name] = new_module -def get_syft_src_path() -> Path: - return Path(__file__).parent.parent.parent.expanduser() - - -def get_grid_src_path() -> Path: - syft_path = get_syft_src_path() - return syft_path.parent.parent / "grid" - - -def get_syft_cpu_dockerfile() -> Path: - return get_grid_src_path() / "backend" / "worker_cpu.dockerfile" - - def get_queue_address(port: int) -> str: """Get queue address based on container host name.""" @@ -910,3 +991,181 @@ def get_queue_address(port: int) -> str: elif container_host == "docker": return f"tcp://{socket.gethostname()}:{port}" return f"tcp://localhost:{port}" + + +def get_dev_mode() -> bool: + return str_to_bool(os.getenv("DEV_MODE", "False")) + + +def generate_token() -> str: + return secrets.token_hex(64) + + +def sanitize_html(html_str: str) -> str: + policy = { + "tags": ["svg", "strong", "rect", "path", "circle", "code", "pre"], + "attributes": { + "*": {"class", "style"}, + "svg": { + "class", + "style", + "xmlns", + "width", + "height", + "viewBox", + "fill", + "stroke", + "stroke-width", + }, + "path": {"d", "fill", "stroke", "stroke-width"}, + "rect": {"x", "y", "width", "height", "fill", "stroke", "stroke-width"}, + "circle": {"cx", "cy", "r", "fill", "stroke", "stroke-width"}, + }, + "remove": {"script", "style"}, + } + + tags = nh3.ALLOWED_TAGS + for tag in policy["tags"]: + tags.add(tag) + + _attributes = deepcopy(nh3.ALLOWED_ATTRIBUTES) + attributes = {**_attributes, **policy["attributes"]} # type: ignore + + return nh3.clean( + html_str, + tags=tags, + clean_content_tags=policy["remove"], + attributes=attributes, + ) + + +def parse_iso8601_date(date_string: str) -> datetime: + # Handle variable length of microseconds by trimming to 6 digits + if "." 
+        base_date, microseconds = date_string.split(".")
+        microseconds = microseconds.rstrip("Z")  # Remove trailing 'Z'
+        microseconds = microseconds[:6]  # Trim to 6 digits
+        date_string = f"{base_date}.{microseconds}Z"
+    return datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%fZ")
+
+
+def get_latest_tag(registry: str, repo: str) -> str | None:
+    repo_url = f"http://{registry}/v2/{repo}"
+    res = requests.get(url=f"{repo_url}/tags/list", timeout=5)
+    tags = res.json().get("tags", [])
+
+    tag_times = []
+    for tag in tags:
+        manifest_response = requests.get(f"{repo_url}/manifests/{tag}", timeout=5)
+        manifest = manifest_response.json()
+        created_time = json.loads(manifest["history"][0]["v1Compatibility"])["created"]
+        created_datetime = parse_iso8601_date(created_time)
+        tag_times.append((tag, created_datetime))
+
+    # sort tags by datetime
+    tag_times.sort(key=lambda x: x[1], reverse=True)
+    if len(tag_times) > 0:
+        return tag_times[0][0]
+    return None
+
+
+def get_caller_file_path() -> str | None:
+    stack = inspect.stack()
+
+    for frame_info in stack:
+        code_context = frame_info.code_context
+        if code_context and len(code_context) > 0:
+            if "from syft import test_settings" in str(frame_info.code_context):
+                caller_file_path = os.path.dirname(os.path.abspath(frame_info.filename))
+                return caller_file_path
+
+    return None
+
+
+def find_base_dir_with_tox_ini(start_path: str = ".") -> str | None:
+    base_path = os.path.abspath(start_path)
+    while True:
+        if os.path.exists(os.path.join(base_path, "tox.ini")):
+            return base_path
+        parent_path = os.path.abspath(os.path.join(base_path, os.pardir))
+        if parent_path == base_path:  # Reached the root directory
+            break
+        base_path = parent_path
+    return start_path
+
+
+def get_all_config_files(base_path: str, current_path: str) -> list[str]:
+    config_files = []
+    current_path = os.path.abspath(current_path)
+    base_path = os.path.abspath(base_path)
+
+    while current_path.startswith(base_path):
+        config_file = os.path.join(current_path, "settings.yaml")
+        if os.path.exists(config_file):
+            config_files.append(config_file)
+        if current_path == base_path:  # Stop if we reach the base directory
+            break
+        current_path = os.path.abspath(os.path.join(current_path, os.pardir))
+
+    return config_files
+
+
+def test_settings() -> Any:
+    # third party
+    from dynaconf import Dynaconf
+
+    config_files = []
+    current_path = "."
+
+    # jupyter uses "." which resolves to the notebook
+    if not is_interpreter_jupyter():
+        # python uses the file which has from syft import test_settings in it
+        import_path = get_caller_file_path()
+        if import_path:
+            current_path = import_path
+
+    base_dir = find_base_dir_with_tox_ini(current_path)
+    config_files = get_all_config_files(base_dir, current_path)
+    config_files = list(reversed(config_files))
+    # create
+    # can override with
+    # import os
+    # os.environ["TEST_KEY"] = "var"
+    # third party
+
+    # Dynaconf settings
+    test_settings = Dynaconf(
+        settings_files=config_files,
+        environments=True,
+        envvar_prefix="TEST",
+    )
+
+    return test_settings
+
+
+class CustomRepr(reprlib.Repr):
+    def repr_str(self, obj: Any, level: int = 0) -> str:
+        if len(obj) <= self.maxstring:
+            return repr(obj)
+        return repr(obj[: self.maxstring] + "...")
+
+
+def repr_truncation(obj: Any, max_elements: int = 10) -> str:
+    """
+    Return a truncated string representation of the object if it is too long.
+
+    Args:
+    - obj: The object to be represented (can be str, list, dict, set...).
+    - max_elements: Maximum number of elements to display before truncating.
+
+    Returns:
+    - A string representation of the object, truncated if necessary.
+    """
+    r = CustomRepr()
+    r.maxlist = max_elements  # For lists
+    r.maxdict = max_elements  # For dictionaries
+    r.maxset = max_elements  # For sets
+    r.maxstring = 100  # For strings
+    r.maxother = 100  # For other objects
+
+    return r.repr(obj)
diff --git a/packages/syft/tests/conftest.py b/packages/syft/tests/conftest.py
index 79c69efbdf1..a3908f59eae 100644
--- a/packages/syft/tests/conftest.py
+++ b/packages/syft/tests/conftest.py
@@ -1,5 +1,5 @@
 # stdlib
-import json
+from functools import cache
 import os
 from pathlib import Path
 from secrets import token_hex
@@ -7,43 +7,29 @@ import sys
 from tempfile import gettempdir
 from unittest import mock
+from uuid import uuid4
 # third party
 from faker import Faker
-from pymongo import MongoClient
+import numpy as np
 import pytest
 # syft absolute
 import syft as sy
-from syft.client.domain_client import DomainClient
-from syft.node.worker import Worker
+from syft import Dataset
+from syft.abstract_server import ServerSideType
+from syft.client.datasite_client import DatasiteClient
 from syft.protocol.data_protocol import get_data_protocol
 from syft.protocol.data_protocol import protocol_release_dir
 from syft.protocol.data_protocol import stage_protocol_changes
-
-# relative
-from .syft.stores.store_fixtures_test import dict_action_store  # noqa: F401
-from .syft.stores.store_fixtures_test import dict_document_store  # noqa: F401
-from .syft.stores.store_fixtures_test import dict_queue_stash  # noqa: F401
-from .syft.stores.store_fixtures_test import dict_store_partition  # noqa: F401
-from .syft.stores.store_fixtures_test import mongo_action_store  # noqa: F401
-from .syft.stores.store_fixtures_test import mongo_document_store  # noqa: F401
-from .syft.stores.store_fixtures_test import mongo_queue_stash  # noqa: F401
-from .syft.stores.store_fixtures_test import mongo_store_partition  # noqa: F401
-from .syft.stores.store_fixtures_test import sqlite_action_store  # noqa: F401
-from .syft.stores.store_fixtures_test import sqlite_document_store  # noqa: F401
-from .syft.stores.store_fixtures_test import sqlite_queue_stash  # noqa: F401
-from .syft.stores.store_fixtures_test import sqlite_store_partition  # noqa: F401
-from .syft.stores.store_fixtures_test import sqlite_workspace  # noqa: F401
-from .utils.mongodb import start_mongo_server
-from .utils.mongodb import stop_mongo_server
-from .utils.xdist_state import SharedState
+from syft.server.worker import Worker
+from syft.service.queue.queue_stash import QueueStash
+from syft.service.user import user
 def patch_protocol_file(filepath: Path):
     dp = get_data_protocol()
-    original_protocol = dp.read_json(dp.file_path)
-    filepath.write_text(json.dumps(original_protocol))
+    shutil.copyfile(src=dp.file_path, dst=filepath)
 def remove_file(filepath: Path):
@@ -94,8 +80,10 @@ def protocol_file():
     protocol_dir = sy.SYFT_PATH / "protocol"
     file_path = protocol_dir / f"{random_name}.json"
     patch_protocol_file(filepath=file_path)
-    yield file_path
-    remove_file(filepath=file_path)
+    try:
+        yield file_path
+    finally:
+        remove_file(file_path)
 @pytest.fixture(autouse=True)
@@ -108,14 +96,15 @@ def stage_protocol(protocol_file: Path):
         stage_protocol_changes()
         # bump_protocol_version()
         yield dp.protocol_history
-        dp.revert_latest_protocol()
+        dp.reset_dev_protocol()
         dp.save_history(dp.protocol_history)
         # Cleanup release dir, remove unused released files
-        for _file_path in
protocol_release_dir().iterdir(): - for version in dp.read_json(_file_path): - if version not in dp.protocol_history.keys(): - _file_path.unlink() + if os.path.exists(protocol_release_dir()): + for _file_path in protocol_release_dir().iterdir(): + for version in dp.read_json(_file_path): + if version not in dp.protocol_history.keys(): + _file_path.unlink() @pytest.fixture @@ -125,14 +114,51 @@ def faker(): @pytest.fixture(scope="function") def worker() -> Worker: - worker = sy.Worker.named(name=token_hex(8)) + """ + NOTE in-memory sqlite is not shared between connections, so: + - using 2 workers (high/low) will not share a db + - re-using a connection (e.g. for a Job worker) will not share a db + """ + worker = sy.Worker.named(name=token_hex(16), db_url="sqlite://") + yield worker + worker.cleanup() + del worker + + +@pytest.fixture(scope="function") +def second_worker() -> Worker: + # Used in server syncing tests + worker = sy.Worker.named(name=uuid4().hex, db_url="sqlite://") + yield worker + worker.cleanup() + del worker + + +@pytest.fixture(scope="function") +def high_worker() -> Worker: + worker = sy.Worker.named( + name=token_hex(8), server_side_type=ServerSideType.HIGH_SIDE, db_url="sqlite://" + ) + yield worker + worker.cleanup() + del worker + + +@pytest.fixture(scope="function") +def low_worker() -> Worker: + worker = sy.Worker.named( + name=token_hex(8), + server_side_type=ServerSideType.LOW_SIDE, + dev_mode=True, + db_url="sqlite://", + ) yield worker worker.cleanup() del worker @pytest.fixture -def root_domain_client(worker) -> DomainClient: +def root_datasite_client(worker) -> DatasiteClient: yield worker.root_client @@ -142,7 +168,7 @@ def root_verify_key(worker): @pytest.fixture -def guest_client(worker) -> DomainClient: +def guest_client(worker) -> DatasiteClient: yield worker.guest_client @@ -152,14 +178,34 @@ def guest_verify_key(worker): @pytest.fixture -def guest_domain_client(root_domain_client) -> DomainClient: - yield root_domain_client.guest() +def guest_datasite_client(root_datasite_client) -> DatasiteClient: + yield root_datasite_client.guest() + + +@pytest.fixture +def ds_client( + faker: Faker, root_datasite_client: DatasiteClient, guest_client: DatasiteClient +): + guest_email = faker.email() + password = "mysecretpassword" + root_datasite_client.register( + name=faker.name(), + email=guest_email, + password=password, + password_verify=password, + ) + ds_client = guest_client.login(email=guest_email, password=password) + yield ds_client + + +@pytest.fixture +def ds_verify_key(ds_client: DatasiteClient): + yield ds_client.credentials.verify_key @pytest.fixture def document_store(worker): - yield worker.document_store - worker.document_store.reset() + yield worker.db @pytest.fixture @@ -167,63 +213,79 @@ def action_store(worker): yield worker.action_store -@pytest.fixture(scope="session") -def mongo_client(testrun_uid): - """ - A race-free fixture that starts a MongoDB server for an entire pytest session. - Cleans up the server when the session ends, or when the last client disconnects. 
- """ - db_name = f"pytest_mongo_{testrun_uid}" - root_dir = Path(gettempdir(), db_name) - state = SharedState(db_name) - KEY_CONN_STR = "mongoConnectionString" - KEY_CLIENTS = "mongoClients" - - # start the server if it's not already running - with state.lock: - conn_str = state.get(KEY_CONN_STR, None) - - if not conn_str: - conn_str = start_mongo_server(db_name) - state.set(KEY_CONN_STR, conn_str) - - # increment the number of clients - clients = state.get(KEY_CLIENTS, 0) + 1 - state.set(KEY_CLIENTS, clients) - - # create a client, and test the connection - client = MongoClient(conn_str) - assert client.server_info().get("ok") == 1.0 - - yield client - - # decrement the number of clients - with state.lock: - clients = state.get(KEY_CLIENTS, 0) - 1 - state.set(KEY_CLIENTS, clients) - - # if no clients are connected, destroy the server - if clients <= 0: - stop_mongo_server(db_name) - state.purge() - shutil.rmtree(root_dir, ignore_errors=True) - - -__all__ = [ - "mongo_store_partition", - "mongo_document_store", - "mongo_queue_stash", - "mongo_action_store", - "sqlite_store_partition", - "sqlite_workspace", - "sqlite_document_store", - "sqlite_queue_stash", - "sqlite_action_store", - "dict_store_partition", - "dict_action_store", - "dict_document_store", - "dict_queue_stash", -] +@pytest.fixture(autouse=True) +def patched_session_cache(monkeypatch): + # patching compute heavy hashing to speed up tests + + def _get_key(email, password, connection): + return f"{email}{password}{connection}" + + monkeypatch.setattr("syft.client.client.SyftClientSessionCache._get_key", _get_key) + + +cached_salt_and_hash_password = cache(user.salt_and_hash_password) +cached_check_pwd = cache(user.check_pwd) + + +@pytest.fixture(autouse=True) +def patched_user(monkeypatch): + # patching compute heavy hashing to speed up tests + + monkeypatch.setattr( + "syft.service.user.user.salt_and_hash_password", + cached_salt_and_hash_password, + ) + monkeypatch.setattr( + "syft.service.user.user.check_pwd", + cached_check_pwd, + ) + + +@pytest.fixture +def small_dataset() -> Dataset: + dataset = Dataset( + name="small_dataset", + asset_list=[ + sy.Asset( + name="small_dataset", + data=np.array([1, 2, 3]), + mock=np.array([1, 1, 1]), + ) + ], + ) + yield dataset + + +@pytest.fixture +def big_dataset() -> Dataset: + num_elements = 20 * 1024 * 1024 + data_big = np.random.randint(0, 100, size=num_elements) + mock_big = np.random.randint(0, 100, size=num_elements) + dataset = Dataset( + name="big_dataset", + asset_list=[ + sy.Asset( + name="big_dataset", + data=data_big, + mock=mock_big, + ) + ], + ) + yield dataset + + +@pytest.fixture( + scope="function", + params=[ + "tODOsqlite_address", + # "TODOpostgres_address", # will be used when we have a postgres CI tests + ], +) +def queue_stash(request): + _ = request.param + stash = QueueStash.random() + yield stash + pytest_plugins = [ "tests.syft.users.fixtures", @@ -231,6 +293,5 @@ def mongo_client(testrun_uid): "tests.syft.request.fixtures", "tests.syft.dataset.fixtures", "tests.syft.notifications.fixtures", - "tests.syft.action_graph.fixtures", "tests.syft.serde.fixtures", ] diff --git a/packages/syft/tests/syft/action_graph/action_graph_serde_test.py b/packages/syft/tests/syft/action_graph/action_graph_serde_test.py deleted file mode 100644 index 68f5c868924..00000000000 --- a/packages/syft/tests/syft/action_graph/action_graph_serde_test.py +++ /dev/null @@ -1,43 +0,0 @@ -# stdlib -from typing import Any - -# third party -import pytest -from pytest import FixtureRequest - 
-# syft absolute -import syft as sy -from syft.node.credentials import SyftVerifyKey -from syft.service.action.action_graph import InMemoryActionGraphStore -from syft.service.action.action_graph import NodeActionData - -# relative -from .fixtures import create_action_node - - -def test_node_action_data_serde(verify_key: SyftVerifyKey) -> None: - action_node: NodeActionData = create_action_node(verify_key) - bytes_data: bytes = sy.serialize(action_node, to_bytes=True) - deserialized_node_action_data = sy.deserialize(bytes_data, from_bytes=True) - - assert deserialized_node_action_data == action_node - - -@pytest.mark.parametrize( - "obj", - [ - "simple_in_memory_action_graph", - ], -) -def test_in_memory_action_graph_serde( - obj: Any, request: FixtureRequest, verify_key: SyftVerifyKey -) -> None: - in_memory_graph: InMemoryActionGraphStore = request.getfixturevalue(obj) - serialized_graph: bytes = sy.serialize(in_memory_graph, to_bytes=True) - deserialized_graph = sy.deserialize(serialized_graph, from_bytes=True) - - assert isinstance(deserialized_graph, type(in_memory_graph)) - assert isinstance(deserialized_graph.graph, type(in_memory_graph.graph)) - assert isinstance(deserialized_graph.graph.db, type(in_memory_graph.graph.db)) - assert deserialized_graph.edges(verify_key) == in_memory_graph.edges(verify_key) - assert deserialized_graph.nodes(verify_key) == in_memory_graph.nodes(verify_key) diff --git a/packages/syft/tests/syft/action_graph/action_graph_service_test.py b/packages/syft/tests/syft/action_graph/action_graph_service_test.py deleted file mode 100644 index 26cc6833b7d..00000000000 --- a/packages/syft/tests/syft/action_graph/action_graph_service_test.py +++ /dev/null @@ -1,453 +0,0 @@ -""" -Tests for the ActionGraphService in /syft/src/syft/service/action/action_graph_service.py -""" - -# syft absolute -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey -from syft.node.worker import Worker -from syft.service.action.action_graph import ExecutionStatus -from syft.service.action.action_graph import InMemoryActionGraphStore -from syft.service.action.action_graph import NetworkXBackingStore -from syft.service.action.action_graph import NodeActionData -from syft.service.action.action_graph import NodeActionDataUpdate -from syft.service.action.action_graph import NodeType -from syft.service.action.action_graph_service import ActionGraphService -from syft.service.action.action_object import Action -from syft.service.action.action_object import ActionObject -from syft.service.context import AuthedServiceContext -from syft.service.response import SyftError -from syft.service.response import SyftSuccess -from syft.types.datetime import DateTime -from syft.types.uid import UID - - -def test_action_graph_service_init( - in_mem_action_graph_service: ActionGraphService, -) -> None: - assert isinstance(in_mem_action_graph_service.store, InMemoryActionGraphStore) - assert isinstance(in_mem_action_graph_service.store.graph, NetworkXBackingStore) - - -def test_action_graph_service_add_action_obj( - in_mem_action_graph_service: ActionGraphService, - authed_context: AuthedServiceContext, -) -> None: - action_obj = ActionObject.from_obj([1, 2, 3]) - result: NodeActionData = in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj - ) - assert result.id == action_obj.id - assert result.type == NodeType.ACTION_OBJECT - assert result.status == ExecutionStatus.PROCESSING - assert result.user_verify_key == 
authed_context.credentials - assert isinstance(result.created_at, DateTime) - assert result.retry == 0 - assert result.is_mutated is False - assert result.is_mutagen is False - assert result.next_mutagen_node is None - assert result.last_nm_mutagen_node is None - assert len(in_mem_action_graph_service.get_all_nodes(authed_context)) == 1 - assert len(in_mem_action_graph_service.get_all_edges(authed_context)) == 0 - # add again the same node. Expect to get back the error - err: SyftError = in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj - ) - assert "Node already exists in the graph" in err.message - # add another action_obj node - action_obj_2 = ActionObject.from_obj([2, 3, 4]) - result_2: NodeActionData = in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj_2 - ) - assert result_2.id == action_obj_2.id - assert result_2.type == NodeType.ACTION_OBJECT - assert result_2.user_verify_key == authed_context.credentials - assert isinstance(result.created_at, DateTime) - assert len(in_mem_action_graph_service.get_all_nodes(authed_context)) == 2 - assert len(in_mem_action_graph_service.get_all_edges(authed_context)) == 0 - - -def test_action_graph_service_add_action_no_mutagen( - in_mem_action_graph_service: ActionGraphService, - authed_context: AuthedServiceContext, -) -> None: - """ - Test the `add_action` method of ActionGraphService when there is no - mutagen, i.e. a node that causes mutation. Scenario: - node_1: action_obj_a = [1,2,3] - node_2: action_obj_b = [2,3,4] - node_3: action -> add(a, b) - node_4: action_obj = a + b (automatically created) - """ - action_obj_a = ActionObject.from_obj([1, 2, 3]) - action_obj_b = ActionObject.from_obj([2, 3, 4]) - in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj_a - ) - in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj_b - ) - action = Action( - path="action.execute", - op="__add__", - remote_self=action_obj_a.syft_lineage_id, - args=[action_obj_b.syft_lineage_id], - kwargs={}, - ) - action_node, result_node = in_mem_action_graph_service.add_action( - context=authed_context, action=action - ) - - assert len(in_mem_action_graph_service.get_all_nodes(authed_context)) == 4 - assert len(in_mem_action_graph_service.get_all_edges(authed_context)) == 3 - - assert action_node.id == action.id - assert action_node.type == NodeType.ACTION - assert action_node.status == ExecutionStatus.PROCESSING - assert action_node.retry == 0 - assert isinstance(action_node.created_at, DateTime) - assert action_node.user_verify_key == authed_context.credentials - assert action_node.is_mutated is False - assert action_node.is_mutagen is False - assert action_node.next_mutagen_node is None - assert action_node.last_nm_mutagen_node is None - - assert result_node.id == action.result_id.id - assert result_node.type == NodeType.ACTION_OBJECT - assert result_node.status == ExecutionStatus.PROCESSING - assert result_node.retry == 0 - assert isinstance(result_node.created_at, DateTime) - assert result_node.user_verify_key == authed_context.credentials - assert result_node.is_mutated is False - assert result_node.is_mutagen is False - assert result_node.next_mutagen_node is None - assert result_node.last_nm_mutagen_node is None - - -def test_action_graph_service_add_action_mutagen( - in_mem_action_graph_service: ActionGraphService, - authed_context: AuthedServiceContext, -) -> None: - """ - Test the `add_action` method of 
ActionGraphService when mutation occurs. - Scenario: We first create a np array, change its type, then change a value - at a specific index, then do an addition on the mutated value. - The final graph has 11 nodes, 10 edges, 2 mutagen nodes and 2 mutated nodes - node_1: action_obj_d = [1,2,3] - node_2: action -> np.array(d) - node_3: action_obj = np.array([1,2,3]) (automatically created) - node_4: as_type_action_obj = 'np.int32' - node_5: action -> d = np.astype(d, 'np.int32') (first mutation) - node_6: idx_action_obj = 2 - node_7: item_val_action_obj = 5 - node_8: action -> d[2] = 5 (second mutation) - node_9: action_obj_e = 48 - node_10: action -> d + e - node_11: action_obj_f = d + 48 (automatically created) - """ - # node_1: action_obj_d = [1,2,3] - action_obj_d = ActionObject.from_obj([1, 2, 3]) - in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj_d - ) - # node_2: action -> np.array(d) - action = Action( - path="action.execute", - op="np.array", - remote_self=None, - args=[action_obj_d.syft_lineage_id], - kwargs={}, - ) - action_node, result_node = in_mem_action_graph_service.add_action( - context=authed_context, action=action - ) - assert len(in_mem_action_graph_service.get_all_nodes(authed_context)) == 3 - assert len(in_mem_action_graph_service.get_all_edges(authed_context)) == 2 - assert action_node.id == action.id - assert result_node.id == action.result_id.id - assert action_node.type == NodeType.ACTION - assert result_node.type == NodeType.ACTION_OBJECT - assert result_node.is_mutated is False - assert result_node.is_mutagen is False - assert result_node.next_mutagen_node is None - assert result_node.last_nm_mutagen_node is None - # node_3 is the result_node that's automatically created - # node_4: as_type_action_obj = 'np.int32' - as_type_action_obj = ActionObject.from_obj("np.int32") - in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=as_type_action_obj - ) - # node_5: action -> d = np.astype(d, 'np.int32') -- mutation occurs - action2 = Action( - path="action.execute", - op="astype", - remote_self=action.result_id, - args=[as_type_action_obj.syft_lineage_id], - kwargs={}, - result_id=action.result_id, - ) - action_node_2, result_node_2 = in_mem_action_graph_service.add_action( - context=authed_context, action=action2 - ) - assert len(in_mem_action_graph_service.get_all_nodes(authed_context)) == 5 - assert len(in_mem_action_graph_service.get_all_edges(authed_context)) == 4 - assert action_node_2.type == NodeType.ACTION - assert result_node_2.type == NodeType.ACTION_OBJECT - assert result_node_2 == result_node - assert action_node_2.is_mutagen is True - assert action_node_2.is_mutated is False - assert result_node_2.is_mutated is True - assert result_node_2.is_mutagen is False - assert result_node_2.next_mutagen_node == action_node_2.id - assert result_node_2.last_nm_mutagen_node == action_node_2.id - assert action_node_2.next_mutagen_node is None - assert action_node_2.last_nm_mutagen_node is None - # node_6: idx_action_obj = 2 - idx_action_obj = ActionObject.from_obj(2) - in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=idx_action_obj - ) - # node_7: item_val_action_obj = 5 - item_val_action_obj = ActionObject.from_obj(5) - in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=item_val_action_obj - ) - # node_8: action -> d[2] = 5 (second mutagen node) - action3 = Action( - path="action.execute", - op="__setitem__", - 
remote_self=action.result_id, - args=[idx_action_obj.syft_lineage_id, item_val_action_obj.syft_lineage_id], - kwargs={}, - result_id=action.result_id, - ) - action_node_3, result_node_3 = in_mem_action_graph_service.add_action( - context=authed_context, action=action3 - ) - assert action.result_id == action2.result_id == action3.result_id - assert result_node_3 == action_node_2 - assert len(in_mem_action_graph_service.get_all_nodes(authed_context)) == 8 - assert len(in_mem_action_graph_service.get_all_edges(authed_context)) == 7 - # the action_node_3 is the last non-mutated mutagen node in the chain - assert action_node_3.is_mutagen is True - assert action_node_3.is_mutated is False - assert action_node_3.next_mutagen_node is None - assert action_node_3.last_nm_mutagen_node is None - # action_node_2 should be changed accordingly - assert action_node_2.is_mutagen is True - assert action_node_2.is_mutated is True - assert action_node_2.next_mutagen_node == action_node_3.id - assert action_node_2.last_nm_mutagen_node == action_node_3.id - # result_node should be changed accordingly - assert result_node.is_mutagen is False - assert result_node.is_mutated is True - assert result_node.next_mutagen_node == action_node_2.id - assert result_node.last_nm_mutagen_node == action_node_3.id - # node_9: action_obj_e = 48 - action_obj_e = ActionObject.from_obj(48) - in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj_e - ) - # node_10: action -> d + e - action4 = Action( - path="action.execute", - op="__add__", - remote_self=action.result_id, - args=[action_obj_e.syft_lineage_id], - kwargs={}, - ) - action_node_4, result_node_4 = in_mem_action_graph_service.add_action( - context=authed_context, action=action4 - ) - # node_11: action_obj_f = d + 48 (= the result_node_4 that's automatically created) - assert len(in_mem_action_graph_service.get_all_nodes(authed_context)) == 11 - assert len(in_mem_action_graph_service.get_all_edges(authed_context)) == 10 - # the __add__ node (action_node_4) should be a - # direct child of the __setitem__ (action_node_3) node - assert ( - in_mem_action_graph_service.store.is_parent( - parent=action_node_3.id, child=action_node_4.id - ).ok() - is True - ) - # action_node_4 and result_node_4 do not belong to the mutation chain - assert action_node_4.is_mutagen is False - assert action_node_4.is_mutated is False - assert action_node_4.next_mutagen_node is None - assert action_node_4.last_nm_mutagen_node is None - # result_node should be changed accordingly - assert result_node_4.is_mutagen is False - assert result_node_4.is_mutated is False - assert result_node_4.next_mutagen_node is None - assert result_node_4.last_nm_mutagen_node is None - - -def test_action_graph_service_get_remove_nodes( - in_mem_action_graph_service: ActionGraphService, - authed_context: AuthedServiceContext, -) -> None: - """ - Test the get and remove_node method of the ActionGraphService - """ - action_obj_a = ActionObject.from_obj([1, 2, 3]) - action_obj_b = ActionObject.from_obj([2, 3, 4]) - action_obj_node_a: NodeActionData = in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj_a - ) - action_obj_node_b: NodeActionData = in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj_b - ) - action = Action( - path="action.execute", - op="__add__", - remote_self=action_obj_a.syft_lineage_id, - args=[action_obj_b.syft_lineage_id], - kwargs={}, - ) - action_node, result_node = 
in_mem_action_graph_service.add_action( - context=authed_context, action=action - ) - assert len(in_mem_action_graph_service.get_all_nodes(authed_context)) == 4 - assert len(in_mem_action_graph_service.get_all_edges(authed_context)) == 3 - nodes = set( - dict(in_mem_action_graph_service.get_all_nodes(context=authed_context)).keys() - ) - # test the get method - assert action_obj_node_a == in_mem_action_graph_service.get( - uid=action_obj_a.id, context=authed_context - ) - assert action_obj_node_b == in_mem_action_graph_service.get( - uid=action_obj_b.id, context=authed_context - ) - assert action_node == in_mem_action_graph_service.get( - uid=action.id, context=authed_context - ) - # test the remove_node method - removed_result: SyftSuccess = in_mem_action_graph_service.remove_node( - authed_context, action_node.id - ) - assert ( - removed_result.message - == f"Successfully deleted node with uid: {action.id} from the graph." - ) - assert len(in_mem_action_graph_service.get_all_nodes(authed_context)) == 3 - assert len(in_mem_action_graph_service.get_all_edges(authed_context)) == 0 - nodes_after_remove = set( - dict(in_mem_action_graph_service.get_all_nodes(context=authed_context)).keys() - ) - assert action_node.id == (nodes - nodes_after_remove).pop() - - -def test_action_graph_service_update( - in_mem_action_graph_service: ActionGraphService, - authed_context: AuthedServiceContext, -) -> None: - action_obj_d = ActionObject.from_obj([1, 2, 3]) - in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj_d - ) - action = Action( - path="action.execute", - op="np.array", - remote_self=None, - args=[action_obj_d.syft_lineage_id], - kwargs={}, - ) - action_node, _ = in_mem_action_graph_service.add_action( - context=authed_context, action=action - ) - update_data = NodeActionDataUpdate( - status=ExecutionStatus.DONE, - is_mutagen=True, - is_mutated=True, - next_mutagen_node=UID(), - last_nm_mutagen_node=UID(), - ) - updated_node: NodeActionData = in_mem_action_graph_service.update( - context=authed_context, uid=action_node.id, node_data=update_data - ) - assert updated_node.id == action_node.id - assert updated_node.type == NodeType.ACTION - for k, v in update_data.to_dict(exclude_empty=True).items(): - assert getattr(updated_node, k) == v - - -def test_action_graph_service_status( - in_mem_action_graph_service: ActionGraphService, - authed_context: AuthedServiceContext, -) -> None: - """ - Test the update_action_status and get_by_action_status methods - """ - action_obj_d = ActionObject.from_obj([1, 2, 3]) - in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj_d - ) - action = Action( - path="action.execute", - op="np.array", - remote_self=None, - args=[action_obj_d.syft_lineage_id], - kwargs={}, - ) - action_node, _ = in_mem_action_graph_service.add_action( - context=authed_context, action=action - ) - - assert ( - len( - in_mem_action_graph_service.get_by_action_status( - authed_context, ExecutionStatus.PROCESSING - ) - ) - == 3 - ) - - updated_node = in_mem_action_graph_service.update_action_status( - context=authed_context, action_id=action_node.id, status=ExecutionStatus.DONE - ) - - assert updated_node.status == ExecutionStatus.DONE - - done_nodes = in_mem_action_graph_service.get_by_action_status( - authed_context, ExecutionStatus.DONE - ) - assert len(done_nodes) == 1 - assert done_nodes[0] == updated_node.id # should be just updated_node? 
- assert ( - len( - in_mem_action_graph_service.get_by_action_status( - authed_context, ExecutionStatus.PROCESSING - ) - ) - == 2 - ) - - -def test_action_graph_service_get_by_verify_key( - worker: Worker, - in_mem_action_graph_service: ActionGraphService, -) -> None: - verify_key: SyftVerifyKey = SyftSigningKey.generate().verify_key - verify_key_2: SyftVerifyKey = SyftSigningKey.generate().verify_key - assert verify_key_2 != verify_key - authed_context = AuthedServiceContext(credentials=verify_key, node=worker) - authed_context_2 = AuthedServiceContext(credentials=verify_key_2, node=worker) - action_obj = ActionObject.from_obj([1, 2, 3]) - action_obj_2 = ActionObject.from_obj([2, 3, 4]) - node_1 = in_mem_action_graph_service.add_action_obj( - context=authed_context, action_obj=action_obj - ) - node_2 = in_mem_action_graph_service.add_action_obj( - context=authed_context_2, action_obj=action_obj_2 - ) - - assert ( - in_mem_action_graph_service.get_by_verify_key(authed_context, verify_key)[0] - == node_1.id - ) - - assert ( - in_mem_action_graph_service.get_by_verify_key(authed_context_2, verify_key_2)[0] - == node_2.id - ) diff --git a/packages/syft/tests/syft/action_graph/action_graph_test.py b/packages/syft/tests/syft/action_graph/action_graph_test.py deleted file mode 100644 index 0b451f455b4..00000000000 --- a/packages/syft/tests/syft/action_graph/action_graph_test.py +++ /dev/null @@ -1,629 +0,0 @@ -""" -Tests for the classes in /syft/src/syft/service/action/action_graph.py: - - NodeActionData, NodeActionDataUpdate - - InMemoryStoreClientConfig, InMemoryGraphConfig - - NetworkXBackingStore - - InMemoryActionGraphStore -""" - -# stdlib -import os -from pathlib import Path -import sys -import tempfile -from threading import Thread - -# third party -import networkx as nx -import pytest -from result import Err - -# syft absolute -from syft.node.credentials import SyftVerifyKey -from syft.service.action.action_graph import ExecutionStatus -from syft.service.action.action_graph import InMemoryActionGraphStore -from syft.service.action.action_graph import InMemoryGraphConfig -from syft.service.action.action_graph import InMemoryStoreClientConfig -from syft.service.action.action_graph import NetworkXBackingStore -from syft.service.action.action_graph import NodeActionData -from syft.service.action.action_graph import NodeActionDataUpdate -from syft.service.action.action_graph import NodeType -from syft.service.action.action_graph_service import ExecutionStatusPartitionKey -from syft.service.action.action_object import Action -from syft.service.action.action_object import ActionObject -from syft.store.document_store import QueryKeys -from syft.store.locks import ThreadingLockingConfig -from syft.types.datetime import DateTime -from syft.types.syft_metaclass import Empty -from syft.types.uid import UID - -# relative -from .fixtures import create_action_node -from .fixtures import create_action_obj_node - - -def test_node_action_data_from_action_obj(verify_key: SyftVerifyKey) -> None: - action_obj = ActionObject.from_obj([2, 4, 6]) - node_action_obj = NodeActionData.from_action_obj( - action_obj=action_obj, credentials=verify_key - ) - - assert node_action_obj.id == action_obj.id - assert node_action_obj.user_verify_key == verify_key - assert node_action_obj.type == NodeType.ACTION_OBJECT - assert node_action_obj.status == ExecutionStatus.PROCESSING - assert node_action_obj.retry == 0 - assert isinstance(node_action_obj.created_at, DateTime) - assert node_action_obj.is_mutated is False - assert 
node_action_obj.is_mutagen is False - assert node_action_obj.next_mutagen_node is None - assert node_action_obj.last_nm_mutagen_node is None - - -def test_node_action_data_from_action_no_mutagen(verify_key: SyftVerifyKey) -> None: - """ - action -> a + b - """ - action_obj_a = ActionObject.from_obj([2, 4, 6]) - action_obj_b = ActionObject.from_obj([2, 3, 4]) - # adding 2 action objects - action = Action( - path="action.execute", - op="__add__", - remote_self=action_obj_a.syft_lineage_id, - args=[action_obj_b.syft_lineage_id], - kwargs={}, - ) - node_action_data = NodeActionData.from_action(action=action, credentials=verify_key) - - assert node_action_data.id == action.id - assert node_action_data.type == NodeType.ACTION - assert node_action_data.user_verify_key == verify_key - assert node_action_data.status == ExecutionStatus.PROCESSING - assert node_action_data.retry == 0 - assert isinstance(node_action_data.created_at, DateTime) - assert node_action_data.is_mutated is False - assert node_action_data.is_mutagen is False - assert node_action_data.next_mutagen_node is None - assert node_action_data.last_nm_mutagen_node is None - - -def test_node_action_data_from_action_mutagen(verify_key: SyftVerifyKey) -> None: - """ - action1 -> d = numpy.arry([1, 2, 3]) - action2 -> d.astype('int32') (this is a mutagen node) - """ - action_obj = ActionObject.from_obj([1, 2, 3]) - action1 = Action( - path="action.execute", - op="np.array", - remote_self=None, - args=[action_obj.syft_lineage_id], - kwargs={}, - ) - node_action_data1 = NodeActionData.from_action( - action=action1, credentials=verify_key - ) - as_type_action_obj = ActionObject.from_obj("np.int32") - action2 = Action( - path="action.execute", - op="astype", - remote_self=action1.result_id, - args=[as_type_action_obj.syft_lineage_id], - kwargs={}, - result_id=action1.result_id, - ) - node_action_data2 = NodeActionData.from_action( - action=action2, credentials=verify_key - ) - assert node_action_data1.id == action1.id - assert node_action_data2.id == action2.id - assert node_action_data1.type == NodeType.ACTION - assert node_action_data2.type == NodeType.ACTION - assert node_action_data1.is_mutagen is False - assert node_action_data2.is_mutagen is True - assert node_action_data1.next_mutagen_node is None - assert node_action_data1.last_nm_mutagen_node is None - assert node_action_data2.next_mutagen_node is None - assert node_action_data2.last_nm_mutagen_node is None - - -def test_node_action_data_update() -> None: - node_action_data_update = NodeActionDataUpdate() - - assert node_action_data_update.id == Empty - assert node_action_data_update.type == Empty - assert node_action_data_update.status == Empty - assert node_action_data_update.retry == Empty - assert node_action_data_update.created_at == Empty - assert node_action_data_update.credentials == Empty - # only updated_at is not empty - assert isinstance(node_action_data_update.updated_at, DateTime) - assert len(node_action_data_update.to_dict(exclude_empty=True)) == 1 - assert ( - node_action_data_update.to_dict(exclude_empty=False) - == node_action_data_update.dict() - ) - - -def test_in_memory_store_client_config() -> None: - default_client_conf = InMemoryStoreClientConfig() - assert default_client_conf.filename == "action_graph.bytes" - assert default_client_conf.path == tempfile.gettempdir() - assert ( - default_client_conf.file_path - == Path(tempfile.gettempdir()) / "action_graph.bytes" - ) - - custom_client_conf = InMemoryStoreClientConfig( - filename="custom_action_graph.bytes", 
path="/custom" - ) - assert custom_client_conf.filename == "custom_action_graph.bytes" - assert custom_client_conf.path == "/custom" - assert custom_client_conf.file_path == Path("/custom") / "custom_action_graph.bytes" - - -def test_in_memory_graph_config() -> None: - store_config = InMemoryGraphConfig() - default_client_conf = InMemoryStoreClientConfig() - locking_config = ThreadingLockingConfig() - - assert store_config.client_config == default_client_conf - assert store_config.store_type == NetworkXBackingStore - assert store_config.locking_config == locking_config - - -def test_networkx_backing_store_node_related_methods( - networkx_store: NetworkXBackingStore, verify_key: SyftVerifyKey -) -> None: - """ - Test the methods related to nodes of the NetworkXBackingStore: - get(), set(), is_parent(), edges(), nodes(), delete(), update() methods - """ - assert isinstance(networkx_store.db, nx.DiGraph) - - # set and get an action object node - action_obj_node: NodeActionData = create_action_obj_node(verify_key) - networkx_store.set(uid=action_obj_node.id, data=action_obj_node) - assert len(networkx_store.nodes()) == 1 - assert networkx_store.get(uid=action_obj_node.id) == action_obj_node - - # set and get an action node - action_node: NodeActionData = create_action_node(verify_key) - networkx_store.set(uid=action_node.id, data=action_node) - assert networkx_store.get(uid=action_node.id) == action_node - assert len(networkx_store.nodes()) == 2 - assert len(networkx_store.edges()) == 0 - assert ( - networkx_store.is_parent(parent=action_obj_node.id, child=action_node.id) - is False - ) - - # update the action node - update_node = NodeActionDataUpdate( - status=ExecutionStatus.DONE, is_mutagen=True, is_mutated=True - ) - for key, val in update_node.to_dict(exclude_empty=True).items(): - setattr(action_node, key, val) - networkx_store.update(uid=action_node.id, data=action_node) - updated_action_node = networkx_store.get(uid=action_node.id) - - assert updated_action_node.status == ExecutionStatus.DONE - assert updated_action_node.updated_at == update_node.updated_at - assert updated_action_node.is_mutagen == update_node.is_mutagen - assert updated_action_node.is_mutated == update_node.is_mutated - - # remove a node - assert networkx_store.exists(uid=action_obj_node.id) is True - networkx_store.delete(uid=action_obj_node.id) - assert len(networkx_store.nodes()) == 1 - assert networkx_store.exists(uid=action_obj_node.id) is False - - # remove the remaining node - networkx_store.delete(uid=action_node.id) - assert len(networkx_store.nodes()) == 0 - - -def test_networkx_backing_store_edge_related_methods( - networkx_store: NetworkXBackingStore, verify_key: SyftVerifyKey -) -> None: - """ - Test the add_edge, remove_edge and find_neighbors methods of NetworkXBackingStore - """ - # create some nodes and add them to the store - action_obj_node: NodeActionData = create_action_obj_node(verify_key) - action_node: NodeActionData = create_action_node(verify_key) - action_node_2: NodeActionData = create_action_node(verify_key) - networkx_store.set(uid=action_obj_node.id, data=action_obj_node) - networkx_store.set(uid=action_node.id, data=action_node) - networkx_store.set(uid=action_node_2.id, data=action_node_2) - # add the edges between them (we are making a closed circle here) - networkx_store.add_edge(parent=action_node.id, child=action_obj_node.id) - networkx_store.add_edge(parent=action_obj_node.id, child=action_node_2.id) - networkx_store.add_edge(parent=action_node_2.id, child=action_node.id) - - 
assert len(networkx_store.edges()) == 3 - assert ( - networkx_store.is_parent(parent=action_node.id, child=action_obj_node.id) - is True - ) - assert ( - networkx_store.is_parent(parent=action_obj_node.id, child=action_node_2.id) - is True - ) - assert ( - networkx_store.is_parent(parent=action_node_2.id, child=action_node.id) is True - ) - - # remove the edges - networkx_store.remove_edge(parent=action_node.id, child=action_obj_node.id) - assert len(networkx_store.edges()) == 2 - networkx_store.remove_edge(parent=action_obj_node.id, child=action_node_2.id) - assert len(networkx_store.edges()) == 1 - networkx_store.remove_edge(parent=action_node_2.id, child=action_node.id) - assert len(networkx_store.edges()) == 0 - assert len(networkx_store.nodes()) == 3 - - -@pytest.mark.xfail( - sys.platform == "win32", - reason="Fails on Windows. capnp\lib\capnp.pyx:3323: KjException Message did not contain a root pointer.", -) -def test_networkx_backing_store_save_load_default( - networkx_store_with_nodes: NetworkXBackingStore, verify_key: SyftVerifyKey -) -> None: - """ - Test the save and load methods of NetworkXBackingStore to a default location. - These functions rely on the serialization and deserialization methods of the store. - """ - # save the store to and from the default location - networkx_store_with_nodes.save() - default_in_mem_graph_config = InMemoryGraphConfig() - networkx_store_2 = NetworkXBackingStore(default_in_mem_graph_config) - assert networkx_store_2.nodes() == networkx_store_with_nodes.nodes() - assert networkx_store_2.edges() == networkx_store_with_nodes.edges() - # remove the saved file - os.remove(default_in_mem_graph_config.client_config.file_path) - - -def test_networkx_backing_store_save_load_custom(verify_key: SyftVerifyKey) -> None: - # save the store to and from a custom location - custom_client_conf = InMemoryStoreClientConfig( - filename="custom_action_graph.bytes", path=tempfile.gettempdir() - ) - custom_in_mem_graph_config = InMemoryGraphConfig() - custom_in_mem_graph_config.client_config = custom_client_conf - networkx_store = NetworkXBackingStore(store_config=custom_in_mem_graph_config) - action_obj_node: NodeActionData = create_action_obj_node(verify_key) - action_node: NodeActionData = create_action_node(verify_key) - action_node_2: NodeActionData = create_action_node(verify_key) - networkx_store.set(uid=action_obj_node.id, data=action_obj_node) - networkx_store.set(uid=action_node.id, data=action_node) - networkx_store.set(uid=action_node_2.id, data=action_node_2) - networkx_store.save() - # load the store from the custom location - networkx_store_2 = NetworkXBackingStore(custom_in_mem_graph_config) - assert networkx_store_2.nodes() == networkx_store.nodes() - assert networkx_store_2.edges() == networkx_store.edges() - # remove the saved file - os.remove(custom_in_mem_graph_config.client_config.file_path) - - -def test_networkx_backing_store_subgraph( - networkx_store_with_nodes: NetworkXBackingStore, verify_key: SyftVerifyKey -): - processing_status = ExecutionStatus.PROCESSING - qks = QueryKeys(qks=[ExecutionStatusPartitionKey.with_obj(processing_status)]) - subgraph = networkx_store_with_nodes.subgraph(qks) - assert len(subgraph.nodes()) == 3 - assert len(subgraph.edges()) == 0 - # add a node with a status DONE - action_node: NodeActionData = create_action_node(verify_key) - action_node.status = ExecutionStatus.DONE - networkx_store_with_nodes.set(uid=action_node.id, data=action_node) - done_status = ExecutionStatus.DONE - qks2 = 
QueryKeys(qks=[ExecutionStatusPartitionKey.with_obj(done_status)]) - subgraph2 = networkx_store_with_nodes.subgraph(qks2) - assert len(subgraph2.nodes()) == 1 - assert len(subgraph2.edges()) == 0 - - -def test_in_memory_action_graph_store_init( - in_mem_graph_config: InMemoryGraphConfig, -) -> None: - graph_store = InMemoryActionGraphStore(store_config=in_mem_graph_config) - - assert graph_store.store_config == in_mem_graph_config - assert isinstance(graph_store.graph, NetworkXBackingStore) - assert isinstance(graph_store.graph.db, nx.DiGraph) - - -def test_in_memory_action_graph_store_set_get_delete_no_mutations( - in_mem_graph_store: InMemoryActionGraphStore, - verify_key: SyftVerifyKey, -) -> None: - """ - Test these methods of InMemoryActionGraphStore: set, get, delete, nodes, edges, is_parent - when there is no mutations. - """ - # add the first node - action_obj_node: NodeActionData = create_action_obj_node(verify_key) - result = in_mem_graph_store.set(action_obj_node, credentials=verify_key) - assert result.ok() == action_obj_node - assert len(in_mem_graph_store.nodes(verify_key).ok()) == 1 - assert len(in_mem_graph_store.edges(verify_key).ok()) == 0 - assert ( - in_mem_graph_store.get(action_obj_node.id, verify_key).ok() == action_obj_node - ) - - # add the second node which is the child of the first node - action_node: NodeActionData = create_action_node(verify_key) - result2 = in_mem_graph_store.set( - action_node, credentials=verify_key, parent_uids=[action_obj_node.id] - ) - assert result2.ok() == action_node - assert len(in_mem_graph_store.nodes(verify_key).ok()) == 2 - assert len(in_mem_graph_store.edges(verify_key).ok()) == 1 - assert in_mem_graph_store.get(action_node.id, verify_key).ok() == action_node - assert ( - in_mem_graph_store.is_parent( - parent=action_obj_node.id, child=action_node.id - ).ok() - is True - ) - - # add the third node which is the child of the first and second node - action_node_2: NodeActionData = create_action_node(verify_key) - result3 = in_mem_graph_store.set( - action_node_2, - credentials=verify_key, - parent_uids=[action_obj_node.id, action_node.id], - ) - assert result3.ok() == action_node_2 - assert len(in_mem_graph_store.nodes(verify_key).ok()) == 3 - assert len(in_mem_graph_store.edges(verify_key).ok()) == 3 - assert in_mem_graph_store.get(action_node_2.id, verify_key).ok() == action_node_2 - assert ( - in_mem_graph_store.is_parent( - parent=action_obj_node.id, child=action_node_2.id - ).ok() - is True - ) - assert ( - in_mem_graph_store.is_parent(parent=action_node.id, child=action_node_2.id).ok() - is True - ) - assert ( - in_mem_graph_store.is_parent(parent=action_node_2.id, child=action_node.id).ok() - is False - ) - - # delete the first node - result4 = in_mem_graph_store.delete(action_obj_node.id, verify_key) - assert result4.ok() is True - assert len(in_mem_graph_store.nodes(verify_key).ok()) == 2 - assert len(in_mem_graph_store.edges(verify_key).ok()) == 1 - assert ( - in_mem_graph_store.is_parent(parent=action_node.id, child=action_node_2.id).ok() - is True - ) - # trying to get the deleted note should result in an Err - assert isinstance(in_mem_graph_store.get(action_obj_node.id, verify_key), Err) - - -def test_in_memory_action_graph_store_update( - in_mem_graph_store: InMemoryActionGraphStore, - verify_key: SyftVerifyKey, -) -> None: - action_obj_node: NodeActionData = create_action_obj_node(verify_key) - result = in_mem_graph_store.set(action_obj_node, credentials=verify_key).ok() - update_node = NodeActionDataUpdate( - 
status=ExecutionStatus.DONE, is_mutagen=True, is_mutated=True - ) - result2 = in_mem_graph_store.update( - uid=result.id, data=update_node, credentials=verify_key - ).ok() - assert result2.id == result.id - assert in_mem_graph_store.get(result.id, verify_key).ok() == result2 - assert result2.status == ExecutionStatus.DONE - assert result2.is_mutagen is True - assert result2.is_mutated is True - assert isinstance(result2.updated_at, DateTime) - - -def test_simple_in_memory_action_graph( - simple_in_memory_action_graph: InMemoryActionGraphStore, - verify_key: SyftVerifyKey, -) -> None: - """ - node_1: action_obj_node_a - node_2: action_obj_node_b - node_3: action -> a + b = c - """ - assert len(simple_in_memory_action_graph.edges(verify_key).ok()) == 2 - assert len(simple_in_memory_action_graph.nodes(verify_key).ok()) == 3 - # the nodes should be in the order of how they were added - nodes = list(simple_in_memory_action_graph.nodes(verify_key).ok()) - node_1: NodeActionData = nodes[0][1]["data"] - node_2: NodeActionData = nodes[1][1]["data"] - node_3: NodeActionData = nodes[2][1]["data"] - assert ( - simple_in_memory_action_graph.is_parent(parent=node_1.id, child=node_3.id).ok() - is True - ) - assert ( - simple_in_memory_action_graph.is_parent(parent=node_2.id, child=node_3.id).ok() - is True - ) - assert ( - simple_in_memory_action_graph.is_parent(parent=node_3.id, child=node_1.id).ok() - is False - ) - - -def test_multithreaded_graph_store_set_and_add_edge(verify_key: SyftVerifyKey) -> None: - thread_cnt = 3 - repeats = 5 - - execution_err = None - store_config = InMemoryGraphConfig() - graph_store = InMemoryActionGraphStore(store_config=store_config, reset=True) - - def _cbk(tid: int) -> None: - nonlocal execution_err - for _idx in range(repeats): - action_obj_a = ActionObject.from_obj([2, 4, 6]) - node_data_a = NodeActionData.from_action_obj( - action_obj_a, credentials=verify_key - ) - res1 = graph_store.set(node_data_a, credentials=verify_key) - if res1.is_err(): - execution_err = res1.err() - - action_obj_b = ActionObject.from_obj([3, 4, 6]) - node_data_b = NodeActionData.from_action_obj( - action_obj_b, credentials=verify_key - ) - res2 = graph_store.set(node_data_b, credentials=verify_key) - if res2.is_err(): - execution_err = res2.err() - - res3 = graph_store.add_edge( - node_data_a.id, node_data_b.id, credentials=verify_key - ) - if res3.is_err(): - execution_err = res3.err() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - reqd_num_nodes = thread_cnt * repeats * 2 - reqd_num_edges = thread_cnt * repeats * 1 - - assert execution_err is None - assert len(graph_store.nodes(None).ok()) == reqd_num_nodes - assert len(graph_store.edges(None).ok()) == reqd_num_edges - - -def test_multithreaded_graph_store_delete_node(verify_key: SyftVerifyKey) -> None: - thread_cnt = 3 - repeats = 5 - - execution_err = None - store_config = InMemoryGraphConfig() - graph_store = InMemoryActionGraphStore(store_config=store_config, reset=True) - - thread_id_node_map = {} - for tid in range(thread_cnt): - thread_id_node_map[tid] = [] - for _rp in range(repeats): - action_obj = ActionObject.from_obj([2, 4, 6]) - node_data = NodeActionData.from_action_obj( - action_obj, credentials=verify_key - ) - res = graph_store.set(node_data, credentials=verify_key) - if res.is_err(): - print(f"Failed to add node, error: {res.err()}") - assert 0 == 1 # TODO how else to make the test fail? 
- thread_id_node_map[tid].append(node_data.id) - - assert len(graph_store.nodes(None).ok()) == thread_cnt * repeats - - def _cbk(tid: int) -> None: - nonlocal execution_err - for idx in range(repeats): - cur_node_id = thread_id_node_map[tid][idx] - res = graph_store.delete(cur_node_id, credentials=verify_key) - if res.is_err(): - execution_err = res.err() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_cbk, args=(tid,)) - thread.start() - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - assert len(graph_store.nodes(None).ok()) == 0 - - -def test_simple_in_memory_action_graph_query( - simple_in_memory_action_graph: InMemoryActionGraphStore, - verify_key: SyftVerifyKey, -) -> None: - qks = QueryKeys( - qks=[ExecutionStatusPartitionKey.with_obj(ExecutionStatus.PROCESSING)] - ) - result = simple_in_memory_action_graph.query(qks, verify_key).ok() - # the nodes should be in the order of how they were added - nodes = list(simple_in_memory_action_graph.nodes(verify_key).ok()) - node_1: NodeActionData = nodes[0][1]["data"] - node_2: NodeActionData = nodes[1][1]["data"] - node_3: NodeActionData = nodes[2][1]["data"] - assert result[0] == node_1.id - assert result[1] == node_2.id - assert result[2] == node_3.id - # change the status of a node and do the query again - node_1.status = ExecutionStatus.DONE - done_qks = QueryKeys( - qks=[ExecutionStatusPartitionKey.with_obj(ExecutionStatus.DONE)] - ) - done_result = simple_in_memory_action_graph.query(done_qks, verify_key).ok() - processing_result = simple_in_memory_action_graph.query(qks, verify_key).ok() - assert done_result[0] == node_1.id - assert processing_result[0] == node_2.id - assert processing_result[1] == node_3.id - - -def test_multithreaded_graph_store_update_node(verify_key: SyftVerifyKey) -> None: - execution_err = None - store_config = InMemoryGraphConfig() - graph_store = InMemoryActionGraphStore(store_config=store_config, reset=True) - - action_obj_node: NodeActionData = create_action_obj_node(verify_key) - result = graph_store.set(action_obj_node, credentials=verify_key).ok() - - thread_id_update_map = [ - {"next_mutagen_node": UID()}, - {"last_nm_mutagen_node": UID()}, - {"retry": 42}, - {"is_mutagen": True}, - {"is_mutated": True}, - {"status": ExecutionStatus.DONE}, - ] - thread_cnt = len(thread_id_update_map) - - def _cbk(tid: int) -> None: - nonlocal execution_err - update_node = NodeActionDataUpdate(**thread_id_update_map[tid]) - result2 = graph_store.update( - uid=result.id, data=update_node, credentials=verify_key - ) - if result2.is_err(): - execution_err = result2.err() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_cbk, args=(tid,)) - thread.start() - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - updated_node = graph_store.get(result.id, verify_key).ok() - - for update_params in thread_id_update_map: - for param, value in update_params.items(): - assert getattr(updated_node, param) == value diff --git a/packages/syft/tests/syft/action_graph/fixtures.py b/packages/syft/tests/syft/action_graph/fixtures.py deleted file mode 100644 index fa12bb5dae5..00000000000 --- a/packages/syft/tests/syft/action_graph/fixtures.py +++ /dev/null @@ -1,133 +0,0 @@ -# third party -import numpy as np -import pytest - -# syft absolute -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey -from syft.service.action.action_graph import InMemoryActionGraphStore 
-from syft.service.action.action_graph import InMemoryGraphConfig -from syft.service.action.action_graph import NetworkXBackingStore -from syft.service.action.action_graph import NodeActionData -from syft.service.action.action_graph import NodeType -from syft.service.action.action_graph_service import ActionGraphService -from syft.service.action.action_object import Action -from syft.service.action.action_object import ActionObject - - -def create_action_obj_node(verify_key: SyftVerifyKey) -> NodeActionData: - """ - Helper function to create an action object node of a random - array of 3 float numbers - """ - random_data = np.random.rand(3) - action_obj = ActionObject.from_obj(random_data) - action_obj_node = NodeActionData.from_action_obj( - action_obj=action_obj, credentials=verify_key - ) - assert action_obj_node.type == NodeType.ACTION_OBJECT - - return action_obj_node - - -def create_action_node(verify_key: SyftVerifyKey) -> NodeActionData: - """ - Helper function to create an action node of a random - array of 3 float numbers - """ - random_data = np.random.rand(3) - action_obj = ActionObject.from_obj(random_data) - action = Action( - path="action.execute", - op="np.array", - remote_self=None, - args=[action_obj.syft_lineage_id], - kwargs={}, - ) - action_node = NodeActionData.from_action(action=action, credentials=verify_key) - assert action_node.type == NodeType.ACTION - return action_node - - -@pytest.fixture -def verify_key() -> SyftVerifyKey: - signing_key = SyftSigningKey.generate() - verify_key: SyftVerifyKey = signing_key.verify_key - yield verify_key - - -@pytest.fixture -def in_mem_graph_config() -> InMemoryGraphConfig: - yield InMemoryGraphConfig() - - -@pytest.fixture -def networkx_store(in_mem_graph_config: InMemoryGraphConfig) -> NetworkXBackingStore: - yield NetworkXBackingStore(store_config=in_mem_graph_config, reset=True) - - -@pytest.fixture -def networkx_store_with_nodes( - verify_key: SyftVerifyKey, networkx_store: NetworkXBackingStore -) -> NetworkXBackingStore: - action_obj_node: NodeActionData = create_action_obj_node(verify_key) - action_node: NodeActionData = create_action_node(verify_key) - action_node_2: NodeActionData = create_action_node(verify_key) - networkx_store.set(uid=action_obj_node.id, data=action_obj_node) - networkx_store.set(uid=action_node.id, data=action_node) - networkx_store.set(uid=action_node_2.id, data=action_node_2) - - yield networkx_store - - -@pytest.fixture -def in_mem_graph_store( - in_mem_graph_config: InMemoryGraphConfig, -) -> InMemoryActionGraphStore: - graph_store = InMemoryActionGraphStore(store_config=in_mem_graph_config, reset=True) - yield graph_store - - -@pytest.fixture -def simple_in_memory_action_graph( - in_mem_graph_store: InMemoryActionGraphStore, - verify_key: SyftVerifyKey, -) -> InMemoryActionGraphStore: - """ - Create a simple in memory graph with 3 nodes without node mutation - node_1: action_obj_node_a - node_2: action_obj_node_b - node_3: action -> a + b = c - """ - action_obj_a = ActionObject.from_obj([1, 2, 3]) - action_obj_b = ActionObject.from_obj([2, 3, 4]) - action_obj_node_a: NodeActionData = NodeActionData.from_action_obj( - action_obj_a, verify_key - ) - action_obj_node_b: NodeActionData = NodeActionData.from_action_obj( - action_obj_b, verify_key - ) - in_mem_graph_store.set(action_obj_node_a, credentials=verify_key) - in_mem_graph_store.set(action_obj_node_b, credentials=verify_key) - action = Action( - path="action.execute", - op="__add__", - remote_self=action_obj_a.syft_lineage_id, - 
args=[action_obj_b.syft_lineage_id], - kwargs={}, - ) - action_node = NodeActionData.from_action(action=action, credentials=verify_key) - in_mem_graph_store.set( - node=action_node, - credentials=verify_key, - parent_uids=[action_obj_node_a.id, action_obj_node_b.id], - ) - - yield in_mem_graph_store - - -@pytest.fixture -def in_mem_action_graph_service( - in_mem_graph_store: InMemoryActionGraphStore, -) -> ActionGraphService: - yield ActionGraphService(store=in_mem_graph_store) diff --git a/packages/syft/tests/syft/action_test.py b/packages/syft/tests/syft/action_test.py index a9b2adb1c97..851a83cb7c2 100644 --- a/packages/syft/tests/syft/action_test.py +++ b/packages/syft/tests/syft/action_test.py @@ -1,58 +1,96 @@ +# stdlib +import uuid + # third party +from faker import Faker import numpy as np +import pytest # syft absolute from syft import ActionObject from syft.client.api import SyftAPICall +from syft.server.worker import Worker from syft.service.action.action_object import Action from syft.service.response import SyftError +from syft.service.user.user_roles import ServiceRole from syft.types.uid import LineageID # relative from ..utils.custom_markers import currently_fail_on_python_3_12 +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_actionobject_method(worker): - root_domain_client = worker.root_client - action_store = worker.get_service("actionservice").store + root_datasite_client = worker.root_client + assert root_datasite_client.settings.enable_eager_execution(enable=True) + action_store = worker.services.action.stash obj = ActionObject.from_obj("abc") - pointer = root_domain_client.api.services.action.set(obj) - assert len(action_store.data) == 1 + pointer = obj.send(root_datasite_client) + assert len(action_store._data) == 1 res = pointer.capitalize() - assert len(action_store.data) == 2 + assert len(action_store._data) == 2 assert res[0] == "A" +def test_new_admin_has_action_object_permission( + worker: Worker, + faker: Faker, +) -> None: + root_client = worker.root_client + + email = uuid.uuid4().hex[:6] + faker.email() # avoid collision + pw = uuid.uuid4().hex + root_client.register( + name=faker.name(), email=email, password=pw, password_verify=pw + ) + ds_client = root_client.login(email=email, password=pw) + + obj = ActionObject.from_obj("abc") + obj.send(ds_client) + + email = faker.email() + pw = uuid.uuid4().hex + root_client.register( + name=faker.name(), email=email, password=pw, password_verify=pw + ) + + admin = root_client.login(email=email, password=pw) + + root_client.api.services.user.update(uid=admin.account.id, role=ServiceRole.ADMIN) + + assert admin.api.services.action.get(obj.id) == obj + + @currently_fail_on_python_3_12(raises=AttributeError) def test_lib_function_action(worker): - root_domain_client = worker.root_client - numpy_client = root_domain_client.api.lib.numpy + root_datasite_client = worker.root_client + numpy_client = root_datasite_client.api.lib.numpy res = numpy_client.zeros_like([1, 2, 3]) assert isinstance(res, ActionObject) assert all(res == np.array([0, 0, 0])) - assert len(worker.get_service("actionservice").store.data) > 0 + assert len(worker.services.action.stash._data) > 0 def test_call_lib_function_action2(worker): - root_domain_client = worker.root_client - assert root_domain_client.api.lib.numpy.add(1, 2) == 3 + root_datasite_client = worker.root_client + assert root_datasite_client.api.lib.numpy.add(1, 2) == 3 def test_lib_class_init_action(worker): - root_domain_client = worker.root_client - 
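The rewritten action tests above switch from the old `client.api.services.action.set(obj)` call to `ActionObject.send(client)` and reach worker internals through `worker.services.<name>`. A hedged sketch of the updated round-trip, assuming the `Worker.named` helper used elsewhere in these tests; exact signatures may differ between Syft versions:

# syft absolute
import syft as sy
from syft import ActionObject

worker = sy.Worker.named(name="example-datasite", db_url="sqlite://")
client = worker.root_client

obj = ActionObject.from_obj("abc")
pointer = obj.send(client)  # replaces the old action.set(obj) call
stored = client.api.services.action.get(pointer.id)
assert stored == obj
assert len(worker.services.action.stash._data) == 1
worker.cleanup()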
numpy_client = root_domain_client.api.lib.numpy + root_datasite_client = worker.root_client + numpy_client = root_datasite_client.api.lib.numpy res = numpy_client.float32(4.0) assert isinstance(res, ActionObject) assert res == np.float32(4.0) - assert len(worker.get_service("actionservice").store.data) > 0 + assert len(worker.services.action.stash._data) > 0 def test_call_lib_wo_permission(worker): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client fname = ActionObject.from_obj("my_fake_file") - obj1_pointer = fname.send(root_domain_client) + obj1_pointer = fname.send(root_datasite_client) action = Action( path="numpy", op="fromfile", @@ -62,17 +100,17 @@ def test_call_lib_wo_permission(worker): ) kwargs = {"action": action} api_call = SyftAPICall( - node_uid=worker.id, path="action.execute", args=[], kwargs=kwargs + server_uid=worker.id, path="action.execute", args=[], kwargs=kwargs ) - res = root_domain_client.api.make_call(api_call) + res = root_datasite_client.api.make_call(api_call) assert isinstance(res, SyftError) def test_call_lib_custom_signature(worker): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client # concatenate has a manually set signature assert all( - root_domain_client.api.lib.numpy.concatenate( + root_datasite_client.api.lib.numpy.concatenate( ([1, 2, 3], [4, 5, 6]) ).syft_action_data == np.array([1, 2, 3, 4, 5, 6]) @@ -113,7 +151,7 @@ def test_call_lib_custom_signature(worker): # return action_service_execute_method(context, action) # with mock.patch( -# "syft.core.node.new.action_object.ActionObjectPointer.execute_action", mock_func +# "syft.core.server.new.action_object.ActionObjectPointer.execute_action", mock_func # ): # result = pointer1 + pointer2 diff --git a/packages/syft/tests/syft/api_test.py b/packages/syft/tests/syft/api_test.py index 94338c990fb..ca2f61ac147 100644 --- a/packages/syft/tests/syft/api_test.py +++ b/packages/syft/tests/syft/api_test.py @@ -1,20 +1,17 @@ # stdlib from collections.abc import Callable -from textwrap import dedent # third party import numpy as np -import pytest # syft absolute import syft as sy -from syft.service.response import SyftAttributeError -from syft.service.user.user import UserUpdate +from syft.service.response import SyftError from syft.service.user.user_roles import ServiceRole def test_api_cache_invalidation(worker): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client dataset = sy.Dataset( name="test", asset_list=[ @@ -26,8 +23,8 @@ def test_api_cache_invalidation(worker): ) ], ) - root_domain_client.upload_dataset(dataset) - asset = root_domain_client.datasets[0].assets[0] + root_datasite_client.upload_dataset(dataset) + asset = root_datasite_client.datasets[0].assets[0] @sy.syft_function( input_policy=sy.ExactMatch(x=asset), @@ -36,11 +33,9 @@ def test_api_cache_invalidation(worker): def my_func(x): return x + 1 - my_func.code = dedent(my_func.code) - - assert root_domain_client.code.request_code_execution(my_func) + assert root_datasite_client.code.request_code_execution(my_func) # check that function is added to api without refreshing the api manually - assert isinstance(root_domain_client.code.my_func, Callable) + assert isinstance(root_datasite_client.code.my_func, Callable) def test_api_cache_invalidation_login(root_verify_key, worker): @@ -50,10 +45,10 @@ def test_api_cache_invalidation_login(root_verify_key, worker): name="q", email="a@b.org", password="aaa", password_verify="aaa" ) guest_client = 
guest_client.login(email="a@b.org", password="aaa") - user_id = worker.document_store.partitions["User"].all(root_verify_key).value[-1].id + user_id = worker.root_client.users[-1].id def get_role(verify_key): - users = worker.get_service("UserService").stash.get_all(root_verify_key).ok() + users = worker.services.user.stash.get_all(root_verify_key).ok() user = [u for u in users if u.verify_key == verify_key][0] return user.role @@ -62,15 +57,12 @@ def get_role(verify_key): dataset = sy.Dataset( name="test2", ) - with pytest.raises(SyftAttributeError): - assert guest_client.upload_dataset(dataset) + assert isinstance(guest_client.upload_dataset(dataset), SyftError) - assert guest_client.api.services.user.update( - user_id, UserUpdate(user_id=user_id, name="abcdef") - ) + assert guest_client.api.services.user.update(uid=user_id, name="abcdef") assert worker.root_client.api.services.user.update( - user_id, UserUpdate(user_id=user_id, role=ServiceRole.DATA_OWNER) + uid=user_id, role=ServiceRole.DATA_OWNER ) assert get_role(guest_client.credentials.verify_key) == ServiceRole.DATA_OWNER diff --git a/packages/syft/tests/syft/assets_test.py b/packages/syft/tests/syft/assets_test.py new file mode 100644 index 00000000000..acc2297f1df --- /dev/null +++ b/packages/syft/tests/syft/assets_test.py @@ -0,0 +1,28 @@ +# third party +import pytest + +# syft absolute +from syft.util.assets import load_css +from syft.util.assets import load_png_base64 +from syft.util.assets import load_svg +from syft.util.notebook_ui.icons import Icon + + +def test_load_assets(): + png = load_png_base64("small-syft-symbol-logo.png") + assert isinstance(png, str) + + with pytest.raises(FileNotFoundError): + load_png_base64("non_existent.png") + + svg = load_svg("copy.svg") + assert isinstance(svg, str) + + css = load_css("style.css") + assert isinstance(css, str) + + +def test_icons(): + for icon in Icon: + assert isinstance(icon.svg, str) + assert isinstance(icon.js_escaped_svg, str) diff --git a/packages/syft/tests/syft/blob_storage/blob_storage_test.py b/packages/syft/tests/syft/blob_storage/blob_storage_test.py index 11942815529..47e33f7926d 100644 --- a/packages/syft/tests/syft/blob_storage/blob_storage_test.py +++ b/packages/syft/tests/syft/blob_storage/blob_storage_test.py @@ -1,18 +1,25 @@ # stdlib import io -import random # third party +import numpy as np import pytest # syft absolute import syft as sy +from syft import ActionObject +from syft import Dataset +from syft import Worker +from syft.client.datasite_client import DatasiteClient +from syft.service.blob_storage.util import can_upload_to_blob_storage +from syft.service.blob_storage.util import min_size_for_blob_storage_upload from syft.service.context import AuthedServiceContext from syft.service.response import SyftSuccess from syft.service.user.user import UserCreate from syft.store.blob_storage import BlobDeposit from syft.store.blob_storage import SyftObjectRetrieval from syft.types.blob_storage import CreateBlobStorageEntry +from syft.types.errors import SyftException raw_data = {"test": "test"} data = sy.serialize(raw_data, to_bytes=True) @@ -20,12 +27,12 @@ @pytest.fixture def authed_context(worker): - yield AuthedServiceContext(node=worker, credentials=worker.signing_key.verify_key) + yield AuthedServiceContext(server=worker, credentials=worker.signing_key.verify_key) @pytest.fixture(scope="function") def blob_storage(worker): - yield worker.get_service("BlobStorageService") + yield worker.services.blob_storage def 
test_blob_storage_allocate(authed_context, blob_storage): @@ -34,54 +41,45 @@ def test_blob_storage_allocate(authed_context, blob_storage): assert isinstance(blob_deposit, BlobDeposit) -def test_blob_storage_write(): - random.seed() - name = "".join(str(random.randint(0, 9)) for i in range(8)) - worker = sy.Worker.named(name=name) - blob_storage = worker.get_service("BlobStorageService") +def test_blob_storage_write(worker): + blob_storage = worker.services.blob_storage authed_context = AuthedServiceContext( - node=worker, credentials=worker.signing_key.verify_key + server=worker, credentials=worker.signing_key.verify_key ) blob_data = CreateBlobStorageEntry.from_obj(data) blob_deposit = blob_storage.allocate(authed_context, blob_data) file_data = io.BytesIO(data) - written_data = blob_deposit.write(file_data) + written_data = blob_deposit.write(file_data).unwrap() assert isinstance(written_data, SyftSuccess) worker.cleanup() -def test_blob_storage_write_syft_object(): - random.seed() - name = "".join(str(random.randint(0, 9)) for i in range(8)) - worker = sy.Worker.named(name=name) - blob_storage = worker.get_service("BlobStorageService") +def test_blob_storage_write_syft_object(worker): + blob_storage = worker.services.blob_storage authed_context = AuthedServiceContext( - node=worker, credentials=worker.signing_key.verify_key + server=worker, credentials=worker.signing_key.verify_key ) blob_data = CreateBlobStorageEntry.from_obj(data) blob_deposit = blob_storage.allocate(authed_context, blob_data) user = UserCreate(email="info@openmined.org", name="Jana Doe", password="password") file_data = io.BytesIO(sy.serialize(user, to_bytes=True)) - written_data = blob_deposit.write(file_data) + written_data = blob_deposit.write(file_data).unwrap() assert isinstance(written_data, SyftSuccess) worker.cleanup() -def test_blob_storage_read(): - random.seed() - name = "".join(str(random.randint(0, 9)) for i in range(8)) - worker = sy.Worker.named(name=name) - blob_storage = worker.get_service("BlobStorageService") +def test_blob_storage_read(worker): + blob_storage = worker.services.blob_storage authed_context = AuthedServiceContext( - node=worker, credentials=worker.signing_key.verify_key + server=worker, credentials=worker.signing_key.verify_key ) blob_data = CreateBlobStorageEntry.from_obj(data) blob_deposit = blob_storage.allocate(authed_context, blob_data) file_data = io.BytesIO(data) - blob_deposit.write(file_data) + blob_deposit.write(file_data).unwrap() syft_retrieved_data = blob_storage.read( authed_context, blob_deposit.blob_storage_entry_id @@ -95,7 +93,61 @@ def test_blob_storage_read(): def test_blob_storage_delete(authed_context, blob_storage): blob_data = CreateBlobStorageEntry.from_obj(data) blob_deposit = blob_storage.allocate(authed_context, blob_data) - blob_storage.delete(authed_context, blob_deposit.blob_storage_entry_id) - with pytest.raises(FileNotFoundError): + assert isinstance(blob_deposit, BlobDeposit) + + file_data = io.BytesIO(data) + written_data = blob_deposit.write(file_data).unwrap() + assert type(written_data) is SyftSuccess + + item = blob_storage.read(authed_context, blob_deposit.blob_storage_entry_id) + assert isinstance(item, SyftObjectRetrieval) + assert item.read() == raw_data + + del_type = blob_storage.delete(authed_context, blob_deposit.blob_storage_entry_id) + assert type(del_type) is SyftSuccess + + with pytest.raises(SyftException): blob_storage.read(authed_context, blob_deposit.blob_storage_entry_id) + + +def 
test_action_obj_send_save_to_blob_storage(worker): + # this small object should not be saved to blob storage + data_small: np.ndarray = np.array([1, 2, 3]) + action_obj = ActionObject.from_obj(data_small) + assert action_obj.dtype == data_small.dtype + root_client: DatasiteClient = worker.root_client + action_obj.send(root_client) + assert action_obj.syft_blob_storage_entry_id is None + + # big object that should be saved to blob storage (in mb) + assert min_size_for_blob_storage_upload(root_client.api.metadata) == 1 + num_elements = 20 * 1024 * 1024 + data_big = np.random.randint(0, 100, size=num_elements) # 4 bytes per int32 + action_obj_2 = ActionObject.from_obj(data_big) + assert can_upload_to_blob_storage(action_obj_2, root_client.api.metadata).unwrap() + action_obj_2.send(root_client) + assert isinstance(action_obj_2.syft_blob_storage_entry_id, sy.UID) + # get back the object from blob storage to check if it is the same + root_authed_ctx = AuthedServiceContext( + server=worker, credentials=root_client.verify_key + ) + blob_storage = worker.services.blob_storage + syft_retrieved_data = blob_storage.read( + root_authed_ctx, action_obj_2.syft_blob_storage_entry_id + ) + assert isinstance(syft_retrieved_data, SyftObjectRetrieval) + assert all(syft_retrieved_data.read() == data_big) + + +def test_upload_dataset_save_to_blob_storage( + worker: Worker, big_dataset: Dataset, small_dataset: Dataset +) -> None: + root_client: DatasiteClient = worker.root_client + # the small dataset should not be saved to the blob storage + root_client.upload_dataset(small_dataset) + assert len(root_client.api.services.blob_storage.get_all()) == 0 + + # the big dataset should be saved to the blob storage + root_client.upload_dataset(big_dataset) + assert len(root_client.api.services.blob_storage.get_all()) == 2 diff --git a/packages/syft/tests/syft/custom_worker/config_test.py b/packages/syft/tests/syft/custom_worker/config_test.py index 76a353e2d3b..3777be6e222 100644 --- a/packages/syft/tests/syft/custom_worker/config_test.py +++ b/packages/syft/tests/syft/custom_worker/config_test.py @@ -166,7 +166,7 @@ def test_load_custom_worker_config( DOCKER_METHODS = ["from_str", "from_path"] DOCKER_CONFIG_OPENDP = f""" - FROM openmined/grid-backend:{sy.__version__} + FROM openmined/syft-backend:{sy.__version__} RUN pip install opendp """ diff --git a/packages/syft/tests/syft/dataset/dataset_stash_test.py b/packages/syft/tests/syft/dataset/dataset_stash_test.py index 2ebeafb0c30..bfec6e00895 100644 --- a/packages/syft/tests/syft/dataset/dataset_stash_test.py +++ b/packages/syft/tests/syft/dataset/dataset_stash_test.py @@ -1,111 +1,47 @@ -# stdlib - # third party import pytest -from typeguard import TypeCheckError # syft absolute from syft.service.dataset.dataset import Dataset -from syft.service.dataset.dataset_stash import ActionIDsPartitionKey -from syft.service.dataset.dataset_stash import NamePartitionKey -from syft.store.document_store import QueryKey +from syft.service.dataset.dataset_stash import DatasetStash +from syft.store.document_store_errors import NotFoundException from syft.types.uid import UID -def test_dataset_namepartitionkey() -> None: - mock_obj = "dummy_name_key" - - assert NamePartitionKey.key == "name" - assert NamePartitionKey.type_ == str - - name_partition_key = NamePartitionKey.with_obj(obj=mock_obj) - - assert isinstance(name_partition_key, QueryKey) - assert name_partition_key.key == "name" - assert name_partition_key.type_ == str - assert name_partition_key.value == mock_obj - - with 
pytest.raises(AttributeError): - NamePartitionKey.with_obj(obj=[UID()]) - - -def test_dataset_actionidpartitionkey() -> None: - mock_obj = [UID() for _ in range(3)] - - assert ActionIDsPartitionKey.key == "action_ids" - assert ActionIDsPartitionKey.type_ == list[UID] - - action_ids_partition_key = ActionIDsPartitionKey.with_obj(obj=mock_obj) - - assert isinstance(action_ids_partition_key, QueryKey) - assert action_ids_partition_key.key == "action_ids" - assert action_ids_partition_key.type_ == list[UID] - assert action_ids_partition_key.value == mock_obj - - with pytest.raises(AttributeError): - ActionIDsPartitionKey.with_obj(obj="dummy_str") - - # Not sure what Exception should be raised here, Type or Attibute - with pytest.raises(TypeCheckError): - ActionIDsPartitionKey.with_obj(obj=["first_str", "second_str"]) - - -def test_dataset_get_by_name(root_verify_key, mock_dataset_stash, mock_dataset) -> None: +def test_dataset_get_by_name( + root_verify_key, mock_dataset_stash: DatasetStash, mock_dataset: Dataset +) -> None: # retrieving existing dataset result = mock_dataset_stash.get_by_name(root_verify_key, mock_dataset.name) - assert result.is_ok(), f"Dataset could not be retrieved, result: {result}" + assert result.is_ok() assert isinstance(result.ok(), Dataset) assert result.ok().id == mock_dataset.id # retrieving non-existing dataset result = mock_dataset_stash.get_by_name(root_verify_key, "non_existing_dataset") - assert result.is_ok(), f"Dataset could not be retrieved, result: {result}" + assert result.is_err(), "Item not found" assert result.ok() is None + assert type(result.err()) is NotFoundException -# @pytest.mark.skip(reason="DatasetUpdate is not implemeted yet") -# def test_dataset_update( -# root_verify_key, mock_dataset_stash, mock_dataset, mock_dataset_update -# ) -> None: -# # succesful dataset update -# result = mock_dataset_stash.update( -# root_verify_key, dataset_update=mock_dataset_update -# ) -# assert result.is_ok(), f"Dataset could not be retrieved, result: {result}" -# assert isinstance(result.ok(), Dataset) -# assert mock_dataset.id == result.ok().id - -# # error should be raised -# other_obj = object() -# result = mock_dataset_stash.update(root_verify_key, dataset_update=other_obj) -# assert result.err(), ( -# f"Dataset was updated with non-DatasetUpdate object," f"result: {result}" -# ) - - -def test_dataset_search_action_ids(root_verify_key, mock_dataset_stash, mock_dataset): +def test_dataset_search_action_ids( + root_verify_key, mock_dataset_stash: DatasetStash, mock_dataset +): action_id = mock_dataset.assets[0].action_id result = mock_dataset_stash.search_action_ids(root_verify_key, uid=action_id) assert result.is_ok(), f"Dataset could not be retrieved, result: {result}" - assert result.ok() != [], f"Dataset was not found by action_id {action_id}" - assert isinstance(result.ok()[0], Dataset) - assert result.ok()[0].id == mock_dataset.id - - # retrieving dataset by list of action_ids - result = mock_dataset_stash.search_action_ids(root_verify_key, uid=[action_id]) - assert result.is_ok(), f"Dataset could not be retrieved, result: {result}" + assert result.ok() != [] assert isinstance(result.ok()[0], Dataset) assert result.ok()[0].id == mock_dataset.id # retrieving dataset by non-existing action_id other_action_id = UID() result = mock_dataset_stash.search_action_ids(root_verify_key, uid=other_action_id) - assert result.is_ok(), f"Dataset could not be retrieved, result: {result}" + assert result.is_ok() assert result.ok() == [] - # inconsitent behaviour, line 
62 return None, this returns [] # passing random object random_obj = object() - with pytest.raises(AttributeError): + with pytest.raises(ValueError): result = mock_dataset_stash.search_action_ids(root_verify_key, uid=random_obj) diff --git a/packages/syft/tests/syft/dataset/fixtures.py b/packages/syft/tests/syft/dataset/fixtures.py index 7d92e1104bd..bcb26bff262 100644 --- a/packages/syft/tests/syft/dataset/fixtures.py +++ b/packages/syft/tests/syft/dataset/fixtures.py @@ -30,7 +30,7 @@ def mock_dataset_stash(document_store) -> DatasetStash: @pytest.fixture -def mock_asset(worker, root_domain_client) -> Asset: +def mock_asset(worker, root_datasite_client) -> Asset: # sometimes the access rights for client are overwritten # so we need to assing the root_client manually uploader = Contributor( @@ -44,21 +44,25 @@ def mock_asset(worker, root_domain_client) -> Asset: data=np.array([0, 1, 2, 3, 4]), mock=np.array([0, 1, 1, 1, 1]), mock_is_real=False, - node_uid=worker.id, + server_uid=worker.id, uploader=uploader, contributors=[uploader], + syft_server_location=worker.id, + syft_client_verify_key=root_datasite_client.credentials.verify_key, ) - node_transform_context = TransformContext( - node=worker, - credentials=root_domain_client.credentials.verify_key, + server_transform_context = TransformContext( + server=worker, + credentials=root_datasite_client.credentials.verify_key, obj=create_asset, ) - mock_asset = create_asset.to(Asset, context=node_transform_context) + mock_asset = create_asset.to(Asset, context=server_transform_context) yield mock_asset @pytest.fixture -def mock_dataset(root_verify_key, mock_dataset_stash, mock_asset) -> Dataset: +def mock_dataset( + root_verify_key, mock_dataset_stash: DatasetStash, mock_asset +) -> Dataset: uploader = Contributor( role=str(Roles.UPLOADER), name="test", @@ -68,7 +72,7 @@ def mock_dataset(root_verify_key, mock_dataset_stash, mock_asset) -> Dataset: id=UID(), name="test_dataset", uploader=uploader, contributors=[uploader] ) mock_dataset.asset_list.append(mock_asset) - result = mock_dataset_stash.partition.set(root_verify_key, mock_dataset) + result = mock_dataset_stash.set(root_verify_key, mock_dataset) mock_dataset = result.ok() yield mock_dataset diff --git a/packages/syft/tests/syft/eager_test.py b/packages/syft/tests/syft/eager_test.py index fcfb10d3bdb..243a18130a2 100644 --- a/packages/syft/tests/syft/eager_test.py +++ b/packages/syft/tests/syft/eager_test.py @@ -1,40 +1,56 @@ # third party import numpy as np +import pytest # syft absolute -from syft.service.action.action_object import ActionObject from syft.service.action.plan import planify +from syft.types.errors import SyftException from syft.types.twin_object import TwinObject # relative from ..utils.custom_markers import currently_fail_on_python_3_12 +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_eager_permissions(worker, guest_client): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client + + assert root_datasite_client.settings.enable_eager_execution(enable=True) + + guest_client = worker.guest_client + input_obj = TwinObject( private_obj=np.array([[3, 3, 3], [3, 3, 3]]), mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - input_ptr = root_domain_client.api.services.action.set(input_obj) + input_ptr = input_obj.send(root_datasite_client) pointer = guest_client.api.services.action.get_pointer(input_ptr.id) - input_ptr = root_domain_client.api.services.action.set(input_obj) + input_ptr = 
input_obj.send(root_datasite_client) pointer = guest_client.api.services.action.get_pointer(input_ptr.id) flat_ptr = pointer.flatten() - res_guest = guest_client.api.services.action.get(flat_ptr.id) - assert not isinstance(res_guest, ActionObject) - res_root = root_domain_client.api.services.action.get(flat_ptr.id) + with pytest.raises(SyftException) as exc: + guest_client.api.services.action.get(flat_ptr.id) + + # TODO: Improve this error msg + assert exc.type == SyftException + assert "denied" in str(exc.value) + + res_root = root_datasite_client.api.services.action.get(flat_ptr.id) assert all(res_root == [3, 3, 3, 3, 3, 3]) +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_plan(worker): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client + + assert root_datasite_client.settings.enable_eager_execution(enable=True) + guest_client = worker.guest_client @planify @@ -49,18 +65,18 @@ def my_plan(x=np.array([[2, 2, 2], [2, 2, 2]])): # noqa: B008 mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - input_obj = root_domain_client.api.services.action.set(input_obj) - pointer = guest_client.api.services.action.get_pointer(input_obj.id) + input_ptr = input_obj.send(root_datasite_client) + + pointer = guest_client.api.services.action.get_pointer(input_ptr.id) res_ptr = plan_ptr(x=pointer) # guest cannot access result - assert not isinstance( - guest_client.api.services.action.get(res_ptr.id), ActionObject - ) + with pytest.raises(SyftException): + guest_client.api.services.action.get(res_ptr.id) # root can access result assert ( - root_domain_client.api.services.action.get(res_ptr.id) + root_datasite_client.api.services.action.get(res_ptr.id) == np.array([[3, 3, 3], [3, 3, 3]]).flatten().prod() ) @@ -68,14 +84,20 @@ def my_plan(x=np.array([[2, 2, 2], [2, 2, 2]])): # noqa: B008 res_ptr.request(guest_client) # root approves result - root_domain_client.api.services.request[-1].approve_with_client(root_domain_client) + root_datasite_client.api.services.request[-1].approve_with_client( + root_datasite_client + ) assert res_ptr.get_from(guest_client) == 729 +@pytest.mark.skip(reason="Disabled until we bring back eager execution") @currently_fail_on_python_3_12(raises=AttributeError) def test_plan_with_function_call(worker, guest_client): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client + + assert root_datasite_client.settings.enable_eager_execution(enable=True) + guest_client = worker.guest_client @planify @@ -90,39 +112,49 @@ def my_plan(x=np.array([[2, 2, 2], [2, 2, 2]])): # noqa: B008 mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - input_obj = root_domain_client.api.services.action.set(input_obj) + input_obj = input_obj.send(root_datasite_client) pointer = guest_client.api.services.action.get_pointer(input_obj.id) res_ptr = plan_ptr(x=pointer) - assert root_domain_client.api.services.action.get(res_ptr.id) == 18 + assert root_datasite_client.api.services.action.get(res_ptr.id) == 18 +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_plan_with_object_instantiation(worker, guest_client): + root_datasite_client = worker.root_client + + assert root_datasite_client.settings.enable_eager_execution(enable=True) + + guest_client = worker.guest_client + @planify def my_plan(x=np.array([1, 2, 3, 4, 5, 6])): # noqa: B008 return x + 1 - root_domain_client = worker.root_client - plan_ptr = my_plan.send(guest_client) input_obj = TwinObject( private_obj=np.array([1, 2, 3, 4, 5, 6]), 
mock_obj=np.array([1, 1, 1, 1, 1, 1]) ) - _id = root_domain_client.api.services.action.set(input_obj).id + _id = input_obj.send(root_datasite_client).id pointer = guest_client.api.services.action.get_pointer(_id) res_ptr = plan_ptr(x=pointer) assert all( - root_domain_client.api.services.action.get(res_ptr.id).syft_action_data + root_datasite_client.api.services.action.get(res_ptr.id).syft_action_data == np.array([2, 3, 4, 5, 6, 7]) ) +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_setattribute(worker, guest_client): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client + + assert root_datasite_client.settings.enable_eager_execution(enable=True) + + guest_client = worker.guest_client private_data, mock_data = ( np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), @@ -133,7 +165,7 @@ def test_setattribute(worker, guest_client): assert private_data.dtype != np.int32 - obj_pointer = root_domain_client.api.services.action.set(obj) + obj_pointer = obj.send(root_datasite_client) obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id) original_id = obj_pointer.id @@ -142,10 +174,10 @@ def test_setattribute(worker, guest_client): obj_pointer.dtype = np.int32 # local object is updated - assert obj_pointer.id.id in worker.action_store.data + assert obj_pointer.id.id in worker.action_store._data assert obj_pointer.id != original_id - res = root_domain_client.api.services.action.get(obj_pointer.id) + res = root_datasite_client.api.services.action.get(obj_pointer.id) # check if updated assert res.dtype == np.int32 @@ -158,59 +190,69 @@ def test_setattribute(worker, guest_client): assert not (obj_pointer.syft_action_data == private_data).all() +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_getattribute(worker, guest_client): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client + assert root_datasite_client.settings.enable_eager_execution(enable=True) + guest_client = worker.guest_client + obj = TwinObject( private_obj=np.array([[1, 2, 3], [4, 5, 6]]), mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - obj_pointer = root_domain_client.api.services.action.set(obj) + obj_pointer = obj.send(root_datasite_client) obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id) size_pointer = obj_pointer.size # check result - assert size_pointer.id.id in worker.action_store.data - assert root_domain_client.api.services.action.get(size_pointer.id) == 6 + assert size_pointer.id.id in worker.action_store._data + assert root_datasite_client.api.services.action.get(size_pointer.id) == 6 +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_eager_method(worker, guest_client): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client + assert root_datasite_client.settings.enable_eager_execution(enable=True) + guest_client = worker.guest_client obj = TwinObject( private_obj=np.array([[1, 2, 3], [4, 5, 6]]), mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - obj_pointer = root_domain_client.api.services.action.set(obj) + obj_pointer = obj.send(root_datasite_client) obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id) flat_pointer = obj_pointer.flatten() - assert flat_pointer.id.id in worker.action_store.data + assert flat_pointer.id.id in worker.action_store._data # check result assert all( - root_domain_client.api.services.action.get(flat_pointer.id) + 
root_datasite_client.api.services.action.get(flat_pointer.id) == np.array([1, 2, 3, 4, 5, 6]) ) +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_eager_dunder_method(worker, guest_client): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client + assert root_datasite_client.settings.enable_eager_execution(enable=True) + guest_client = worker.guest_client obj = TwinObject( private_obj=np.array([[1, 2, 3], [4, 5, 6]]), mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - obj_pointer = root_domain_client.api.services.action.set(obj) + obj_pointer = obj.send(root_datasite_client) obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id) first_row_pointer = obj_pointer[0] - assert first_row_pointer.id.id in worker.action_store.data + assert first_row_pointer.id.id in worker.action_store._data # check result assert all( - root_domain_client.api.services.action.get(first_row_pointer.id) + root_datasite_client.api.services.action.get(first_row_pointer.id) == np.array([1, 2, 3]) ) diff --git a/packages/syft/tests/syft/grid_url_test.py b/packages/syft/tests/syft/grid_url_test.py deleted file mode 100644 index ef93d15e783..00000000000 --- a/packages/syft/tests/syft/grid_url_test.py +++ /dev/null @@ -1,27 +0,0 @@ -# third party -import pytest - -# syft absolute -from syft.types.grid_url import GridURL - -test_suite = [ - ("http://0.0.0.0", 8081, "http://0.0.0.0:8081"), - ("http://0.0.0.0", None, "http://0.0.0.0:80"), - (None, None, "http://localhost:80"), - ("http://0.0.0.0:8081", 8082, "http://0.0.0.0:8081"), - ("0.0.0.0:8081", None, "http://0.0.0.0:8081"), - ("domainname.com", None, "http://domainname.com:80"), - ("https://domainname.com", None, "https://domainname.com:80"), -] - - -@pytest.mark.parametrize("url, port, ground_truth", test_suite) -def test_grid_url(url, port, ground_truth) -> None: - if not url and not port: - assert GridURL().base_url == ground_truth - elif not url: - assert GridURL(port=port).base_url == ground_truth - elif not port: - assert GridURL(host_or_ip=url).base_url == ground_truth - else: - assert GridURL(host_or_ip=url, port=port).base_url == ground_truth diff --git a/packages/syft/tests/syft/hash_test.py b/packages/syft/tests/syft/hash_test.py index df97de4a19e..bfdf99219fa 100644 --- a/packages/syft/tests/syft/hash_test.py +++ b/packages/syft/tests/syft/hash_test.py @@ -3,12 +3,16 @@ # syft absolute from syft.serde.serializable import serializable -from syft.types.syft_object import SYFT_OBJECT_VERSION_2 +from syft.types.syft_object import SYFT_OBJECT_VERSION_1 from syft.types.syft_object import SyftBaseObject from syft.types.syft_object import SyftHashableObject -@serializable(attrs=["key", "value", "flag"]) +@serializable( + attrs=["key", "value", "flag"], + canonical_name="MockObject", + version=1, +) class MockObject(SyftHashableObject): key: str value: str @@ -26,7 +30,7 @@ def __init__(self, key, value, flag=None): @serializable(attrs=["id", "data"]) class MockWrapper(SyftBaseObject, SyftHashableObject): __canonical_name__ = "MockWrapper" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: str data: MockObject | None diff --git a/packages/syft/tests/syft/locks_test.py b/packages/syft/tests/syft/locks_test.py deleted file mode 100644 index 429e983ead9..00000000000 --- a/packages/syft/tests/syft/locks_test.py +++ /dev/null @@ -1,381 +0,0 @@ -# stdlib -from pathlib import Path -from secrets import token_hex -import tempfile -from threading import Thread -import time 
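The hash_test.py change above shows the new registration style for plain hashable helpers: `@serializable` now takes an explicit `canonical_name` and `version` rather than leaning on the `SYFT_OBJECT_VERSION_*` constants. A small sketch under that assumption, with illustrative class and field names:

# syft absolute
from syft.serde.serializable import serializable
from syft.types.syft_object import SyftHashableObject


@serializable(attrs=["key", "value"], canonical_name="ExampleHashable", version=1)
class ExampleHashable(SyftHashableObject):
    key: str
    value: str

    def __init__(self, key: str, value: str) -> None:
        self.key = key
        self.value = value


# instances with equal attrs are expected to hash identically
assert hash(ExampleHashable("a", "b")) == hash(ExampleHashable("a", "b"))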
- -# third party -import pytest - -# syft absolute -from syft.store.locks import FileLockingConfig -from syft.store.locks import LockingConfig -from syft.store.locks import NoLockingConfig -from syft.store.locks import SyftLock -from syft.store.locks import ThreadingLockingConfig - -def_params = { - "lock_name": "testing_lock", - "expire": 5, # seconds, - "timeout": 1, # seconds, - "retry_interval": 0.1, # seconds, -} - - -@pytest.fixture(scope="function") -def locks_nop_config(request): - def_params["lock_name"] = token_hex(8) - yield NoLockingConfig(**def_params) - - -@pytest.fixture(scope="function") -def locks_threading_config(request): - def_params["lock_name"] = token_hex(8) - yield ThreadingLockingConfig(**def_params) - - -@pytest.fixture(scope="function") -def locks_file_config(): - def_params["lock_name"] = token_hex(8) - yield FileLockingConfig(**def_params) - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_nop_config"), - pytest.lazy_fixture("locks_threading_config"), - pytest.lazy_fixture("locks_file_config"), - ], -) -def test_sanity(config: LockingConfig): - lock = SyftLock(config) - - assert lock is not None - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_nop_config"), - ], -) -def test_acquire_nop(config: LockingConfig): - lock = SyftLock(config) - - assert lock.locked() is False - - acq_ok = lock.acquire() - assert acq_ok - - assert lock.locked() is False - - lock.release() - - assert lock.locked() is False - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_threading_config"), - pytest.lazy_fixture("locks_file_config"), - ], -) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_acquire_release(config: LockingConfig): - lock = SyftLock(config) - - expected_not_locked = lock.locked() - - acq_ok = lock.acquire() - assert acq_ok - - expected_locked = lock.locked() - - lock.release() - - expected_not_locked_again = lock.locked() - - assert not expected_not_locked - assert expected_locked - assert not expected_not_locked_again - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_threading_config"), - pytest.lazy_fixture("locks_file_config"), - ], -) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_acquire_release_with(config: LockingConfig): - was_locked = True - with SyftLock(config) as lock: - was_locked = lock.locked() - - assert was_locked - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_threading_config"), - pytest.lazy_fixture("locks_file_config"), - ], -) -def test_acquire_expire(config: LockingConfig): - config.expire = 1 # second - lock = SyftLock(config) - - expected_not_locked = lock.locked() - - acq_ok = lock.acquire(blocking=True) - assert acq_ok - - expected_locked = lock.locked() - - time.sleep(config.expire + 1.0) - - expected_not_locked_again = lock.locked() - - assert not expected_not_locked - assert expected_locked - assert not expected_not_locked_again - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_threading_config"), - pytest.lazy_fixture("locks_file_config"), - ], -) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_acquire_double_aqcuire_timeout_fail(config: LockingConfig): - config.timeout = 1 - config.expire = 5 - lock = SyftLock(config) - - acq_ok = lock.acquire(blocking=True) - assert acq_ok - - not_acq = lock.acquire(blocking=True) - - lock.release() - - assert not not_acq - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_threading_config"), - 
pytest.lazy_fixture("locks_file_config"), - ], -) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_acquire_double_aqcuire_timeout_ok(config: LockingConfig): - config.timeout = 2 - config.expire = 1 - lock = SyftLock(config) - - lock.locked() - - acq_ok = lock.acquire(blocking=True) - assert acq_ok - - also_acq = lock.acquire(blocking=True) - - lock.release() - - assert also_acq - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_threading_config"), - pytest.lazy_fixture("locks_file_config"), - ], -) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_acquire_double_aqcuire_nonblocking(config: LockingConfig): - config.timeout = 2 - config.expire = 1 - lock = SyftLock(config) - - lock.locked() - - acq_ok = lock.acquire(blocking=False) - assert acq_ok - - not_acq = lock.acquire(blocking=False) - - lock.release() - - assert not not_acq - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_threading_config"), - pytest.lazy_fixture("locks_file_config"), - ], -) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_acquire_double_aqcuire_retry_interval(config: LockingConfig): - config.timeout = 2 - config.expire = 1 - config.retry_interval = 3 - lock = SyftLock(config) - - lock.locked() - - acq_ok = lock.acquire(blocking=True) - assert acq_ok - - not_acq = lock.acquire(blocking=True) - - lock.release() - - assert not not_acq - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_threading_config"), - pytest.lazy_fixture("locks_file_config"), - ], -) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_acquire_double_release(config: LockingConfig): - lock = SyftLock(config) - - lock.acquire(blocking=True) - - lock.release() - lock.release() - - -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_threading_config"), - pytest.lazy_fixture("locks_file_config"), - ], -) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_acquire_same_name_diff_namespace(config: LockingConfig): - config.namespace = "ns1" - lock1 = SyftLock(config) - assert lock1.acquire(blocking=True) - - config.namespace = "ns2" - lock2 = SyftLock(config) - assert lock2.acquire(blocking=True) - - lock2.release() - lock1.release() - - -@pytest.mark.skip(reason="The tests are highly flaky, delaying progress on PR's") -@pytest.mark.parametrize( - "config", - [ - pytest.lazy_fixture("locks_threading_config"), - pytest.lazy_fixture("locks_file_config"), - ], -) -def test_locks_parallel_multithreading(config: LockingConfig) -> None: - thread_cnt = 3 - repeats = 5 - - temp_dir = Path(tempfile.TemporaryDirectory().name) - temp_dir.mkdir(parents=True, exist_ok=True) - temp_file = temp_dir / "dbg.txt" - if temp_file.exists(): - temp_file.unlink() - - with open(temp_file, "w") as f: - f.write("0") - - config.timeout = 10 - lock = SyftLock(config) - - def _kv_cbk(tid: int) -> None: - for _idx in range(repeats): - locked = lock.acquire() - if not locked: - continue - - for _retry in range(10): - try: - with open(temp_file) as f: - prev = f.read() - prev = int(prev) - with open(temp_file, "w") as f: - f.write(str(prev + 1)) - f.flush() - break - except BaseException as e: - print("failed ", e) - - lock.release() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - with open(temp_file) as f: - stored = int(f.read()) - - assert stored == thread_cnt * repeats - - -# @pytest.mark.skip(reason="Joblib is flaky") -# 
@pytest.mark.parametrize( -# "config", -# [ -# pytest.lazy_fixture("locks_file_config"), -# ], -# ) -# def test_parallel_joblib( -# config: LockingConfig, -# ) -> None: -# thread_cnt = 3 -# repeats = 5 - -# temp_dir = Path(tempfile.TemporaryDirectory().name) -# temp_dir.mkdir(parents=True, exist_ok=True) -# temp_file = temp_dir / "dbg.txt" -# if temp_file.exists(): -# temp_file.unlink() - -# with open(temp_file, "w") as f: -# f.write("0") - -# def _kv_cbk(tid: int) -> None: -# for _idx in range(repeats): -# with SyftLock(config): -# with open(temp_file) as f: -# prev = int(f.read()) -# with open(temp_file, "w") as f: -# f.write(str(prev + 1)) - -# Parallel(n_jobs=thread_cnt)(delayed(_kv_cbk)(idx) for idx in range(thread_cnt)) - -# with open(temp_file) as f: -# stored = int(f.read()) - -# assert stored == thread_cnt * repeats diff --git a/packages/syft/tests/syft/migrations/data_migration_test.py b/packages/syft/tests/syft/migrations/data_migration_test.py new file mode 100644 index 00000000000..a5203e2a0f8 --- /dev/null +++ b/packages/syft/tests/syft/migrations/data_migration_test.py @@ -0,0 +1,159 @@ +# stdlib +from contextlib import contextmanager +import secrets + +# third party +import faker +import numpy as np +import pytest +import yaml + +# syft absolute +import syft as sy +from syft.client.datasite_client import DatasiteClient +from syft.service.migration.object_migration_state import MigrationData +from syft.service.response import SyftSuccess +from syft.service.user.user import User +from syft.types.errors import SyftException + + +def register_ds(client): + f = faker.Faker() + + email = f.email() + password = secrets.token_urlsafe(16) + client.register( + name=f.name(), + email=email, + password=password, + password_verify=password, + ) + return client.login(email=email, password=password) + + +def create_dataset(client): + mock = np.random.random(5) + private = np.random.random(5) + + dataset = sy.Dataset( + name=sy.util.util.random_name().lower(), + description="Lorem ipsum dolor sit amet, consectetur adipiscing elit", + asset_list=[ + sy.Asset( + name="numpy-data", + mock=mock, + data=private, + shape=private.shape, + mock_is_real=True, + ) + ], + ) + + client.upload_dataset(dataset) + return dataset + + +def make_request(client: DatasiteClient) -> DatasiteClient: + @sy.syft_function_single_use() + def compute() -> int: + return 42 + + _ = client.code.request_code_execution(compute) + + +def prepare_data(client: DatasiteClient) -> None: + # Create DS, upload dataset, create + approve + execute single request + ds_client = register_ds(client) + create_dataset(client) + + @sy.syft_function_single_use() + def compute() -> int: + return 42 + + ds_client.code.request_code_execution(compute) + + client.requests[0].approve() + + result = ds_client.code.compute() + assert result.get() == 42 + + +def test_get_migration_data(worker, tmp_path): + # Tests a full data dump for migration + # TODO current prepare_data is a small scenario, add more data + edge-cases + client = worker.root_client + prepare_data(client) + + migration_data = client.get_migration_data() + assert isinstance(migration_data, MigrationData) + + # Admin + data scientist + assert len(migration_data.store_objects[User]) == 2 + + # Check if all blobs are there + blob_ids = {blob.id for blob in migration_data.blob_storage_objects} + assert blob_ids == set(migration_data.blobs.keys()) + + # Save + load + blob_path = tmp_path / "migration.blob" + yaml_path = tmp_path / "migration.yaml" + migration_data.save(blob_path, 
yaml_path) + + loaded_migration_data = MigrationData.from_file(blob_path) + + with open(yaml_path) as f: + loaded_migration_yaml = yaml.safe_load(f) + + assert isinstance(loaded_migration_data, MigrationData) + assert loaded_migration_data.num_objects == migration_data.num_objects + assert loaded_migration_data.num_action_objects == migration_data.num_action_objects + assert loaded_migration_data.blobs.keys() == migration_data.blobs.keys() + + assert loaded_migration_yaml == migration_data.make_migration_config() + + +@contextmanager +def named_worker_context(name): + # required to launch worker with same name twice within the same test + ensure cleanup + worker = sy.Worker.named(name=name, db_url="sqlite://") + try: + yield worker + finally: + worker.cleanup() + + +def test_data_migration_same_version(tmp_path): + server_name = secrets.token_hex(8) + blob_path = tmp_path / "migration.blob" + yaml_path = tmp_path / "migration.yaml" + + # Setup + save migration data + with named_worker_context(server_name) as first_worker: + prepare_data(first_worker.root_client) + first_migration_data = first_worker.root_client.get_migration_data() + first_migration_data.save(blob_path, yaml_path) + + # Load migration data on wrong worker + with named_worker_context(secrets.token_hex(8)) as wrong_worker: + with pytest.raises(SyftException): + result = wrong_worker.root_client.load_migration_data(blob_path) + + # Load migration data on correct worker + # NOTE worker is correct because admin keys and server id are derived from server name, + # so they match the first worker + with named_worker_context(server_name) as migration_worker: + client = migration_worker.root_client + + # DB is new, no DS registered yet + assert len(client.users.get_all()) == 1 + + assert migration_worker.id == first_migration_data.server_uid + assert migration_worker.verify_key == first_migration_data.root_verify_key + + result = migration_worker.root_client.load_migration_data(blob_path) + assert isinstance(result, SyftSuccess) + + assert len(client.users.get_all()) == 2 + assert len(client.requests.get_all()) == 1 + assert len(client.datasets.get_all()) == 1 + assert len(client.code.get_all()) == 1 diff --git a/packages/syft/tests/syft/migrations/protocol_communication_test.py b/packages/syft/tests/syft/migrations/protocol_communication_test.py deleted file mode 100644 index b2b7f5a15e9..00000000000 --- a/packages/syft/tests/syft/migrations/protocol_communication_test.py +++ /dev/null @@ -1,264 +0,0 @@ -# stdlib -from copy import deepcopy -from pathlib import Path -from unittest import mock - -# third party -import pytest - -# syft absolute -import syft as sy -from syft.node.worker import Worker -from syft.protocol.data_protocol import get_data_protocol -from syft.protocol.data_protocol import protocol_release_dir -from syft.protocol.data_protocol import stage_protocol_changes -from syft.serde.recursive import TYPE_BANK -from syft.serde.serializable import serializable -from syft.service.context import AuthedServiceContext -from syft.service.response import SyftError -from syft.service.service import AbstractService -from syft.service.service import ServiceConfigRegistry -from syft.service.service import service_method -from syft.service.user.user_roles import GUEST_ROLE_LEVEL -from syft.store.document_store import BaseStash -from syft.store.document_store import DocumentStore -from syft.store.document_store import PartitionSettings -from syft.types.syft_migration import migrate -from syft.types.syft_object import 
SYFT_OBJECT_VERSION_2 -from syft.types.syft_object import SyftBaseObject -from syft.types.syft_object import SyftObject -from syft.types.transforms import convert_types -from syft.types.transforms import rename -from syft.types.uid import UID -from syft.util.util import index_syft_by_module_name - -MOCK_TYPE_BANK = deepcopy(TYPE_BANK) - - -def get_klass_version_1(): - @serializable() - class SyftMockObjectTestV1(SyftObject): - __canonical_name__ = "SyftMockObjectTest" - __version__ = SYFT_OBJECT_VERSION_2 - - id: UID - name: str - version: int - - return SyftMockObjectTestV1 - - -def get_klass_version_2(): - @serializable() - class SyftMockObjectTestV2(SyftObject): - __canonical_name__ = "SyftMockObjectTest" - __version__ = SYFT_OBJECT_VERSION_2 - - id: UID - full_name: str - version: str - - return SyftMockObjectTestV2 - - -def setup_migration_transforms(mock_klass_v1, mock_klass_v2): - @migrate(mock_klass_v1, mock_klass_v2) - def mock_v1_to_v2(): - return [rename("name", "full_name"), convert_types(["version"], str)] - - @migrate(mock_klass_v2, mock_klass_v1) - def mock_v2_to_v1(): - return [rename("full_name", "name"), convert_types(["version"], int)] - - return mock_v1_to_v2, mock_v2_to_v1 - - -def get_stash_klass(syft_object: type[SyftBaseObject]): - @serializable() - class SyftMockObjectStash(BaseStash): - object_type = syft_object - settings: PartitionSettings = PartitionSettings( - name=object_type.__canonical_name__, - object_type=syft_object, - ) - - def __init__(self, store: DocumentStore) -> None: - super().__init__(store=store) - - return SyftMockObjectStash - - -def setup_service_method(syft_object): - stash_klass: BaseStash = get_stash_klass(syft_object=syft_object) - - @serializable() - class SyftMockObjectService(AbstractService): - store: DocumentStore - stash: stash_klass - __module__: str = "syft.test" - - def __init__(self, store: DocumentStore) -> None: - self.store = store - self.stash = stash_klass(store=store) - - @service_method( - path="dummy.syft_object", - name="get", - roles=GUEST_ROLE_LEVEL, - ) - def get(self, context: AuthedServiceContext) -> list[syft_object] | SyftError: - result = self.stash.get_all(context.credentials, has_permission=True) - if result.is_ok(): - return result.ok() - return SyftError(message=f"{result.err()}") - - return SyftMockObjectService - - -def setup_version_one(node_name: str): - syft_klass_version_one = get_klass_version_1() - sy.stage_protocol_changes() - sy.bump_protocol_version() - - syft_service_klass = setup_service_method( - syft_object=syft_klass_version_one, - ) - - node = sy.orchestra.launch(node_name, dev_mode=True, reset=True) - - worker: Worker = node.python_node - - worker.services.append(syft_service_klass) - worker.service_path_map[syft_service_klass.__name__.lower()] = syft_service_klass( - store=worker.document_store - ) - - return node, syft_klass_version_one - - -def mock_syft_version(): - return f"{sy.__version__}.dev" - - -def setup_version_second(node_name: str, klass_version_one: type): - syft_klass_version_second = get_klass_version_2() - setup_migration_transforms(klass_version_one, syft_klass_version_second) - - sy.stage_protocol_changes() - sy.bump_protocol_version() - - syft_service_klass = setup_service_method(syft_object=syft_klass_version_second) - - node = sy.orchestra.launch(node_name, dev_mode=True) - - worker: Worker = node.python_node - - worker.services.append(syft_service_klass) - worker.service_path_map[syft_service_klass.__name__.lower()] = syft_service_klass( - store=worker.document_store 
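The new data_migration_test.py above boils down to one workflow: export everything from a server, write it to disk, then load it into a fresh server created under the same name, whose admin keys and server id are derived from that name and therefore match. A condensed, hedged sketch of that flow; the paths are placeholders and the test itself passes Path objects rather than strings:

# syft absolute
import syft as sy
from syft.service.response import SyftSuccess

source = sy.Worker.named(name="migration-demo", db_url="sqlite://")
migration_data = source.root_client.get_migration_data()
migration_data.save("migration.blob", "migration.yaml")
source.cleanup()  # same-named workers cannot coexist

target = sy.Worker.named(name="migration-demo", db_url="sqlite://")
result = target.root_client.load_migration_data("migration.blob")
assert isinstance(result, SyftSuccess)
target.cleanup()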
- ) - - return node, syft_klass_version_second - - -@pytest.fixture -def my_stage_protocol(protocol_file: Path): - with mock.patch( - "syft.protocol.data_protocol.PROTOCOL_STATE_FILENAME", - protocol_file.name, - ): - dp = get_data_protocol() - stage_protocol_changes() - yield dp.protocol_history - dp.revert_latest_protocol() - dp.save_history(dp.protocol_history) - - # Cleanup release dir, remove unused released files - for _file_path in protocol_release_dir().iterdir(): - for version in dp.read_json(_file_path): - if version not in dp.protocol_history.keys(): - _file_path.unlink() - - -@pytest.mark.skip( - reason="Issues running with other tests. Shared release folder causes issues." -) -def test_client_server_running_different_protocols(my_stage_protocol): - def patched_index_syft_by_module_name(fully_qualified_name: str): - if klass_v1.__name__ in fully_qualified_name: - return klass_v1 - elif klass_v2.__name__ in fully_qualified_name: - return klass_v2 - - return index_syft_by_module_name(fully_qualified_name) - - node_name = UID().to_string() - with mock.patch("syft.serde.recursive.TYPE_BANK", MOCK_TYPE_BANK): - with mock.patch( - "syft.protocol.data_protocol.TYPE_BANK", - MOCK_TYPE_BANK, - ): - with mock.patch( - "syft.client.api.index_syft_by_module_name", - patched_index_syft_by_module_name, - ): - # Setup mock object version one - nh1, klass_v1 = setup_version_one(node_name) - assert klass_v1.__canonical_name__ == "SyftMockObjectTest" - assert klass_v1.__name__ == "SyftMockObjectTestV1" - - nh1_client = nh1.client - assert nh1_client is not None - result_from_client_1 = nh1_client.api.services.dummy.get() - - protocol_version_with_mock_obj_v1 = get_data_protocol().latest_version - - # No data saved - assert len(result_from_client_1) == 0 - - # Setup mock object version second - with mock.patch( - "syft.protocol.data_protocol.__version__", mock_syft_version() - ): - nh2, klass_v2 = setup_version_second( - node_name, klass_version_one=klass_v1 - ) - - # Create a sample data in version second - sample_data = klass_v2(full_name="John", version=str(1), id=UID()) - - assert isinstance(sample_data, klass_v2) - - # Validate migrations - sample_data_v1 = sample_data.migrate_to( - version=klass_v1.__version__, - ) - assert sample_data_v1.name == sample_data.full_name - assert sample_data_v1.version == int(sample_data.version) - - # Set the sample data in version second - service_klass = nh1.python_node.get_service("SyftMockObjectService") - service_klass.stash.set( - nh1.python_node.root_client.verify_key, - sample_data, - ) - - nh2_client = nh2.client - assert nh2_client is not None - # Force communication protocol to when version object is defined - nh2_client.communication_protocol = ( - protocol_version_with_mock_obj_v1 - ) - # Reset api - nh2_client._api = None - - # Call the API with an older communication protocol version - result2 = nh2_client.api.services.dummy.get() - assert isinstance(result2, list) - - # Validate the data received - for data in result2: - assert isinstance(data, klass_v1) - assert data.name == sample_data.full_name - assert data.version == int(sample_data.version) - ServiceConfigRegistry.__service_config_registry__.pop("dummy.syft_object", None) diff --git a/packages/syft/tests/syft/network_test.py b/packages/syft/tests/syft/network_test.py new file mode 100644 index 00000000000..3bb4b5e84e3 --- /dev/null +++ b/packages/syft/tests/syft/network_test.py @@ -0,0 +1,31 @@ +# syft absolute +from syft.abstract_server import ServerType +from syft.server.credentials 
import SyftSigningKey +from syft.service.network.network_service import NetworkStash +from syft.service.network.server_peer import ServerPeer +from syft.service.network.server_peer import ServerPeerUpdate +from syft.types.uid import UID + + +def test_add_route() -> None: + uid = UID() + peer = ServerPeer( + id=uid, + name="test", + verify_key=SyftSigningKey.generate().verify_key, + server_type=ServerType.DATASITE, + admin_email="info@openmined.org", + ) + network_stash = NetworkStash.random() + + network_stash.set( + credentials=network_stash.db.root_verify_key, + obj=peer, + ).unwrap() + peer_update = ServerPeerUpdate(id=uid, name="new name") + peer = network_stash.update( + credentials=network_stash.db.root_verify_key, + obj=peer_update, + ).unwrap() + + assert peer.name == "new name" diff --git a/packages/syft/tests/syft/notebook_ui_test.py b/packages/syft/tests/syft/notebook_ui_test.py new file mode 100644 index 00000000000..7129e7beb74 --- /dev/null +++ b/packages/syft/tests/syft/notebook_ui_test.py @@ -0,0 +1,101 @@ +# stdlib +from typing import Any + +# third party +import numpy as np +import pytest +import torch + +# syft absolute +from syft import UID +from syft.service.action.action_object import ActionObject +from syft.service.user.user import User +from syft.util.table import TABLE_INDEX_KEY +from syft.util.table import prepare_table_data + + +def table_displayed(obj_to_check: Any) -> bool: + return "Tabulator" in obj_to_check._repr_html_() + + +def no_html_repr_displayed(obj_to_check: Any) -> bool: + return obj_to_check._repr_html_() is None + + +def obj_repr_displayed(obj_to_check: Any) -> bool: + return obj_to_check._repr_html_() == obj_to_check.__repr__() + + +def table_test_cases() -> list[tuple[list, str | None]]: + ao_1 = ActionObject.from_obj(10.0) + ao_2 = ActionObject.from_obj(20.0) + np_ao = ActionObject.from_obj(np.array([10, 20])) + torch_ao = ActionObject.from_obj(torch.tensor([10, 20])) + user_1 = User(email="x@y.z") + user_2 = User(email="a@b.c") + + # Makes table + homogenous_ao = ([ao_1, ao_2], table_displayed) + non_homogenous_same_repr = ([ao_1, ao_2, np_ao], table_displayed) + homogenous_user = ([user_1, user_2], table_displayed) + empty_list = ([], obj_repr_displayed) + non_syft_objs = ([1, 2.0, 3, 4], no_html_repr_displayed) + + # Doesn't make table + non_homogenous_different_repr = ( + [ao_1, ao_2, user_1, user_2], + no_html_repr_displayed, + ) + non_syft_obj_1 = ([1, ao_1, ao_2], no_html_repr_displayed) + non_syft_obj_2 = ([ao_1, ao_2, 1], no_html_repr_displayed) + torch_type_obj = ( + [type(torch_ao.syft_action_data), 1.0, UID()], + no_html_repr_displayed, + ) + return [ + homogenous_ao, + non_homogenous_same_repr, + homogenous_user, + empty_list, + non_syft_objs, + non_homogenous_different_repr, + non_syft_obj_1, + non_syft_obj_2, + torch_type_obj, + ] + + +@pytest.mark.parametrize("test_case", table_test_cases()) +def test_list_dict_repr_html(test_case): + obj, validation_func = test_case + + assert validation_func(obj) + assert validation_func(dict(enumerate(obj))) + assert validation_func(set(obj)) + assert validation_func(tuple(obj)) + + +def test_sort_table_rows(): + emails = [ + "x@y.z", + "a@b.c", + "c@d.e", + ] + sorted_order = [1, 2, 0] + users = [User(email=email) for email in emails] + + table_data, _ = prepare_table_data(users) + + # No sorting + table_emails = [row["email"] for row in table_data] + table_indices = [row[TABLE_INDEX_KEY] for row in table_data] + assert table_emails == emails + assert table_indices == 
list(range(len(emails))) + + # Sort by email + User.__table_sort_attr__ = "email" + table_data_sorted, _ = prepare_table_data(users) + table_emails_sorted = [row["email"] for row in table_data_sorted] + table_indices_sorted = [row[TABLE_INDEX_KEY] for row in table_data_sorted] + assert table_emails_sorted == sorted(emails) + assert table_indices_sorted == sorted_order diff --git a/packages/syft/tests/syft/notifications/fixtures.py b/packages/syft/tests/syft/notifications/fixtures.py index dade06c4424..65722d90061 100644 --- a/packages/syft/tests/syft/notifications/fixtures.py +++ b/packages/syft/tests/syft/notifications/fixtures.py @@ -2,14 +2,17 @@ import pytest # syft absolute -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey -from syft.node.worker import Worker +from syft.serde.serializable import serializable +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey +from syft.server.worker import Worker from syft.service.context import AuthedServiceContext +from syft.service.notification.email_templates import EmailTemplate from syft.service.notification.notification_service import NotificationService from syft.service.notification.notification_stash import NotificationStash from syft.service.notification.notifications import CreateNotification from syft.service.notification.notifications import Notification +from syft.service.notifier.notifier_enums import NOTIFIERS from syft.service.user.user import User from syft.store.linked_obj import LinkedObject from syft.types.datetime import DateTime @@ -34,19 +37,30 @@ def notification_service(document_store): @pytest.fixture def authed_context(admin_user: User, worker: Worker) -> AuthedServiceContext: - yield AuthedServiceContext(credentials=test_verify_key, node=worker) + yield AuthedServiceContext(credentials=test_verify_key, server=worker) @pytest.fixture def linked_object(): yield LinkedObject( - node_uid=UID(), + server_uid=UID(), service_type=NotificationService, object_type=Notification, object_uid=UID(), ) +@serializable(canonical_name="NewEmail", version=1) +class NewEmail(EmailTemplate): + @staticmethod + def email_title(notification: "Notification", context) -> str: + return f"Welcome to {context.server.name} server!" 
+ + @staticmethod + def email_body(notification: "Notification", context) -> str: + return "x" + + @pytest.fixture def mock_create_notification(faker) -> CreateNotification: test_signing_key1 = SyftSigningKey.generate() @@ -57,10 +71,12 @@ def mock_create_notification(faker) -> CreateNotification: mock_notification = CreateNotification( subject="mock_created_notification", id=UID(), - node_uid=UID(), + server_uid=UID(), + notifier_types=[NOTIFIERS.EMAIL], from_user_verify_key=test_verify_key1, to_user_verify_key=test_verify_key2, created_at=DateTime.now(), + email_template=NewEmail, ) yield mock_notification @@ -73,7 +89,7 @@ def mock_notification( ) -> Notification: mock_notification = Notification( subject="mock_notification", - node_uid=UID(), + server_uid=UID(), from_user_verify_key=SyftSigningKey.generate().verify_key, to_user_verify_key=SyftSigningKey.generate().verify_key, created_at=DateTime.now(), diff --git a/packages/syft/tests/syft/notifications/notification_service_test.py b/packages/syft/tests/syft/notifications/notification_service_test.py index 868759e348a..38241108473 100644 --- a/packages/syft/tests/syft/notifications/notification_service_test.py +++ b/packages/syft/tests/syft/notifications/notification_service_test.py @@ -1,22 +1,24 @@ +# stdlib +from typing import NoReturn + # third party +import pytest from pytest import MonkeyPatch -from result import Err -from result import Ok # syft absolute -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey from syft.service.context import AuthedServiceContext from syft.service.notification.notification_service import NotificationService from syft.service.notification.notification_stash import NotificationStash from syft.service.notification.notifications import CreateNotification from syft.service.notification.notifications import Notification from syft.service.notification.notifications import NotificationStatus -from syft.service.response import SyftError from syft.service.response import SyftSuccess -from syft.store.document_store import DocumentStore +from syft.store.document_store_errors import StashException from syft.store.linked_obj import LinkedObject from syft.types.datetime import DateTime +from syft.types.result import as_result from syft.types.uid import UID test_verify_key_string = ( @@ -26,6 +28,35 @@ test_verify_key = SyftVerifyKey.from_string(test_verify_key_string) +class MockSMTP: + def __init__(self, smtp_server, smtp_port, timeout): + self.sent_mail = [] + self.smtp_server = smtp_server + self.smtp_port = smtp_port + self.timeout = timeout + + def sendmail(self, from_addr, to_addrs, msg): + self.sent_mail.append((from_addr, to_addrs, msg)) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + pass + + def ehlo(self): + return True + + def has_extn(self, extn): + return True + + def login(self, username, password): + return True + + def starttls(self): + return True + + def add_mock_notification( root_verify_key, notification_stash: NotificationStash, @@ -37,7 +68,7 @@ def add_mock_notification( mock_notification = Notification( subject="mock_notification", - node_uid=UID(), + server_uid=UID(), from_user_verify_key=from_user_verify_key, to_user_verify_key=to_user_verify_key, created_at=DateTime.now(), @@ -61,8 +92,9 @@ def test_send_success( expected_message = mock_create_notification.to(Notification, authed_context) - 
def mock_set(*args, **kwargs) -> Ok: - return Ok(expected_message) + @as_result(StashException) + def mock_set(*args, **kwargs) -> str: + return expected_message monkeypatch.setattr(notification_service.stash, "set", mock_set) response = test_notification_service.send(authed_context, mock_create_notification) @@ -76,24 +108,27 @@ def test_send_error_on_set( authed_context: AuthedServiceContext, mock_create_notification: CreateNotification, ) -> None: - def mock_set(*args, **kwargs) -> Err: - return Err(expected_error) - test_notification_service = notification_service expected_error = "Failed to set notification." + @as_result(StashException) + def mock_set(*args, **kwargs) -> NoReturn: + raise StashException(public_message=expected_error) + monkeypatch.setattr(notification_service.stash, "set", mock_set) - response = test_notification_service.send(authed_context, mock_create_notification) - assert isinstance(response, SyftError) - assert response.message == expected_error + with pytest.raises(StashException) as exc: + test_notification_service.send(authed_context, mock_create_notification) + + assert exc.type is StashException + assert exc.value.public_message == expected_error def test_get_all_success( monkeypatch: MonkeyPatch, notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key @@ -108,19 +143,12 @@ def test_get_all_success( NotificationStatus.UNREAD, ) - def mock_get_all_inbox_for_verify_key(*args, **kwargs) -> Ok: - return Ok([expected_message]) - - monkeypatch.setattr( - notification_service.stash, - "get_all_inbox_for_verify_key", - mock_get_all_inbox_for_verify_key, - ) - response = test_notification_service.get_all(authed_context) assert len(response) == 1 assert isinstance(response[0], Notification) + response[0].syft_client_verify_key = None + response[0].syft_server_location = None assert response[0] == expected_message @@ -131,10 +159,11 @@ def test_get_all_error_on_get_all_inbox( ) -> None: expected_error = "Failed to get all inbox." 
+ @as_result(StashException) def mock_get_all_inbox_for_verify_key( credentials: SyftVerifyKey, verify_key: SyftVerifyKey - ) -> Err: - return Err(expected_error) + ) -> NoReturn: + raise StashException(public_message=expected_error) monkeypatch.setattr( notification_service.stash, @@ -142,18 +171,16 @@ def mock_get_all_inbox_for_verify_key( mock_get_all_inbox_for_verify_key, ) - response = notification_service.get_all(authed_context) + with pytest.raises(StashException) as exc: + notification_service.get_all(authed_context) - assert isinstance(response, SyftError) - assert response.message == expected_error + assert exc.type is StashException + assert exc.value.public_message == expected_error def test_get_sent_success( - root_verify_key, - monkeypatch: MonkeyPatch, - notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key @@ -168,22 +195,16 @@ def test_get_sent_success( NotificationStatus.UNREAD, ) - def mock_get_all_sent_for_verify_key(credentials, verify_key) -> Ok: - return Ok([expected_message]) - - monkeypatch.setattr( - notification_service.stash, - "get_all_sent_for_verify_key", - mock_get_all_sent_for_verify_key, - ) - response = test_notification_service.get_all_sent(authed_context) assert len(response) == 1 assert isinstance(response[0], Notification) + response[0].syft_server_location = None + response[0].syft_client_verify_key = None assert response[0] == expected_message +@pytest.mark.flaky(reruns=3, reruns_delay=1) def test_get_all_error_on_get_all_sent( monkeypatch: MonkeyPatch, notification_service: NotificationService, @@ -191,10 +212,11 @@ def test_get_all_error_on_get_all_sent( ) -> None: expected_error = "Failed to get all sent." 
+ @as_result(StashException) def mock_get_all_sent_for_verify_key( credentials: SyftVerifyKey, verify_key: SyftVerifyKey - ) -> Err: - return Err(expected_error) + ) -> NoReturn: + raise StashException(public_message=expected_error) monkeypatch.setattr( notification_service.stash, @@ -202,10 +224,11 @@ def mock_get_all_sent_for_verify_key( mock_get_all_sent_for_verify_key, ) - response = notification_service.get_all_sent(authed_context) + with pytest.raises(StashException) as exc: + notification_service.get_all_sent(authed_context) - assert isinstance(response, SyftError) - assert response.message == expected_error + assert exc.type is StashException + assert exc.value.public_message == expected_error def test_get_all_for_status_success( @@ -213,7 +236,7 @@ def test_get_all_for_status_success( monkeypatch: MonkeyPatch, notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key @@ -228,8 +251,9 @@ def test_get_all_for_status_success( NotificationStatus.UNREAD, ) - def mock_get_all_by_verify_key_for_status(*args, **kwargs) -> Ok: - return Ok([expected_message]) + @as_result(StashException) + def mock_get_all_by_verify_key_for_status(*args, **kwargs) -> list[Notification]: + return [expected_message] monkeypatch.setattr( notification_service.stash, @@ -239,7 +263,7 @@ def mock_get_all_by_verify_key_for_status(*args, **kwargs) -> Ok: response = test_notification_service.get_all_for_status( authed_context, NotificationStatus.UNREAD - ) + ).unwrap() assert len(response) == 1 assert isinstance(response[0], Notification) @@ -253,12 +277,13 @@ def test_error_on_get_all_for_status( ) -> None: expected_error = "Failed to get all for status." 
+ @as_result(StashException) def mock_get_all_by_verify_key_for_status( credentials: SyftVerifyKey, verify_key: SyftVerifyKey, status: NotificationStatus, - ) -> Err: - return Err(expected_error) + ) -> NoReturn: + raise StashException(public_message=expected_error) monkeypatch.setattr( notification_service.stash, @@ -266,13 +291,14 @@ def mock_get_all_by_verify_key_for_status( mock_get_all_by_verify_key_for_status, ) - response = notification_service.get_all_for_status( - authed_context, - NotificationStatus.UNREAD, - ) + with pytest.raises(StashException) as exc: + notification_service.get_all_for_status( + authed_context, + NotificationStatus.UNREAD, + ).unwrap() - assert isinstance(response, SyftError) - assert response.message == expected_error + assert exc.type is StashException + assert exc.value.public_message == expected_error def test_get_all_read_success( @@ -280,7 +306,7 @@ def test_get_all_read_success( monkeypatch: MonkeyPatch, notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key @@ -295,19 +321,12 @@ def test_get_all_read_success( NotificationStatus.READ, ) - def mock_get_all_by_verify_key_for_status() -> Ok: - return Ok(expected_message) - - monkeypatch.setattr( - notification_service.stash, - "get_all_by_verify_key_for_status", - mock_get_all_by_verify_key_for_status, - ) - response = test_notification_service.get_all_read(authed_context) assert len(response) == 1 assert isinstance(response[0], Notification) + response[0].syft_server_location = None + response[0].syft_client_verify_key = None assert response[0] == expected_message @@ -318,12 +337,13 @@ def test_error_on_get_all_read( ) -> None: expected_error = "Failed to get all for status." 
+ @as_result(StashException) def mock_get_all_by_verify_key_for_status( credentials: SyftVerifyKey, verify_key: SyftVerifyKey, status: NotificationStatus, - ) -> Err: - return Err(expected_error) + ) -> NoReturn: + raise StashException(public_message=expected_error) monkeypatch.setattr( notification_service.stash, @@ -331,10 +351,11 @@ def mock_get_all_by_verify_key_for_status( mock_get_all_by_verify_key_for_status, ) - response = notification_service.get_all_read(authed_context) + with pytest.raises(StashException) as exc: + notification_service.get_all_read(authed_context) - assert isinstance(response, SyftError) - assert response.message == expected_error + assert exc.type is StashException + assert exc.value.public_message == expected_error def test_get_all_unread_success( @@ -342,7 +363,7 @@ def test_get_all_unread_success( monkeypatch: MonkeyPatch, notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key @@ -357,19 +378,11 @@ def test_get_all_unread_success( NotificationStatus.UNREAD, ) - def mock_get_all_by_verify_key_for_status() -> Ok: - return Ok(expected_message) - - monkeypatch.setattr( - notification_service.stash, - "get_all_by_verify_key_for_status", - mock_get_all_by_verify_key_for_status, - ) - response = test_notification_service.get_all_unread(authed_context) - assert len(response) == 1 assert isinstance(response[0], Notification) + response[0].syft_server_location = None + response[0].syft_client_verify_key = None assert response[0] == expected_message @@ -380,12 +393,13 @@ def test_error_on_get_all_unread( ) -> None: expected_error = "Failed to get all for status." 
+ @as_result(StashException) def mock_get_all_by_verify_key_for_status( credentials: SyftVerifyKey, verify_key: SyftVerifyKey, status: NotificationStatus, - ) -> Err: - return Err(expected_error) + ) -> NoReturn: + raise StashException(public_message=expected_error) monkeypatch.setattr( notification_service.stash, @@ -393,10 +407,11 @@ def mock_get_all_by_verify_key_for_status( mock_get_all_by_verify_key_for_status, ) - response = notification_service.get_all_unread(authed_context) + with pytest.raises(StashException) as exc: + notification_service.get_all_unread(authed_context) - assert isinstance(response, SyftError) - assert response.message == expected_error + assert exc.type is StashException + assert exc.value.public_message == expected_error def test_mark_as_read_success( @@ -404,7 +419,7 @@ def test_mark_as_read_success( monkeypatch: MonkeyPatch, notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key @@ -421,8 +436,9 @@ def test_mark_as_read_success( assert expected_message.status == NotificationStatus.UNREAD - def mock_update_notification_status() -> Ok: - return Ok(expected_message) + @as_result(StashException) + def mock_update_notification_status() -> Notification: + return expected_message monkeypatch.setattr( notification_service.stash, @@ -442,7 +458,7 @@ def test_mark_as_read_error_on_update_notification_status( monkeypatch: MonkeyPatch, notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key @@ -457,10 +473,11 @@ def test_mark_as_read_error_on_update_notification_status( ) expected_error = "Failed to update notification status." 
+ @as_result(StashException) def mock_update_notification_status( credentials: SyftVerifyKey, uid: UID, status: NotificationStatus - ) -> Err: - return Err(expected_error) + ) -> NoReturn: + raise StashException(public_message=expected_error) monkeypatch.setattr( notification_service.stash, @@ -468,10 +485,11 @@ def mock_update_notification_status( mock_update_notification_status, ) - response = notification_service.mark_as_read(authed_context, expected_.id) + with pytest.raises(StashException) as exc: + notification_service.mark_as_read(authed_context, expected_.id) - assert isinstance(response, SyftError) - assert response.message == expected_error + assert exc.type is StashException + assert exc.value.public_message == expected_error def test_mark_as_unread_success( @@ -479,7 +497,7 @@ def test_mark_as_unread_success( monkeypatch: MonkeyPatch, notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key @@ -496,8 +514,10 @@ def test_mark_as_unread_success( assert expected_notification.status == NotificationStatus.READ - def mock_update_notification_status() -> Ok: - return Ok(expected_notification) + as_result(StashException) + + def mock_update_notification_status() -> Notification: + return expected_notification monkeypatch.setattr( notification_service.stash, @@ -517,7 +537,7 @@ def test_mark_as_unread_error_on_update_notification_status( monkeypatch: MonkeyPatch, notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key @@ -532,23 +552,23 @@ def test_mark_as_unread_error_on_update_notification_status( ) expected_error = "Failed to update notification status." - def mock_update_notificatiion_status( + @as_result(StashException) + def mock_update_notification_status( credentials: SyftVerifyKey, uid: UID, status: NotificationStatus - ) -> Err: - return Err(expected_error) + ) -> NoReturn: + raise StashException(public_message=expected_error) monkeypatch.setattr( notification_service.stash, "update_notification_status", - mock_update_notificatiion_status, + mock_update_notification_status, ) - response = notification_service.mark_as_unread( - authed_context, expected_notification.id - ) + with pytest.raises(StashException) as exc: + notification_service.mark_as_unread(authed_context, expected_notification.id) - assert isinstance(response, SyftError) - assert response.message == expected_error + assert exc.type is StashException + assert exc.value.public_message == expected_error # TODO: Fix this test - unsure how to return a LinkedObject Notification. 
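The notification service and stash hunks above replace the old result.Ok/Err returns with syft's as_result decorator plus .unwrap() calls and pytest.raises assertions. Below is a minimal, self-contained sketch of that pattern under the assumptions these tests already rely on (a decorated callable returns a result whose .unwrap() either yields the value or re-raises the declared exception); fetch_notification is a hypothetical helper used only for illustration, not part of the Syft API.

# Sketch of the as_result/unwrap error-handling pattern adopted by the tests above.
# Only as_result, StashException and public_message are taken from the diff; the
# fetch_notification helper is illustrative.
import pytest

from syft.store.document_store_errors import StashException
from syft.types.result import as_result


@as_result(StashException)
def fetch_notification(found: bool) -> str:
    # raising the declared exception type is captured by the result wrapper
    if not found:
        raise StashException(public_message="No notification exists for this id")
    return "mock_notification"


def test_fetch_notification_pattern() -> None:
    # success path: unwrap() returns the plain value
    assert fetch_notification(found=True).unwrap() == "mock_notification"

    # failure path: unwrap() re-raises the wrapped StashException
    with pytest.raises(StashException) as exc:
        fetch_notification(found=False).unwrap()
    assert "No notification exists" in exc.value.public_message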
@@ -558,7 +578,7 @@ def test_resolve_object_success( authed_context: AuthedServiceContext, linked_object: LinkedObject, notification_service: NotificationService, - document_store: DocumentStore, + document_store, ) -> None: test_notification_service = NotificationService(document_store) @@ -566,15 +586,16 @@ def mock_get_service(linked_obj: LinkedObject) -> NotificationService: return test_notification_service monkeypatch.setattr( - authed_context.node, + authed_context.server, "get_service", mock_get_service, ) + @as_result(StashException) def mock_resolve_link( context: AuthedServiceContext, linked_obj: LinkedObject - ) -> Ok: - return Ok(None) + ) -> None: + return None monkeypatch.setattr( test_notification_service, @@ -591,7 +612,7 @@ def test_resolve_object_error_on_resolve_link( monkeypatch: MonkeyPatch, authed_context: AuthedServiceContext, linked_object: LinkedObject, - document_store: DocumentStore, + document_store, notification_service: NotificationService, ) -> None: test_notification_service = NotificationService(document_store) @@ -601,15 +622,16 @@ def mock_get_service(linked_obj: LinkedObject) -> NotificationService: return test_notification_service monkeypatch.setattr( - authed_context.node, + authed_context.server, "get_service", mock_get_service, ) + @as_result(StashException) def mock_resolve_link( context: AuthedServiceContext, linked_obj: LinkedObject - ) -> Err: - return Err(expected_error) + ) -> NoReturn: + raise StashException(public_message=expected_error) monkeypatch.setattr( test_notification_service, @@ -617,10 +639,11 @@ def mock_resolve_link( mock_resolve_link, ) - response = test_notification_service.resolve_object(authed_context, linked_object) + with pytest.raises(StashException) as exc: + test_notification_service.resolve_object(authed_context, linked_object) - assert isinstance(response, SyftError) - assert response.message == expected_error + assert exc.type is StashException + assert exc.value.public_message == expected_error def test_clear_success( @@ -628,15 +651,15 @@ def test_clear_success( monkeypatch: MonkeyPatch, notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key test_notification_service = NotificationService(document_store) test_stash = NotificationStash(store=document_store) + success_msg = "All notifications cleared!" - expected_success_message = "All notifications cleared !!" 
- add_mock_notification( + notification = add_mock_notification( authed_context.credentials, test_stash, random_verify_key, @@ -647,8 +670,9 @@ def test_clear_success( assert len(inbox_before_delete) == 1 - def mock_delete_all_for_verify_key(credentials, verify_key) -> Ok: - return Ok(SyftSuccess.notification) + @as_result(StashException) + def mock_delete_all_for_verify_key(credentials, verify_key) -> SyftSuccess: + return SyftSuccess(message=success_msg, value=[notification.id]) monkeypatch.setattr( notification_service.stash, @@ -659,7 +683,7 @@ def mock_delete_all_for_verify_key(credentials, verify_key) -> Ok: response = test_notification_service.clear(authed_context) inbox_after_delete = test_notification_service.get_all(authed_context) - assert response.message == expected_success_message + assert response assert len(inbox_after_delete) == 0 @@ -668,7 +692,7 @@ def test_clear_error_on_delete_all_for_verify_key( monkeypatch: MonkeyPatch, notification_service: NotificationService, authed_context: AuthedServiceContext, - document_store: DocumentStore, + document_store, ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key @@ -687,8 +711,9 @@ def test_clear_error_on_delete_all_for_verify_key( assert len(inbox_before_delete) == 1 - def mock_delete_all_for_verify_key(**kwargs) -> Err: - return Err(expected_error) + @as_result(StashException) + def mock_delete_all_for_verify_key(**kwargs) -> NoReturn: + raise StashException(public_message=expected_error) monkeypatch.setattr( test_notification_service.stash, @@ -696,9 +721,71 @@ def mock_delete_all_for_verify_key(**kwargs) -> Err: mock_delete_all_for_verify_key, ) - response = test_notification_service.clear(authed_context) + with pytest.raises(StashException) as exc: + test_notification_service.clear(authed_context) + inbox_after_delete = test_notification_service.get_all(authed_context) - assert isinstance(response, SyftError) - assert response.message == expected_error + assert exc.type is StashException + assert exc.value.public_message == expected_error assert len(inbox_after_delete) == 1 + + +# a list of all the mock objects created +mock_smtps = [] + + +def test_send_email(worker, monkeypatch, mock_create_notification, authed_context): + # stdlib + import smtplib + + # we use this to have a reference to all the mock objects we create + def create_smtp(*args, **kwargs): + # we sum over all the mocks + global mock_smtps + res = MockSMTP(*args, **kwargs) + mock_smtps.append(res) + return res + + monkeypatch.setattr(smtplib, "SMTP", create_smtp) + root_client = worker.root_client + mock_create_notification.to_user_verify_key = root_client.verify_key + mock_create_notification.from_user_verify_key = root_client.verify_key + + root_client.settings.enable_notifications( + email_sender="someone@example.com", + email_port="2525", + email_server="localhost", + email_username="someuser", + email_password="password", + ) + + def emails_sent(): + global mock_smtps + return sum([len(x.sent_mail) for x in mock_smtps]) + + mock_create_notification.to(Notification, authed_context) + root_client.notifications.send(mock_create_notification) + + assert emails_sent() == 1 + + mock_create_notification.id = UID() + + root_client.settings.disable_notifications() + root_client.notifications.send(mock_create_notification) + assert emails_sent() == 1 + + new_port = "2526" + + root_client.settings.enable_notifications( + email_sender="someone@example.com", + email_port=new_port, + email_server="localhost", + 
email_username="someuser", + email_password="password", + ) + + mock_create_notification.id = UID() + root_client.notifications.send(mock_create_notification) + assert emails_sent() == 2 + assert int(mock_smtps[-1].smtp_port) == int(new_port) diff --git a/packages/syft/tests/syft/notifications/notification_stash_test.py b/packages/syft/tests/syft/notifications/notification_stash_test.py index 7a2e88c1e93..ceca7d3b37b 100644 --- a/packages/syft/tests/syft/notifications/notification_stash_test.py +++ b/packages/syft/tests/syft/notifications/notification_stash_test.py @@ -1,22 +1,22 @@ +# stdlib +from typing import NoReturn + # third party import pytest from pytest import MonkeyPatch -from result import Err # syft absolute -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey -from syft.service.notification.notification_stash import ( - OrderByCreatedAtTimeStampPartitionKey, -) -from syft.service.notification.notification_stash import FromUserVerifyKeyPartitionKey +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey from syft.service.notification.notification_stash import NotificationStash -from syft.service.notification.notification_stash import StatusPartitionKey -from syft.service.notification.notification_stash import ToUserVerifyKeyPartitionKey from syft.service.notification.notifications import Notification from syft.service.notification.notifications import NotificationExpiryStatus from syft.service.notification.notifications import NotificationStatus +from syft.store.db.db import DBManager +from syft.store.document_store_errors import StashException from syft.types.datetime import DateTime +from syft.types.errors import SyftException +from syft.types.result import as_result from syft.types.uid import UID test_signing_key_string = ( @@ -40,7 +40,7 @@ def add_mock_notification( mock_notification = Notification( subject="test_notification", - node_uid=UID(), + server_uid=UID(), from_user_verify_key=from_user_verify_key, to_user_verify_key=to_user_verify_key, created_at=DateTime.now(), @@ -55,86 +55,16 @@ def add_mock_notification( return mock_notification -def test_fromuserverifykey_partitionkey() -> None: - random_verify_key = SyftSigningKey.generate().verify_key - - assert FromUserVerifyKeyPartitionKey.type_ == SyftVerifyKey - assert FromUserVerifyKeyPartitionKey.key == "from_user_verify_key" - - result = FromUserVerifyKeyPartitionKey.with_obj(random_verify_key) - - assert result.type_ == SyftVerifyKey - assert result.key == "from_user_verify_key" - - assert result.value == random_verify_key - - signing_key = SyftSigningKey.from_string(test_signing_key_string) - with pytest.raises(AttributeError): - FromUserVerifyKeyPartitionKey.with_obj(signing_key) - - -def test_touserverifykey_partitionkey() -> None: - random_verify_key = SyftSigningKey.generate().verify_key - - assert ToUserVerifyKeyPartitionKey.type_ == SyftVerifyKey - assert ToUserVerifyKeyPartitionKey.key == "to_user_verify_key" - - result = ToUserVerifyKeyPartitionKey.with_obj(random_verify_key) - - assert result.type_ == SyftVerifyKey - assert result.key == "to_user_verify_key" - assert result.value == random_verify_key - - signing_key = SyftSigningKey.from_string(test_signing_key_string) - with pytest.raises(AttributeError): - ToUserVerifyKeyPartitionKey.with_obj(signing_key) - - -def test_status_partitionkey() -> None: - assert StatusPartitionKey.key == "status" - assert StatusPartitionKey.type_ == NotificationStatus - - result1 = 
StatusPartitionKey.with_obj(NotificationStatus.UNREAD) - result2 = StatusPartitionKey.with_obj(NotificationStatus.READ) - - assert result1.type_ == NotificationStatus - assert result1.key == "status" - assert result1.value == NotificationStatus.UNREAD - assert result2.type_ == NotificationStatus - assert result2.key == "status" - assert result2.value == NotificationStatus.READ - - notification_expiry_status_auto = NotificationExpiryStatus(0) - - with pytest.raises(AttributeError): - StatusPartitionKey.with_obj(notification_expiry_status_auto) - - -def test_orderbycreatedattimestamp_partitionkey() -> None: - random_datetime = DateTime.now() - - assert OrderByCreatedAtTimeStampPartitionKey.key == "created_at" - assert OrderByCreatedAtTimeStampPartitionKey.type_ == DateTime - - result = OrderByCreatedAtTimeStampPartitionKey.with_obj(random_datetime) - - assert result.type_ == DateTime - assert result.key == "created_at" - assert result.value == random_datetime - - -def test_get_all_inbox_for_verify_key(root_verify_key, document_store) -> None: +def test_get_all_inbox_for_verify_key( + root_verify_key, document_store: DBManager +) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key test_stash = NotificationStash(store=document_store) - response = test_stash.get_all_inbox_for_verify_key( + result = test_stash.get_all_inbox_for_verify_key( root_verify_key, random_verify_key - ) - - assert response.is_ok() - - result = response.ok() + ).unwrap() assert len(result) == 0 # list of mock notifications @@ -147,14 +77,11 @@ def test_get_all_inbox_for_verify_key(root_verify_key, document_store) -> None: notification_list.append(mock_notification) # returned list of notifications from stash that's sorted by created_at - response2 = test_stash.get_all_inbox_for_verify_key( + result = test_stash.get_all_inbox_for_verify_key( root_verify_key, random_verify_key - ) + ).unwrap() - assert response2.is_ok() - - result = response2.ok() - assert len(response2.value) == 5 + assert len(result) == 5 for notification in notification_list: # check if all notifications are present in the result @@ -168,7 +95,9 @@ def test_get_all_inbox_for_verify_key(root_verify_key, document_store) -> None: assert result == sorted_notification_list -def test_get_all_sent_for_verify_key(root_verify_key, document_store) -> None: +def test_get_all_sent_for_verify_key( + root_verify_key, document_store: DBManager +) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key test_stash = NotificationStash(store=document_store) @@ -197,15 +126,12 @@ def test_get_all_sent_for_verify_key(root_verify_key, document_store) -> None: test_stash.get_all_sent_for_verify_key(root_verify_key, random_signing_key) -def test_get_all_for_verify_key(root_verify_key, document_store) -> None: +def test_get_all_for_verify_key(root_verify_key, document_store: DBManager) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key - query_key = FromUserVerifyKeyPartitionKey.with_obj(test_verify_key) test_stash = NotificationStash(store=document_store) - response = test_stash.get_all_for_verify_key( - root_verify_key, random_verify_key, query_key - ) + response = test_stash.get_all_for_verify_key(root_verify_key, random_verify_key) assert response.is_ok() @@ -216,11 +142,8 @@ def test_get_all_for_verify_key(root_verify_key, document_store) -> None: root_verify_key, test_stash, test_verify_key, random_verify_key ) - 
query_key2 = FromUserVerifyKeyPartitionKey.with_obj( - mock_notification.from_user_verify_key - ) response_from_verify_key = test_stash.get_all_for_verify_key( - root_verify_key, mock_notification.from_user_verify_key, query_key2 + root_verify_key, mock_notification.from_user_verify_key ) assert response_from_verify_key.is_ok() @@ -230,7 +153,7 @@ def test_get_all_for_verify_key(root_verify_key, document_store) -> None: assert result[0] == mock_notification response_from_verify_key_string = test_stash.get_all_for_verify_key( - root_verify_key, test_verify_key_string, query_key2 + root_verify_key, test_verify_key_string ) assert response_from_verify_key_string.is_ok() @@ -239,33 +162,28 @@ def test_get_all_for_verify_key(root_verify_key, document_store) -> None: assert len(result) == 1 -def test_get_all_by_verify_key_for_status(root_verify_key, document_store) -> None: +def test_get_all_by_verify_key_for_status( + root_verify_key, document_store: DBManager +) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key test_stash = NotificationStash(store=document_store) - response = test_stash.get_all_by_verify_key_for_status( + result = test_stash.get_all_by_verify_key_for_status( root_verify_key, random_verify_key, NotificationStatus.READ - ) - - assert response.is_ok() - - result = response.ok() + ).unwrap() assert len(result) == 0 mock_notification = add_mock_notification( root_verify_key, test_stash, test_verify_key, random_verify_key ) - response2 = test_stash.get_all_by_verify_key_for_status( + result2 = test_stash.get_all_by_verify_key_for_status( root_verify_key, mock_notification.to_user_verify_key, NotificationStatus.UNREAD - ) - assert response2.is_ok() - - result = response2.ok() - assert len(result) == 1 + ).unwrap() + assert len(result2) == 1 - assert result[0] == mock_notification + assert result2[0] == mock_notification with pytest.raises(AttributeError): test_stash.get_all_by_verify_key_for_status( @@ -273,18 +191,18 @@ def test_get_all_by_verify_key_for_status(root_verify_key, document_store) -> No ) -def test_update_notification_status(root_verify_key, document_store) -> None: +def test_update_notification_status(root_verify_key, document_store: DBManager) -> None: random_uid = UID() random_verify_key = SyftSigningKey.generate().verify_key test_stash = NotificationStash(store=document_store) - expected_error = Err(f"No notification exists for id: {random_uid}") - response = test_stash.update_notification_status( - root_verify_key, uid=random_uid, status=NotificationStatus.READ - ) + with pytest.raises(SyftException) as exc: + test_stash.update_notification_status( + root_verify_key, uid=random_uid, status=NotificationStatus.READ + ).unwrap() - assert response.is_err() - assert response == expected_error + assert issubclass(exc.type, SyftException) + assert exc.value.public_message mock_notification = add_mock_notification( root_verify_key, test_stash, test_verify_key, random_verify_key @@ -302,12 +220,15 @@ def test_update_notification_status(root_verify_key, document_store) -> None: assert result.status == NotificationStatus.READ notification_expiry_status_auto = NotificationExpiryStatus(0) - with pytest.raises(AttributeError): - test_stash.pdate_notification_status( + with pytest.raises(SyftException) as exc: + test_stash.update_notification_status( root_verify_key, uid=mock_notification.id, status=notification_expiry_status_auto, - ) + ).unwrap() + + assert issubclass(exc.type, SyftException) + assert 
exc.value.public_message def test_update_notification_status_error_on_get_by_uid( @@ -315,39 +236,39 @@ def test_update_notification_status_error_on_get_by_uid( ) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key - random_uid = UID() test_stash = NotificationStash(store=document_store) + expected_error_msg = f"No notification exists for id: {random_verify_key}" - def mock_get_by_uid(root_verify_key, uid: random_uid) -> Err: - return Err(None) + add_mock_notification( + root_verify_key, test_stash, test_verify_key, random_verify_key + ) + + @as_result(StashException) + def mock_get_by_uid(root_verify_key: SyftVerifyKey, uid: UID) -> NoReturn: + raise StashException(public_message=f"No notification exists for id: {uid}") monkeypatch.setattr( test_stash, "get_by_uid", mock_get_by_uid, ) + with pytest.raises(StashException) as exc: + test_stash.update_notification_status( + root_verify_key, random_verify_key, NotificationStatus.READ + ).unwrap() - add_mock_notification( - root_verify_key, test_stash, test_verify_key, random_verify_key - ) + assert exc.type is StashException + assert exc.value.public_message == expected_error_msg - response = test_stash.update_notification_status( - root_verify_key, random_verify_key, NotificationStatus.READ - ) - - assert response is None - -def test_delete_all_for_verify_key(root_verify_key, document_store) -> None: +def test_delete_all_for_verify_key(root_verify_key, document_store: DBManager) -> None: random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key test_stash = NotificationStash(store=document_store) - response = test_stash.delete_all_for_verify_key(root_verify_key, test_verify_key) - - assert response.is_ok() - - result = response.ok() + result = test_stash.delete_all_for_verify_key( + root_verify_key, test_verify_key + ).unwrap() assert result is True add_mock_notification( @@ -356,23 +277,23 @@ def test_delete_all_for_verify_key(root_verify_key, document_store) -> None: inbox_before = test_stash.get_all_inbox_for_verify_key( root_verify_key, random_verify_key - ).value + ).unwrap() assert len(inbox_before) == 1 - response2 = test_stash.delete_all_for_verify_key(root_verify_key, random_verify_key) - - assert response2.is_ok() - - result = response2.ok() - assert result is True + result2 = test_stash.delete_all_for_verify_key( + root_verify_key, random_verify_key + ).unwrap() + assert result2 is True inbox_after = test_stash.get_all_inbox_for_verify_key( root_verify_key, random_verify_key - ).value + ).unwrap() assert len(inbox_after) == 0 with pytest.raises(AttributeError): - test_stash.delete_all_for_verify_key(root_verify_key, random_signing_key) + test_stash.delete_all_for_verify_key( + root_verify_key, random_signing_key + ).unwrap() def test_delete_all_for_verify_key_error_on_get_all_inbox_for_verify_key( @@ -381,11 +302,11 @@ def test_delete_all_for_verify_key_error_on_get_all_inbox_for_verify_key( random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key test_stash = NotificationStash(store=document_store) + error_msg = "Database failure" - def mock_get_all_inbox_for_verify_key( - root_verify_key, verify_key: random_verify_key - ) -> Err: - return Err(None) + @as_result(StashException) + def mock_get_all_inbox_for_verify_key(root_verify_key, verify_key) -> NoReturn: + raise StashException(public_message=error_msg) monkeypatch.setattr( test_stash, @@ -393,9 +314,13 @@ def mock_get_all_inbox_for_verify_key( 
mock_get_all_inbox_for_verify_key, ) - response = test_stash.delete_all_for_verify_key(root_verify_key, random_verify_key) + with pytest.raises(StashException) as exc: + test_stash.delete_all_for_verify_key( + root_verify_key, random_verify_key + ).unwrap() - assert response == Err(None) + assert exc.type is StashException + assert exc.value.public_message == error_msg def test_delete_all_for_verify_key_error_on_delete_by_uid( @@ -404,12 +329,11 @@ def test_delete_all_for_verify_key_error_on_delete_by_uid( random_signing_key = SyftSigningKey.generate() random_verify_key = random_signing_key.verify_key test_stash = NotificationStash(store=document_store) - mock_notification = add_mock_notification( - root_verify_key, test_stash, test_verify_key, random_verify_key - ) + error_msg = "Failed to delete notification" - def mock_delete_by_uid(root_verify_key, uid=mock_notification.id) -> Err: - return Err(None) + @as_result(StashException) + def mock_delete_by_uid(root_verify_key, uid: UID) -> NoReturn: + raise StashException(public_message=error_msg) monkeypatch.setattr( test_stash, @@ -417,8 +341,14 @@ def mock_delete_by_uid(root_verify_key, uid=mock_notification.id) -> Err: mock_delete_by_uid, ) - response = test_stash.delete_all_for_verify_key( - root_verify_key, random_verify_key - ).value + add_mock_notification( + root_verify_key, test_stash, test_verify_key, random_verify_key + ) + + with pytest.raises(StashException) as exc: + test_stash.delete_all_for_verify_key( + root_verify_key, random_verify_key + ).unwrap() - assert response is None + assert exc.type is StashException + assert exc.value.public_message == error_msg diff --git a/packages/syft/tests/syft/policy/mixed_policy_test.py b/packages/syft/tests/syft/policy/mixed_policy_test.py new file mode 100644 index 00000000000..fedb6bed14a --- /dev/null +++ b/packages/syft/tests/syft/policy/mixed_policy_test.py @@ -0,0 +1,71 @@ +# stdlib + +# third party + +# syft absolute +import syft as sy +from syft.service.action.action_endpoint import CustomEndpointActionObject +from syft.service.action.action_object import ActionObject +from syft.service.policy.policy import CreatePolicyRuleConstant +from syft.service.response import SyftSuccess + + +@sy.api_endpoint_method() +def private_query_function(context, query_str: str) -> str: + return query_str + + +@sy.api_endpoint_method() +def mock_query_function(context, query_str: str) -> str: + return query_str + + +def test_constant(worker) -> None: + root_client = worker.root_client + new_endpoint = sy.TwinAPIEndpoint( + path="test.test_query", + description="Test", + private_function=private_query_function, + mock_function=mock_query_function, + ) + + res = root_client.api.services.api.add(endpoint=new_endpoint) + + assert isinstance(res, SyftSuccess) + + create_constant = CreatePolicyRuleConstant(val=2) + constant = create_constant.to_policy_rule("test") + + assert constant.val == 2 + assert constant.klass == int + + create_constant = CreatePolicyRuleConstant( + val=root_client.api.services.test.test_query + ) + constant = create_constant.to_policy_rule("test_2") + + assert constant.val == root_client.api.services.api[0].action_object_id + assert constant.klass == CustomEndpointActionObject + + +def test_mixed_policy(worker, ds_client) -> None: + root_client = worker.root_client + + ao = ActionObject.from_obj(2) + ao = ao.send(ds_client) + + @sy.syft_function( + input_policy=sy.MixedInputPolicy( + arg_1=sy.Constant(val=1), + arg_2=ao.id, + arg_3=int, + client=ds_client, + ) + ) + def test(arg_1: 
int, arg_2: int, arg_3: int): + return arg_1 + arg_2 + arg_3 + + ds_client.code.request_code_execution(test) + root_client.requests[0].approve() + + ds_client.code.test(arg_2=ao, arg_3=2) diff --git a/packages/syft/tests/syft/project/project_test.py b/packages/syft/tests/syft/project/project_test.py index 9b2c8ce92f3..ba531ff2aac 100644 --- a/packages/syft/tests/syft/project/project_test.py +++ b/packages/syft/tests/syft/project/project_test.py @@ -17,13 +17,13 @@ def test_project_creation(worker): password_verify="bazinga", ) - ds_client = sy.login(node=worker, email="sheldon@caltech.edu", password="bazinga") + ds_client = sy.login(server=worker, email="sheldon@caltech.edu", password="bazinga") new_project = sy.Project( name="My Cool Project", description="My Cool Description", members=[ds_client] ) - project = new_project.start() + project = new_project.send() assert isinstance(project, Project) assert new_project.id == project.id @@ -33,7 +33,7 @@ def test_project_creation(worker): assert project.description == "My Cool Description" -def test_error_data_owner_project_creation(worker): +def test_data_owner_project_creation(worker): root_client = worker.root_client root_client.register( @@ -47,10 +47,8 @@ def test_error_data_owner_project_creation(worker): name="My Cool Project", description="My Cool Description", members=[root_client] ) - project = new_project.start() - - assert isinstance(project, sy.SyftError) - assert project.message == "Only Data Scientists can create projects" + project = new_project.send() + assert project.name == "My Cool Project" def test_exception_different_email(worker): @@ -70,9 +68,13 @@ def test_exception_different_email(worker): password_verify="penny", ) - ds_sheldon = sy.login(node=worker, email="sheldon@caltech.edu", password="bazinga") + ds_sheldon = sy.login( + server=worker, email="sheldon@caltech.edu", password="bazinga" + ) - ds_leonard = sy.login(node=worker, email="leonard@princeton.edu", password="penny") + ds_leonard = sy.login( + server=worker, email="leonard@princeton.edu", password="penny" + ) with pytest.raises(ValidationError): sy.Project( @@ -92,11 +94,15 @@ def test_project_serde(worker): password_verify="bazinga", ) + ds_sheldon = sy.login( + server=worker, email="sheldon@caltech.edu", password="bazinga" + ) + new_project = sy.Project( - name="My Cool Project", description="My Cool Description", members=[root_client] + name="My Cool Project", description="My Cool Description", members=[ds_sheldon] ) - project = new_project.start() + project = new_project.send() ser_data = sy.serialize(project, to_bytes=True) assert isinstance(ser_data, bytes) diff --git a/packages/syft/tests/syft/request/fixtures.py b/packages/syft/tests/syft/request/fixtures.py index c82cb59f4b4..6294ff7eae7 100644 --- a/packages/syft/tests/syft/request/fixtures.py +++ b/packages/syft/tests/syft/request/fixtures.py @@ -4,21 +4,26 @@ # syft absolute from syft.client.client import SyftClient -from syft.node.credentials import SyftVerifyKey -from syft.node.worker import Worker +from syft.server.credentials import SyftVerifyKey +from syft.server.worker import Worker from syft.service.context import AuthedServiceContext +from syft.service.request.request_service import RequestService from syft.service.request.request_stash import RequestStash -from syft.store.document_store import DocumentStore @pytest.fixture -def request_stash(document_store: DocumentStore) -> RequestStash: +def request_stash(document_store) -> RequestStash: yield RequestStash(store=document_store) 
@pytest.fixture -def authed_context_guest_domain_client( - guest_domain_client: SyftClient, worker: Worker +def authed_context_guest_datasite_client( + guest_datasite_client: SyftClient, worker: Worker ) -> AuthedServiceContext: - verify_key: SyftVerifyKey = guest_domain_client.credentials.verify_key - yield AuthedServiceContext(credentials=verify_key, node=worker) + verify_key: SyftVerifyKey = guest_datasite_client.credentials.verify_key + yield AuthedServiceContext(credentials=verify_key, server=worker) + + +@pytest.fixture +def request_service(document_store): + yield RequestService(store=document_store) diff --git a/packages/syft/tests/syft/request/request_code_accept_deny_test.py b/packages/syft/tests/syft/request/request_code_accept_deny_test.py index b79675e03f2..ba70cd0461b 100644 --- a/packages/syft/tests/syft/request/request_code_accept_deny_test.py +++ b/packages/syft/tests/syft/request/request_code_accept_deny_test.py @@ -1,14 +1,10 @@ -# stdlib -from textwrap import dedent - # third party -from faker import Faker import pytest # syft absolute import syft from syft.client.client import SyftClient -from syft.node.worker import Worker +from syft.server.worker import Worker from syft.service.action.action_object import ActionObject from syft.service.action.action_permissions import ActionPermission from syft.service.code.user_code import UserCode @@ -18,37 +14,16 @@ from syft.service.request.request import ObjectMutation from syft.service.request.request import RequestStatus from syft.service.request.request import UserCodeStatusChange -from syft.service.request.request_service import RequestService -from syft.service.response import SyftError from syft.service.response import SyftSuccess from syft.service.settings.settings_service import SettingsService -from syft.store.document_store import DocumentStore from syft.store.linked_obj import LinkedObject - - -@pytest.fixture -def request_service(document_store: DocumentStore): - yield RequestService(store=document_store) - - -def get_ds_client(faker: Faker, root_client: SyftClient, guest_client: SyftClient): - guest_email = faker.email() - password = "mysecretpassword" - result = root_client.register( - name=faker.name(), - email=guest_email, - password=password, - password_verify=password, - ) - assert isinstance(result, SyftSuccess) - ds_client = guest_client.login(email=guest_email, password=password) - return ds_client +from syft.types.errors import SyftException def test_object_mutation(worker: Worker): root_client = worker.root_client setting = root_client.api.services.settings.get() - linked_obj = LinkedObject.from_obj(setting, SettingsService, node_uid=worker.id) + linked_obj = LinkedObject.from_obj(setting, SettingsService, server_uid=worker.id) original_name = setting.organization new_name = "Test Organization" @@ -60,7 +35,7 @@ def test_object_mutation(worker: Worker): ) change_context = ChangeContext( - node=worker, + server=worker, approving_user_credentials=root_client.credentials.verify_key, ) @@ -79,18 +54,16 @@ def test_object_mutation(worker: Worker): assert setting.organization == original_name -def test_action_store_change(faker: Faker, worker: Worker): +def test_action_store_change(worker: Worker, ds_client: SyftClient): root_client = worker.root_client dummy_data = [1, 2, 3] data = ActionObject.from_obj(dummy_data) - action_obj = root_client.api.services.action.set(data) + action_obj = data.send(root_client) assert action_obj.get() == dummy_data - ds_client = get_ds_client(faker, root_client, 
worker.guest_client) - action_object_link = LinkedObject.from_obj( - action_obj, node_uid=action_obj.syft_node_uid + action_obj, server_uid=action_obj.syft_server_uid ) permission_change = ActionStoreChange( linked_obj=action_object_link, @@ -98,7 +71,7 @@ def test_action_store_change(faker: Faker, worker: Worker): ) change_context = ChangeContext( - node=worker, + server=worker, approving_user_credentials=root_client.credentials.verify_key, requesting_user_credentials=ds_client.credentials.verify_key, ) @@ -115,17 +88,18 @@ def test_action_store_change(faker: Faker, worker: Worker): result = permission_change.undo(change_context) assert result.is_ok() - result = action_obj_ptr.get() - assert isinstance(result, SyftError) + with pytest.raises(SyftException) as exc: + action_obj_ptr.get() + assert exc.type is SyftException + assert "Permission", "denied" in exc.value.public_message -def test_user_code_status_change(faker: Faker, worker: Worker): + +def test_user_code_status_change(worker: Worker, ds_client: SyftClient): root_client = worker.root_client dummy_data = [1, 2, 3] data = ActionObject.from_obj(dummy_data) - action_obj = root_client.api.services.action.set(data) - - ds_client = get_ds_client(faker, root_client, worker.guest_client) + action_obj = data.send(root_client) @syft.syft_function( input_policy=syft.ExactMatch(data=action_obj), @@ -134,13 +108,12 @@ def test_user_code_status_change(faker: Faker, worker: Worker): def simple_function(data): return sum(data) - simple_function.code = dedent(simple_function.code) result = ds_client.code.submit(simple_function) assert isinstance(result, SyftSuccess) user_code: UserCode = ds_client.code.get_all()[0] - linked_user_code = LinkedObject.from_obj(user_code, node_uid=worker.id) + linked_user_code = LinkedObject.from_obj(user_code, server_uid=worker.id) user_code_change = UserCodeStatusChange( value=UserCodeStatus.APPROVED, @@ -149,7 +122,7 @@ def simple_function(data): ) change_context = ChangeContext( - node=worker, + server=worker, approving_user_credentials=root_client.credentials.verify_key, requesting_user_credentials=ds_client.credentials.verify_key, ) @@ -168,13 +141,11 @@ def simple_function(data): assert not user_code.status.approved -def test_code_accept_deny(faker: Faker, worker: Worker): +def test_code_accept_deny(worker: Worker, ds_client: SyftClient): root_client = worker.root_client dummy_data = [1, 2, 3] data = ActionObject.from_obj(dummy_data) - action_obj = root_client.api.services.action.set(data) - - ds_client = get_ds_client(faker, root_client, worker.guest_client) + action_obj = data.send(root_client) @syft.syft_function( input_policy=syft.ExactMatch(data=action_obj), @@ -183,21 +154,19 @@ def test_code_accept_deny(faker: Faker, worker: Worker): def simple_function(data): return sum(data) - simple_function.code = dedent(simple_function.code) - result = ds_client.code.request_code_execution(simple_function) - assert not isinstance(result, SyftError) - request = root_client.requests.get_all()[0] - result = request.accept_by_depositing_result(result=10) + result = request.approve() assert isinstance(result, SyftSuccess) request = root_client.requests.get_all()[0] assert request.status == RequestStatus.APPROVED + result = ds_client.code.simple_function(data=action_obj) - assert result.get() == 10 + assert result.get() == sum(dummy_data) - result = request.deny(reason="Function output needs differential privacy !!") + deny_reason = "Function output needs differential privacy!!" 
+ result = request.deny(reason=deny_reason) assert isinstance(result, SyftSuccess) request = root_client.requests.get_all()[0] @@ -206,6 +175,8 @@ def simple_function(data): user_code = ds_client.code.get_all()[0] assert not user_code.status.approved - result = ds_client.code.simple_function(data=action_obj) - assert isinstance(result, SyftError) - assert "Execution denied" in result.message + with pytest.raises(SyftException) as exc: + ds_client.code.simple_function(data=action_obj) + + assert exc.type is SyftException + assert deny_reason in exc.value.public_message diff --git a/packages/syft/tests/syft/request/request_code_permissions_test.py b/packages/syft/tests/syft/request/request_code_permissions_test.py new file mode 100644 index 00000000000..0a07cc393f9 --- /dev/null +++ b/packages/syft/tests/syft/request/request_code_permissions_test.py @@ -0,0 +1,69 @@ +# syft absolute +import syft +from syft.client.client import SyftClient +from syft.server.worker import Worker +from syft.service.action.action_object import ActionObject +from syft.service.code.user_code import UserCode +from syft.service.response import SyftSuccess + + +def test_code_request_submitted_by_admin_only_admin_can_view( + worker: Worker, ds_client: SyftClient +): + root_client = worker.root_client + dummy_data = [1, 2, 3] + data = ActionObject.from_obj(dummy_data) + action_obj = data.send(root_client) + + @syft.syft_function( + input_policy=syft.ExactMatch(data=action_obj), + output_policy=syft.SingleExecutionExactOutput(), + ) + def simple_function(data): + return sum(data) + + project = syft.Project(name="test", members=[root_client]) + + result = project.create_code_request(simple_function, root_client) + assert isinstance(result, SyftSuccess) + + # only root should be able to see request and access code + ds_request_all = ds_client.requests.get_all() + assert len(ds_request_all) == 0 + + root_request_all = root_client.requests.get_all() + assert len(root_request_all) == 1 + root_code_access = root_request_all[0].code + assert isinstance(root_code_access, UserCode) + + +def test_code_request_submitted_by_ds_root_and_ds_can_view( + worker: Worker, ds_client: SyftClient +): + root_client = worker.root_client + dummy_data = [1, 2, 3] + data = ActionObject.from_obj(dummy_data) + action_obj = data.send(root_client) + + @syft.syft_function( + input_policy=syft.ExactMatch(data=action_obj), + output_policy=syft.SingleExecutionExactOutput(), + ) + def simple_function(data): + return sum(data) + + project = syft.Project(name="test", members=[ds_client]) + + result = project.create_code_request(simple_function, ds_client) + assert isinstance(result, SyftSuccess) + + # both root and ds should be able to see request and access code + ds_request_all = ds_client.requests.get_all() + assert len(ds_request_all) == 1 + ds_code_access = ds_request_all[0].code + assert isinstance(ds_code_access, UserCode) + + root_request_all = root_client.requests.get_all() + assert len(root_request_all) == 1 + root_code_access = root_request_all[0].code + assert isinstance(root_code_access, UserCode) diff --git a/packages/syft/tests/syft/request/request_stash_test.py b/packages/syft/tests/syft/request/request_stash_test.py index c3172083d43..a9115d5c934 100644 --- a/packages/syft/tests/syft/request/request_stash_test.py +++ b/packages/syft/tests/syft/request/request_stash_test.py @@ -1,28 +1,22 @@ # third party -import pytest -from pytest import MonkeyPatch -from result import Err # syft absolute from syft.client.client import SyftClient -from 
syft.node.credentials import SyftVerifyKey +from syft.server.credentials import SyftVerifyKey from syft.service.context import AuthedServiceContext from syft.service.request.request import Request from syft.service.request.request import SubmitRequest from syft.service.request.request_stash import RequestStash -from syft.service.request.request_stash import RequestingUserVerifyKeyPartitionKey -from syft.store.document_store import PartitionKey -from syft.store.document_store import QueryKeys def test_requeststash_get_all_for_verify_key_no_requests( root_verify_key, request_stash: RequestStash, - guest_domain_client: SyftClient, + guest_datasite_client: SyftClient, ) -> None: # test when there are no requests from a client - verify_key: SyftVerifyKey = guest_domain_client.credentials.verify_key + verify_key: SyftVerifyKey = guest_datasite_client.credentials.verify_key requests = request_stash.get_all_for_verify_key( root_verify_key, verify_key=verify_key ) @@ -30,21 +24,20 @@ def test_requeststash_get_all_for_verify_key_no_requests( assert len(requests.ok()) == 0 -@pytest.mark.xfail def test_requeststash_get_all_for_verify_key_success( root_verify_key, request_stash: RequestStash, - guest_domain_client: SyftClient, - authed_context_guest_domain_client: AuthedServiceContext, + guest_datasite_client: SyftClient, + authed_context_guest_datasite_client: AuthedServiceContext, ) -> None: # test when there is one request submit_request: SubmitRequest = SubmitRequest(changes=[]) stash_set_result = request_stash.set( root_verify_key, - submit_request.to(Request, context=authed_context_guest_domain_client), + submit_request.to(Request, context=authed_context_guest_datasite_client), ) - verify_key: SyftVerifyKey = guest_domain_client.credentials.verify_key + verify_key: SyftVerifyKey = guest_datasite_client.credentials.verify_key requests = request_stash.get_all_for_verify_key( credentials=root_verify_key, verify_key=verify_key, @@ -58,7 +51,7 @@ def test_requeststash_get_all_for_verify_key_success( submit_request_2: SubmitRequest = SubmitRequest(changes=[]) stash_set_result_2 = request_stash.set( root_verify_key, - submit_request_2.to(Request, context=authed_context_guest_domain_client), + submit_request_2.to(Request, context=authed_context_guest_datasite_client), ) requests = request_stash.get_all_for_verify_key( @@ -74,57 +67,3 @@ def test_requeststash_get_all_for_verify_key_success( requests.ok()[1] == stash_set_result_2.ok() or requests.ok()[0] == stash_set_result_2.ok() ) - - -def test_requeststash_get_all_for_verify_key_fail( - root_verify_key, - request_stash: RequestStash, - monkeypatch: MonkeyPatch, - guest_domain_client: SyftClient, -) -> None: - verify_key: SyftVerifyKey = guest_domain_client.credentials.verify_key - mock_error_message = ( - "verify key not in the document store's unique or searchable keys" - ) - - def mock_query_all_error( - credentials: SyftVerifyKey, qks: QueryKeys, order_by: PartitionKey | None - ) -> Err: - return Err(mock_error_message) - - monkeypatch.setattr(request_stash, "query_all", mock_query_all_error) - - requests = request_stash.get_all_for_verify_key(root_verify_key, verify_key) - - assert requests.is_err() is True - assert requests.err() == mock_error_message - - -def test_requeststash_get_all_for_verify_key_find_index_fail( - root_verify_key, - request_stash: RequestStash, - monkeypatch: MonkeyPatch, - guest_domain_client: SyftClient, -) -> None: - verify_key: SyftVerifyKey = guest_domain_client.credentials.verify_key - qks = 
QueryKeys(qks=[RequestingUserVerifyKeyPartitionKey.with_obj(verify_key)]) - - mock_error_message = f"Failed to query index or search with {qks.all[0]}" - - def mock_find_index_or_search_keys_error( - credentials: SyftVerifyKey, - index_qks: QueryKeys, - search_qks: QueryKeys, - order_by: PartitionKey | None, - ) -> Err: - return Err(mock_error_message) - - monkeypatch.setattr( - request_stash.partition, - "find_index_or_search_keys", - mock_find_index_or_search_keys_error, - ) - - requests = request_stash.get_all_for_verify_key(root_verify_key, verify_key) - assert requests.is_err() is True - assert requests.err() == mock_error_message diff --git a/packages/syft/tests/syft/serde/numpy_functions_test.py b/packages/syft/tests/syft/serde/numpy_functions_test.py index 7def84d128c..14a858f101e 100644 --- a/packages/syft/tests/syft/serde/numpy_functions_test.py +++ b/packages/syft/tests/syft/serde/numpy_functions_test.py @@ -4,7 +4,6 @@ # syft absolute from syft import ActionObject -from syft.service.response import SyftAttributeError # relative from ...utils.custom_markers import FAIL_ON_PYTHON_3_12_REASON @@ -92,7 +91,7 @@ def test_numpy_functions(func, func_arguments, request): except Exception as e: assert ( - e == SyftAttributeError + e == AttributeError ), f"Can not evaluate {func}({func_arguments}) with {e}" print(e) else: diff --git a/packages/syft/tests/syft/serializable_test.py b/packages/syft/tests/syft/serializable_test.py index 6f84f7afde1..154589c2190 100644 --- a/packages/syft/tests/syft/serializable_test.py +++ b/packages/syft/tests/syft/serializable_test.py @@ -21,7 +21,11 @@ class AbstractBase: uid: str -@serializable(attrs=["uid", "value"]) +@serializable( + attrs=["uid", "value"], + canonical_name="Base", + version=1, +) class Base(AbstractBase): """Serialize: uid, value""" @@ -32,7 +36,11 @@ def __init__(self, uid: str, value: int): self.value = value -@serializable(attrs=["status"]) +@serializable( + attrs=["status"], + canonical_name="Derived", + version=1, +) class Derived(Base): """Serialize: uid, value, status""" @@ -43,7 +51,12 @@ def __init__(self, uid: str, value: int, status: int) -> None: self.status = status -@serializable(attrs=["status"], without=["uid"]) +@serializable( + attrs=["status"], + without=["uid"], + canonical_name="DerivedWithoutAttrs", + version=1, +) class DerivedWithoutAttrs(Base): """Serialize: value, status""" @@ -54,7 +67,12 @@ def __init__(self, uid: str, value: int, status: int) -> None: self.status = status -@serializable(attrs=["status"], inherit=False) +@serializable( + attrs=["status"], + inherit=False, + canonical_name="DerivedNoInherit", + version=1, +) class DerivedNoInherit(Base): """Serialize: status""" @@ -65,7 +83,12 @@ def __init__(self, uid: str, value: int, status: int) -> None: self.status = status -@serializable(attrs=["uid", "value"], inheritable=False) +@serializable( + attrs=["uid", "value"], + inheritable=False, + canonical_name="BaseAttrsNonInheritable", + version=1, +) class BaseAttrsNonInheritable(AbstractBase): """Serialize: uid, value (Derived cannot inherit base attrs)""" @@ -76,7 +99,11 @@ def __init__(self, uid: str = None, value: int = None): self.value = value -@serializable(attrs=["status"]) +@serializable( + attrs=["status"], + canonical_name="DerivedWithoutBaseAttrs", + version=1, +) class DerivedWithoutBaseAttrs(BaseAttrsNonInheritable): """Serialize: status (Dervied cannot inherit base attrs)""" @@ -168,7 +195,10 @@ def test_derived_without_base_attrs(): # ------------------------------ Pydantic classes 
------------------------------ -@serializable() +@serializable( + canonical_name="PydBase", + version=1, +) class PydBase(BaseModel): """Serialize: uid, value, flag""" @@ -177,7 +207,10 @@ class PydBase(BaseModel): flag: bool | None = None -@serializable() +@serializable( + canonical_name="PydDerived", + version=1, +) class PydDerived(PydBase): """Serialize: uid, value, flag, source, target""" @@ -185,7 +218,11 @@ class PydDerived(PydBase): target: str -@serializable(without=["uid"]) +@serializable( + without=["uid"], + canonical_name="PydDerivedWithoutAttr", + version=1, +) class PydDerivedWithoutAttr(PydBase): """ Serialize: value, flag, source, target @@ -196,7 +233,11 @@ class PydDerivedWithoutAttr(PydBase): target: str -@serializable(without=["uid", "flag", "config"]) +@serializable( + without=["uid", "flag", "config"], + canonical_name="PydDerivedWithoutAttrs", + version=1, +) class PydDerivedWithoutAttrs(PydBase): """ Serialize: value, source, target @@ -208,7 +249,11 @@ class PydDerivedWithoutAttrs(PydBase): config: dict | None = None -@serializable(attrs=["source", "target"]) +@serializable( + attrs=["source", "target"], + canonical_name="PydDerivedOnly", + version=1, +) class PydDerivedOnly(PydBase): """ Serialize: source, target diff --git a/packages/syft/tests/syft/server_url_test.py b/packages/syft/tests/syft/server_url_test.py new file mode 100644 index 00000000000..c23670f9f12 --- /dev/null +++ b/packages/syft/tests/syft/server_url_test.py @@ -0,0 +1,27 @@ +# third party +import pytest + +# syft absolute +from syft.types.server_url import ServerURL + +test_suite = [ + ("http://0.0.0.0", 8081, "http://0.0.0.0:8081"), + ("http://0.0.0.0", None, "http://0.0.0.0:80"), + (None, None, "http://localhost:80"), + ("http://0.0.0.0:8081", 8082, "http://0.0.0.0:8081"), + ("0.0.0.0:8081", None, "http://0.0.0.0:8081"), + ("example.com", None, "http://example.com:80"), + ("https://example.com", None, "https://example.com:80"), +] + + +@pytest.mark.parametrize("url, port, ground_truth", test_suite) +def test_server_url(url, port, ground_truth) -> None: + if not url and not port: + assert ServerURL().base_url == ground_truth + elif not url: + assert ServerURL(port=port).base_url == ground_truth + elif not port: + assert ServerURL(host_or_ip=url).base_url == ground_truth + else: + assert ServerURL(host_or_ip=url, port=port).base_url == ground_truth diff --git a/packages/syft/tests/syft/service/action/action_object_test.py b/packages/syft/tests/syft/service/action/action_object_test.py index fa8efab4eaf..a57a61e1509 100644 --- a/packages/syft/tests/syft/service/action/action_object_test.py +++ b/packages/syft/tests/syft/service/action/action_object_test.py @@ -20,9 +20,12 @@ from syft.service.action.action_object import HOOK_ON_POINTERS from syft.service.action.action_object import PreHookContext from syft.service.action.action_object import make_action_side_effect -from syft.service.action.action_object import propagate_node_uid +from syft.service.action.action_object import propagate_server_uid from syft.service.action.action_object import send_action_side_effect from syft.service.action.action_types import action_type_for_type +from syft.service.response import SyftSuccess +from syft.store.blob_storage import SyftObjectRetrieval +from syft.types.errors import SyftException from syft.types.uid import LineageID from syft.types.uid import UID @@ -32,9 +35,9 @@ def helper_make_action_obj(orig_obj: Any): def helper_make_action_pointers(worker, obj, *args, **kwargs): - root_domain_client = 
worker.root_client - root_domain_client.api.services.action.set(obj) - obj_pointer = root_domain_client.api.services.action.get_pointer(obj.id) + root_datasite_client = worker.root_client + res = obj.send(root_datasite_client) + obj_pointer = root_datasite_client.api.services.action.get_pointer(res.id) # The args and kwargs should automatically be pointerized by obj_pointer return obj_pointer, args, kwargs @@ -160,10 +163,27 @@ def test_actionobject_hooks_init(orig_obj: Any): assert HOOK_ALWAYS in obj.syft_pre_hooks__ assert HOOK_ALWAYS in obj.syft_post_hooks__ + assert HOOK_ON_POINTERS in obj.syft_pre_hooks__ + assert HOOK_ON_POINTERS in obj.syft_post_hooks__ + + assert make_action_side_effect in obj.syft_pre_hooks__[HOOK_ALWAYS] + + +def test_actionobject_add_pre_hooks(): + # Eager execution is disabled by default + obj = ActionObject.from_obj(1) + + assert make_action_side_effect in obj.syft_pre_hooks__[HOOK_ALWAYS] + assert send_action_side_effect not in obj.syft_pre_hooks__[HOOK_ON_POINTERS] + assert propagate_server_uid not in obj.syft_post_hooks__[HOOK_ALWAYS] + + # eager exec tests: + obj._syft_add_pre_hooks__(eager_execution=True) + obj._syft_add_post_hooks__(eager_execution=True) assert make_action_side_effect in obj.syft_pre_hooks__[HOOK_ALWAYS] assert send_action_side_effect in obj.syft_pre_hooks__[HOOK_ON_POINTERS] - assert propagate_node_uid in obj.syft_post_hooks__[HOOK_ALWAYS] + assert propagate_server_uid in obj.syft_post_hooks__[HOOK_ALWAYS] @pytest.mark.parametrize( @@ -188,10 +208,7 @@ def test_actionobject_hooks_make_action_side_effect(orig_obj_op: Any): obj = ActionObject.from_obj(orig_obj) context = PreHookContext(obj=obj, op_name=op) - result = make_action_side_effect(context) - assert result.is_ok() - - context, args, kwargs = result.ok() + context, args, kwargs = make_action_side_effect(context).unwrap() assert context.action is not None assert isinstance(context.action, Action) assert context.action.full_path.endswith("." 
+ op) @@ -234,12 +251,12 @@ def test_actionobject_hooks_send_action_side_effect_err_invalid_args(worker): ], ) def test_actionobject_hooks_send_action_side_effect_ignore_op( - root_domain_client, orig_obj_op + root_datasite_client, orig_obj_op ): orig_obj, op, args, kwargs = orig_obj_op obj = helper_make_action_obj(orig_obj) - obj = obj.send(root_domain_client) + obj = obj.send(root_datasite_client) context = PreHookContext(obj=obj, op_name=op) result = send_action_side_effect(context, *args, **kwargs) @@ -282,18 +299,18 @@ def test_actionobject_hooks_send_action_side_effect_ok(worker, orig_obj_op): assert context.result_id is not None -def test_actionobject_hooks_propagate_node_uid_err(): +def test_actionobject_hooks_propagate_server_uid_err(): orig_obj = "abc" op = "capitalize" obj = ActionObject.from_obj(orig_obj) context = PreHookContext(obj=obj, op_name=op) - result = propagate_node_uid(context, op=op, result="orig_obj") + result = propagate_server_uid(context, op=op, result="orig_obj") assert result.is_err() -def test_actionobject_hooks_propagate_node_uid_ok(): +def test_actionobject_hooks_propagate_server_uid_ok(): orig_obj = "abc" op = "capitalize" @@ -303,7 +320,7 @@ def test_actionobject_hooks_propagate_node_uid_ok(): obj.syft_point_to(obj_id) context = PreHookContext(obj=obj, op_name=op) - result = propagate_node_uid(context, op=op, result="orig_obj") + result = propagate_server_uid(context, op=op, result="orig_obj") assert result.is_ok() @@ -315,7 +332,7 @@ def test_actionobject_syft_point_to(): obj.syft_point_to(obj_id) - assert obj.syft_node_uid == obj_id + assert obj.syft_server_uid == obj_id @pytest.mark.parametrize( @@ -342,8 +359,9 @@ def test_actionobject_syft_execute_ok(worker, testcase): ) context = PreHookContext(obj=obj_pointer, op_name=op, action_type=ActionType.METHOD) - result = make_action_side_effect(context, *args_pointers, **kwargs_pointers) - context, _, _ = result.ok() + context, _, _ = make_action_side_effect( + context, *args_pointers, **kwargs_pointers + ).unwrap() action_result = context.obj.syft_execute_action(context.action, sync=True) assert action_result == expected @@ -486,17 +504,17 @@ def test_actionobject_syft_get_path(testcase): ], ) def test_actionobject_syft_send_get(worker, testcase): - root_domain_client = worker.root_client - root_domain_client._fetch_api(root_domain_client.credentials) - action_store = worker.get_service("actionservice").store + root_datasite_client = worker.root_client + root_datasite_client._fetch_api(root_datasite_client.credentials) + action_store = worker.services.action.stash orig_obj = testcase obj = helper_make_action_obj(orig_obj) - assert len(action_store.data) == 0 + assert len(action_store._data) == 0 - ptr = obj.send(root_domain_client) - assert len(action_store.data) == 1 + ptr = obj.send(root_datasite_client) + assert len(action_store._data) == 1 retrieved = ptr.get() assert obj.syft_action_data == retrieved @@ -564,8 +582,11 @@ def test_actionobject_syft_get_attr_context(): (complex(1, 2), "conjugate", [], {}, complex(1, -2)), ], ) +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_actionobject_syft_execute_hooks(worker, testcase): client = worker.root_client + assert client.settings.enable_eager_execution(enable=True) + orig_obj, op, args, kwargs, expected = testcase obj = helper_make_action_obj(orig_obj) @@ -582,9 +603,9 @@ def test_actionobject_syft_execute_hooks(worker, testcase): ) assert context.result_id is not None - context.obj.syft_node_uid = UID() + 
context.obj.syft_server_uid = UID() result = obj_pointer._syft_run_post_hooks__(context, name=op, result=obj_pointer) - assert result.syft_node_uid == context.obj.syft_node_uid + assert result.syft_server_uid == context.obj.syft_server_uid @pytest.mark.parametrize( @@ -637,7 +658,7 @@ def test_actionobject_syft_wrap_attribute_for_properties(orig_obj): assert prop is not None assert isinstance(prop, ActionObject) assert hasattr(prop, "id") - assert hasattr(prop, "syft_node_uid") + assert hasattr(prop, "syft_server_uid") assert hasattr(prop, "syft_history_hash") @@ -918,7 +939,7 @@ def test_actionobject_syft_getattr_int(orig_obj: int, worker, scenario): assert (3 >> obj) == (3 >> orig_obj) -def test_actionobject_syft_getattr_int_history(worker): +def test_actionobject_syft_getattr_int_history(): orig_obj = 5 obj1 = ActionObject.from_obj(orig_obj) obj2 = ActionObject.from_obj(orig_obj) @@ -980,7 +1001,7 @@ def test_actionobject_syft_getattr_float_history(): @pytest.mark.skipif( - sys.platform != "linux", + sys.platform == "win32", reason="This is a hackish way to test attribute set/get, and it might fail on Windows or OSX", ) def test_actionobject_syft_getattr_np(worker): @@ -1004,3 +1025,35 @@ def test_actionobject_syft_getattr_pandas(worker): obj.columns = ["a", "b", "c"] assert (obj.columns == ["a", "b", "c"]).all() + + +def test_actionobject_delete(worker): + """ + Test deleting action objects and their corresponding blob storage entries + """ + root_client = worker.root_client + + # small object with no blob store entry + data_small = np.random.randint(0, 100, size=3) + action_obj = ActionObject.from_obj(data_small) + action_obj.send(root_client) + assert action_obj.syft_blob_storage_entry_id is None + del_res = root_client.api.services.action.delete(uid=action_obj.id) + assert isinstance(del_res, SyftSuccess) + + # big object with blob store entry + num_elements = 25 * 1024 * 1024 + data_big = np.random.randint(0, 100, size=num_elements) # 4 bytes per int32 + action_obj_2 = ActionObject.from_obj(data_big) + action_obj_2.send(root_client) + assert isinstance(action_obj_2.syft_blob_storage_entry_id, UID) + read_res = root_client.api.services.blob_storage.read( + action_obj_2.syft_blob_storage_entry_id + ) + assert isinstance(read_res, SyftObjectRetrieval) + del_res = root_client.api.services.action.delete(uid=action_obj_2.id) + assert isinstance(del_res, SyftSuccess) + with pytest.raises(SyftException): + read_res = root_client.api.services.blob_storage.read( + action_obj_2.syft_blob_storage_entry_id + ) diff --git a/packages/syft/tests/syft/service/action/action_service_test.py b/packages/syft/tests/syft/service/action/action_service_test.py index e4d9b663500..e97362a6340 100644 --- a/packages/syft/tests/syft/service/action/action_service_test.py +++ b/packages/syft/tests/syft/service/action/action_service_test.py @@ -10,16 +10,18 @@ def get_auth_ctx(worker): - return AuthedServiceContext(node=worker, credentials=worker.signing_key.verify_key) + return AuthedServiceContext( + server=worker, credentials=worker.signing_key.verify_key + ) def test_action_service_sanity(worker): - service = worker.get_service("actionservice") + service = worker.services.action + root_datasite_client = worker.root_client obj = ActionObject.from_obj("abc") + pointer = obj.send(root_datasite_client) - pointer = service.set(get_auth_ctx(worker), obj).ok() - - assert len(service.store.data) == 1 + assert len(service.stash._data) == 1 res = pointer.capitalize() assert res[0] == "A" diff --git 
a/packages/syft/tests/syft/service/dataset/dataset_service_test.py b/packages/syft/tests/syft/service/dataset/dataset_service_test.py index a60bc653c13..4d73e35fa6f 100644 --- a/packages/syft/tests/syft/service/dataset/dataset_service_test.py +++ b/packages/syft/tests/syft/service/dataset/dataset_service_test.py @@ -5,20 +5,22 @@ # third party import numpy as np +import pandas as pd from pydantic import ValidationError import pytest +import torch # syft absolute import syft as sy -from syft.node.worker import Worker +from syft.server.worker import Worker from syft.service.action.action_object import ActionObject +from syft.service.action.action_object import TwinMode +from syft.service.blob_storage.util import can_upload_to_blob_storage from syft.service.dataset.dataset import CreateAsset as Asset from syft.service.dataset.dataset import CreateDataset as Dataset from syft.service.dataset.dataset import _ASSET_WITH_NONE_MOCK_ERROR_MESSAGE -from syft.service.response import SyftError -from syft.service.response import SyftException from syft.service.response import SyftSuccess -from syft.types.twin_object import TwinMode +from syft.types.errors import SyftException def random_hash() -> str: @@ -125,10 +127,12 @@ def test_cannot_set_empty_mock_with_true_mock_is_real( asset = Asset(**asset_with_mock, mock_is_real=True) assert asset.mock_is_real - with pytest.raises(SyftException): + with pytest.raises(SyftException) as exc: asset.set_mock(empty_mock, mock_is_real=True) assert asset.mock is asset_with_mock["mock"] + assert exc.type == SyftException + assert exc.value.public_message def test_dataset_cannot_have_assets_with_none_mock() -> None: @@ -195,11 +199,11 @@ def test_guest_client_get_empty_mock_as_private_pointer( asset = Asset(**asset_with_empty_mock) dataset = Dataset(name=random_hash(), asset_list=[asset]) - root_domain_client = worker.root_client - root_domain_client.upload_dataset(dataset) + root_datasite_client = worker.root_client + root_datasite_client.upload_dataset(dataset) - guest_domain_client = root_domain_client.guest() - guest_datasets = guest_domain_client.api.services.dataset.get_all() + guest_datasite_client = root_datasite_client.guest() + guest_datasets = guest_datasite_client.api.services.dataset.get_all() guest_dataset = guest_datasets[0] mock = guest_dataset.assets[0].pointer @@ -209,16 +213,16 @@ def test_guest_client_get_empty_mock_as_private_pointer( assert mock.syft_twin_type is TwinMode.MOCK -def test_domain_client_cannot_upload_dataset_with_non_mock(worker: Worker) -> None: +def test_datasite_client_cannot_upload_dataset_with_non_mock(worker: Worker) -> None: assets = [Asset(**make_asset_with_mock()) for _ in range(10)] dataset = Dataset(name=random_hash(), asset_list=assets) dataset.asset_list[0].mock = None - root_domain_client = worker.root_client + root_datasite_client = worker.root_client with pytest.raises(ValueError) as excinfo: - root_domain_client.upload_dataset(dataset) + root_datasite_client.upload_dataset(dataset) assert _ASSET_WITH_NONE_MOCK_ERROR_MESSAGE in str(excinfo.value) @@ -230,12 +234,16 @@ def test_adding_contributors_with_duplicate_email(): res1 = dataset.add_contributor( role=sy.roles.UPLOADER, name="Alice", email="alice@naboo.net" ) - res2 = dataset.add_contributor( - role=sy.roles.UPLOADER, name="Alice Smith", email="alice@naboo.net" - ) assert isinstance(res1, SyftSuccess) - assert isinstance(res2, SyftError) + + with pytest.raises(SyftException) as exc: + dataset.add_contributor( + role=sy.roles.UPLOADER, name="Alice Smith", 
email="alice@naboo.net" + ) + assert exc.type == SyftException + assert exc.value.public_message + assert len(dataset.contributors) == 1 # Assets @@ -245,11 +253,134 @@ def test_adding_contributors_with_duplicate_email(): role=sy.roles.UPLOADER, name="Bob", email="bob@naboo.net" ) - res4 = asset.add_contributor( - role=sy.roles.UPLOADER, name="Bob Abraham", email="bob@naboo.net" - ) + assert isinstance(res3, SyftSuccess) + + with pytest.raises(SyftException) as exc: + asset.add_contributor( + role=sy.roles.UPLOADER, name="Bob Abraham", email="bob@naboo.net" + ) + + assert exc.type == SyftException + assert exc.value.public_message + dataset.add_asset(asset) - assert isinstance(res3, SyftSuccess) - assert isinstance(res4, SyftError) assert len(asset.contributors) == 1 + + +@pytest.fixture( + params=[ + 1, + "hello", + {"key": "value"}, + {1, 2, 3}, + np.array([1, 2, 3]), + pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}), + torch.Tensor([1, 2, 3]), + ] +) +def different_data_types( + request, +) -> int | str | dict | set | np.ndarray | pd.DataFrame | torch.Tensor: + return request.param + + +def test_upload_dataset_with_assets_of_different_data_types( + worker: Worker, + different_data_types: ( + int | str | dict | set | np.ndarray | pd.DataFrame | torch.Tensor + ), +) -> None: + asset = sy.Asset( + name=random_hash(), + data=different_data_types, + mock=different_data_types, + ) + dataset = Dataset(name=random_hash()) + dataset.add_asset(asset) + root_datasite_client = worker.root_client + res = root_datasite_client.upload_dataset(dataset) + assert isinstance(res, SyftSuccess) + assert len(root_datasite_client.api.services.dataset.get_all()) == 1 + assert type(root_datasite_client.datasets[0].assets[0].data) == type( + different_data_types + ) + assert type(root_datasite_client.datasets[0].assets[0].mock) == type( + different_data_types + ) + + +def test_delete_small_datasets(worker: Worker, small_dataset: Dataset) -> None: + root_client = worker.root_client + assert not can_upload_to_blob_storage(small_dataset, root_client.metadata).unwrap() + upload_res = root_client.upload_dataset(small_dataset) + assert isinstance(upload_res, SyftSuccess) + + dataset = root_client.api.services.dataset.get_all()[0] + asset = dataset.asset_list[0] + assert isinstance(asset.data, np.ndarray) + assert isinstance(asset.mock, np.ndarray) + + # delete the dataset without deleting its assets + del_res = root_client.api.services.dataset.delete( + uid=dataset.id, delete_assets=False + ) + assert isinstance(del_res, SyftSuccess) + assert isinstance(asset.data, np.ndarray) + assert isinstance(asset.mock, np.ndarray) + assert len(root_client.api.services.dataset.get_all()) == 0 + # we can still get back the deleted dataset by uid + deleted_dataset = root_client.api.services.dataset.get_by_id(uid=dataset.id) + assert deleted_dataset.name == f"_deleted_{dataset.name}_{dataset.id}" + assert deleted_dataset.to_be_deleted + + # delete the dataset and its assets + del_res = root_client.api.services.dataset.delete( + uid=dataset.id, delete_assets=True + ) + assert isinstance(del_res, SyftSuccess) + assert asset.data is None + with pytest.raises(SyftException): + print(asset.mock) + assert len(root_client.api.services.dataset.get_all()) == 0 + + +def test_delete_big_datasets(worker: Worker, big_dataset: Dataset) -> None: + root_client = worker.root_client + assert can_upload_to_blob_storage(big_dataset, root_client.metadata).unwrap() + upload_res = root_client.upload_dataset(big_dataset) + assert isinstance(upload_res, 
SyftSuccess) + + dataset = root_client.api.services.dataset.get_all()[0] + asset = dataset.asset_list[0] + + assert isinstance(asset.data, np.ndarray) + assert isinstance(asset.mock, np.ndarray) + # test that the data is saved in the blob storage + assert len(root_client.api.services.blob_storage.get_all()) == 2 + + # delete the dataset without deleting its assets + del_res = root_client.api.services.dataset.delete( + uid=dataset.id, delete_assets=False + ) + assert isinstance(del_res, SyftSuccess) + assert isinstance(asset.data, np.ndarray) + assert isinstance(asset.mock, np.ndarray) + assert len(root_client.api.services.dataset.get_all()) == 0 + # we can still get back the deleted dataset by uid + deleted_dataset = root_client.api.services.dataset.get_by_id(uid=dataset.id) + assert deleted_dataset.name == f"_deleted_{dataset.name}_{dataset.id}" + assert deleted_dataset.to_be_deleted + # the dataset's blob entries are still there + assert len(root_client.api.services.blob_storage.get_all()) == 2 + + # delete the dataset + del_res = root_client.api.services.dataset.delete( + uid=dataset.id, delete_assets=True + ) + assert isinstance(del_res, SyftSuccess) + assert asset.data is None + with pytest.raises(SyftException): + print(asset.mock) + assert len(root_client.api.services.blob_storage.get_all()) == 0 + assert len(root_client.api.services.dataset.get_all()) == 0 diff --git a/packages/syft/tests/syft/service/jobs/job_stash_test.py b/packages/syft/tests/syft/service/jobs/job_stash_test.py index 9d3f5a964aa..561ac1aebfd 100644 --- a/packages/syft/tests/syft/service/jobs/job_stash_test.py +++ b/packages/syft/tests/syft/service/jobs/job_stash_test.py @@ -1,13 +1,16 @@ # stdlib from datetime import datetime from datetime import timedelta +from datetime import timezone # third party import pytest # syft absolute +import syft as sy from syft.service.job.job_stash import Job from syft.service.job.job_stash import JobStatus +from syft.types.errors import SyftException from syft.types.uid import UID @@ -30,10 +33,10 @@ def test_eta_string(current_iter, n_iters, status, creation_time_delta, expected): job = Job( id=UID(), - node_uid=UID(), + server_uid=UID(), n_iters=n_iters, current_iter=current_iter, - creation_time=(datetime.now() - creation_time_delta).isoformat(), + creation_time=(datetime.now(tz=timezone.utc) - creation_time_delta).isoformat(), status=status, ) @@ -43,3 +46,19 @@ def test_eta_string(current_iter, n_iters, status, creation_time_delta, expected assert job.eta_string is not None assert isinstance(job.eta_string, str) assert expected in job.eta_string + + +def test_job_no_consumer(worker): + client = worker.root_client + ds_client = worker.guest_client + + @sy.syft_function_single_use() + def process_all(): ... 
+ + _ = ds_client.code.request_code_execution(process_all) + job = client.code.process_all(blocking=False) + + with pytest.raises(SyftException) as exc: + job.wait() + + assert "has no workers" in exc.value.public_message diff --git a/packages/syft/tests/syft/service/policy/policy_test.py b/packages/syft/tests/syft/service/policy/policy_test.py new file mode 100644 index 00000000000..52909c9e206 --- /dev/null +++ b/packages/syft/tests/syft/service/policy/policy_test.py @@ -0,0 +1,146 @@ +# third party +import pytest + +# syft absolute +from syft import Asset +from syft import Constant +from syft import Dataset +from syft import MixedInputPolicy +from syft import syft_function +from syft.client.api import AuthedServiceContext +from syft.service.user.user_roles import ServiceRole +from syft.types.errors import SyftException + + +@pytest.fixture +def submit_code_with_constants_only(ds_client, worker): + input_policy = MixedInputPolicy( + endpoint=Constant(val="TEST ENDPOINT"), + query=Constant(val="TEST QUERY"), + client=ds_client, + ) + + @syft_function( + input_policy=input_policy, + ) + def test_func(): + return 1 + + admin_client = worker.root_client + + ds_client.code.submit(test_func) + + user_code = admin_client.api.services.code[0] + + yield user_code + + +@pytest.fixture +def submit_code_with_mixed_inputs(ds_client, worker): + admin_client = worker.root_client + ds = Dataset(name="test", asset_list=[Asset(name="test", data=[1, 2], mock=[2, 3])]) + + admin_client.upload_dataset(ds) + + asset = ds_client.datasets[0].assets[0] + + mix_input_policy = MixedInputPolicy( + data=asset, + endpoint=Constant(val="TEST ENDPOINT"), + query=Constant(val="TEST QUERY"), + client=ds_client, + ) + + @syft_function( + input_policy=mix_input_policy, + ) + def test_func_data(data, test_basic_python_type): + return data + + admin_client = worker.root_client + + ds_client.code.submit(test_func_data) + + user_code = admin_client.api.services.code[0] + + yield user_code + + +class TestMixedInputPolicy: + def test_constants_not_required(self, submit_code_with_constants_only): + user_code = submit_code_with_constants_only + + policy = user_code.input_policy + + assert policy.is_valid(context=None, usr_input_kwargs={}) + + def test_providing_constants_valid(self, submit_code_with_constants_only): + user_code = submit_code_with_constants_only + + policy = user_code.input_policy + + assert policy.is_valid( + context=None, + usr_input_kwargs={"endpoint": "TEST ENDPOINT", "query": "TEST QUERY"}, + ) + + def test_constant_vals_can_be_retrieved_by_admin( + self, submit_code_with_constants_only + ): + user_code = submit_code_with_constants_only + + policy = user_code.input_policy + + mapped_inputs = {k: v.val for k, v in list(policy.inputs.values())[0].items()} + + assert mapped_inputs == {"endpoint": "TEST ENDPOINT", "query": "TEST QUERY"} + + def test_mixed_inputs_invalid_without_same_ds(self, submit_code_with_mixed_inputs): + user_code = submit_code_with_mixed_inputs + + policy = user_code.input_policy + + with pytest.raises(SyftException): + policy.is_valid(context=None, usr_input_kwargs={}) + + def test_mixed_inputs_valid_with_same_asset( + self, worker, ds_client, submit_code_with_mixed_inputs + ): + user_code = submit_code_with_mixed_inputs + + policy = user_code.input_policy + + asset = ds_client.datasets[0].assets[0] + ds_context = AuthedServiceContext( + server=worker, + credentials=ds_client.verify_key, + role=ServiceRole.DATA_SCIENTIST, + ) + assert policy.is_valid( + context=ds_context, 
usr_input_kwargs={"data": asset.action_id} + ) + + def test_mixed_inputs_invalid_with_different_asset_raises( + self, worker, ds_client, submit_code_with_mixed_inputs + ): + admin_client = worker.root_client + + ds = Dataset( + name="different ds", + asset_list=[Asset(name="different asset", data=[1, 2], mock=[2, 3])], + ) + admin_client.upload_dataset(ds) + user_code = submit_code_with_mixed_inputs + + policy = user_code.input_policy + + asset = ds_client.datasets["different ds"].assets[0] + ds_context = AuthedServiceContext( + server=worker, + credentials=ds_client.verify_key, + role=ServiceRole.DATA_SCIENTIST, + ) + with pytest.raises(SyftException): + policy.is_valid( + context=ds_context, usr_input_kwargs={"data": asset.action_id} + ) diff --git a/packages/syft/tests/syft/service/sync/get_set_object_test.py b/packages/syft/tests/syft/service/sync/get_set_object_test.py new file mode 100644 index 00000000000..e6681dc621f --- /dev/null +++ b/packages/syft/tests/syft/service/sync/get_set_object_test.py @@ -0,0 +1,57 @@ +# third party + +# syft absolute +import syft as sy +from syft.client.datasite_client import DatasiteClient +from syft.service.action.action_object import ActionObject +from syft.service.dataset.dataset import Dataset + + +def get_ds_client(client: DatasiteClient) -> DatasiteClient: + client.register( + name="a", + email="a@a.com", + password="asdf", + password_verify="asdf", + ) + return client.login(email="a@a.com", password="asdf") + + +def test_get_set_object(high_worker): + high_client: DatasiteClient = high_worker.root_client + _ = get_ds_client(high_client) + root_datasite_client = high_worker.root_client + dataset = sy.Dataset( + name="local_test", + asset_list=[ + sy.Asset( + name="local_test", + data=[1, 2, 3], + mock=[1, 1, 1], + ) + ], + ) + root_datasite_client.upload_dataset(dataset) + dataset = root_datasite_client.datasets[0] + + other_dataset = high_client.api.services.migration._get_object( + uid=dataset.id, object_type=Dataset + ) + other_dataset.server_uid = dataset.server_uid + assert dataset == other_dataset + other_dataset.name = "new_name" + updated_dataset = high_client.api.services.migration._update_object( + object=other_dataset + ) + assert updated_dataset.name == "new_name" + + asset = root_datasite_client.datasets[0].assets[0] + source_ao = high_client.api.services.action.get(uid=asset.action_id) + ao = high_client.api.services.migration._get_object( + uid=asset.action_id, object_type=ActionObject + ) + ao._set_obj_location_( + high_worker.id, + root_datasite_client.credentials, + ) + assert source_ao == ao diff --git a/packages/syft/tests/syft/service/sync/sync_flow_test.py b/packages/syft/tests/syft/service/sync/sync_flow_test.py deleted file mode 100644 index 61a662049d4..00000000000 --- a/packages/syft/tests/syft/service/sync/sync_flow_test.py +++ /dev/null @@ -1,383 +0,0 @@ -# stdlib -import sys -from textwrap import dedent - -# third party -import numpy as np -import pytest - -# syft absolute -import syft as sy -from syft.abstract_node import NodeSideType -from syft.client.syncing import compare_states -from syft.client.syncing import resolve -from syft.service.action.action_object import ActionObject -from syft.service.response import SyftError - - -@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") -# @pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_sync_flow(): - # somehow skipif does not work - if sys.platform == "win32": - return - low_worker = sy.Worker( - name="low-test", - local_db=True, - 
n_consumers=1, - create_producer=True, - node_side_type=NodeSideType.LOW_SIDE, - queue_port=None, - in_memory_workers=True, - ) - high_worker = sy.Worker( - name="high-test", - local_db=True, - n_consumers=1, - create_producer=True, - node_side_type=NodeSideType.HIGH_SIDE, - queue_port=None, - in_memory_workers=True, - ) - - low_client = low_worker.root_client - high_client = high_worker.root_client - - low_client.register( - email="newuser@openmined.org", - name="John Doe", - password="pw", - password_verify="pw", - ) - client_low_ds = low_worker.guest_client - - mock_high = np.array([10, 11, 12, 13, 14]) - private_high = np.array([15, 16, 17, 18, 19]) - - dataset_high = sy.Dataset( - name="my-dataset", - description="abc", - asset_list=[ - sy.Asset( - name="numpy-data", - mock=mock_high, - data=private_high, - shape=private_high.shape, - mock_is_real=True, - ) - ], - ) - - high_client.upload_dataset(dataset_high) - mock_low = np.array([0, 1, 2, 3, 4]) # do_high.mock - - dataset_low = sy.Dataset( - id=dataset_high.id, - name="my-dataset", - description="abc", - asset_list=[ - sy.Asset( - name="numpy-data", - mock=mock_low, - data=ActionObject.empty(data_node_id=high_client.id), - shape=mock_low.shape, - mock_is_real=True, - ) - ], - ) - - res = low_client.upload_dataset(dataset_low) - - data_low = client_low_ds.datasets[0].assets[0] - - @sy.syft_function_single_use(data=data_low) - def compute_mean(data) -> float: - return data.mean() - - compute_mean.code = dedent(compute_mean.code) - - res = client_low_ds.code.request_code_execution(compute_mean) - print(res) - print("LOW CODE:", low_client.code.get_all()) - - low_state = low_client.get_sync_state() - high_state = high_client.get_sync_state() - - print(low_state.objects, high_state.objects) - - diff_state = compare_states(low_state, high_state) - low_items_to_sync, high_items_to_sync = resolve( - diff_state, decision="low", share_private_objects=True - ) - - print(low_items_to_sync, high_items_to_sync) - - low_client.apply_state(low_items_to_sync) - - high_client.apply_state(high_items_to_sync) - - low_state = low_client.get_sync_state() - high_state = high_client.get_sync_state() - - diff_state = compare_states(low_state, high_state) - - high_client._fetch_api(high_client.credentials) - - data_high = high_client.datasets[0].assets[0] - - print(high_client.code.get_all()) - job_high = high_client.code.compute_mean(data=data_high, blocking=False) - print("Waiting for job...") - job_high.wait(timeout=60) - job_high.result.get() - - # syft absolute - from syft.service.request.request import Request - - request: Request = high_client.requests[0] - job_info = job_high.info(public_metadata=True, result=True) - - print(request.syft_client_verify_key, request.syft_node_location) - print(request.code.syft_client_verify_key, request.code.syft_node_location) - request.accept_by_depositing_result(job_info) - - request = high_client.requests[0] - code = request.code - job_high._get_log_objs() - - action_store_high = high_worker.get_service("actionservice").store - blob_store_high = high_worker.get_service("blobstorageservice").stash.partition - assert ( - f"{client_low_ds.verify_key}_READ" - in action_store_high.permissions[job_high.result.id.id] - ) - assert ( - f"{client_low_ds.verify_key}_READ" - in blob_store_high.permissions[job_high.result.syft_blob_storage_entry_id] - ) - - low_state = low_client.get_sync_state() - high_state = high_client.get_sync_state() - - diff_state_2 = compare_states(low_state, high_state) - - low_items_to_sync, 
high_items_to_sync = resolve( - diff_state_2, decision="high", share_private_objects=True - ) - for diff in diff_state_2.diffs: - print(diff.status, diff.object_type) - low_client.apply_state(low_items_to_sync) - - action_store_low = low_worker.get_service("actionservice").store - blob_store_low = low_worker.get_service("blobstorageservice").stash.partition - assert ( - f"{client_low_ds.verify_key}_READ" - in action_store_low.permissions[job_high.result.id.id] - ) - assert ( - f"{client_low_ds.verify_key}_READ" - in blob_store_low.permissions[job_high.result.syft_blob_storage_entry_id] - ) - - low_state = low_client.get_sync_state() - high_state = high_client.get_sync_state() - res_low = client_low_ds.code.compute_mean(data=data_low) - print("Res Low", res_low) - - assert res_low.get() == private_high.mean() - - assert ( - res_low.id.id - == job_high.result.id.id - == code.output_history[-1].outputs[0].id.id - ) - assert ( - job_high.result.syft_blob_storage_entry_id == res_low.syft_blob_storage_entry_id - ) - - job_low = client_low_ds.code.compute_mean(data=data_low, blocking=False) - - assert job_low.id == job_high.id - assert job_low.result.id == job_high.result.id - assert ( - job_low.result.syft_blob_storage_entry_id - == job_high.result.syft_blob_storage_entry_id - ) - low_worker.cleanup() - high_worker.cleanup() - - -@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_sync_flow_no_sharing(): - # somehow skipif does not work - if sys.platform == "win32": - return - low_worker = sy.Worker( - name="low-test-2", - local_db=True, - n_consumers=1, - create_producer=True, - node_side_type=NodeSideType.LOW_SIDE, - queue_port=None, - in_memory_workers=True, - ) - high_worker = sy.Worker( - name="high-test-2", - local_db=True, - n_consumers=1, - create_producer=True, - node_side_type=NodeSideType.HIGH_SIDE, - queue_port=None, - in_memory_workers=True, - ) - - low_client = low_worker.root_client - high_client = high_worker.root_client - - low_client.register( - email="newuser@openmined.org", - name="John Doe", - password="pw", - password_verify="pw", - ) - client_low_ds = low_worker.guest_client - - mock_high = np.array([10, 11, 12, 13, 14]) - private_high = np.array([15, 16, 17, 18, 19]) - - dataset_high = sy.Dataset( - name="my-dataset", - description="abc", - asset_list=[ - sy.Asset( - name="numpy-data", - mock=mock_high, - data=private_high, - shape=private_high.shape, - mock_is_real=True, - ) - ], - ) - - high_client.upload_dataset(dataset_high) - mock_low = np.array([0, 1, 2, 3, 4]) # do_high.mock - - dataset_low = sy.Dataset( - id=dataset_high.id, - name="my-dataset", - description="abc", - asset_list=[ - sy.Asset( - name="numpy-data", - mock=mock_low, - data=ActionObject.empty(data_node_id=high_client.id), - shape=mock_low.shape, - mock_is_real=True, - ) - ], - ) - - res = low_client.upload_dataset(dataset_low) - - data_low = client_low_ds.datasets[0].assets[0] - - @sy.syft_function_single_use(data=data_low) - def compute_mean(data) -> float: - return data.mean() - - compute_mean.code = dedent(compute_mean.code) - - res = client_low_ds.code.request_code_execution(compute_mean) - print(res) - print("LOW CODE:", low_client.code.get_all()) - - low_state = low_client.get_sync_state() - high_state = high_client.get_sync_state() - - print(low_state.objects, high_state.objects) - - diff_state = compare_states(low_state, high_state) - low_items_to_sync, high_items_to_sync = resolve( - diff_state, decision="low", 
share_private_objects=True - ) - - print(low_items_to_sync, high_items_to_sync) - - low_client.apply_state(low_items_to_sync) - - high_client.apply_state(high_items_to_sync) - - low_state = low_client.get_sync_state() - high_state = high_client.get_sync_state() - - diff_state = compare_states(low_state, high_state) - - high_client._fetch_api(high_client.credentials) - - data_high = high_client.datasets[0].assets[0] - - print(high_client.code.get_all()) - job_high = high_client.code.compute_mean(data=data_high, blocking=False) - print("Waiting for job...") - job_high.wait(timeout=60) - job_high.result.get() - - # syft absolute - from syft.service.request.request import Request - - request: Request = high_client.requests[0] - job_info = job_high.info(public_metadata=True, result=True) - - print(request.syft_client_verify_key, request.syft_node_location) - print(request.code.syft_client_verify_key, request.code.syft_node_location) - request.accept_by_depositing_result(job_info) - - request = high_client.requests[0] - job_high._get_log_objs() - - action_store_high = high_worker.get_service("actionservice").store - blob_store_high = high_worker.get_service("blobstorageservice").stash.partition - assert ( - f"{client_low_ds.verify_key}_READ" - in action_store_high.permissions[job_high.result.id.id] - ) - assert ( - f"{client_low_ds.verify_key}_READ" - in blob_store_high.permissions[job_high.result.syft_blob_storage_entry_id] - ) - - low_state = low_client.get_sync_state() - high_state = high_client.get_sync_state() - - diff_state_2 = compare_states(low_state, high_state) - - low_items_to_sync, high_items_to_sync = resolve( - diff_state_2, decision="high", share_private_objects=False, ask_for_input=False - ) - for diff in diff_state_2.diffs: - print(diff.status, diff.object_type) - low_client.apply_state(low_items_to_sync) - - low_state = low_client.get_sync_state() - high_state = high_client.get_sync_state() - res_low = client_low_ds.code.compute_mean(data=data_low) - assert isinstance(res_low, SyftError) - assert ( - res_low.message - == f"Permission: [READ: {job_high.result.id.id} as {client_low_ds.verify_key}] denied" - ) - - job_low = client_low_ds.code.compute_mean(data=data_low, blocking=False) - - assert job_low.id == job_high.id - assert job_low.result.id == job_high.result.id - result = job_low.result.get() - assert isinstance(result, SyftError) - assert ( - result.message - == f"Permission: [READ: {job_high.result.id.id} as {client_low_ds.verify_key}] denied" - ) - - low_worker.cleanup() - high_worker.cleanup() diff --git a/packages/syft/tests/syft/service/sync/sync_resolve_single_test.py b/packages/syft/tests/syft/service/sync/sync_resolve_single_test.py new file mode 100644 index 00000000000..9b91142be62 --- /dev/null +++ b/packages/syft/tests/syft/service/sync/sync_resolve_single_test.py @@ -0,0 +1,447 @@ +# third party +import numpy as np +import pytest + +# syft absolute +import syft +import syft as sy +from syft.client.datasite_client import DatasiteClient +from syft.client.sync_decision import SyncDecision +from syft.client.syncing import compare_clients +from syft.client.syncing import resolve +from syft.server.worker import Worker +from syft.service.code.user_code import ApprovalDecision +from syft.service.code.user_code import UserCodeStatus +from syft.service.job.job_stash import Job +from syft.service.request.request import RequestStatus +from syft.service.response import SyftSuccess +from syft.service.sync.resolve_widget import ResolveWidget +from syft.types.errors import 
SyftException + + +def handle_decision(widget: ResolveWidget, decision: SyncDecision) -> SyftSuccess: + if decision == SyncDecision.IGNORE: + # ignore not yet implemented on the widget + return widget.obj_diff_batch.ignore() + elif decision in [SyncDecision.LOW, SyncDecision.HIGH]: + return widget.click_sync() + elif decision == SyncDecision.SKIP: + # Skip is no-op + return SyftSuccess(message="skipped") + else: + raise ValueError(f"Unknown decision {decision}") + + +def compare_and_resolve( + *, + from_client: DatasiteClient, + to_client: DatasiteClient, + decision: SyncDecision = SyncDecision.LOW, + decision_callback: callable = None, + share_private_data: bool = True, +): + diff_state_before = compare_clients(from_client, to_client) + for obj_diff_batch in diff_state_before.active_batches: + widget = resolve( + obj_diff_batch, + ) + if decision_callback: + decision = decision_callback(obj_diff_batch) + if share_private_data: + widget.click_share_all_private_data() + res = handle_decision(widget, decision) + assert isinstance(res, SyftSuccess) + from_client.refresh() + to_client.refresh() + diff_state_after = compare_clients(from_client, to_client) + return diff_state_before, diff_state_after + + +def run_and_deposit_result(client): + result = client.code.compute(blocking=True) + job = client.requests[0].deposit_result(result) + return job + + +def create_dataset(client, _id: sy.UID | None = None): + mock = np.random.random(5) + private = np.random.random(5) + + dataset = sy.Dataset( + name=sy.util.util.random_name().lower(), + description="Lorem ipsum dolor sit amet, consectetur adipiscing elit", + asset_list=[ + sy.Asset( + name="numpy-data", + mock=mock, + data=private, + shape=private.shape, + mock_is_real=True, + ) + ], + ) + if _id is not None: + dataset.id = _id + + client.upload_dataset(dataset) + return dataset + + +@syft.syft_function_single_use() +def compute() -> int: + return 42 + + +def get_ds_client(client: DatasiteClient) -> DatasiteClient: + client.register( + name="a", + email="a@a.com", + password="asdf", + password_verify="asdf", + ) + return client.login(email="a@a.com", password="asdf") + + +def test_diff_state(low_worker, high_worker): + low_client: DatasiteClient = low_worker.root_client + client_low_ds = get_ds_client(low_client) + high_client: DatasiteClient = high_worker.root_client + + @sy.syft_function_single_use() + def compute() -> int: + return 42 + + _ = client_low_ds.code.request_code_execution(compute) + + diff_state_before, diff_state_after = compare_and_resolve( + from_client=low_client, to_client=high_client + ) + + assert not diff_state_before.is_same + + assert diff_state_after.is_same + + run_and_deposit_result(high_client) + diff_state_before, diff_state_after = compare_and_resolve( + from_client=high_client, to_client=low_client + ) + + # high_state = high_client.get_sync_state() + # low_state = high_client.get_sync_state() + # assert high_state.get_previous_state_diff().is_same + # assert low_state.get_previous_state_diff().is_same + assert diff_state_after.is_same + + client_low_ds.refresh() + + # this result comes from the cache + res = client_low_ds.code.compute(blocking=True) + assert res.get() == 42 + assert res.get() == compute(syft_no_server=True) + + +def test_skip_deletion(low_worker, high_worker): + low_client: DatasiteClient = low_worker.root_client + high_client: DatasiteClient = high_worker.root_client + + @sy.syft_function_single_use() + def compute() -> int: + return 42 + + _ = low_client.code.request_code_execution(compute) + + 
w = sy.sync(high_client, low_client) + assert isinstance(w, SyftSuccess), f"Expected empty diff, got {w}" + + +def test_diff_state_with_dataset(low_worker: Worker, high_worker: Worker): + low_client: DatasiteClient = low_worker.root_client + client_low_ds = get_ds_client(low_client) + high_client: DatasiteClient = high_worker.root_client + + ds_high = create_dataset(high_client) + create_dataset(low_client, _id=ds_high.id) + + @sy.syft_function_single_use() + def compute_mean(data) -> int: + return data.mean() + + _ = client_low_ds.code.request_code_execution(compute_mean) + + with pytest.raises(SyftException) as exc: + client_low_ds.code.compute_mean(blocking=False) + + assert ( + "Please wait for the admin to allow the execution of this code" + in exc.value.public_message + ) + + diff_state_before, diff_state_after = compare_and_resolve( + from_client=low_client, to_client=high_client + ) + + assert not diff_state_before.is_same + + assert diff_state_after.is_same + + # run_and_deposit_result(high_client) + data_high = high_client.datasets[0].assets[0] + mean_result = high_client.code.compute_mean(data=data_high, blocking=True) + high_client.requests[0].deposit_result(mean_result) + + # the high side client delete the dataset after depositing the result + dataset_del_res = high_client.api.services.dataset.delete( + uid=high_client.datasets[0].id + ) + assert isinstance(dataset_del_res, SyftSuccess) + + diff_state_before, diff_state_after = compare_and_resolve( + from_client=high_client, to_client=low_client + ) + + # high_state = high_client.get_sync_state() + # low_state = high_client.get_sync_state() + # assert high_state.get_previous_state_diff().is_same + # assert low_state.get_previous_state_diff().is_same + assert diff_state_after.is_same + + client_low_ds.refresh() + + data_low = low_client.datasets[0].assets[0] + + # check loading results for both blocking and non-blocking case + res_blocking = client_low_ds.code.compute_mean(data=data_low, blocking=True) + res_blocking = res_blocking.get() + + res_non_blocking = client_low_ds.code.compute_mean( + data=data_low, blocking=False + ).wait() + + # expected_result = compute_mean(syft_no_server=True, data=) + assert res_blocking == res_non_blocking == mean_result + + +def test_sync_with_error(low_worker, high_worker): + """Check syncing with an error in a syft function""" + low_client: DatasiteClient = low_worker.root_client + client_low_ds = get_ds_client(low_client) + high_client: DatasiteClient = high_worker.root_client + + @sy.syft_function_single_use() + def compute() -> int: + raise RuntimeError + return 42 + + _ = client_low_ds.code.request_code_execution(compute) + + diff_state_before, diff_state_after = compare_and_resolve( + from_client=low_client, to_client=high_client + ) + + assert not diff_state_before.is_same + + assert diff_state_after.is_same + + with pytest.raises(SyftException): + run_and_deposit_result(high_client) + + diff_state_before, diff_state_after = compare_and_resolve( + from_client=high_client, to_client=low_client + ) + + assert diff_state_before.is_same + assert diff_state_after.is_same + + client_low_ds.refresh() + + with pytest.raises(SyftException): + client_low_ds.code.compute(blocking=True) + + +def test_ignore_unignore_single(low_worker, high_worker): + low_client: DatasiteClient = low_worker.root_client + client_low_ds = get_ds_client(low_client) + high_client: DatasiteClient = high_worker.root_client + + @sy.syft_function_single_use() + def compute() -> int: + return 42 + + _ = 
client_low_ds.code.request_code_execution(compute) + + diff = compare_clients(low_client, high_client, hide_usercode=False) + + assert len(diff.batches) == 2 # Request + UserCode + assert len(diff.ignored_batches) == 0 + + # Ignore usercode, request also gets ignored + res = diff[0].ignore() + assert isinstance(res, SyftSuccess) + + diff = compare_clients(low_client, high_client, hide_usercode=False) + assert len(diff.batches) == 0 + assert len(diff.ignored_batches) == 2 + assert len(diff.all_batches) == 2 + + # Unignore usercode + res = diff.ignored_batches[0].unignore() + assert isinstance(res, SyftSuccess) + + diff = compare_clients(low_client, high_client, hide_usercode=False) + assert len(diff.batches) == 1 + assert len(diff.ignored_batches) == 1 + assert len(diff.all_batches) == 2 + + +def test_request_code_execution_multiple(low_worker, high_worker): + low_client = low_worker.root_client + client_low_ds = low_worker.guest_client + high_client = high_worker.root_client + + @sy.syft_function_single_use() + def compute() -> int: + return 42 + + @sy.syft_function_single_use() + def compute_twice() -> int: + return 42 * 2 + + @sy.syft_function_single_use() + def compute_thrice() -> int: + return 42 * 3 + + _ = client_low_ds.code.request_code_execution(compute) + _ = client_low_ds.code.request_code_execution(compute_twice) + + diff_before, diff_after = compare_and_resolve( + from_client=low_client, to_client=high_client + ) + + assert not diff_before.is_same + assert diff_after.is_same + + _ = client_low_ds.code.request_code_execution(compute_thrice) + + diff_before, diff_after = compare_and_resolve( + from_client=low_client, to_client=high_client + ) + + assert not diff_before.is_same + assert diff_after.is_same + + +def test_filter_out_l2_requests(low_worker, high_worker): + low_client = low_worker.root_client + high_client = high_worker.root_client + + @sy.syft_function_single_use() + def compute() -> int: + return 42 + + high_client.code.request_code_execution(compute) + high_client.code.compute(blocking=False) + + w = sy.sync(from_client=high_client, to_client=low_client) + assert isinstance(w, SyftSuccess), f"Expected empty diff, got {w}" + + +def test_approve_request_on_sync_blocking(low_worker, high_worker): + low_client = low_worker.root_client + client_low_ds = get_ds_client(low_client) + high_client = high_worker.root_client + + @sy.syft_function_single_use() + def compute() -> int: + return 42 + + _ = client_low_ds.code.request_code_execution(compute) + + # No execute permissions + with pytest.raises(SyftException) as exc: + client_low_ds.code.compute(blocking=True) + + assert "waiting for approval" in exc.value.public_message + + assert low_client.requests[0].status == RequestStatus.PENDING + + # Sync request to high side + diff_before, diff_after = compare_and_resolve( + from_client=low_client, to_client=high_client + ) + + assert not diff_before.is_same + assert diff_after.is_same + + # Execute on high side + job = run_and_deposit_result(high_client) + assert job.result.get() == 42 + + assert high_client.requests[0].status == RequestStatus.PENDING + + # Sync back to low side, share private data + diff_before, diff_after = compare_and_resolve( + from_client=high_client, to_client=low_client, share_private_data=True + ) + assert len(diff_before.batches) == 1 + root_types = [x.root_type for x in diff_before.batches] + assert Job in root_types + # assert ( + # Request in root_types + # ) # we have not configured it to count UserCode as a root type """ + assert 
low_client.requests[0].status == RequestStatus.APPROVED + + assert client_low_ds.code.compute().get() == 42 + assert len(client_low_ds.code.compute.jobs) == 1 + # check if user retrieved from cache, instead of re-executing + assert len(client_low_ds.requests[0].code.output_history) >= 1 + + +def test_deny_and_sync(low_worker, high_worker): + low_client = low_worker.root_client + client_low_ds = get_ds_client(low_client) + high_client = high_worker.root_client + + @sy.syft_function_single_use() + def compute() -> int: + return 42 + + _ = client_low_ds.code.request_code_execution(compute) + + # No execute permissions + with pytest.raises(SyftException): + client_low_ds.code.compute(blocking=True) + + assert low_client.requests[0].status == RequestStatus.PENDING + + # Deny on low side + request_low = low_client.requests[0] + res = request_low.deny(reason="bad request") + print(res) + assert low_client.requests[0].status == RequestStatus.REJECTED + + # Un-deny. NOTE: not supported by current UX, this is just used to re-deny on high side + low_client.api.code_status.update( + id=request_low.status_id, + decision=ApprovalDecision(status=UserCodeStatus.PENDING), + ) + assert low_client.requests[0].status == RequestStatus.PENDING + + # Sync request to high side + diff_before, diff_after = compare_and_resolve( + from_client=low_client, to_client=high_client + ) + + assert not diff_before.is_same + assert diff_after.is_same + + # Deny on high side + high_client.requests[0].deny(reason="bad request") + assert high_client.requests[0].status == RequestStatus.REJECTED + + diff_before, diff_after = compare_and_resolve( + from_client=high_client, to_client=low_client + ) + + assert diff_after.is_same + + assert low_client.requests[0].status == RequestStatus.REJECTED diff --git a/packages/syft/tests/syft/service_permission_test.py b/packages/syft/tests/syft/service_permission_test.py index edb66dd9f96..ceb6d63923c 100644 --- a/packages/syft/tests/syft/service_permission_test.py +++ b/packages/syft/tests/syft/service_permission_test.py @@ -2,53 +2,52 @@ import pytest # syft absolute -from syft import SyftError from syft.client.api import SyftAPICall +from syft.types.errors import SyftException +from syft.types.syft_object import EXCLUDED_FROM_SIGNATURE @pytest.fixture def guest_mock_user(root_verify_key, user_stash, guest_user): - result = user_stash.partition.set(root_verify_key, guest_user) - assert result.is_ok() - - user = result.ok() + user = user_stash.set(root_verify_key, guest_user).unwrap() assert user is not None - yield user def test_call_service_syftapi_with_permission(worker, guest_mock_user, update_user): user_id = guest_mock_user.id - res = worker.root_client.api.services.user.update(user_id, update_user) + res = worker.root_client.api.services.user.update( + uid=user_id, + **{k: v for k, v in update_user if k not in EXCLUDED_FROM_SIGNATURE}, + ) assert res # this throws an AttributeError, maybe we want something more clear? 
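For reference, the direct-call pattern these rewritten permission tests exercise is: construct a SyftAPICall addressed to the server, sign it with the client's signing key, and send it over the client connection; under the new error model, a caller without permission gets a raised SyftException rather than a returned SyftError. A minimal sketch of that flow, assuming an in-memory worker like the test fixtures use (the worker name is illustrative):

import pytest
import syft as sy
from syft.client.api import SyftAPICall
from syft.types.errors import SyftException

# in-memory server, mirroring the test fixtures
worker = sy.Worker.named(name="permission-demo", reset=True, db_url="sqlite://")
root_client = worker.root_client
guest_client = worker.guest_client

# a privileged client can build, sign and send the call itself
call = SyftAPICall(server_uid=root_client.id, path="user.get_all", args=[], kwargs={})
signed_call = call.sign(root_client.api.signing_key)
signed_result = root_client.api.connection.make_call(signed_call)
users = signed_result.message.data  # the payload travels on the signed message

# the same call from a guest is rejected by raising, not by returning an error object
guest_call = SyftAPICall(server_uid=guest_client.id, path="user.get_all", args=[], kwargs={})
signed_guest_call = guest_call.sign(guest_client.api.signing_key)
with pytest.raises(SyftException):
    guest_client.api.connection.make_call(signed_guest_call)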
-def test_call_service_syftapi_no_permission(guest_domain_client): +def test_call_service_syftapi_no_permission(guest_datasite_client): with pytest.raises(AttributeError): - guest_domain_client.api.services.user.get_all() + guest_datasite_client.api.services.user.get_all() def test_directly_call_service_with_permission(worker, guest_mock_user, update_user): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client user_id = guest_mock_user.id api_call = SyftAPICall( - node_uid=root_domain_client.id, + server_uid=root_datasite_client.id, path="user.update", - args=[user_id, update_user], - kwargs={}, + args=[], + kwargs={"uid": user_id, **update_user}, ) - signed_call = api_call.sign(root_domain_client.api.signing_key) - signed_result = root_domain_client.api.connection.make_call(signed_call) + signed_call = api_call.sign(root_datasite_client.api.signing_key) + signed_result = root_datasite_client.api.connection.make_call(signed_call) result = signed_result.message.data assert result -def test_directly_call_service_no_permission(guest_domain_client): +def test_directly_call_service_no_permission(guest_datasite_client): api_call = SyftAPICall( - node_uid=guest_domain_client.id, path="user.get_all", args=[], kwargs={} + server_uid=guest_datasite_client.id, path="user.get_all", args=[], kwargs={} ) - signed_call = api_call.sign(guest_domain_client.api.signing_key) - signed_result = guest_domain_client.api.connection.make_call(signed_call) - result = signed_result.message.data - assert isinstance(result, SyftError) + signed_call = api_call.sign(guest_datasite_client.api.signing_key) + with pytest.raises(SyftException): + guest_datasite_client.api.connection.make_call(signed_call) diff --git a/packages/syft/tests/syft/settings/fixtures.py b/packages/syft/tests/syft/settings/fixtures.py index f2b6096d460..637f47b9f42 100644 --- a/packages/syft/tests/syft/settings/fixtures.py +++ b/packages/syft/tests/syft/settings/fixtures.py @@ -6,12 +6,13 @@ # syft absolute from syft.__init__ import __version__ -from syft.abstract_node import NodeSideType -from syft.abstract_node import NodeType -from syft.node.credentials import SyftSigningKey -from syft.service.metadata.node_metadata import NodeMetadataJSON -from syft.service.settings.settings import NodeSettingsUpdate -from syft.service.settings.settings import NodeSettingsV2 +from syft.abstract_server import ServerSideType +from syft.abstract_server import ServerType +from syft.server.credentials import SyftSigningKey +from syft.service.metadata.server_metadata import ServerMetadataJSON +from syft.service.notifier.notifier_stash import NotifierStash +from syft.service.settings.settings import ServerSettings +from syft.service.settings.settings import ServerSettingsUpdate from syft.service.settings.settings_service import SettingsService from syft.service.settings.settings_stash import SettingsStash from syft.types.syft_object import HIGHEST_SYFT_OBJECT_VERSION @@ -19,14 +20,19 @@ from syft.types.uid import UID +@pytest.fixture +def notifier_stash(document_store) -> NotifierStash: + yield NotifierStash(store=document_store) + + @pytest.fixture def settings_stash(document_store) -> SettingsStash: yield SettingsStash(store=document_store) @pytest.fixture -def settings(worker, faker) -> NodeSettingsV2: - yield NodeSettingsV2( +def settings(worker, faker) -> ServerSettings: + yield ServerSettings( id=UID(), name=worker.name, organization=faker.text(), @@ -35,16 +41,19 @@ def settings(worker, faker) -> NodeSettingsV2: 
deployed_on=datetime.now().date().strftime("%m/%d/%Y"), signup_enabled=False, admin_email="info@openmined.org", - node_side_type=NodeSideType.LOW_SIDE, + server_side_type=ServerSideType.LOW_SIDE, show_warnings=False, verify_key=SyftSigningKey.generate().verify_key, - node_type=NodeType.DOMAIN, + server_type=ServerType.DATASITE, + association_request_auto_approval=False, + default_worker_pool="default-pool", + notifications_enabled=False, ) @pytest.fixture -def update_settings(faker) -> NodeSettingsUpdate: - yield NodeSettingsUpdate( +def update_settings(faker) -> ServerSettingsUpdate: + yield ServerSettingsUpdate( name=faker.name(), description=faker.text(), on_board=faker.boolean(), @@ -52,8 +61,8 @@ def update_settings(faker) -> NodeSettingsUpdate: @pytest.fixture -def metadata_json(faker) -> NodeMetadataJSON: - yield NodeMetadataJSON( +def metadata_json(faker) -> ServerMetadataJSON: + yield ServerMetadataJSON( metadata_version=faker.random_int(), name=faker.name(), id=faker.text(), @@ -61,9 +70,10 @@ def metadata_json(faker) -> NodeMetadataJSON: highest_object_version=HIGHEST_SYFT_OBJECT_VERSION, lowest_object_version=LOWEST_SYFT_OBJECT_VERSION, syft_version=__version__, - node_side_type=NodeSideType.LOW_SIDE.value, + server_side_type=ServerSideType.LOW_SIDE.value, show_warnings=False, - node_type=NodeType.DOMAIN.value, + server_type=ServerType.DATASITE.value, + min_size_blob_storage_mb=1, ) diff --git a/packages/syft/tests/syft/settings/metadata_test.py b/packages/syft/tests/syft/settings/metadata_test.py index 9d28d275a27..6dded7e48eb 100644 --- a/packages/syft/tests/syft/settings/metadata_test.py +++ b/packages/syft/tests/syft/settings/metadata_test.py @@ -3,7 +3,7 @@ # syft absolute from syft.__init__ import __version__ -from syft.service.metadata.node_metadata import check_version +from syft.service.metadata.server_metadata import check_version def test_check_base_version_success() -> None: diff --git a/packages/syft/tests/syft/settings/settings_serde_test.py b/packages/syft/tests/syft/settings/settings_serde_test.py index 1a41c927fbc..34a4d22c198 100644 --- a/packages/syft/tests/syft/settings/settings_serde_test.py +++ b/packages/syft/tests/syft/settings/settings_serde_test.py @@ -17,7 +17,7 @@ "metadata_json", ], ) -def test_node_settings_serde(obj: Any, request: FixtureRequest) -> None: +def test_server_settings_serde(obj: Any, request: FixtureRequest) -> None: requested_obj = request.getfixturevalue(obj) ser_data = sy.serialize(requested_obj, to_bytes=True) assert isinstance(ser_data, bytes) diff --git a/packages/syft/tests/syft/settings/settings_service_test.py b/packages/syft/tests/syft/settings/settings_service_test.py index d359eb2848f..7555aadd91e 100644 --- a/packages/syft/tests/syft/settings/settings_service_test.py +++ b/packages/syft/tests/syft/settings/settings_service_test.py @@ -1,47 +1,60 @@ # stdlib from copy import deepcopy from datetime import datetime +from typing import NoReturn from unittest import mock +from uuid import uuid4 # third party from faker import Faker +import pytest from pytest import MonkeyPatch -from result import Err -from result import Ok # syft absolute import syft -from syft.abstract_node import NodeSideType -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey +from syft.abstract_server import ServerSideType +from syft.client.datasite_client import DatasiteClient +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey from syft.service.context 
import AuthedServiceContext -from syft.service.response import SyftError +from syft.service.notifier.notifier import NotifierSettings +from syft.service.notifier.notifier_stash import NotifierStash from syft.service.response import SyftSuccess -from syft.service.settings.settings import NodeSettingsUpdate -from syft.service.settings.settings import NodeSettingsV2 +from syft.service.service import _SIGNATURE_ERROR_MESSAGE +from syft.service.settings.settings import ServerSettings +from syft.service.settings.settings import ServerSettingsUpdate +from syft.service.settings.settings_service import ( + _NOTIFICATIONS_ENABLED_WIHOUT_CREDENTIALS_ERROR, +) from syft.service.settings.settings_service import SettingsService from syft.service.settings.settings_stash import SettingsStash -from syft.service.user.user import UserCreate +from syft.service.user.user import UserPrivateKey +from syft.service.user.user import UserView from syft.service.user.user_roles import ServiceRole +from syft.store.document_store_errors import NotFoundException +from syft.store.document_store_errors import StashException +from syft.types.errors import SyftException +from syft.types.result import as_result def test_settingsservice_get_success( monkeypatch: MonkeyPatch, settings_service: SettingsService, - settings: NodeSettingsV2, + settings: ServerSettings, authed_context: AuthedServiceContext, ) -> None: mock_stash_get_all_output = [settings, settings] - expected_output = Ok(mock_stash_get_all_output[0]) + expected_output = mock_stash_get_all_output[0] - def mock_stash_get_all(credentials) -> Ok: - return Ok(mock_stash_get_all_output) + @as_result(SyftException) + def mock_stash_get_all(credentials) -> list[ServerSettings]: + return mock_stash_get_all_output monkeypatch.setattr(settings_service.stash, "get_all", mock_stash_get_all) response = settings_service.get(context=authed_context) - assert isinstance(response.ok(), NodeSettingsV2) + assert isinstance(response, ServerSettings) assert response == expected_output @@ -50,67 +63,58 @@ def test_settingsservice_get_stash_fail( settings_service: SettingsService, authed_context: AuthedServiceContext, ) -> None: - def mock_empty_stash(credentials): - return Ok([]) + @as_result(StashException) + def mock_empty_stash(credentials) -> list[ServerSettings]: + return [] monkeypatch.setattr(settings_service.stash, "get_all", mock_empty_stash) # case 1: we got an empty list from the stash - response = settings_service.get(context=authed_context) - assert isinstance(response, SyftError) - assert response.message == "No settings found" + with pytest.raises(NotFoundException) as exc: + settings_service.get(context=authed_context) + + assert exc.type == NotFoundException + assert exc.value.public_message == "No settings found" # case 2: the stash.get_all() function fails mock_error_message = "database failure" - def mock_stash_get_all_error(credentials) -> Err: - return Err(mock_error_message) + @as_result(StashException) + def mock_stash_get_all_error(credentials) -> NoReturn: + raise StashException(public_message=mock_error_message) monkeypatch.setattr(settings_service.stash, "get_all", mock_stash_get_all_error) - response = settings_service.get(context=authed_context) - assert isinstance(response, SyftError) - assert response.message == mock_error_message + with pytest.raises(StashException) as exc: + settings_service.get(context=authed_context) + assert exc.type == StashException + assert exc.value.public_message == mock_error_message -def test_settingsservice_set_success( - 
settings_service: SettingsService, - settings: NodeSettingsV2, - authed_context: AuthedServiceContext, -) -> None: - response = settings_service.set(authed_context, settings) - - assert response.is_ok() is True - assert isinstance(response.ok(), NodeSettingsV2) - assert response.ok() == settings - -def test_settingsservice_set_fail( - monkeypatch: MonkeyPatch, +def test_settingsservice_set_success( settings_service: SettingsService, - settings: NodeSettingsV2, + settings: ServerSettings, authed_context: AuthedServiceContext, ) -> None: - mock_error_message = "database failure" - - def mock_stash_set_error(credentials, a) -> Err: - return Err(mock_error_message) - - monkeypatch.setattr(settings_service.stash, "set", mock_stash_set_error) - response = settings_service.set(authed_context, settings) - - assert isinstance(response, SyftError) - assert response.message == mock_error_message + assert isinstance(response, ServerSettings) + response.syft_client_verify_key = None + response.syft_server_location = None + response.pwd_token_config.syft_client_verify_key = None + response.pwd_token_config.syft_server_location = None + response.welcome_markdown.syft_client_verify_key = None + response.welcome_markdown.syft_server_location = None + assert response == settings def add_mock_settings( root_verify_key: SyftVerifyKey, settings_stash: SettingsStash, - settings: NodeSettingsV2, -) -> NodeSettingsV2: + settings: ServerSettings, +) -> ServerSettings: # create a mock settings in the stash so that we can update it - result = settings_stash.partition.set(root_verify_key, settings) + result = settings_stash.set(root_verify_key, settings) assert result.is_ok() created_settings = result.ok() @@ -124,14 +128,13 @@ def test_settingsservice_update_success( monkeypatch: MonkeyPatch, settings_stash: SettingsStash, settings_service: SettingsService, - settings: NodeSettingsV2, - update_settings: NodeSettingsUpdate, + settings: ServerSettings, + update_settings: ServerSettingsUpdate, authed_context: AuthedServiceContext, + notifier_stash: NotifierStash, ) -> None: # add a mock settings to the stash - mock_settings = add_mock_settings( - authed_context.credentials, settings_stash, settings - ) + mock_settings = settings_stash.set(authed_context.credentials, settings).unwrap() # get a new settings according to update_settings new_settings = deepcopy(settings) @@ -143,95 +146,96 @@ def test_settingsservice_update_success( assert new_settings != mock_settings assert mock_settings == settings - mock_stash_get_all_output = [mock_settings, mock_settings] + class MockNotifierService: + def __init__(self, stash): + self.stash = stash - def mock_stash_get_all(root_verify_key) -> Ok: - return Ok(mock_stash_get_all_output) + def set_notifier_active_to_false(self, context) -> SyftSuccess: + return SyftSuccess(message="Notifier mocked to True") - monkeypatch.setattr(settings_service.stash, "get_all", mock_stash_get_all) + def settings(self, context): + return NotifierSettings() - # update the settings in the settings stash using settings_service - response = settings_service.update(authed_context, update_settings) - print(response) - updated_settings = response.ok()[0] - not_updated_settings = response.ok()[1] - - assert response.is_ok() is True - assert len(response.ok()) == len(mock_stash_get_all_output) - assert ( - updated_settings.model_dump() == new_settings.model_dump() - ) # the first settings is updated - assert ( - not_updated_settings.model_dump() == settings.model_dump() - ) # the second settings is not updated 
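The older versions of these settings-service tests, removed above, asserted on Ok/Err values from the result package; the rewritten tests instead decorate their stash mocks with @as_result(...) and rely on .unwrap() surfacing typed errors. A minimal sketch of that pattern as the tests use it (the function body and error message here are illustrative, not taken from the real stash):

from typing import NoReturn

from syft.store.document_store_errors import StashException
from syft.types.result import as_result

@as_result(StashException)
def fetch_settings(credentials) -> NoReturn:
    # a StashException raised inside is captured in the returned result
    raise StashException(public_message="database failure")

res = fetch_settings(credentials=None)
assert res.is_err()  # nothing has escaped to the caller yet
# res.unwrap() would re-raise the StashException, which is what the service
# propagates to callers, e.g. settings_service.get() / .update()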
- - -def test_settingsservice_update_stash_get_all_fail( - monkeypatch: MonkeyPatch, - settings_service: SettingsService, - update_settings: NodeSettingsUpdate, - authed_context: AuthedServiceContext, -) -> None: - # the stash.get_all() function fails - mock_error_message = "database failure" + mock_notifier_service = MockNotifierService(stash=notifier_stash) - def mock_stash_get_all_error(credentials) -> Err: - return Err(mock_error_message) + def mock_get_service(service_name: str): + if service_name == "notifierservice": + return mock_notifier_service + raise ValueError(f"Unknown service: {service_name}") - monkeypatch.setattr(settings_service.stash, "get_all", mock_stash_get_all_error) - response = settings_service.update(authed_context, update_settings) + monkeypatch.setattr(authed_context.server, "get_service", mock_get_service) + + # update the settings in the settings stash using settings_service + response = settings_service.update(context=authed_context, settings=update_settings) - assert isinstance(response, SyftError) - assert response.message == mock_error_message + assert isinstance(response, SyftSuccess) def test_settingsservice_update_stash_empty( settings_service: SettingsService, - update_settings: NodeSettingsUpdate, + update_settings: ServerSettingsUpdate, authed_context: AuthedServiceContext, ) -> None: - response = settings_service.update(authed_context, update_settings) - - assert isinstance(response, SyftError) - assert response.message == "No settings found" + with pytest.raises(NotFoundException) as exc: + settings_service.update(context=authed_context, settings=update_settings) + assert exc.value.public_message == "Server settings not found" def test_settingsservice_update_fail( monkeypatch: MonkeyPatch, - settings: NodeSettingsV2, + settings: ServerSettings, settings_service: SettingsService, - update_settings: NodeSettingsUpdate, + update_settings: ServerSettingsUpdate, authed_context: AuthedServiceContext, + notifier_stash: NotifierStash, ) -> None: # the stash has a settings but we could not update it (the stash.update() function fails) mock_stash_get_all_output = [settings, settings] - def mock_stash_get_all(credentials) -> Ok: - return Ok(mock_stash_get_all_output) + @as_result(StashException) + def mock_stash_get_all(credentials, **kwargs) -> list[ServerSettings]: + return mock_stash_get_all_output monkeypatch.setattr(settings_service.stash, "get_all", mock_stash_get_all) - mock_update_error_message = "Failed to update obj NodeMetadata" + mock_update_error_message = "Failed to update obj ServerMetadata" - def mock_stash_update_error(credentials, update_settings: NodeSettingsV2) -> Err: - return Err(mock_update_error_message) + @as_result(StashException) + def mock_stash_update_error(credentials, obj: ServerSettings) -> NoReturn: + raise StashException(public_message=mock_update_error_message) monkeypatch.setattr(settings_service.stash, "update", mock_stash_update_error) - response = settings_service.update(authed_context, update_settings) + # Mock the get_service method to return a mocked notifier_service with the notifier_stash + class MockNotifierService: + def __init__(self, stash): + self.stash = stash + + def set_notifier_active_to_false(self, context) -> SyftSuccess: + return SyftSuccess(message="Notifier mocked to False") + + def settings(self, context): + return NotifierSettings() + + mock_notifier_service = MockNotifierService(stash=notifier_stash) + + def mock_get_service(service_name: str): + if service_name == "notifierservice": + return 
mock_notifier_service + raise ValueError(f"Unknown service: {service_name}") - assert isinstance(response, SyftError) - assert response.message == mock_update_error_message + monkeypatch.setattr(authed_context.server, "get_service", mock_get_service) + + with pytest.raises(StashException) as _: + settings_service.update(context=authed_context, settings=update_settings) def test_settings_allow_guest_registration( monkeypatch: MonkeyPatch, faker: Faker ) -> None: - # Create a new worker - verify_key = SyftSigningKey.generate().verify_key - mock_node_settings = NodeSettingsV2( + mock_server_settings = ServerSettings( name=faker.name(), verify_key=verify_key, highest_version=1, @@ -239,83 +243,92 @@ def test_settings_allow_guest_registration( syft_version=syft.__version__, signup_enabled=False, admin_email="info@openmined.org", - node_side_type=NodeSideType.LOW_SIDE, + server_side_type=ServerSideType.LOW_SIDE, show_warnings=False, deployed_on=datetime.now().date().strftime("%m/%d/%Y"), + association_request_auto_approval=False, + notifications_enabled=False, ) with mock.patch( "syft.Worker.settings", new_callable=mock.PropertyMock, - return_value=mock_node_settings, + return_value=mock_server_settings, ): - worker = syft.Worker.named(name=faker.name(), reset=True) - guest_domain_client = worker.guest_client - root_domain_client = worker.root_client + worker = syft.Worker.named(name=faker.name(), reset=True, db_url="sqlite://") + guest_datasite_client = worker.guest_client + root_datasite_client = worker.root_client email1 = faker.email() email2 = faker.email() - response_1 = root_domain_client.register( + response_1 = root_datasite_client.register( email=email1, password="joker123", password_verify="joker123", name="Joker" ) + assert isinstance(response_1, SyftSuccess) + assert isinstance(response_1.value, UserPrivateKey) # by default, the guest client can't register new user - response_2 = guest_domain_client.register( - email=email2, - password="harley123", - password_verify="harley123", - name="Harley", - ) - assert isinstance(response_2, SyftError) + with pytest.raises(SyftException) as exc: + guest_datasite_client.register( + email=email2, + password="harley123", + password_verify="harley123", + name="Harley", + ) - assert any(user.email == email1 for user in root_domain_client.users) + expected_err_msg = "You have no permission to create an account. Please contact the Datasite owner." + assert exc.value.public_message == expected_err_msg + assert any(user.email == email1 for user in root_datasite_client.users) # only after the root client enable other users to signup, they can - mock_node_settings.signup_enabled = True + mock_server_settings.signup_enabled = True with mock.patch( "syft.Worker.settings", new_callable=mock.PropertyMock, - return_value=mock_node_settings, + return_value=mock_server_settings, ): - worker = syft.Worker.named(name=faker.name(), reset=True) - guest_domain_client = worker.guest_client - root_domain_client = worker.root_client + worker = syft.Worker.named(name=faker.name(), reset=True, db_url="sqlite://") + guest_datasite_client = worker.guest_client + root_datasite_client = worker.root_client password = faker.email() - response_3 = guest_domain_client.register( + + response_3 = guest_datasite_client.register( email=email2, password=password, password_verify=password, name=faker.name(), ) - assert isinstance(response_3, SyftSuccess) - assert any(user.email == email2 for user in root_domain_client.users) + # FIX: SyftSuccess .value... 
let's have it in the response instead + assert isinstance(response_3.value, UserPrivateKey) + assert any(user.email == email2 for user in root_datasite_client.users) -def test_user_register_for_role(monkeypatch: MonkeyPatch, faker: Faker): +def test_settings_user_register_for_role(monkeypatch: MonkeyPatch, faker: Faker): # Mock patch this env variable to remove race conditions # where signup is enabled. + def get_mock_client(faker, root_client, role): - user_create = UserCreate( + email = faker.email() + password = uuid4().hex + + result = root_client.users.create( name=faker.name(), - email=faker.email(), + email=email, role=role, - password="password", - password_verify="password", + password=password, + password_verify=password, ) - result = root_client.users.create(user_create=user_create) - assert not isinstance(result, SyftError) + assert type(result) == UserView guest_client = root_client.guest() - return guest_client.login( - email=user_create.email, password=user_create.password - ) + return guest_client.login(email=email, password=password) verify_key = SyftSigningKey.generate().verify_key - mock_node_settings = NodeSettingsV2( + mock_server_settings = ServerSettings( name=faker.name(), verify_key=verify_key, highest_version=1, @@ -323,17 +336,19 @@ def get_mock_client(faker, root_client, role): syft_version=syft.__version__, signup_enabled=False, admin_email="info@openmined.org", - node_side_type=NodeSideType.LOW_SIDE, + server_side_type=ServerSideType.LOW_SIDE, show_warnings=False, deployed_on=datetime.now().date().strftime("%m/%d/%Y"), + association_request_auto_approval=False, + notifications_enabled=False, ) with mock.patch( "syft.Worker.settings", new_callable=mock.PropertyMock, - return_value=mock_node_settings, + return_value=mock_server_settings, ): - worker = syft.Worker.named(name=faker.name(), reset=True) + worker = syft.Worker.named(name=faker.name(), reset=True, db_url="sqlite://") root_client = worker.root_client emails_added = [] @@ -346,22 +361,63 @@ def get_mock_client(faker, root_client, role): password="password", password_verify="password", ) + assert isinstance(result, SyftSuccess) + assert isinstance(result.value, UserPrivateKey) emails_added.append(email) ds_client = get_mock_client( faker=faker, root_client=root_client, role=ServiceRole.DATA_SCIENTIST ) - response = ds_client.register( - name=faker.name(), - email=faker.email(), - password="password", - password_verify="password", - ) - assert isinstance(response, SyftError) + with pytest.raises(SyftException) as exc: + ds_client.register( + name=faker.name(), + email=faker.email(), + password="password", + password_verify="password", + ) + + error_msg = "You have no permission to create an account. Please contact the Datasite owner." 
+ assert exc.type is SyftException + assert exc.value.public_message == error_msg users_created_count = sum( [u.email in emails_added for u in root_client.users.get_all()] ) assert users_created_count == len(emails_added) + + +def test_invalid_args_error_message(root_datasite_client: DatasiteClient) -> None: + update_args = { + "name": uuid4().hex, + "organization": uuid4().hex, + } + + update = ServerSettingsUpdate(**update_args) + + with pytest.raises(SyftException) as exc: + root_datasite_client.api.services.settings.update(settings=update) + + assert _SIGNATURE_ERROR_MESSAGE in exc.value.public_message + + with pytest.raises(SyftException) as exc: + root_datasite_client.api.services.settings.update(update) + + assert _SIGNATURE_ERROR_MESSAGE in exc.value.public_message + + root_datasite_client.api.services.settings.update(**update_args) + + settings = root_datasite_client.api.services.settings.get() + assert settings.name == update_args["name"] + assert settings.organization == update_args["organization"] + + +@pytest.mark.skip(reason="For now notifications can be enabled without credentials.") +def test_notifications_enabled_without_emails_credentials_not_allowed( + root_datasite_client: DatasiteClient, +) -> None: + with pytest.raises(SyftException) as exc: + root_datasite_client.api.services.settings.update(notifications_enabled=True) + + assert _NOTIFICATIONS_ENABLED_WIHOUT_CREDENTIALS_ERROR in exc.value.public_message diff --git a/packages/syft/tests/syft/settings/settings_stash_test.py b/packages/syft/tests/syft/settings/settings_stash_test.py index f1abc406a68..2d976b52108 100644 --- a/packages/syft/tests/syft/settings/settings_stash_test.py +++ b/packages/syft/tests/syft/settings/settings_stash_test.py @@ -1,54 +1,26 @@ -# third party - # syft absolute -from syft.service.settings.settings import NodeSettingsUpdate -from syft.service.settings.settings import NodeSettingsV2 +from syft.service.settings.settings import ServerSettings +from syft.service.settings.settings import ServerSettingsUpdate from syft.service.settings.settings_stash import SettingsStash -def add_mock_settings( - root_verify_key, settings_stash: SettingsStash, settings: NodeSettingsV2 -) -> NodeSettingsV2: - # prepare: add mock settings - result = settings_stash.partition.set(root_verify_key, settings) - assert result.is_ok() - - created_settings = result.ok() - assert created_settings is not None - - return created_settings - - def test_settingsstash_set( - root_verify_key, settings_stash: SettingsStash, settings: NodeSettingsV2 -) -> None: - result = settings_stash.set(root_verify_key, settings) - assert result.is_ok() - - created_settings = result.ok() - assert isinstance(created_settings, NodeSettingsV2) - assert created_settings == settings - assert settings.id in settings_stash.partition.data - - -def test_settingsstash_update( root_verify_key, settings_stash: SettingsStash, - settings: NodeSettingsV2, - update_settings: NodeSettingsUpdate, + settings: ServerSettings, + update_settings: ServerSettingsUpdate, ) -> None: - # prepare: add a mock settings - mock_settings = add_mock_settings(root_verify_key, settings_stash, settings) + created_settings = settings_stash.set(root_verify_key, settings).unwrap() + assert isinstance(created_settings, ServerSettings) + assert created_settings == settings + assert settings_stash.exists(root_verify_key, settings.id) # update mock_settings according to update_settings update_kwargs = update_settings.to_dict(exclude_empty=True).items() for field_name, value in 
update_kwargs: - setattr(mock_settings, field_name, value) + setattr(settings, field_name, value) # update the settings in the stash - result = settings_stash.update(root_verify_key, settings=mock_settings) - - assert result.is_ok() - updated_settings = result.ok() - assert isinstance(updated_settings, NodeSettingsV2) - assert mock_settings == updated_settings + updated_settings = settings_stash.update(root_verify_key, obj=settings).unwrap() + assert isinstance(updated_settings, ServerSettings) + assert settings == updated_settings diff --git a/packages/syft/tests/syft/stores/action_store_test.py b/packages/syft/tests/syft/stores/action_store_test.py index 0cabe78ef84..5c2fe63be0d 100644 --- a/packages/syft/tests/syft/stores/action_store_test.py +++ b/packages/syft/tests/syft/stores/action_store_test.py @@ -1,23 +1,26 @@ # stdlib -import sys -from typing import Any # third party import pytest # syft absolute -from syft.node.credentials import SyftVerifyKey +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey +from syft.service.action.action_object import ActionObject +from syft.service.action.action_permissions import ActionObjectOWNER +from syft.service.action.action_permissions import ActionObjectPermission from syft.service.action.action_store import ActionObjectEXECUTE -from syft.service.action.action_store import ActionObjectOWNER from syft.service.action.action_store import ActionObjectREAD +from syft.service.action.action_store import ActionObjectStash from syft.service.action.action_store import ActionObjectWRITE +from syft.service.user.user import User +from syft.service.user.user_roles import ServiceRole +from syft.service.user.user_stash import UserStash +from syft.store.db.db import DBManager from syft.types.uid import UID # relative -from .store_constants_test import TEST_VERIFY_KEY_STRING_CLIENT -from .store_constants_test import TEST_VERIFY_KEY_STRING_HACKER -from .store_constants_test import TEST_VERIFY_KEY_STRING_ROOT -from .store_mocks_test import MockSyftObject +from ..worker_test import action_object_stash # noqa: F401 permissions = [ ActionObjectOWNER, @@ -27,118 +30,119 @@ ] -@pytest.mark.parametrize( - "store", - [ - pytest.lazy_fixture("dict_action_store"), - pytest.lazy_fixture("sqlite_action_store"), - pytest.lazy_fixture("mongo_action_store"), - ], -) -def test_action_store_sanity(store: Any): - assert hasattr(store, "store_config") - assert hasattr(store, "settings") - assert hasattr(store, "data") - assert hasattr(store, "permissions") - assert hasattr(store, "root_verify_key") - assert store.root_verify_key.verify == TEST_VERIFY_KEY_STRING_ROOT +def add_user(db_manager: DBManager, role: ServiceRole) -> SyftVerifyKey: + user_stash = UserStash(store=db_manager) + verify_key = SyftSigningKey.generate().verify_key + user_stash.set( + credentials=db_manager.root_verify_key, + obj=User(verify_key=verify_key, role=role, id=UID()), + ).unwrap() + return verify_key + + +def add_test_object( + stash: ActionObjectStash, verify_key: SyftVerifyKey +) -> ActionObject: + test_object = ActionObject.from_obj([1, 2, 3]) + uid = test_object.id + stash.set_or_update( + uid=uid, + credentials=verify_key, + syft_object=test_object, + has_result_read_permission=True, + ).unwrap() + return uid @pytest.mark.parametrize( - "store", + "stash", [ - pytest.lazy_fixture("dict_action_store"), - pytest.lazy_fixture("sqlite_action_store"), - pytest.lazy_fixture("mongo_action_store"), + pytest.lazy_fixture("action_object_stash"), ], ) 
@pytest.mark.parametrize("permission", permissions) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -@pytest.mark.skipif(sys.platform == "darwin", reason="skip on mac") -def test_action_store_test_permissions(store: Any, permission: Any): - client_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_CLIENT) - root_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_ROOT) - hacker_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_HACKER) - - access = permission(uid=UID(), credentials=client_key) - access_root = permission(uid=UID(), credentials=root_key) - access_hacker = permission(uid=UID(), credentials=hacker_key) - - # add permission - store.add_permission(access) - - assert store.has_permission(access) - assert store.has_permission(access_root) - assert not store.has_permission(access_hacker) +def test_action_store_test_permissions( + stash: ActionObjectStash, permission: ActionObjectPermission +) -> None: + client_key = add_user(stash.db, ServiceRole.DATA_SCIENTIST) + root_key = add_user(stash.db, ServiceRole.ADMIN) + hacker_key = add_user(stash.db, ServiceRole.DATA_SCIENTIST) + new_admin_key = add_user(stash.db, ServiceRole.ADMIN) + + test_item_id = add_test_object(stash, client_key) + + access = permission(uid=test_item_id, credentials=client_key) + access_root = permission(uid=test_item_id, credentials=root_key) + access_hacker = permission(uid=test_item_id, credentials=hacker_key) + access_new_admin = permission(uid=test_item_id, credentials=new_admin_key) + + stash.add_permission(access) + assert stash.has_permission(access) + assert stash.has_permission(access_root) + assert stash.has_permission(access_new_admin) + assert not stash.has_permission(access_hacker) # remove permission - store.remove_permission(access) + stash.remove_permission(access) - assert not store.has_permission(access) - assert store.has_permission(access_root) - assert not store.has_permission(access_hacker) + assert not stash.has_permission(access) + assert stash.has_permission(access_root) + assert stash.has_permission(access_new_admin) + assert not stash.has_permission(access_hacker) # take ownership with new UID - client_uid2 = UID() - access = permission(uid=client_uid2, credentials=client_key) + item2_id = add_test_object(stash, client_key) + access = permission(uid=item2_id, credentials=client_key) - store.take_ownership(client_uid2, client_key) - assert store.has_permission(access) - assert store.has_permission(access_root) - assert not store.has_permission(access_hacker) + stash.add_permission(ActionObjectREAD(uid=item2_id, credentials=client_key)) + assert stash.has_permission(access) + assert stash.has_permission(access_root) + assert stash.has_permission(access_new_admin) + assert not stash.has_permission(access_hacker) # delete UID as hacker - access_hacker_ro = ActionObjectREAD(uid=UID(), credentials=hacker_key) - store.add_permission(access_hacker_ro) - res = store.delete(client_uid2, hacker_key) + res = stash.delete_by_uid(hacker_key, item2_id) assert res.is_err() - assert store.has_permission(access) - assert store.has_permission(access_hacker_ro) + assert stash.has_permission(access) + assert stash.has_permission(access_root) + assert stash.has_permission(access_new_admin) + assert not stash.has_permission(access_hacker) # delete UID as owner - res = store.delete(client_uid2, client_key) + res = stash.delete_by_uid(client_key, item2_id) assert res.is_ok() - assert not store.has_permission(access) - assert not store.has_permission(access_hacker) + assert not 
stash.has_permission(access) + assert stash.has_permission(access_new_admin) + assert not stash.has_permission(access_hacker) @pytest.mark.parametrize( - "store", + "stash", [ - pytest.lazy_fixture("dict_action_store"), - pytest.lazy_fixture("sqlite_action_store"), - pytest.lazy_fixture("mongo_action_store"), + pytest.lazy_fixture("action_object_stash"), ], ) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_action_store_test_data_set_get(store: Any): - client_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_CLIENT) - root_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_ROOT) - SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_HACKER) +def test_action_store_test_dataset_get(stash: ActionObjectStash) -> None: + client_key = add_user(stash.db, ServiceRole.DATA_SCIENTIST) + root_key = add_user(stash.db, ServiceRole.ADMIN) - access = ActionObjectWRITE(uid=UID(), credentials=client_key) - access_root = ActionObjectWRITE(uid=UID(), credentials=root_key) + data_uid = add_test_object(stash, client_key) + access = ActionObjectWRITE(uid=data_uid, credentials=client_key) + access_root = ActionObjectWRITE(uid=data_uid, credentials=root_key) + read_permission = ActionObjectREAD(uid=data_uid, credentials=client_key) # add permission - store.add_permission(access) + stash.add_permission(access) - assert store.has_permission(access) - assert store.has_permission(access_root) + assert stash.has_permission(access) + assert stash.has_permission(access_root) - # add data - data_uid = UID() - obj = MockSyftObject(data=1) + stash.add_permission(read_permission) + assert stash.has_permission(read_permission) - res = store.set(data_uid, client_key, obj, has_result_read_permission=True) - assert res.is_ok() - res = store.get(data_uid, client_key) - assert res.is_ok() - assert res.ok() == obj - - assert store.exists(data_uid) - res = store.delete(data_uid, client_key) - assert res.is_ok() - res = store.delete(data_uid, client_key) + # check that trying to get action data that doesn't exist returns an error, even if have permissions + stash.delete_by_uid(client_key, data_uid) + res = stash.get(data_uid, client_key) assert res.is_err() diff --git a/packages/syft/tests/syft/stores/base_stash_test.py b/packages/syft/tests/syft/stores/base_stash_test.py index b60fafcfda1..dd5f6aa2a90 100644 --- a/packages/syft/tests/syft/stores/base_stash_test.py +++ b/packages/syft/tests/syft/stores/base_stash_test.py @@ -2,6 +2,7 @@ from collections.abc import Callable from collections.abc import Container import random +import threading from typing import Any from typing import TypeVar @@ -12,14 +13,17 @@ # syft absolute from syft.serde.serializable import serializable -from syft.service.response import SyftSuccess -from syft.store.dict_document_store import DictDocumentStore -from syft.store.document_store import BaseUIDStoreStash -from syft.store.document_store import PartitionKey -from syft.store.document_store import PartitionSettings -from syft.store.document_store import QueryKey -from syft.store.document_store import QueryKeys -from syft.store.document_store import UIDPartitionKey +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey +from syft.service.queue.queue_stash import Status +from syft.service.request.request_service import RequestService +from syft.store.db.sqlite import SQLiteDBConfig +from syft.store.db.sqlite import SQLiteDBManager +from syft.store.db.stash import ObjectStash +from syft.store.document_store_errors import NotFoundException +from 
syft.store.document_store_errors import StashException +from syft.store.linked_obj import LinkedObject +from syft.types.errors import SyftException from syft.types.syft_object import SyftObject from syft.types.uid import UID @@ -27,37 +31,25 @@ @serializable() class MockObject(SyftObject): __canonical_name__ = "base_stash_mock_object_type" + __version__ = 1 id: UID name: str desc: str importance: int value: int + linked_obj: LinkedObject | None = None + status: Status = Status.CREATED __attr_searchable__ = ["id", "name", "desc", "importance"] __attr_unique__ = ["id", "name"] -NamePartitionKey = PartitionKey(key="name", type_=str) -DescPartitionKey = PartitionKey(key="desc", type_=str) -ImportancePartitionKey = PartitionKey(key="importance", type_=int) - - -class MockStash(BaseUIDStoreStash): - object_type = MockObject - settings = PartitionSettings( - name=MockObject.__canonical_name__, object_type=MockObject - ) +class MockStash(ObjectStash[MockObject]): + pass def get_object_values(obj: SyftObject) -> tuple[Any]: - return tuple(obj.dict().values()) - - -def add_mock_object(root_verify_key, stash: MockStash, obj: MockObject) -> MockObject: - result = stash.set(root_verify_key, obj) - assert result.is_ok() - - return result.ok() + return tuple(obj.to_dict().values()) T = TypeVar("T") @@ -75,9 +67,18 @@ def create_unique( return x +@pytest.fixture +def root_verify_key() -> SyftVerifyKey: + return SyftSigningKey.generate().verify_key + + @pytest.fixture def base_stash(root_verify_key) -> MockStash: - yield MockStash(store=DictDocumentStore(UID(), root_verify_key)) + config = SQLiteDBConfig() + db_manager = SQLiteDBManager(config, UID(), root_verify_key) + mock_stash = MockStash(store=db_manager) + db_manager.init_tables() + yield mock_stash def random_sentence(faker: Faker) -> str: @@ -116,8 +117,7 @@ def mock_objects(faker: Faker) -> list[MockObject]: def test_basestash_set( root_verify_key, base_stash: MockStash, mock_object: MockObject ) -> None: - result = add_mock_object(root_verify_key, base_stash, mock_object) - + result = base_stash.set(root_verify_key, mock_object).unwrap() assert result is not None assert result == mock_object @@ -129,11 +129,10 @@ def test_basestash_set_duplicate( MockObject(**kwargs) for kwargs in multiple_object_kwargs(faker, n=2, same=True) ) - result = base_stash.set(root_verify_key, original) - assert result.is_ok() + base_stash.set(root_verify_key, original).unwrap() - result = base_stash.set(root_verify_key, duplicate) - assert result.is_err() + with pytest.raises(StashException): + base_stash.set(root_verify_key, duplicate).unwrap() def test_basestash_set_duplicate_unique_key( @@ -154,28 +153,19 @@ def test_basestash_set_duplicate_unique_key( def test_basestash_delete( root_verify_key, base_stash: MockStash, mock_object: MockObject ) -> None: - add_mock_object(root_verify_key, base_stash, mock_object) - - result = base_stash.delete( - root_verify_key, UIDPartitionKey.with_obj(mock_object.id) - ) - assert result.is_ok() - - assert len(base_stash.get_all(root_verify_key).ok()) == 0 + base_stash.set(root_verify_key, mock_object).unwrap() + base_stash.delete_by_uid(root_verify_key, mock_object.id).unwrap() + assert len(base_stash.get_all(root_verify_key).unwrap()) == 0 def test_basestash_cannot_delete_non_existent( root_verify_key, base_stash: MockStash, mock_object: MockObject ) -> None: - add_mock_object(root_verify_key, base_stash, mock_object) + result = base_stash.set(root_verify_key, mock_object).unwrap() random_uid = create_unique(UID, [mock_object.id]) 
- for result in [ - base_stash.delete(root_verify_key, UIDPartitionKey.with_obj(random_uid)), - base_stash.delete_by_uid(root_verify_key, random_uid), - ]: - result = base_stash.delete(root_verify_key, UIDPartitionKey.with_obj(UID())) - assert result.is_err() + result = base_stash.delete_by_uid(root_verify_key, random_uid) + assert result.is_err() assert ( len( @@ -190,7 +180,7 @@ def test_basestash_cannot_delete_non_existent( def test_basestash_update( root_verify_key, base_stash: MockStash, mock_object: MockObject, faker: Faker ) -> None: - add_mock_object(root_verify_key, base_stash, mock_object) + result = base_stash.set(root_verify_key, mock_object).unwrap() updated_obj = mock_object.copy() updated_obj.name = faker.name() @@ -202,10 +192,34 @@ def test_basestash_update( assert retrieved == updated_obj +def test_basestash_upsert( + root_verify_key, base_stash: MockStash, mock_object: MockObject, faker: Faker +) -> None: + base_stash.set(root_verify_key, mock_object).unwrap() + + updated_obj = mock_object.copy() + updated_obj.name = faker.name() + + retrieved = base_stash.upsert(root_verify_key, updated_obj).unwrap() + assert retrieved == updated_obj + + updated_obj.id = UID() + + with pytest.raises(StashException): + # fails because the name should be unique + base_stash.upsert(root_verify_key, updated_obj).unwrap() + + updated_obj.name = faker.name() + + retrieved = base_stash.upsert(root_verify_key, updated_obj).unwrap() + assert retrieved == updated_obj + assert len(base_stash.get_all(root_verify_key).unwrap()) == 2 + + def test_basestash_cannot_update_non_existent( root_verify_key, base_stash: MockStash, mock_object: MockObject, faker: Faker ) -> None: - add_mock_object(root_verify_key, base_stash, mock_object) + result = base_stash.set(root_verify_key, mock_object).unwrap() updated_obj = mock_object.copy() updated_obj.id = create_unique(UID, [mock_object.id]) @@ -224,10 +238,7 @@ def test_basestash_set_get_all( stored_objects = base_stash.get_all( root_verify_key, - ) - assert stored_objects.is_ok() - - stored_objects = stored_objects.ok() + ).unwrap() assert len(stored_objects) == len(mock_objects) stored_objects_values = {get_object_values(obj) for obj in stored_objects} @@ -238,31 +249,40 @@ def test_basestash_set_get_all( def test_basestash_get_by_uid( root_verify_key, base_stash: MockStash, mock_object: MockObject ) -> None: - add_mock_object(root_verify_key, base_stash, mock_object) + result = base_stash.set(root_verify_key, mock_object).unwrap() - result = base_stash.get_by_uid(root_verify_key, mock_object.id) - assert result.is_ok() - assert result.ok() == mock_object + result = base_stash.get_by_uid(root_verify_key, mock_object.id).unwrap() + assert result == mock_object random_uid = create_unique(UID, [mock_object.id]) - result = base_stash.get_by_uid(root_verify_key, random_uid) - assert result.is_ok() - assert result.ok() is None + bad_uid = base_stash.get_by_uid(root_verify_key, random_uid) + assert bad_uid.is_err() + + # FIX: Partition should return Ok(None), now it's not consistent. 
We can get NotFoundException or StashException + assert ( + isinstance(bad_uid.err(), SyftException) + or isinstance(bad_uid.err(), StashException) + or isinstance(bad_uid.err(), NotFoundException) + ) def test_basestash_delete_by_uid( root_verify_key, base_stash: MockStash, mock_object: MockObject ) -> None: - add_mock_object(root_verify_key, base_stash, mock_object) + result = base_stash.set(root_verify_key, mock_object).unwrap() - result = base_stash.delete_by_uid(root_verify_key, mock_object.id) - assert result.is_ok() - response = result.ok() - assert isinstance(response, SyftSuccess) + response = base_stash.delete_by_uid(root_verify_key, mock_object.id).unwrap() + assert isinstance(response, UID) result = base_stash.get_by_uid(root_verify_key, mock_object.id) - assert result.is_ok() - assert result.ok() is None + assert result.is_err() + + # FIX: partition None returns are inconsistent; here, we might get NotFoundException or StashException + assert ( + isinstance(result.err(), SyftException) + or isinstance(result.err(), StashException) + or isinstance(result.err(), NotFoundException) + ) def test_basestash_query_one( @@ -272,43 +292,73 @@ def test_basestash_query_one( base_stash.set(root_verify_key, obj) obj = random.choice(mock_objects) + result = base_stash.get_one( + root_verify_key, + filters={"name": obj.name}, + ).unwrap() - for result in ( - base_stash.query_one_kwargs(root_verify_key, name=obj.name), - base_stash.query_one( - root_verify_key, QueryKey.from_obj(NamePartitionKey, obj.name) - ), - ): - assert result.is_ok() - assert result.ok() == obj + assert result == obj existing_names = {obj.name for obj in mock_objects} random_name = create_unique(faker.name, existing_names) - for result in ( - base_stash.query_one_kwargs(root_verify_key, name=random_name), - base_stash.query_one( - root_verify_key, QueryKey.from_obj(NamePartitionKey, random_name) - ), - ): - assert result.is_ok() - assert result.ok() is None + with pytest.raises(NotFoundException): + result = base_stash.get_one( + root_verify_key, + filters={"name": random_name}, + ).unwrap() params = {"name": obj.name, "desc": obj.desc} - for result in [ - base_stash.query_one_kwargs(root_verify_key, **params), - base_stash.query_one(root_verify_key, QueryKeys.from_dict(params)), - ]: - assert result.is_ok() - assert result.ok() == obj + result = base_stash.get_one( + root_verify_key, + filters=params, + ).unwrap() + assert result == obj params = {"name": random_name, "desc": random_sentence(faker)} - for result in [ - base_stash.query_one_kwargs(root_verify_key, **params), - base_stash.query_one(root_verify_key, QueryKeys.from_dict(params)), - ]: - assert result.is_ok() - assert result.ok() is None + with pytest.raises(NotFoundException): + result = base_stash.get_one( + root_verify_key, + filters=params, + ).unwrap() + + +def test_basestash_query_enum( + root_verify_key, base_stash: MockStash, mock_object: MockObject +) -> None: + base_stash.set(root_verify_key, mock_object).unwrap() + result = base_stash.get_one( + root_verify_key, + filters={"status": Status.CREATED}, + ).unwrap() + + assert result == mock_object + with pytest.raises(NotFoundException): + result = base_stash.get_one( + root_verify_key, + filters={"status": Status.PROCESSING}, + ).unwrap() + + +def test_basestash_query_linked_obj( + root_verify_key, base_stash: MockStash, mock_object: MockObject +) -> None: + mock_object.linked_obj = LinkedObject( + object_type=MockObject, + object_uid=UID(), + id=UID(), + tags=["tag1", "tag2"], + server_uid=UID(), + 
service_type=RequestService, + ) + base_stash.set(root_verify_key, mock_object).unwrap() + + result = base_stash.get_one( + root_verify_key, + filters={"linked_obj.id": mock_object.linked_obj.id}, + ).unwrap() + + assert result == mock_object def test_basestash_query_all( @@ -323,46 +373,30 @@ def test_basestash_query_all( for obj in all_objects: base_stash.set(root_verify_key, obj) - for result in [ - base_stash.query_all_kwargs(root_verify_key, desc=desc), - base_stash.query_all( - root_verify_key, QueryKey.from_obj(DescPartitionKey, desc) - ), - ]: - assert result.is_ok() - objects = result.ok() - assert len(objects) == n_same - assert all(obj.desc == desc for obj in objects) - original_object_values = {get_object_values(obj) for obj in similar_objects} - retrived_objects_values = {get_object_values(obj) for obj in objects} - assert original_object_values == retrived_objects_values + objects = base_stash.get_all(root_verify_key, filters={"desc": desc}).unwrap() + assert len(objects) == n_same + assert all(obj.desc == desc for obj in objects) + original_object_values = {get_object_values(obj) for obj in similar_objects} + retrived_objects_values = {get_object_values(obj) for obj in objects} + assert original_object_values == retrived_objects_values random_desc = create_unique( random_sentence, [obj.desc for obj in all_objects], faker ) - for result in [ - base_stash.query_all_kwargs(root_verify_key, desc=random_desc), - base_stash.query_all( - root_verify_key, QueryKey.from_obj(DescPartitionKey, random_desc) - ), - ]: - assert result.is_ok() - objects = result.ok() - assert len(objects) == 0 + + objects = base_stash.get_all( + root_verify_key, filters={"desc": random_desc} + ).unwrap() + assert len(objects) == 0 obj = random.choice(similar_objects) params = {"name": obj.name, "desc": obj.desc} - for result in [ - base_stash.query_all_kwargs(root_verify_key, **params), - base_stash.query_all(root_verify_key, QueryKeys.from_dict(params)), - ]: - assert result.is_ok() - objects = result.ok() - assert len(objects) == sum( - 1 for obj_ in all_objects if (obj_.name, obj_.desc) == (obj.name, obj.desc) - ) - assert objects[0] == obj + objects = base_stash.get_all(root_verify_key, filters=params).unwrap() + assert len(objects) == sum( + 1 for obj_ in all_objects if (obj_.name, obj_.desc) == (obj.name, obj.desc) + ) + assert objects[0] == obj def test_basestash_query_all_kwargs_multiple_params( @@ -381,66 +415,35 @@ def test_basestash_query_all_kwargs_multiple_params( base_stash.set(root_verify_key, obj) params = {"importance": importance, "desc": desc} - for result in [ - base_stash.query_all_kwargs(root_verify_key, **params), - base_stash.query_all(root_verify_key, QueryKeys.from_dict(params)), - ]: - assert result.is_ok() - objects = result.ok() - assert len(objects) == n_same - assert all(obj.desc == desc for obj in objects) - original_object_values = {get_object_values(obj) for obj in similar_objects} - retrived_objects_values = {get_object_values(obj) for obj in objects} - assert original_object_values == retrived_objects_values + objects = base_stash.get_all(root_verify_key, filters=params).unwrap() + assert len(objects) == n_same + assert all(obj.desc == desc for obj in objects) + original_object_values = {get_object_values(obj) for obj in similar_objects} + retrived_objects_values = {get_object_values(obj) for obj in objects} + assert original_object_values == retrived_objects_values params = { "name": create_unique(faker.name, [obj.name for obj in all_objects]), "desc": 
random_sentence(faker), } - for result in [ - base_stash.query_all_kwargs(root_verify_key, **params), - base_stash.query_all(root_verify_key, QueryKeys.from_dict(params)), - ]: - assert result.is_ok() - objects = result.ok() - assert len(objects) == 0 + objects = base_stash.get_all(root_verify_key, filters=params).unwrap() + assert len(objects) == 0 obj = random.choice(similar_objects) params = {"id": obj.id, "name": obj.name, "desc": obj.desc} - for result in [ - base_stash.query_all_kwargs(root_verify_key, **params), - base_stash.query_all(root_verify_key, QueryKeys.from_dict(params)), - ]: - assert result.is_ok() - objects = result.ok() - assert len(objects) == 1 - assert objects[0] == obj + objects = base_stash.get_all(root_verify_key, filters=params).unwrap() + assert len(objects) == 1 + assert objects[0] == obj -def test_basestash_cannot_query_non_searchable( - root_verify_key, base_stash: MockStash, mock_objects: list[MockObject] +def test_stash_thread_support( + root_verify_key, base_stash: MockStash, mock_object: MockObject ) -> None: - for obj in mock_objects: - base_stash.set(root_verify_key, obj) - - obj = random.choice(mock_objects) + assert not base_stash._data + t = threading.Thread(target=base_stash.set, args=(root_verify_key, mock_object)) + t.start() + t.join(timeout=5) - assert base_stash.query_one_kwargs(root_verify_key, value=10).is_err() - assert base_stash.query_all_kwargs(root_verify_key, value=10).is_err() - assert base_stash.query_one_kwargs( - root_verify_key, value=10, name=obj.name - ).is_err() - assert base_stash.query_all_kwargs( - root_verify_key, value=10, name=obj.name - ).is_err() - - ValuePartitionKey = PartitionKey(key="value", type_=int) - qk = ValuePartitionKey.with_obj(10) - - assert base_stash.query_one(root_verify_key, qk).is_err() - assert base_stash.query_all(root_verify_key, qk).is_err() - assert base_stash.query_all(root_verify_key, QueryKeys(qks=[qk])).is_err() - assert base_stash.query_all( - root_verify_key, QueryKeys(qks=[qk, UIDPartitionKey.with_obj(obj.id)]) - ).is_err() + result = base_stash.get_by_uid(root_verify_key, mock_object.id).unwrap() + assert result == mock_object diff --git a/packages/syft/tests/syft/stores/dict_document_store_test.py b/packages/syft/tests/syft/stores/dict_document_store_test.py deleted file mode 100644 index e04414d666c..00000000000 --- a/packages/syft/tests/syft/stores/dict_document_store_test.py +++ /dev/null @@ -1,358 +0,0 @@ -# stdlib -from threading import Thread - -# syft absolute -from syft.store.dict_document_store import DictStorePartition -from syft.store.document_store import QueryKeys -from syft.types.uid import UID - -# relative -from .store_mocks_test import MockObjectType -from .store_mocks_test import MockSyftObject - - -def test_dict_store_partition_sanity(dict_store_partition: DictStorePartition) -> None: - res = dict_store_partition.init_store() - assert res.is_ok() - - assert hasattr(dict_store_partition, "data") - assert hasattr(dict_store_partition, "unique_keys") - assert hasattr(dict_store_partition, "searchable_keys") - - -def test_dict_store_partition_set( - root_verify_key, dict_store_partition: DictStorePartition -) -> None: - res = dict_store_partition.init_store() - assert res.is_ok() - - obj = MockSyftObject(id=UID(), data=1) - res = dict_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - - assert res.is_ok() - assert res.ok() == obj - assert ( - len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - res = 
dict_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_err() - assert ( - len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - res = dict_store_partition.set(root_verify_key, obj, ignore_duplicates=True) - assert res.is_ok() - assert ( - len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - obj2 = MockSyftObject(data=2) - res = dict_store_partition.set(root_verify_key, obj2, ignore_duplicates=False) - assert res.is_ok() - assert res.ok() == obj2 - assert ( - len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - == 2 - ) - - repeats = 5 - for idx in range(repeats): - obj = MockSyftObject(data=idx) - res = dict_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_ok() - assert ( - len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - == 3 + idx - ) - - -def test_dict_store_partition_delete( - root_verify_key, dict_store_partition: DictStorePartition -) -> None: - res = dict_store_partition.init_store() - assert res.is_ok() - - objs = [] - repeats = 5 - for v in range(repeats): - obj = MockSyftObject(data=v) - dict_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - objs.append(obj) - - assert len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) == len(objs) - - # random object - obj = MockSyftObject(data="bogus") - key = dict_store_partition.settings.store_key.with_obj(obj) - res = dict_store_partition.delete(root_verify_key, key) - assert res.is_err() - assert len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) == len(objs) - - # cleanup store - for idx, v in enumerate(objs): - key = dict_store_partition.settings.store_key.with_obj(v) - res = dict_store_partition.delete(root_verify_key, key) - assert res.is_ok() - assert ( - len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - == len(objs) - idx - 1 - ) - - res = dict_store_partition.delete(root_verify_key, key) - assert res.is_err() - assert ( - len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - == len(objs) - idx - 1 - ) - - assert ( - len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - == 0 - ) - - -def test_dict_store_partition_update( - root_verify_key, dict_store_partition: DictStorePartition -) -> None: - dict_store_partition.init_store() - - # add item - obj = MockSyftObject(data=1) - dict_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert len(dict_store_partition.all(root_verify_key).ok()) == 1 - - # fail to update missing keys - rand_obj = MockSyftObject(data="bogus") - key = dict_store_partition.settings.store_key.with_obj(rand_obj) - res = dict_store_partition.update(root_verify_key, key, obj) - assert res.is_err() - - # update the key multiple times - repeats = 5 - for v in range(repeats): - key = dict_store_partition.settings.store_key.with_obj(obj) - obj_new = MockSyftObject(data=v) - - res = dict_store_partition.update(root_verify_key, key, obj_new) - assert res.is_ok() - - # The ID should stay the same on update, unly the values are updated. 
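Illustrative aside, not part of the changeset: the base_stash_test.py hunks earlier in this diff replace the old query_all / query_all_kwargs + QueryKeys calls, which returned Result values checked with .is_ok() / .ok(), with a plain filter-dict API consumed through .unwrap(). A minimal sketch of the new pattern, reusing the base_stash and root_verify_key fixture names from those tests; the filter value is invented for illustration.

def test_filter_api_sketch(root_verify_key, base_stash) -> None:
    # Filters are plain dicts; nested attributes use dotted keys, as in the
    # "linked_obj.id" filter added above.
    objects = base_stash.get_all(root_verify_key, filters={"desc": "example"}).unwrap()
    assert all(obj.desc == "example" for obj in objects)

    # Point lookups use get_one()/get_by_uid(); a failed lookup raises
    # SyftException once unwrapped, which the queue tests below check with
    # pytest.raises instead of inspecting an Err result.
    for obj in objects:
        assert base_stash.get_by_uid(root_verify_key, obj.id).unwrap() == obj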
- assert ( - len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - assert ( - dict_store_partition.all( - root_verify_key, - ) - .ok()[0] - .id - == obj.id - ) - assert ( - dict_store_partition.all( - root_verify_key, - ) - .ok()[0] - .id - != obj_new.id - ) - assert ( - dict_store_partition.all( - root_verify_key, - ) - .ok()[0] - .data - == v - ) - - stored = dict_store_partition.get_all_from_store( - root_verify_key, QueryKeys(qks=[key]) - ) - assert stored.ok()[0].data == v - - -def test_dict_store_partition_set_multithreaded( - root_verify_key, - dict_store_partition: DictStorePartition, -) -> None: - thread_cnt = 3 - repeats = 5 - - dict_store_partition.init_store() - - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - for idx in range(repeats): - obj = MockObjectType(data=idx) - - for _ in range(10): - res = dict_store_partition.set( - root_verify_key, obj, ignore_duplicates=False - ) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - stored_cnt = len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - assert stored_cnt == repeats * thread_cnt - - -def test_dict_store_partition_update_multithreaded( - root_verify_key, - dict_store_partition: DictStorePartition, -) -> None: - thread_cnt = 3 - repeats = 5 - dict_store_partition.init_store() - - obj = MockSyftObject(data=0) - key = dict_store_partition.settings.store_key.with_obj(obj) - dict_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - for repeat in range(repeats): - obj = MockSyftObject(data=repeat) - - for _ in range(10): - res = dict_store_partition.update(root_verify_key, key, obj) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - - -def test_dict_store_partition_set_delete_multithreaded( - root_verify_key, - dict_store_partition: DictStorePartition, -) -> None: - dict_store_partition.init_store() - - thread_cnt = 3 - repeats = 5 - - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - for idx in range(repeats): - obj = MockSyftObject(data=idx) - - for _ in range(10): - res = dict_store_partition.set( - root_verify_key, obj, ignore_duplicates=False - ) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - key = dict_store_partition.settings.store_key.with_obj(obj) - - res = dict_store_partition.delete(root_verify_key, key) - if res.is_err(): - execution_err = res - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - stored_cnt = len( - dict_store_partition.all( - root_verify_key, - ).ok() - ) - assert stored_cnt == 0 diff --git a/packages/syft/tests/syft/stores/kv_document_store_test.py b/packages/syft/tests/syft/stores/kv_document_store_test.py deleted file mode 100644 index e2e6e3bb2a9..00000000000 --- 
a/packages/syft/tests/syft/stores/kv_document_store_test.py +++ /dev/null @@ -1,319 +0,0 @@ -# stdlib -from copy import copy -from threading import Thread - -# third party -import pytest - -# syft absolute -from syft.store.document_store import PartitionSettings -from syft.store.document_store import QueryKeys -from syft.store.kv_document_store import KeyValueStorePartition -from syft.types.uid import UID - -# relative -from .store_mocks_test import MockObjectType -from .store_mocks_test import MockStoreConfig -from .store_mocks_test import MockSyftObject - - -@pytest.fixture -def kv_store_partition(worker): - store_config = MockStoreConfig() - settings = PartitionSettings(name="test", object_type=MockObjectType) - store = KeyValueStorePartition( - node_uid=worker.id, - root_verify_key=worker.root_client.credentials.verify_key, - settings=settings, - store_config=store_config, - ) - - res = store.init_store() - assert res.is_ok() - - yield store - - -def test_kv_store_partition_sanity(kv_store_partition: KeyValueStorePartition) -> None: - assert hasattr(kv_store_partition, "data") - assert hasattr(kv_store_partition, "unique_keys") - assert hasattr(kv_store_partition, "searchable_keys") - - -def test_kv_store_partition_init_failed(root_verify_key) -> None: - store_config = MockStoreConfig(is_crashed=True) - settings = PartitionSettings(name="test", object_type=MockObjectType) - - kv_store_partition = KeyValueStorePartition( - UID(), root_verify_key, settings=settings, store_config=store_config - ) - - res = kv_store_partition.init_store() - assert res.is_err() - - -def test_kv_store_partition_set( - root_verify_key, kv_store_partition: KeyValueStorePartition -) -> None: - obj = MockSyftObject(data=1) - res = kv_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - - assert res.is_ok() - assert res.ok() == obj - assert len(kv_store_partition.all(root_verify_key).ok()) == 1 - - res = kv_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_err() - assert len(kv_store_partition.all(root_verify_key).ok()) == 1 - - res = kv_store_partition.set(root_verify_key, obj, ignore_duplicates=True) - assert res.is_ok() - assert len(kv_store_partition.all(root_verify_key).ok()) == 1 - - obj2 = MockSyftObject(data=2) - res = kv_store_partition.set(root_verify_key, obj2, ignore_duplicates=False) - assert res.is_ok() - assert res.ok() == obj2 - assert len(kv_store_partition.all(root_verify_key).ok()) == 2 - - -def test_kv_store_partition_set_backend_fail(root_verify_key) -> None: - store_config = MockStoreConfig(is_crashed=True) - settings = PartitionSettings(name="test", object_type=MockObjectType) - - kv_store_partition = KeyValueStorePartition( - UID(), root_verify_key, settings=settings, store_config=store_config - ) - kv_store_partition.init_store() - - obj = MockSyftObject(data=1) - - res = kv_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_err() - - -def test_kv_store_partition_delete( - root_verify_key, worker, kv_store_partition: KeyValueStorePartition -) -> None: - objs = [] - for v in range(10): - obj = MockSyftObject(data=v) - kv_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - objs.append(obj) - - assert len(kv_store_partition.all(root_verify_key).ok()) == len(objs) - - # can't delete a random object since it was not added - obj = MockSyftObject(data="bogus") - key = kv_store_partition.settings.store_key.with_obj(obj) - res = kv_store_partition.delete(root_verify_key, key) - assert res.is_err() - 
assert len(kv_store_partition.all(root_verify_key).ok()) == len(objs) - - # cleanup store - for idx, v in enumerate(objs): - key = kv_store_partition.settings.store_key.with_obj(v) - res = kv_store_partition.delete(root_verify_key, key) - assert res.is_ok() - assert len(kv_store_partition.all(root_verify_key).ok()) == len(objs) - idx - 1 - # check that the corresponding permissions were also deleted - assert ( - len(kv_store_partition.data) - == len(kv_store_partition.permissions) - == len(kv_store_partition.storage_permissions) - ) - - res = kv_store_partition.delete(root_verify_key, key) - assert res.is_err() - assert len(kv_store_partition.all(root_verify_key).ok()) == len(objs) - idx - 1 - assert ( - len(kv_store_partition.data) - == len(kv_store_partition.permissions) - == len(kv_store_partition.storage_permissions) - ) - - assert len(kv_store_partition.all(root_verify_key).ok()) == 0 - - -def test_kv_store_partition_delete_and_recreate( - root_verify_key, worker, kv_store_partition: KeyValueStorePartition -) -> None: - obj = MockSyftObject(data="bogus") - repeats = 5 - # running it multiple items ensures we can recreate it again once its delete from store. - for _ in range(repeats): - # Add an object - kv_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - - assert len(kv_store_partition.all(root_verify_key).ok()) == 1 - - # Delete object - key = kv_store_partition.settings.store_key.with_obj(obj) - res = kv_store_partition.delete(root_verify_key, key) - - assert res.is_ok() - assert len(kv_store_partition.all(root_verify_key).ok()) == 0 - assert len(kv_store_partition.data) == len(kv_store_partition.permissions) - - assert len(kv_store_partition.all(root_verify_key).ok()) == 0 - - -def test_kv_store_partition_update( - root_verify_key, kv_store_partition: KeyValueStorePartition -) -> None: - # add item - obj = MockSyftObject(data=1) - kv_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert len(kv_store_partition.all(root_verify_key).ok()) == 1 - - # fail to update missing keys - rand_obj = MockSyftObject(data="bogus") - key = kv_store_partition.settings.store_key.with_obj(rand_obj) - res = kv_store_partition.update(root_verify_key, key, obj) - assert res.is_err() - - # update the key multiple times - repeats = 5 - for v in range(repeats): - key = kv_store_partition.settings.store_key.with_obj(obj) - obj_new = MockSyftObject(data=v) - - res = kv_store_partition.update(root_verify_key, key, copy(obj_new)) - assert res.is_ok() - - # The ID should stay the same on update, unly the values are updated. 
- assert len(kv_store_partition.all(root_verify_key).ok()) == 1 - assert kv_store_partition.all(root_verify_key).ok()[0].id == obj.id - assert kv_store_partition.all(root_verify_key).ok()[0].id != obj_new.id - assert kv_store_partition.all(root_verify_key).ok()[0].data == v - - stored = kv_store_partition.get_all_from_store( - root_verify_key, QueryKeys(qks=[key]) - ) - assert stored.ok()[0].data == v - - -def test_kv_store_partition_set_multithreaded( - root_verify_key, - kv_store_partition: KeyValueStorePartition, -) -> None: - thread_cnt = 3 - repeats = 5 - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - for idx in range(repeats): - obj = MockSyftObject(data=idx) - - for _ in range(10): - res = kv_store_partition.set( - root_verify_key, obj, ignore_duplicates=False - ) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - stored = kv_store_partition.all(root_verify_key) - - assert execution_err is None - stored_cnt = len(stored.ok()) - assert stored_cnt == thread_cnt * repeats - - -def test_kv_store_partition_update_multithreaded( - root_verify_key, - kv_store_partition: KeyValueStorePartition, -) -> None: - thread_cnt = 3 - repeats = 5 - - obj = MockSyftObject(data=0) - key = kv_store_partition.settings.store_key.with_obj(obj) - kv_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - for repeat in range(repeats): - obj = MockSyftObject(data=repeat) - - for _ in range(10): - res = kv_store_partition.update(root_verify_key, key, obj) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - - -def test_kv_store_partition_set_delete_multithreaded( - root_verify_key, - kv_store_partition: KeyValueStorePartition, -) -> None: - thread_cnt = 3 - repeats = 5 - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - for idx in range(repeats): - obj = MockSyftObject(data=idx) - - for _ in range(10): - res = kv_store_partition.set( - root_verify_key, obj, ignore_duplicates=False - ) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - key = kv_store_partition.settings.store_key.with_obj(obj) - - res = kv_store_partition.delete(root_verify_key, key) - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - stored_cnt = len(kv_store_partition.all(root_verify_key).ok()) - assert stored_cnt == 0 diff --git a/packages/syft/tests/syft/stores/mongo_document_store_test.py b/packages/syft/tests/syft/stores/mongo_document_store_test.py deleted file mode 100644 index edf6f17e27b..00000000000 --- a/packages/syft/tests/syft/stores/mongo_document_store_test.py +++ /dev/null @@ -1,1001 +0,0 @@ -# stdlib -from secrets import token_hex -from threading import Thread - -# third party -from pymongo.collection import Collection as MongoCollection -import pytest 
-from result import Err - -# syft absolute -from syft.node.credentials import SyftVerifyKey -from syft.service.action.action_permissions import ActionObjectPermission -from syft.service.action.action_permissions import ActionPermission -from syft.service.action.action_store import ActionObjectEXECUTE -from syft.service.action.action_store import ActionObjectOWNER -from syft.service.action.action_store import ActionObjectREAD -from syft.service.action.action_store import ActionObjectWRITE -from syft.store.document_store import PartitionSettings -from syft.store.document_store import QueryKey -from syft.store.document_store import QueryKeys -from syft.store.mongo_client import MongoStoreClientConfig -from syft.store.mongo_document_store import MongoStoreConfig -from syft.store.mongo_document_store import MongoStorePartition -from syft.types.uid import UID - -# relative -from .store_constants_test import TEST_VERIFY_KEY_STRING_HACKER -from .store_fixtures_test import mongo_store_partition_fn -from .store_mocks_test import MockObjectType -from .store_mocks_test import MockSyftObject - -PERMISSIONS = [ - ActionObjectOWNER, - ActionObjectREAD, - ActionObjectWRITE, - ActionObjectEXECUTE, -] - - -def test_mongo_store_partition_sanity( - mongo_store_partition: MongoStorePartition, -) -> None: - res = mongo_store_partition.init_store() - assert res.is_ok() - - assert hasattr(mongo_store_partition, "_collection") - assert hasattr(mongo_store_partition, "_permissions") - - -@pytest.mark.skip(reason="Test gets stuck at store.init_store()") -def test_mongo_store_partition_init_failed(root_verify_key) -> None: - # won't connect - mongo_config = MongoStoreClientConfig( - connectTimeoutMS=1, - timeoutMS=1, - ) - - store_config = MongoStoreConfig(client_config=mongo_config) - settings = PartitionSettings(name="test", object_type=MockObjectType) - - store = MongoStorePartition( - UID(), root_verify_key, settings=settings, store_config=store_config - ) - - res = store.init_store() - assert res.is_err() - - -def test_mongo_store_partition_set( - root_verify_key, mongo_store_partition: MongoStorePartition -) -> None: - res = mongo_store_partition.init_store() - assert res.is_ok() - - obj = MockSyftObject(data=1) - - res = mongo_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - - assert res.is_ok() - assert res.ok() == obj - assert ( - len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - res = mongo_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_err() - assert ( - len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - res = mongo_store_partition.set(root_verify_key, obj, ignore_duplicates=True) - assert res.is_ok() - assert ( - len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - obj2 = MockSyftObject(data=2) - res = mongo_store_partition.set(root_verify_key, obj2, ignore_duplicates=False) - assert res.is_ok() - assert res.ok() == obj2 - assert ( - len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - == 2 - ) - - repeats = 5 - for idx in range(repeats): - obj = MockSyftObject(data=idx) - res = mongo_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_ok() - assert ( - len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - == 3 + idx - ) - - -def test_mongo_store_partition_delete( - root_verify_key, - mongo_store_partition: MongoStorePartition, -) -> None: - res = mongo_store_partition.init_store() - assert res.is_ok() - 
repeats = 5 - - objs = [] - for v in range(repeats): - obj = MockSyftObject(data=v) - mongo_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - objs.append(obj) - - assert len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) == len(objs) - - # random object - obj = MockSyftObject(data="bogus") - key = mongo_store_partition.settings.store_key.with_obj(obj) - res = mongo_store_partition.delete(root_verify_key, key) - assert res.is_err() - assert len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) == len(objs) - - # cleanup store - for idx, v in enumerate(objs): - key = mongo_store_partition.settings.store_key.with_obj(v) - res = mongo_store_partition.delete(root_verify_key, key) - assert res.is_ok() - assert ( - len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - == len(objs) - idx - 1 - ) - - res = mongo_store_partition.delete(root_verify_key, key) - assert res.is_err() - assert ( - len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - == len(objs) - idx - 1 - ) - - assert ( - len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - == 0 - ) - - -def test_mongo_store_partition_update( - root_verify_key, - mongo_store_partition: MongoStorePartition, -) -> None: - mongo_store_partition.init_store() - - # add item - obj = MockSyftObject(data=1) - mongo_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert ( - len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - # fail to update missing keys - rand_obj = MockSyftObject(data="bogus") - key = mongo_store_partition.settings.store_key.with_obj(rand_obj) - res = mongo_store_partition.update(root_verify_key, key, obj) - assert res.is_err() - - # update the key multiple times - repeats = 5 - for v in range(repeats): - key = mongo_store_partition.settings.store_key.with_obj(obj) - obj_new = MockSyftObject(data=v) - - res = mongo_store_partition.update(root_verify_key, key, obj_new) - assert res.is_ok() - - # The ID should stay the same on update, only the values are updated. 
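Illustrative aside, not part of the changeset: the hand-rolled Thread plus retry-loop helpers that fill the deleted store tests here are superseded, in the surviving queue_stash_test.py further down, by concurrent.futures.ThreadPoolExecutor with a single pass over the mapped results. A self-contained toy sketch of that pattern, with a lock-guarded dict standing in for the stash:

# stdlib
from concurrent.futures import ThreadPoolExecutor
from threading import Lock
from uuid import uuid4

store: dict[str, int] = {}
lock = Lock()


def set_item(idx: int) -> str:
    # Stand-in for queue.set(root_verify_key, mock_queue_object()).
    key = uuid4().hex
    with lock:
        store[key] = idx
    return key


items_to_create = 100
with ThreadPoolExecutor(max_workers=3) as executor:
    keys = list(executor.map(set_item, range(items_to_create)))

# Same assertion shape as the new queue tests: every submitted task produced
# an entry and nothing was lost to a race.
assert len(store) == items_to_create
assert set(keys) == set(store)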
- assert ( - len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - assert ( - mongo_store_partition.all( - root_verify_key, - ) - .ok()[0] - .id - == obj.id - ) - assert ( - mongo_store_partition.all( - root_verify_key, - ) - .ok()[0] - .id - != obj_new.id - ) - assert ( - mongo_store_partition.all( - root_verify_key, - ) - .ok()[0] - .data - == v - ) - - stored = mongo_store_partition.get_all_from_store( - root_verify_key, QueryKeys(qks=[key]) - ) - assert stored.ok()[0].data == v - - -def test_mongo_store_partition_set_threading(root_verify_key, mongo_client) -> None: - thread_cnt = 3 - repeats = 5 - - execution_err = None - mongo_db_name = token_hex(8) - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - - mongo_store_partition = mongo_store_partition_fn( - mongo_client, - root_verify_key, - mongo_db_name=mongo_db_name, - ) - for idx in range(repeats): - obj = MockObjectType(data=idx) - - for _ in range(10): - res = mongo_store_partition.set( - root_verify_key, obj, ignore_duplicates=False - ) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok(), res - - return execution_err - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - - mongo_store_partition = mongo_store_partition_fn( - mongo_client, - root_verify_key, - mongo_db_name=mongo_db_name, - ) - stored_cnt = len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - assert stored_cnt == thread_cnt * repeats - - -# @pytest.mark.skip( -# reason="PicklingError: Could not pickle the task to send it to the workers." -# ) -# def test_mongo_store_partition_set_joblib( -# root_verify_key, -# mongo_client, -# ) -> None: -# thread_cnt = 3 -# repeats = 5 -# mongo_db_name = token_hex(8) - -# def _kv_cbk(tid: int) -> None: -# for idx in range(repeats): -# mongo_store_partition = mongo_store_partition_fn( -# mongo_client, -# root_verify_key, -# mongo_db_name=mongo_db_name, -# ) -# obj = MockObjectType(data=idx) - -# for _ in range(10): -# res = mongo_store_partition.set( -# root_verify_key, obj, ignore_duplicates=False -# ) -# if res.is_ok(): -# break - -# if res.is_err(): -# return res - -# return None - -# errs = Parallel(n_jobs=thread_cnt)( -# delayed(_kv_cbk)(idx) for idx in range(thread_cnt) -# ) - -# for execution_err in errs: -# assert execution_err is None - -# mongo_store_partition = mongo_store_partition_fn( -# mongo_client, -# root_verify_key, -# mongo_db_name=mongo_db_name, -# ) -# stored_cnt = len( -# mongo_store_partition.all( -# root_verify_key, -# ).ok() -# ) -# assert stored_cnt == thread_cnt * repeats - - -def test_mongo_store_partition_update_threading( - root_verify_key, - mongo_client, -) -> None: - thread_cnt = 3 - repeats = 5 - - mongo_db_name = token_hex(8) - mongo_store_partition = mongo_store_partition_fn( - mongo_client, - root_verify_key, - mongo_db_name=mongo_db_name, - ) - - obj = MockSyftObject(data=0) - key = mongo_store_partition.settings.store_key.with_obj(obj) - mongo_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - - mongo_store_partition_local = mongo_store_partition_fn( - mongo_client, - root_verify_key, - mongo_db_name=mongo_db_name, - ) - for repeat in range(repeats): - obj = MockSyftObject(data=repeat) - - for _ in range(10): - res = 
mongo_store_partition_local.update(root_verify_key, key, obj) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok(), res - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - - -# @pytest.mark.skip( -# reason="PicklingError: Could not pickle the task to send it to the workers." -# ) -# def test_mongo_store_partition_update_joblib(root_verify_key, mongo_client) -> None: -# thread_cnt = 3 -# repeats = 5 - -# mongo_db_name = token_hex(8) - -# mongo_store_partition = mongo_store_partition_fn( -# mongo_client, -# root_verify_key, -# mongo_db_name=mongo_db_name, -# ) -# obj = MockSyftObject(data=0) -# key = mongo_store_partition.settings.store_key.with_obj(obj) -# mongo_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - -# def _kv_cbk(tid: int) -> None: -# mongo_store_partition_local = mongo_store_partition_fn( -# mongo_client, -# root_verify_key, -# mongo_db_name=mongo_db_name, -# ) -# for repeat in range(repeats): -# obj = MockSyftObject(data=repeat) - -# for _ in range(10): -# res = mongo_store_partition_local.update(root_verify_key, key, obj) -# if res.is_ok(): -# break - -# if res.is_err(): -# return res -# return None - -# errs = Parallel(n_jobs=thread_cnt)( -# delayed(_kv_cbk)(idx) for idx in range(thread_cnt) -# ) - -# for execution_err in errs: -# assert execution_err is None - - -def test_mongo_store_partition_set_delete_threading( - root_verify_key, - mongo_client, -) -> None: - thread_cnt = 3 - repeats = 5 - execution_err = None - mongo_db_name = token_hex(8) - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - mongo_store_partition = mongo_store_partition_fn( - mongo_client, - root_verify_key, - mongo_db_name=mongo_db_name, - ) - - for idx in range(repeats): - obj = MockSyftObject(data=idx) - - for _ in range(10): - res = mongo_store_partition.set( - root_verify_key, obj, ignore_duplicates=False - ) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - key = mongo_store_partition.settings.store_key.with_obj(obj) - - res = mongo_store_partition.delete(root_verify_key, key) - if res.is_err(): - execution_err = res - assert res.is_ok(), res - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - - mongo_store_partition = mongo_store_partition_fn( - mongo_client, - root_verify_key, - mongo_db_name=mongo_db_name, - ) - stored_cnt = len( - mongo_store_partition.all( - root_verify_key, - ).ok() - ) - assert stored_cnt == 0 - - -# @pytest.mark.skip( -# reason="PicklingError: Could not pickle the task to send it to the workers." 
-# ) -# def test_mongo_store_partition_set_delete_joblib(root_verify_key, mongo_client) -> None: -# thread_cnt = 3 -# repeats = 5 -# mongo_db_name = token_hex(8) - -# def _kv_cbk(tid: int) -> None: -# mongo_store_partition = mongo_store_partition_fn( -# mongo_client, root_verify_key, mongo_db_name=mongo_db_name -# ) - -# for idx in range(repeats): -# obj = MockSyftObject(data=idx) - -# for _ in range(10): -# res = mongo_store_partition.set( -# root_verify_key, obj, ignore_duplicates=False -# ) -# if res.is_ok(): -# break - -# if res.is_err(): -# return res - -# key = mongo_store_partition.settings.store_key.with_obj(obj) - -# res = mongo_store_partition.delete(root_verify_key, key) -# if res.is_err(): -# return res -# return None - -# errs = Parallel(n_jobs=thread_cnt)( -# delayed(_kv_cbk)(idx) for idx in range(thread_cnt) -# ) -# for execution_err in errs: -# assert execution_err is None - -# mongo_store_partition = mongo_store_partition_fn( -# mongo_client, -# root_verify_key, -# mongo_db_name=mongo_db_name, -# ) -# stored_cnt = len( -# mongo_store_partition.all( -# root_verify_key, -# ).ok() -# ) -# assert stored_cnt == 0 - - -def test_mongo_store_partition_permissions_collection( - mongo_store_partition: MongoStorePartition, -) -> None: - res = mongo_store_partition.init_store() - assert res.is_ok() - - collection_permissions_status = mongo_store_partition.permissions - assert not collection_permissions_status.is_err() - collection_permissions = collection_permissions_status.ok() - assert isinstance(collection_permissions, MongoCollection) - - -def test_mongo_store_partition_add_remove_permission( - root_verify_key: SyftVerifyKey, mongo_store_partition: MongoStorePartition -) -> None: - """ - Test the add_permission and remove_permission functions of MongoStorePartition - """ - # setting up - res = mongo_store_partition.init_store() - assert res.is_ok() - permissions_collection: MongoCollection = mongo_store_partition.permissions.ok() - obj = MockSyftObject(data=1) - - # add the first permission - obj_read_permission = ActionObjectPermission( - uid=obj.id, permission=ActionPermission.READ, credentials=root_verify_key - ) - mongo_store_partition.add_permission(obj_read_permission) - find_res_1 = permissions_collection.find_one({"_id": obj_read_permission.uid}) - assert find_res_1 is not None - assert len(find_res_1["permissions"]) == 1 - assert find_res_1["permissions"] == { - obj_read_permission.permission_string, - } - - # add the second permission - obj_write_permission = ActionObjectPermission( - uid=obj.id, permission=ActionPermission.WRITE, credentials=root_verify_key - ) - mongo_store_partition.add_permission(obj_write_permission) - - find_res_2 = permissions_collection.find_one({"_id": obj.id}) - assert find_res_2 is not None - assert len(find_res_2["permissions"]) == 2 - assert find_res_2["permissions"] == { - obj_read_permission.permission_string, - obj_write_permission.permission_string, - } - - # add duplicated permission - mongo_store_partition.add_permission(obj_write_permission) - find_res_3 = permissions_collection.find_one({"_id": obj.id}) - assert len(find_res_3["permissions"]) == 2 - assert find_res_3["permissions"] == find_res_2["permissions"] - - # remove the write permission - mongo_store_partition.remove_permission(obj_write_permission) - find_res_4 = permissions_collection.find_one({"_id": obj.id}) - assert len(find_res_4["permissions"]) == 1 - assert find_res_1["permissions"] == { - obj_read_permission.permission_string, - } - - # remove a non-existent 
permission - remove_res = mongo_store_partition.remove_permission( - ActionObjectPermission( - uid=obj.id, permission=ActionPermission.OWNER, credentials=root_verify_key - ) - ) - assert isinstance(remove_res, Err) - find_res_5 = permissions_collection.find_one({"_id": obj.id}) - assert len(find_res_5["permissions"]) == 1 - assert find_res_1["permissions"] == { - obj_read_permission.permission_string, - } - - # there is only one permission object - assert permissions_collection.count_documents({}) == 1 - - # add permissions in a loop - new_permissions = [] - repeats = 5 - for idx in range(1, repeats + 1): - new_obj = MockSyftObject(data=idx) - new_obj_read_permission = ActionObjectPermission( - uid=new_obj.id, - permission=ActionPermission.READ, - credentials=root_verify_key, - ) - new_permissions.append(new_obj_read_permission) - mongo_store_partition.add_permission(new_obj_read_permission) - assert permissions_collection.count_documents({}) == 1 + idx - - # remove all the permissions added in the loop - for permission in new_permissions: - mongo_store_partition.remove_permission(permission) - - assert permissions_collection.count_documents({}) == 1 - - -def test_mongo_store_partition_add_permissions( - root_verify_key: SyftVerifyKey, - guest_verify_key: SyftVerifyKey, - mongo_store_partition: MongoStorePartition, -) -> None: - res = mongo_store_partition.init_store() - assert res.is_ok() - permissions_collection: MongoCollection = mongo_store_partition.permissions.ok() - obj = MockSyftObject(data=1) - - # add multiple permissions for the first object - permission_1 = ActionObjectPermission( - uid=obj.id, permission=ActionPermission.WRITE, credentials=root_verify_key - ) - permission_2 = ActionObjectPermission( - uid=obj.id, permission=ActionPermission.OWNER, credentials=root_verify_key - ) - permission_3 = ActionObjectPermission( - uid=obj.id, permission=ActionPermission.READ, credentials=guest_verify_key - ) - permissions: list[ActionObjectPermission] = [ - permission_1, - permission_2, - permission_3, - ] - mongo_store_partition.add_permissions(permissions) - - # check if the permissions have been added properly - assert permissions_collection.count_documents({}) == 1 - find_res = permissions_collection.find_one({"_id": obj.id}) - assert find_res is not None - assert len(find_res["permissions"]) == 3 - - # add permissions for the second object - obj_2 = MockSyftObject(data=2) - permission_4 = ActionObjectPermission( - uid=obj_2.id, permission=ActionPermission.READ, credentials=root_verify_key - ) - permission_5 = ActionObjectPermission( - uid=obj_2.id, permission=ActionPermission.WRITE, credentials=root_verify_key - ) - mongo_store_partition.add_permissions([permission_4, permission_5]) - - assert permissions_collection.count_documents({}) == 2 - find_res_2 = permissions_collection.find_one({"_id": obj_2.id}) - assert find_res_2 is not None - assert len(find_res_2["permissions"]) == 2 - - -@pytest.mark.parametrize("permission", PERMISSIONS) -def test_mongo_store_partition_has_permission( - root_verify_key: SyftVerifyKey, - guest_verify_key: SyftVerifyKey, - mongo_store_partition: MongoStorePartition, - permission: ActionObjectPermission, -) -> None: - hacker_verify_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_HACKER) - - res = mongo_store_partition.init_store() - assert res.is_ok() - - # root permission - obj = MockSyftObject(data=1) - permission_root = permission(uid=obj.id, credentials=root_verify_key) - permission_client = permission(uid=obj.id, 
credentials=guest_verify_key) - permission_hacker = permission(uid=obj.id, credentials=hacker_verify_key) - mongo_store_partition.add_permission(permission_root) - # only the root user has access to this permission - assert mongo_store_partition.has_permission(permission_root) - assert not mongo_store_partition.has_permission(permission_client) - assert not mongo_store_partition.has_permission(permission_hacker) - - # client permission for another object - obj_2 = MockSyftObject(data=2) - permission_client_2 = permission(uid=obj_2.id, credentials=guest_verify_key) - permission_root_2 = permission(uid=obj_2.id, credentials=root_verify_key) - permisson_hacker_2 = permission(uid=obj_2.id, credentials=hacker_verify_key) - mongo_store_partition.add_permission(permission_client_2) - # the root (admin) and guest client should have this permission - assert mongo_store_partition.has_permission(permission_root_2) - assert mongo_store_partition.has_permission(permission_client_2) - assert not mongo_store_partition.has_permission(permisson_hacker_2) - - # remove permissions - mongo_store_partition.remove_permission(permission_root) - assert not mongo_store_partition.has_permission(permission_root) - assert not mongo_store_partition.has_permission(permission_client) - assert not mongo_store_partition.has_permission(permission_hacker) - - mongo_store_partition.remove_permission(permission_client_2) - assert not mongo_store_partition.has_permission(permission_root_2) - assert not mongo_store_partition.has_permission(permission_client_2) - assert not mongo_store_partition.has_permission(permisson_hacker_2) - - -@pytest.mark.parametrize("permission", PERMISSIONS) -def test_mongo_store_partition_take_ownership( - root_verify_key: SyftVerifyKey, - guest_verify_key: SyftVerifyKey, - mongo_store_partition: MongoStorePartition, - permission: ActionObjectPermission, -) -> None: - res = mongo_store_partition.init_store() - assert res.is_ok() - - hacker_verify_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_HACKER) - obj = MockSyftObject(data=1) - - # the guest client takes ownership of obj - mongo_store_partition.take_ownership(uid=obj.id, credentials=guest_verify_key) - assert mongo_store_partition.has_permission( - permission(uid=obj.id, credentials=guest_verify_key) - ) - # the root client will also has the permission - assert mongo_store_partition.has_permission( - permission(uid=obj.id, credentials=root_verify_key) - ) - assert not mongo_store_partition.has_permission( - permission(uid=obj.id, credentials=hacker_verify_key) - ) - - # hacker or root try to take ownership of the obj and will fail - res = mongo_store_partition.take_ownership( - uid=obj.id, credentials=hacker_verify_key - ) - res_2 = mongo_store_partition.take_ownership( - uid=obj.id, credentials=root_verify_key - ) - assert res.is_err() - assert res_2.is_err() - assert res.value == res_2.value == f"UID: {obj.id} already owned." 
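Illustrative aside, not part of the changeset: the Mongo-specific permission tests deleted here are succeeded by the backend-agnostic permissions_test.py added further down, which round-trips ActionObjectPermission through its string form. A condensed sketch of that round trip, using only names that appear in the new file; READ is a non-compound permission, so it carries a verify key.

# stdlib
import secrets

# syft absolute
from syft.service.action.action_permissions import ActionObjectPermission
from syft.service.action.action_permissions import ActionPermission
from syft.service.action.action_permissions import SyftVerifyKey
from syft.service.action.action_permissions import UID

uid = UID()
verify_key = SyftVerifyKey.from_string(secrets.token_hex(32))

# Serialize a READ permission to its string form and rebuild it; uid,
# permission and credentials are expected to survive the round trip.
perm = ActionObjectPermission(uid, ActionPermission.READ, verify_key)
rebuilt = ActionObjectPermission.from_permission_string(uid, perm.permission_string)

assert (rebuilt.uid, rebuilt.permission, rebuilt.credentials) == (
    uid,
    ActionPermission.READ,
    verify_key,
)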
- - # another object - obj_2 = MockSyftObject(data=2) - # root client takes ownership - mongo_store_partition.take_ownership(uid=obj_2.id, credentials=root_verify_key) - assert mongo_store_partition.has_permission( - permission(uid=obj_2.id, credentials=root_verify_key) - ) - assert not mongo_store_partition.has_permission( - permission(uid=obj_2.id, credentials=guest_verify_key) - ) - assert not mongo_store_partition.has_permission( - permission(uid=obj_2.id, credentials=hacker_verify_key) - ) - - -def test_mongo_store_partition_permissions_set( - root_verify_key: SyftVerifyKey, - guest_verify_key: SyftVerifyKey, - mongo_store_partition: MongoStorePartition, -) -> None: - """ - Test the permissions functionalities when using MongoStorePartition._set function - """ - hacker_verify_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_HACKER) - res = mongo_store_partition.init_store() - assert res.is_ok() - - # set the object to mongo_store_partition.collection - obj = MockSyftObject(data=1) - res = mongo_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_ok() - assert res.ok() == obj - - # check if the corresponding permissions has been added to the permissions - # collection after the root client claim it - pemissions_collection = mongo_store_partition.permissions.ok() - assert isinstance(pemissions_collection, MongoCollection) - permissions = pemissions_collection.find_one({"_id": obj.id}) - assert permissions is not None - assert isinstance(permissions["permissions"], set) - assert len(permissions["permissions"]) == 4 - for permission in PERMISSIONS: - assert mongo_store_partition.has_permission( - permission(uid=obj.id, credentials=root_verify_key) - ) - - # the hacker tries to set duplicated object but should not be able to claim it - res_2 = mongo_store_partition.set(guest_verify_key, obj, ignore_duplicates=True) - assert res_2.is_ok() - for permission in PERMISSIONS: - assert not mongo_store_partition.has_permission( - permission(uid=obj.id, credentials=hacker_verify_key) - ) - assert mongo_store_partition.has_permission( - permission(uid=obj.id, credentials=root_verify_key) - ) - - -def test_mongo_store_partition_permissions_get_all( - root_verify_key: SyftVerifyKey, - guest_verify_key: SyftVerifyKey, - mongo_store_partition: MongoStorePartition, -) -> None: - res = mongo_store_partition.init_store() - assert res.is_ok() - hacker_verify_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_HACKER) - # set several objects for the root and guest client - num_root_objects: int = 5 - num_guest_objects: int = 3 - for i in range(num_root_objects): - obj = MockSyftObject(data=i) - mongo_store_partition.set( - credentials=root_verify_key, obj=obj, ignore_duplicates=False - ) - for i in range(num_guest_objects): - obj = MockSyftObject(data=i) - mongo_store_partition.set( - credentials=guest_verify_key, obj=obj, ignore_duplicates=False - ) - - assert ( - len(mongo_store_partition.all(root_verify_key).ok()) - == num_root_objects + num_guest_objects - ) - assert len(mongo_store_partition.all(guest_verify_key).ok()) == num_guest_objects - assert len(mongo_store_partition.all(hacker_verify_key).ok()) == 0 - - -def test_mongo_store_partition_permissions_delete( - root_verify_key: SyftVerifyKey, - guest_verify_key: SyftVerifyKey, - mongo_store_partition: MongoStorePartition, -) -> None: - res = mongo_store_partition.init_store() - assert res.is_ok() - collection: MongoCollection = mongo_store_partition.collection.ok() - pemissions_collection: MongoCollection = 
mongo_store_partition.permissions.ok() - hacker_verify_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_HACKER) - - # the root client set an object - obj = MockSyftObject(data=1) - mongo_store_partition.set( - credentials=root_verify_key, obj=obj, ignore_duplicates=False - ) - qk: QueryKey = mongo_store_partition.settings.store_key.with_obj(obj) - # guest or hacker can't delete it - assert not mongo_store_partition.delete(guest_verify_key, qk).is_ok() - assert not mongo_store_partition.delete(hacker_verify_key, qk).is_ok() - # only the root client can delete it - assert mongo_store_partition.delete(root_verify_key, qk).is_ok() - # check if the object and its permission have been deleted - assert collection.count_documents({}) == 0 - assert pemissions_collection.count_documents({}) == 0 - - # the guest client set an object - obj_2 = MockSyftObject(data=2) - mongo_store_partition.set( - credentials=guest_verify_key, obj=obj_2, ignore_duplicates=False - ) - qk_2: QueryKey = mongo_store_partition.settings.store_key.with_obj(obj_2) - # the hacker can't delete it - assert not mongo_store_partition.delete(hacker_verify_key, qk_2).is_ok() - # the guest client can delete it - assert mongo_store_partition.delete(guest_verify_key, qk_2).is_ok() - assert collection.count_documents({}) == 0 - assert pemissions_collection.count_documents({}) == 0 - - # the guest client set another object - obj_3 = MockSyftObject(data=3) - mongo_store_partition.set( - credentials=guest_verify_key, obj=obj_3, ignore_duplicates=False - ) - qk_3: QueryKey = mongo_store_partition.settings.store_key.with_obj(obj_3) - # the root client also has the permission to delete it - assert mongo_store_partition.delete(root_verify_key, qk_3).is_ok() - assert collection.count_documents({}) == 0 - assert pemissions_collection.count_documents({}) == 0 - - -def test_mongo_store_partition_permissions_update( - root_verify_key: SyftVerifyKey, - guest_verify_key: SyftVerifyKey, - mongo_store_partition: MongoStorePartition, -) -> None: - res = mongo_store_partition.init_store() - assert res.is_ok() - # the root client set an object - obj = MockSyftObject(data=1) - mongo_store_partition.set( - credentials=root_verify_key, obj=obj, ignore_duplicates=False - ) - assert len(mongo_store_partition.all(credentials=root_verify_key).ok()) == 1 - - qk: QueryKey = mongo_store_partition.settings.store_key.with_obj(obj) - permsissions: MongoCollection = mongo_store_partition.permissions.ok() - repeats = 5 - - for v in range(repeats): - # the guest client should not have permission to update obj - obj_new = MockSyftObject(data=v) - res = mongo_store_partition.update( - credentials=guest_verify_key, qk=qk, obj=obj_new - ) - assert res.is_err() - # the root client has the permission to update obj - res = mongo_store_partition.update( - credentials=root_verify_key, qk=qk, obj=obj_new - ) - assert res.is_ok() - # the id of the object in the permission collection should not be changed - assert permsissions.find_one(qk.as_dict_mongo)["_id"] == obj.id diff --git a/packages/syft/tests/syft/stores/permissions_test.py b/packages/syft/tests/syft/stores/permissions_test.py new file mode 100644 index 00000000000..cd5ccd0e9e6 --- /dev/null +++ b/packages/syft/tests/syft/stores/permissions_test.py @@ -0,0 +1,26 @@ +# stdlib +import secrets + +# syft absolute +from syft.service.action.action_permissions import ActionObjectPermission +from syft.service.action.action_permissions import ActionPermission +from syft.service.action.action_permissions import 
COMPOUND_ACTION_PERMISSION +from syft.service.action.action_permissions import SyftVerifyKey +from syft.service.action.action_permissions import UID + + +def test_permission_string_round_trip(): + for permission in ActionPermission: + uid = UID() + if permission in COMPOUND_ACTION_PERMISSION: + verify_key = None + else: + verify_key = SyftVerifyKey.from_string(secrets.token_hex(32)) + + original_obj = ActionObjectPermission(uid, permission, verify_key) + perm_string = original_obj.permission_string + recreated_obj = ActionObjectPermission.from_permission_string(uid, perm_string) + + assert original_obj.permission == recreated_obj.permission + assert original_obj.uid == recreated_obj.uid + assert original_obj.credentials == recreated_obj.credentials diff --git a/packages/syft/tests/syft/stores/queue_stash_test.py b/packages/syft/tests/syft/stores/queue_stash_test.py index 97efd3df41b..d4e2cd25747 100644 --- a/packages/syft/tests/syft/stores/queue_stash_test.py +++ b/packages/syft/tests/syft/stores/queue_stash_test.py @@ -1,23 +1,20 @@ # stdlib -from threading import Thread -from typing import Any +from concurrent.futures import ThreadPoolExecutor # third party import pytest # syft absolute from syft.service.queue.queue_stash import QueueItem +from syft.service.queue.queue_stash import QueueStash from syft.service.worker.worker_pool import WorkerPool from syft.service.worker.worker_pool_service import SyftWorkerPoolService from syft.store.linked_obj import LinkedObject +from syft.types.errors import SyftException from syft.types.uid import UID -# relative -from .store_fixtures_test import mongo_queue_stash_fn -from .store_fixtures_test import sqlite_queue_stash_fn - -def mock_queue_object(): +def mock_queue_object() -> QueueItem: worker_pool_obj = WorkerPool( name="mypool", image_id=UID(), @@ -26,12 +23,12 @@ def mock_queue_object(): ) linked_worker_pool = LinkedObject.from_obj( worker_pool_obj, - node_uid=UID(), + server_uid=UID(), service_type=SyftWorkerPoolService, ) obj = QueueItem( id=UID(), - node_uid=UID(), + server_uid=UID(), method="dummy_method", service="dummy_service", args=[], @@ -44,525 +41,246 @@ def mock_queue_object(): @pytest.mark.parametrize( "queue", [ - pytest.lazy_fixture("dict_queue_stash"), - pytest.lazy_fixture("sqlite_queue_stash"), - pytest.lazy_fixture("mongo_queue_stash"), + pytest.lazy_fixture("queue_stash"), ], ) -def test_queue_stash_sanity(queue: Any) -> None: +def test_queue_stash_sanity(queue: QueueStash) -> None: assert len(queue) == 0 - assert hasattr(queue, "store") - assert hasattr(queue, "partition") @pytest.mark.parametrize( "queue", [ - pytest.lazy_fixture("dict_queue_stash"), - pytest.lazy_fixture("sqlite_queue_stash"), - pytest.lazy_fixture("mongo_queue_stash"), + pytest.lazy_fixture("queue_stash"), ], ) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_queue_stash_set_get(root_verify_key, queue: Any) -> None: - objs = [] +# +def test_queue_stash_set_get(root_verify_key, queue: QueueStash) -> None: + objs: list[QueueItem] = [] repeats = 5 for idx in range(repeats): obj = mock_queue_object() objs.append(obj) - res = queue.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_ok() + queue.set(root_verify_key, obj, ignore_duplicates=False).unwrap() assert len(queue) == idx + 1 - res = queue.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_err() + with pytest.raises(SyftException): + queue.set(root_verify_key, obj, ignore_duplicates=False).unwrap() assert len(queue) == idx + 1 assert 
len(queue.get_all(root_verify_key).ok()) == idx + 1 - item = queue.find_one(root_verify_key, id=obj.id) - assert item.is_ok() - assert item.ok() == obj + item = queue.get_by_uid(root_verify_key, uid=obj.id).unwrap() + assert item == obj cnt = len(objs) for obj in objs: - res = queue.find_and_delete(root_verify_key, id=obj.id) - assert res.is_ok() - + queue.delete_by_uid(root_verify_key, uid=obj.id).unwrap() cnt -= 1 assert len(queue) == cnt - item = queue.find_one(root_verify_key, id=obj.id) - assert item.is_ok() - assert item.ok() is None + item = queue.get_by_uid(root_verify_key, uid=obj.id) + assert item.is_err() @pytest.mark.parametrize( "queue", [ - pytest.lazy_fixture("dict_queue_stash"), - pytest.lazy_fixture("sqlite_queue_stash"), - pytest.lazy_fixture("mongo_queue_stash"), + pytest.lazy_fixture("queue_stash"), ], ) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_queue_stash_update(root_verify_key, queue: Any) -> None: +def test_queue_stash_update(queue: QueueStash) -> None: + root_verify_key = queue.db.root_verify_key obj = mock_queue_object() - res = queue.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_ok() + queue.set(root_verify_key, obj, ignore_duplicates=False).unwrap() repeats = 5 for idx in range(repeats): obj.args = [idx] - res = queue.update(root_verify_key, obj) - assert res.is_ok() + queue.update(root_verify_key, obj).unwrap() assert len(queue) == 1 - item = queue.find_one(root_verify_key, id=obj.id) - assert item.is_ok() - assert item.ok().args == [idx] + item = queue.get_by_uid(root_verify_key, uid=obj.id).unwrap() + assert item.args == [idx] - res = queue.find_and_delete(root_verify_key, id=obj.id) - assert res.is_ok() + queue.delete_by_uid(root_verify_key, uid=obj.id).unwrap() assert len(queue) == 0 @pytest.mark.parametrize( "queue", [ - pytest.lazy_fixture("dict_queue_stash"), - pytest.lazy_fixture("sqlite_queue_stash"), - pytest.lazy_fixture("mongo_queue_stash"), + pytest.lazy_fixture("queue_stash"), ], ) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_queue_set_existing_queue_threading(root_verify_key, queue: Any) -> None: - thread_cnt = 3 - repeats = 5 - - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - for _ in range(repeats): - obj = mock_queue_object() - - for _ in range(10): - res = queue.set(root_verify_key, obj, ignore_duplicates=False) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - assert len(queue) == thread_cnt * repeats +def test_queue_set_existing_queue_threading(root_verify_key, queue: QueueStash) -> None: + root_verify_key = queue.db.root_verify_key + items_to_create = 100 + with ThreadPoolExecutor(max_workers=3) as executor: + results = list( + executor.map( + lambda obj: queue.set( + root_verify_key, + mock_queue_object(), + ), + range(items_to_create), + ) + ) + assert all(res.is_ok() for res in results), "Error occurred during execution" + assert len(queue) == items_to_create @pytest.mark.parametrize( "queue", [ - pytest.lazy_fixture("dict_queue_stash"), - pytest.lazy_fixture("sqlite_queue_stash"), - pytest.lazy_fixture("mongo_queue_stash"), + pytest.lazy_fixture("queue_stash"), ], ) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_queue_update_existing_queue_threading(root_verify_key, queue: Any) -> None: - thread_cnt = 
3 - repeats = 5 - +def test_queue_update_existing_queue_threading(queue: QueueStash) -> None: + root_verify_key = queue.db.root_verify_key obj = mock_queue_object() - queue.set(root_verify_key, obj, ignore_duplicates=False) - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - for repeat in range(repeats): - obj.args = [repeat] - for _ in range(10): - res = queue.update(root_verify_key, obj) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() + def update_queue(): + obj.args = [UID()] + res = queue.update(root_verify_key, obj) + return res - tids.append(thread) + queue.set(root_verify_key, obj, ignore_duplicates=False) - for thread in tids: - thread.join() + with ThreadPoolExecutor(max_workers=3) as executor: + # Run the update_queue function in multiple threads + results = list( + executor.map( + lambda _: update_queue(), + range(5), + ) + ) + assert all(res.is_ok() for res in results), "Error occurred during execution" - assert execution_err is None + assert len(queue) == 1 + item = queue.get_by_uid(root_verify_key, uid=obj.id).unwrap() + assert item.args != [] @pytest.mark.parametrize( "queue", [ - pytest.lazy_fixture("dict_queue_stash"), - pytest.lazy_fixture("sqlite_queue_stash"), - pytest.lazy_fixture("mongo_queue_stash"), + pytest.lazy_fixture("queue_stash"), ], ) -@pytest.mark.flaky(reruns=3, reruns_delay=3) def test_queue_set_delete_existing_queue_threading( - root_verify_key, - queue: Any, + queue: QueueStash, ) -> None: - thread_cnt = 3 - repeats = 5 - - execution_err = None - objs = [] - - for _ in range(repeats * thread_cnt): - obj = mock_queue_object() - res = queue.set(root_verify_key, obj, ignore_duplicates=False) - objs.append(obj) - - assert res.is_ok() - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - for idx in range(repeats): - item_idx = tid * repeats + idx - - for _ in range(10): - res = queue.find_and_delete(root_verify_key, id=objs[item_idx].id) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - assert len(queue) == 0 - - -def helper_queue_set_threading(root_verify_key, create_queue_cbk) -> None: - thread_cnt = 3 - repeats = 5 - - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - queue = create_queue_cbk() - - for _ in range(repeats): - obj = mock_queue_object() - - for _ in range(10): - res = queue.set(root_verify_key, obj, ignore_duplicates=False) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - queue = create_queue_cbk() - - assert execution_err is None - assert len(queue) == thread_cnt * repeats - - -# def helper_queue_set_joblib(root_verify_key, create_queue_cbk) -> None: -# thread_cnt = 3 -# repeats = 5 - -# def _kv_cbk(tid: int) -> None: -# queue = create_queue_cbk() -# for _ in range(repeats): -# worker_pool_obj = WorkerPool( -# name="mypool", -# image_id=UID(), -# max_count=0, -# worker_list=[], -# ) -# linked_worker_pool = LinkedObject.from_obj( -# worker_pool_obj, -# 
node_uid=UID(), -# service_type=SyftWorkerPoolService, -# ) -# obj = QueueItem( -# id=UID(), -# node_uid=UID(), -# method="dummy_method", -# service="dummy_service", -# args=[], -# kwargs={}, -# worker_pool=linked_worker_pool, -# ) -# for _ in range(10): -# res = queue.set(root_verify_key, obj, ignore_duplicates=False) -# if res.is_ok(): -# break - -# if res.is_err(): -# return res -# return None - -# errs = Parallel(n_jobs=thread_cnt)( -# delayed(_kv_cbk)(idx) for idx in range(thread_cnt) -# ) - -# for execution_err in errs: -# assert execution_err is None - -# queue = create_queue_cbk() -# assert len(queue) == thread_cnt * repeats - - -@pytest.mark.parametrize("backend", [helper_queue_set_threading]) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_queue_set_sqlite(root_verify_key, sqlite_workspace, backend): - def create_queue_cbk(): - return sqlite_queue_stash_fn(root_verify_key, sqlite_workspace) - - backend(root_verify_key, create_queue_cbk) - - -@pytest.mark.parametrize("backend", [helper_queue_set_threading]) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_queue_set_threading_mongo(root_verify_key, mongo_document_store, backend): - def create_queue_cbk(): - return mongo_queue_stash_fn(mongo_document_store) - - backend(root_verify_key, create_queue_cbk) - - -def helper_queue_update_threading(root_verify_key, create_queue_cbk) -> None: - thread_cnt = 3 - repeats = 5 - - queue = create_queue_cbk() - + root_verify_key = queue.db.root_verify_key + with ThreadPoolExecutor(max_workers=3) as executor: + results = list( + executor.map( + lambda obj: queue.set( + root_verify_key, + mock_queue_object(), + ), + range(15), + ) + ) + objs = [item.unwrap() for item in results] + + results = list( + executor.map( + lambda obj: queue.delete_by_uid(root_verify_key, uid=obj.id), + objs, + ) + ) + assert all(res.is_ok() for res in results), "Error occurred during execution" + + +def test_queue_set(queue_stash: QueueStash): + root_verify_key = queue_stash.db.root_verify_key + config = queue_stash.db.config + server_uid = queue_stash.db.server_uid + + def set_in_new_thread(_): + queue_stash = QueueStash.random( + root_verify_key=root_verify_key, + config=config, + server_uid=server_uid, + ) + return queue_stash.set(root_verify_key, mock_queue_object()) + + total_repeats = 50 + with ThreadPoolExecutor(max_workers=3) as executor: + results = list( + executor.map( + set_in_new_thread, + range(total_repeats), + ) + ) + + assert all(res.is_ok() for res in results), "Error occurred during execution" + assert len(queue_stash) == total_repeats + + +def test_queue_update_threading(queue_stash: QueueStash): + root_verify_key = queue_stash.db.root_verify_key + config = queue_stash.db.config + server_uid = queue_stash.db.server_uid obj = mock_queue_object() - queue.set(root_verify_key, obj, ignore_duplicates=False) - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - queue_local = create_queue_cbk() - - for repeat in range(repeats): - obj.args = [repeat] - - for _ in range(10): - res = queue_local.update(root_verify_key, obj) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - - -# def helper_queue_update_joblib(root_verify_key, create_queue_cbk) -> None: -# thread_cnt = 3 -# repeats = 5 - -# def _kv_cbk(tid: int) -> None: -# 
queue_local = create_queue_cbk() - -# for repeat in range(repeats): -# obj.args = [repeat] - -# for _ in range(10): -# res = queue_local.update(root_verify_key, obj) -# if res.is_ok(): -# break - -# if res.is_err(): -# return res -# return None - -# queue = create_queue_cbk() - -# obj = mock_queue_object() -# queue.set(root_verify_key, obj, ignore_duplicates=False) - -# errs = Parallel(n_jobs=thread_cnt)( -# delayed(_kv_cbk)(idx) for idx in range(thread_cnt) -# ) -# for execution_err in errs: -# assert execution_err is None - - -@pytest.mark.parametrize("backend", [helper_queue_update_threading]) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_queue_update_threading_sqlite(root_verify_key, sqlite_workspace, backend): - def create_queue_cbk(): - return sqlite_queue_stash_fn(root_verify_key, sqlite_workspace) - - backend(root_verify_key, create_queue_cbk) - - -@pytest.mark.parametrize("backend", [helper_queue_update_threading]) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_queue_update_threading_mongo(root_verify_key, mongo_document_store, backend): - def create_queue_cbk(): - return mongo_queue_stash_fn(mongo_document_store) - - backend(root_verify_key, create_queue_cbk) - - -def helper_queue_set_delete_threading( - root_verify_key, - create_queue_cbk, -) -> None: - thread_cnt = 3 - repeats = 5 - - queue = create_queue_cbk() - execution_err = None - objs = [] - - for _ in range(repeats * thread_cnt): - obj = mock_queue_object() - res = queue.set(root_verify_key, obj, ignore_duplicates=False) - objs.append(obj) - - assert res.is_ok() - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - queue = create_queue_cbk() - for idx in range(repeats): - item_idx = tid * repeats + idx - - for _ in range(10): - res = queue.find_and_delete(root_verify_key, id=objs[item_idx].id) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - assert len(queue) == 0 - - -# def helper_queue_set_delete_joblib( -# root_verify_key, -# create_queue_cbk, -# ) -> None: -# thread_cnt = 3 -# repeats = 5 - -# def _kv_cbk(tid: int) -> None: -# nonlocal execution_err -# queue = create_queue_cbk() -# for idx in range(repeats): -# item_idx = tid * repeats + idx - -# for _ in range(10): -# res = queue.find_and_delete(root_verify_key, id=objs[item_idx].id) -# if res.is_ok(): -# break - -# if res.is_err(): -# execution_err = res -# assert res.is_ok() - -# queue = create_queue_cbk() -# execution_err = None -# objs = [] - -# for _ in range(repeats * thread_cnt): -# obj = mock_queue_object() -# res = queue.set(root_verify_key, obj, ignore_duplicates=False) -# objs.append(obj) - -# assert res.is_ok() - -# errs = Parallel(n_jobs=thread_cnt)( -# delayed(_kv_cbk)(idx) for idx in range(thread_cnt) -# ) - -# for execution_err in errs: -# assert execution_err is None - -# assert len(queue) == 0 - - -@pytest.mark.parametrize("backend", [helper_queue_set_delete_threading]) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_queue_delete_threading_sqlite(root_verify_key, sqlite_workspace, backend): - def create_queue_cbk(): - return sqlite_queue_stash_fn(root_verify_key, sqlite_workspace) - - backend(root_verify_key, create_queue_cbk) - - -@pytest.mark.parametrize("backend", [helper_queue_set_delete_threading]) -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def 
test_queue_delete_threading_mongo(root_verify_key, mongo_document_store, backend): - def create_queue_cbk(): - return mongo_queue_stash_fn(mongo_document_store) - - backend(root_verify_key, create_queue_cbk) + queue_stash.set(root_verify_key, obj).unwrap() + + def update_in_new_thread(_): + queue_stash = QueueStash.random( + root_verify_key=root_verify_key, + config=config, + server_uid=server_uid, + ) + obj.args = [UID()] + return queue_stash.update(root_verify_key, obj) + + total_repeats = 50 + with ThreadPoolExecutor(max_workers=3) as executor: + results = list( + executor.map( + update_in_new_thread, + range(total_repeats), + ) + ) + + assert all(res.is_ok() for res in results), "Error occurred during execution" + assert len(queue_stash) == 1 + + +def test_queue_delete_threading(queue_stash: QueueStash): + root_verify_key = queue_stash.db.root_verify_key + root_verify_key = queue_stash.db.root_verify_key + config = queue_stash.db.config + server_uid = queue_stash.db.server_uid + + def delete_in_new_thread(obj: QueueItem): + queue_stash = QueueStash.random( + root_verify_key=root_verify_key, + config=config, + server_uid=server_uid, + ) + return queue_stash.delete_by_uid(root_verify_key, uid=obj.id) + + with ThreadPoolExecutor(max_workers=3) as executor: + results = list( + executor.map( + lambda obj: queue_stash.set( + root_verify_key, + mock_queue_object(), + ), + range(50), + ) + ) + objs = [item.unwrap() for item in results] + + results = list( + executor.map( + delete_in_new_thread, + objs, + ) + ) + assert all(res.is_ok() for res in results), "Error occurred during execution" + + assert len(queue_stash) == 0 diff --git a/packages/syft/tests/syft/stores/sqlite_document_store_test.py b/packages/syft/tests/syft/stores/sqlite_document_store_test.py deleted file mode 100644 index 46ee540aa9c..00000000000 --- a/packages/syft/tests/syft/stores/sqlite_document_store_test.py +++ /dev/null @@ -1,520 +0,0 @@ -# stdlib -from threading import Thread - -# third party -import pytest - -# syft absolute -from syft.store.document_store import QueryKeys -from syft.store.sqlite_document_store import SQLiteStorePartition - -# relative -from .store_fixtures_test import sqlite_store_partition_fn -from .store_mocks_test import MockObjectType -from .store_mocks_test import MockSyftObject - - -def test_sqlite_store_partition_sanity( - sqlite_store_partition: SQLiteStorePartition, -) -> None: - assert hasattr(sqlite_store_partition, "data") - assert hasattr(sqlite_store_partition, "unique_keys") - assert hasattr(sqlite_store_partition, "searchable_keys") - - -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_sqlite_store_partition_set( - root_verify_key, - sqlite_store_partition: SQLiteStorePartition, -) -> None: - obj = MockSyftObject(data=1) - res = sqlite_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - - assert res.is_ok() - assert res.ok() == obj - assert ( - len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - res = sqlite_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_err() - assert ( - len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - res = sqlite_store_partition.set(root_verify_key, obj, ignore_duplicates=True) - assert res.is_ok() - assert ( - len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - obj2 = MockSyftObject(data=2) - res = sqlite_store_partition.set(root_verify_key, obj2, ignore_duplicates=False) - assert res.is_ok() - assert res.ok() == 
obj2 - assert ( - len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - == 2 - ) - repeats = 5 - for idx in range(repeats): - obj = MockSyftObject(data=idx) - res = sqlite_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert res.is_ok() - assert ( - len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - == 3 + idx - ) - - -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_sqlite_store_partition_delete( - root_verify_key, - sqlite_store_partition: SQLiteStorePartition, -) -> None: - objs = [] - repeats = 5 - for v in range(repeats): - obj = MockSyftObject(data=v) - sqlite_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - objs.append(obj) - - assert len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) == len(objs) - - # random object - obj = MockSyftObject(data="bogus") - key = sqlite_store_partition.settings.store_key.with_obj(obj) - res = sqlite_store_partition.delete(root_verify_key, key) - assert res.is_err() - assert len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) == len(objs) - - # cleanup store - for idx, v in enumerate(objs): - key = sqlite_store_partition.settings.store_key.with_obj(v) - res = sqlite_store_partition.delete(root_verify_key, key) - assert res.is_ok() - assert ( - len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - == len(objs) - idx - 1 - ) - - res = sqlite_store_partition.delete(root_verify_key, key) - assert res.is_err() - assert ( - len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - == len(objs) - idx - 1 - ) - - assert ( - len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - == 0 - ) - - -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_sqlite_store_partition_update( - root_verify_key, - sqlite_store_partition: SQLiteStorePartition, -) -> None: - # add item - obj = MockSyftObject(data=1) - sqlite_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - assert ( - len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - - # fail to update missing keys - rand_obj = MockSyftObject(data="bogus") - key = sqlite_store_partition.settings.store_key.with_obj(rand_obj) - res = sqlite_store_partition.update(root_verify_key, key, obj) - assert res.is_err() - - # update the key multiple times - repeats = 5 - for v in range(repeats): - key = sqlite_store_partition.settings.store_key.with_obj(obj) - obj_new = MockSyftObject(data=v) - - res = sqlite_store_partition.update(root_verify_key, key, obj_new) - assert res.is_ok() - - # The ID should stay the same on update, unly the values are updated. 
- assert ( - len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - == 1 - ) - assert ( - sqlite_store_partition.all( - root_verify_key, - ) - .ok()[0] - .id - == obj.id - ) - assert ( - sqlite_store_partition.all( - root_verify_key, - ) - .ok()[0] - .id - != obj_new.id - ) - assert ( - sqlite_store_partition.all( - root_verify_key, - ) - .ok()[0] - .data - == v - ) - - stored = sqlite_store_partition.get_all_from_store( - root_verify_key, QueryKeys(qks=[key]) - ) - assert stored.ok()[0].data == v - - -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_sqlite_store_partition_set_threading( - sqlite_workspace: tuple, - root_verify_key, -) -> None: - thread_cnt = 3 - repeats = 5 - - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - - sqlite_store_partition = sqlite_store_partition_fn( - root_verify_key, sqlite_workspace - ) - for idx in range(repeats): - for _ in range(10): - obj = MockObjectType(data=idx) - res = sqlite_store_partition.set( - root_verify_key, obj, ignore_duplicates=False - ) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok(), res - - return execution_err - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - - sqlite_store_partition = sqlite_store_partition_fn( - root_verify_key, sqlite_workspace - ) - stored_cnt = len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - assert stored_cnt == thread_cnt * repeats - - -# @pytest.mark.skip(reason="Joblib is flaky") -# def test_sqlite_store_partition_set_joblib( -# root_verify_key, -# sqlite_workspace: Tuple, -# ) -> None: -# thread_cnt = 3 -# repeats = 5 - -# def _kv_cbk(tid: int) -> None: -# for idx in range(repeats): -# sqlite_store_partition = sqlite_store_partition_fn( -# root_verify_key, sqlite_workspace -# ) -# obj = MockObjectType(data=idx) - -# for _ in range(10): -# res = sqlite_store_partition.set( -# root_verify_key, obj, ignore_duplicates=False -# ) -# if res.is_ok(): -# break - -# if res.is_err(): -# return res - -# return None - -# errs = Parallel(n_jobs=thread_cnt)( -# delayed(_kv_cbk)(idx) for idx in range(thread_cnt) -# ) - -# for execution_err in errs: -# assert execution_err is None - -# sqlite_store_partition = sqlite_store_partition_fn( -# root_verify_key, sqlite_workspace -# ) -# stored_cnt = len( -# sqlite_store_partition.all( -# root_verify_key, -# ).ok() -# ) -# assert stored_cnt == thread_cnt * repeats - - -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_sqlite_store_partition_update_threading( - root_verify_key, - sqlite_workspace: tuple, -) -> None: - thread_cnt = 3 - repeats = 5 - - sqlite_store_partition = sqlite_store_partition_fn( - root_verify_key, sqlite_workspace - ) - obj = MockSyftObject(data=0) - key = sqlite_store_partition.settings.store_key.with_obj(obj) - sqlite_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - - sqlite_store_partition_local = sqlite_store_partition_fn( - root_verify_key, sqlite_workspace - ) - for repeat in range(repeats): - obj = MockSyftObject(data=repeat) - - for _ in range(10): - res = sqlite_store_partition_local.update(root_verify_key, key, obj) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok(), res - - tids = [] - for tid in range(thread_cnt): - thread = 
Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - - -# @pytest.mark.skip(reason="Joblib is flaky") -# def test_sqlite_store_partition_update_joblib( -# root_verify_key, -# sqlite_workspace: Tuple, -# ) -> None: -# thread_cnt = 3 -# repeats = 5 - -# sqlite_store_partition = sqlite_store_partition_fn( -# root_verify_key, sqlite_workspace -# ) -# obj = MockSyftObject(data=0) -# key = sqlite_store_partition.settings.store_key.with_obj(obj) -# sqlite_store_partition.set(root_verify_key, obj, ignore_duplicates=False) - -# def _kv_cbk(tid: int) -> None: -# sqlite_store_partition_local = sqlite_store_partition_fn( -# root_verify_key, sqlite_workspace -# ) -# for repeat in range(repeats): -# obj = MockSyftObject(data=repeat) - -# for _ in range(10): -# res = sqlite_store_partition_local.update(root_verify_key, key, obj) -# if res.is_ok(): -# break - -# if res.is_err(): -# return res -# return None - -# errs = Parallel(n_jobs=thread_cnt)( -# delayed(_kv_cbk)(idx) for idx in range(thread_cnt) -# ) - -# for execution_err in errs: -# assert execution_err is None - - -@pytest.mark.flaky(reruns=3, reruns_delay=3) -def test_sqlite_store_partition_set_delete_threading( - root_verify_key, - sqlite_workspace: tuple, -) -> None: - thread_cnt = 3 - repeats = 5 - execution_err = None - - def _kv_cbk(tid: int) -> None: - nonlocal execution_err - sqlite_store_partition = sqlite_store_partition_fn( - root_verify_key, sqlite_workspace - ) - - for idx in range(repeats): - obj = MockSyftObject(data=idx) - - for _ in range(10): - res = sqlite_store_partition.set( - root_verify_key, obj, ignore_duplicates=False - ) - if res.is_ok(): - break - - if res.is_err(): - execution_err = res - assert res.is_ok() - - key = sqlite_store_partition.settings.store_key.with_obj(obj) - - res = sqlite_store_partition.delete(root_verify_key, key) - if res.is_err(): - execution_err = res - assert res.is_ok(), res - - tids = [] - for tid in range(thread_cnt): - thread = Thread(target=_kv_cbk, args=(tid,)) - thread.start() - - tids.append(thread) - - for thread in tids: - thread.join() - - assert execution_err is None - - sqlite_store_partition = sqlite_store_partition_fn( - root_verify_key, sqlite_workspace - ) - stored_cnt = len( - sqlite_store_partition.all( - root_verify_key, - ).ok() - ) - assert stored_cnt == 0 - - -# @pytest.mark.skip(reason="Joblib is flaky") -# def test_sqlite_store_partition_set_delete_joblib( -# root_verify_key, -# sqlite_workspace: Tuple, -# ) -> None: -# thread_cnt = 3 -# repeats = 5 - -# def _kv_cbk(tid: int) -> None: -# sqlite_store_partition = sqlite_store_partition_fn( -# root_verify_key, sqlite_workspace -# ) - -# for idx in range(repeats): -# obj = MockSyftObject(data=idx) - -# for _ in range(10): -# res = sqlite_store_partition.set( -# root_verify_key, obj, ignore_duplicates=False -# ) -# if res.is_ok(): -# break - -# if res.is_err(): -# return res - -# key = sqlite_store_partition.settings.store_key.with_obj(obj) - -# res = sqlite_store_partition.delete(root_verify_key, key) -# if res.is_err(): -# return res -# return None - -# errs = Parallel(n_jobs=thread_cnt)( -# delayed(_kv_cbk)(idx) for idx in range(thread_cnt) -# ) -# for execution_err in errs: -# assert execution_err is None - -# sqlite_store_partition = sqlite_store_partition_fn( -# root_verify_key, sqlite_workspace -# ) -# stored_cnt = len( -# sqlite_store_partition.all( -# root_verify_key, -# ).ok() -# ) -# assert stored_cnt == 0 diff --git 
a/packages/syft/tests/syft/stores/store_constants_test.py b/packages/syft/tests/syft/stores/store_constants_test.py index ba9910bb652..e82fd839259 100644 --- a/packages/syft/tests/syft/stores/store_constants_test.py +++ b/packages/syft/tests/syft/stores/store_constants_test.py @@ -1,3 +1,6 @@ +# syft absolute +from syft.server.credentials import SyftSigningKey + TEST_VERIFY_KEY_STRING_ROOT = ( "08e5bcddfd55cdff0f7f6a62d63a43585734c6e7a17b2ffb3f3efe322c3cecc5" ) @@ -7,3 +10,6 @@ TEST_VERIFY_KEY_STRING_HACKER = ( "8f4412396d3418d17c08a8f46592621a5d57e0daf1c93e2134c30f50d666801d" ) + +TEST_SIGNING_KEY_NEW_ADMIN = SyftSigningKey.generate() +TEST_VERIFY_KEY_NEW_ADMIN = TEST_SIGNING_KEY_NEW_ADMIN.verify_key diff --git a/packages/syft/tests/syft/stores/store_fixtures_test.py b/packages/syft/tests/syft/stores/store_fixtures_test.py deleted file mode 100644 index e4d3c9fa6dd..00000000000 --- a/packages/syft/tests/syft/stores/store_fixtures_test.py +++ /dev/null @@ -1,371 +0,0 @@ -# stdlib -from collections.abc import Generator -import os -from pathlib import Path -from secrets import token_hex -import tempfile - -# third party -import pytest - -# syft absolute -from syft.node.credentials import SyftVerifyKey -from syft.service.action.action_store import DictActionStore -from syft.service.action.action_store import MongoActionStore -from syft.service.action.action_store import SQLiteActionStore -from syft.service.queue.queue_stash import QueueStash -from syft.store.dict_document_store import DictDocumentStore -from syft.store.dict_document_store import DictStoreConfig -from syft.store.dict_document_store import DictStorePartition -from syft.store.document_store import PartitionSettings -from syft.store.locks import FileLockingConfig -from syft.store.locks import LockingConfig -from syft.store.locks import NoLockingConfig -from syft.store.locks import ThreadingLockingConfig -from syft.store.mongo_client import MongoStoreClientConfig -from syft.store.mongo_document_store import MongoDocumentStore -from syft.store.mongo_document_store import MongoStoreConfig -from syft.store.mongo_document_store import MongoStorePartition -from syft.store.sqlite_document_store import SQLiteDocumentStore -from syft.store.sqlite_document_store import SQLiteStoreClientConfig -from syft.store.sqlite_document_store import SQLiteStoreConfig -from syft.store.sqlite_document_store import SQLiteStorePartition -from syft.types.uid import UID - -# relative -from .store_constants_test import TEST_VERIFY_KEY_STRING_ROOT -from .store_mocks_test import MockObjectType - -MONGO_CLIENT_CACHE = None - -locking_scenarios = [ - "nop", - # "file", # makes tests pretty unstable - "threading", -] - - -def str_to_locking_config(conf: str) -> LockingConfig: - if conf == "nop": - return NoLockingConfig() - elif conf == "file": - lock_name = token_hex(8) + ".lock" - root = os.getenv("SYFT_TEMP_ROOT", "syft") - workspace_folder = Path(tempfile.gettempdir(), root, "test_locks") - workspace_folder.mkdir(parents=True, exist_ok=True) - - client_path = workspace_folder / lock_name - - return FileLockingConfig(client_path=client_path) - elif conf == "threading": - return ThreadingLockingConfig() - else: - raise NotImplementedError(f"unknown locking config {conf}") - - -def cleanup_locks(locking_config: LockingConfig): - if isinstance(locking_config, FileLockingConfig): - try: - locking_config.client_path.exists() and locking_config.client_path.unlink() - except BaseException as e: - print("failed to cleanup file lock", e) - - 
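# The per-backend store fixtures deleted in this file are superseded by the single
# DB-backed `queue_stash` fixture used by the new queue tests earlier in this diff.
# A minimal sketch of that replacement pattern, assuming the `queue_stash` fixture and
# the `mock_queue_object` helper from the new queue stash tests (not redefined here),
# and the QueueStash import path shown in the removed fixtures above:

from concurrent.futures import ThreadPoolExecutor

from syft.service.queue.queue_stash import QueueStash


def test_concurrent_set_sketch(queue_stash: QueueStash) -> None:
    root_verify_key = queue_stash.db.root_verify_key

    def set_in_new_thread(_: int):
        # each worker thread opens its own stash handle over the same database
        stash = QueueStash.random(
            root_verify_key=root_verify_key,
            config=queue_stash.db.config,
            server_uid=queue_stash.db.server_uid,
        )
        return stash.set(root_verify_key, mock_queue_object())

    with ThreadPoolExecutor(max_workers=3) as executor:
        results = list(executor.map(set_in_new_thread, range(10)))

    # every concurrent write should succeed and be visible in the shared stash
    assert all(res.is_ok() for res in results)
    assert len(queue_stash) == 10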
-@pytest.fixture(scope="function") -def sqlite_workspace() -> Generator: - sqlite_db_name = token_hex(8) + ".sqlite" - root = os.getenv("SYFT_TEMP_ROOT", "syft") - sqlite_workspace_folder = Path( - tempfile.gettempdir(), root, "fixture_sqlite_workspace" - ) - sqlite_workspace_folder.mkdir(parents=True, exist_ok=True) - - db_path = sqlite_workspace_folder / sqlite_db_name - - if db_path.exists(): - db_path.unlink() - - yield sqlite_workspace_folder, sqlite_db_name - - try: - db_path.exists() and db_path.unlink() - except BaseException as e: - print("failed to cleanup sqlite db", e) - - -def sqlite_store_partition_fn( - root_verify_key, - sqlite_workspace: tuple[Path, str], - locking_config_name: str = "nop", -): - workspace, db_name = sqlite_workspace - sqlite_config = SQLiteStoreClientConfig(filename=db_name, path=workspace) - - locking_config = str_to_locking_config(locking_config_name) - store_config = SQLiteStoreConfig( - client_config=sqlite_config, locking_config=locking_config - ) - - settings = PartitionSettings(name="test", object_type=MockObjectType) - - store = SQLiteStorePartition( - UID(), root_verify_key, settings=settings, store_config=store_config - ) - - res = store.init_store() - assert res.is_ok() - - return store - - -@pytest.fixture(scope="function", params=locking_scenarios) -def sqlite_store_partition( - root_verify_key, sqlite_workspace: tuple[Path, str], request -): - locking_config_name = request.param - store = sqlite_store_partition_fn( - root_verify_key, sqlite_workspace, locking_config_name=locking_config_name - ) - - yield store - - cleanup_locks(store.store_config.locking_config) - - -def sqlite_document_store_fn( - root_verify_key, - sqlite_workspace: tuple[Path, str], - locking_config_name: str = "nop", -): - workspace, db_name = sqlite_workspace - sqlite_config = SQLiteStoreClientConfig(filename=db_name, path=workspace) - - locking_config = str_to_locking_config(locking_config_name) - store_config = SQLiteStoreConfig( - client_config=sqlite_config, locking_config=locking_config - ) - - return SQLiteDocumentStore(UID(), root_verify_key, store_config=store_config) - - -@pytest.fixture(scope="function", params=locking_scenarios) -def sqlite_document_store(root_verify_key, sqlite_workspace: tuple[Path, str], request): - locking_config_name = request.param - store = sqlite_document_store_fn( - root_verify_key, sqlite_workspace, locking_config_name=locking_config_name - ) - yield store - cleanup_locks(store.store_config.locking_config) - - -def sqlite_queue_stash_fn( - root_verify_key, - sqlite_workspace: tuple[Path, str], - locking_config_name: str = "threading", -): - store = sqlite_document_store_fn( - root_verify_key, - sqlite_workspace, - locking_config_name=locking_config_name, - ) - return QueueStash(store=store) - - -@pytest.fixture(scope="function", params=locking_scenarios) -def sqlite_queue_stash(root_verify_key, sqlite_workspace: tuple[Path, str], request): - locking_config_name = request.param - yield sqlite_queue_stash_fn( - root_verify_key, sqlite_workspace, locking_config_name=locking_config_name - ) - - -@pytest.fixture(scope="function", params=locking_scenarios) -def sqlite_action_store(sqlite_workspace: tuple[Path, str], request): - workspace, db_name = sqlite_workspace - locking_config_name = request.param - - sqlite_config = SQLiteStoreClientConfig(filename=db_name, path=workspace) - - locking_config = str_to_locking_config(locking_config_name) - store_config = SQLiteStoreConfig( - client_config=sqlite_config, - 
locking_config=locking_config, - ) - - ver_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_ROOT) - yield SQLiteActionStore( - node_uid=UID(), - store_config=store_config, - root_verify_key=ver_key, - ) - - cleanup_locks(locking_config) - - -def mongo_store_partition_fn( - mongo_client, - root_verify_key, - mongo_db_name: str = "mongo_db", - locking_config_name: str = "nop", -): - mongo_config = MongoStoreClientConfig(client=mongo_client) - - locking_config = str_to_locking_config(locking_config_name) - - store_config = MongoStoreConfig( - client_config=mongo_config, - db_name=mongo_db_name, - locking_config=locking_config, - ) - settings = PartitionSettings(name="test", object_type=MockObjectType) - - return MongoStorePartition( - UID(), root_verify_key, settings=settings, store_config=store_config - ) - - -@pytest.fixture(scope="function", params=locking_scenarios) -def mongo_store_partition(root_verify_key, mongo_client, request): - mongo_db_name = token_hex(8) - locking_config_name = request.param - - partition = mongo_store_partition_fn( - mongo_client, - root_verify_key, - mongo_db_name=mongo_db_name, - locking_config_name=locking_config_name, - ) - yield partition - - # cleanup db - try: - mongo_client.drop_database(mongo_db_name) - except BaseException as e: - print("failed to cleanup mongo fixture", e) - - cleanup_locks(partition.store_config.locking_config) - - -def mongo_document_store_fn( - mongo_client, - root_verify_key, - mongo_db_name: str = "mongo_db", - locking_config_name: str = "nop", -): - locking_config = str_to_locking_config(locking_config_name) - mongo_config = MongoStoreClientConfig(client=mongo_client) - store_config = MongoStoreConfig( - client_config=mongo_config, db_name=mongo_db_name, locking_config=locking_config - ) - - mongo_client.drop_database(mongo_db_name) - - return MongoDocumentStore(UID(), root_verify_key, store_config=store_config) - - -@pytest.fixture(scope="function", params=locking_scenarios) -def mongo_document_store(root_verify_key, mongo_client, request): - locking_config_name = request.param - mongo_db_name = token_hex(8) - yield mongo_document_store_fn( - mongo_client, - root_verify_key, - mongo_db_name=mongo_db_name, - locking_config_name=locking_config_name, - ) - - -def mongo_queue_stash_fn(mongo_document_store): - return QueueStash(store=mongo_document_store) - - -@pytest.fixture(scope="function", params=locking_scenarios) -def mongo_queue_stash(root_verify_key, mongo_client, request): - mongo_db_name = token_hex(8) - locking_config_name = request.param - - store = mongo_document_store_fn( - mongo_client, - root_verify_key, - mongo_db_name=mongo_db_name, - locking_config_name=locking_config_name, - ) - yield mongo_queue_stash_fn(store) - - -@pytest.fixture(scope="function", params=locking_scenarios) -def mongo_action_store(mongo_client, request): - mongo_db_name = token_hex(8) - locking_config_name = request.param - locking_config = str_to_locking_config(locking_config_name) - - mongo_config = MongoStoreClientConfig(client=mongo_client) - store_config = MongoStoreConfig( - client_config=mongo_config, db_name=mongo_db_name, locking_config=locking_config - ) - ver_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_ROOT) - mongo_action_store = MongoActionStore( - node_uid=UID(), - store_config=store_config, - root_verify_key=ver_key, - ) - - yield mongo_action_store - - -def dict_store_partition_fn( - root_verify_key, - locking_config_name: str = "nop", -): - locking_config = str_to_locking_config(locking_config_name) - 
store_config = DictStoreConfig(locking_config=locking_config) - settings = PartitionSettings(name="test", object_type=MockObjectType) - - return DictStorePartition( - UID(), root_verify_key, settings=settings, store_config=store_config - ) - - -@pytest.fixture(scope="function", params=locking_scenarios) -def dict_store_partition(root_verify_key, request): - locking_config_name = request.param - yield dict_store_partition_fn( - root_verify_key, locking_config_name=locking_config_name - ) - - -@pytest.fixture(scope="function", params=locking_scenarios) -def dict_action_store(request): - locking_config_name = request.param - locking_config = str_to_locking_config(locking_config_name) - - store_config = DictStoreConfig(locking_config=locking_config) - ver_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_ROOT) - yield DictActionStore( - node_uid=UID(), - store_config=store_config, - root_verify_key=ver_key, - ) - - -def dict_document_store_fn(root_verify_key, locking_config_name: str = "nop"): - locking_config = str_to_locking_config(locking_config_name) - store_config = DictStoreConfig(locking_config=locking_config) - return DictDocumentStore(UID(), root_verify_key, store_config=store_config) - - -@pytest.fixture(scope="function", params=locking_scenarios) -def dict_document_store(root_verify_key, request): - locking_config_name = request.param - yield dict_document_store_fn( - root_verify_key, locking_config_name=locking_config_name - ) - - -def dict_queue_stash_fn(dict_document_store): - return QueueStash(store=dict_document_store) - - -@pytest.fixture(scope="function") -def dict_queue_stash(dict_document_store): - yield dict_queue_stash_fn(dict_document_store) diff --git a/packages/syft/tests/syft/stores/store_mocks_test.py b/packages/syft/tests/syft/stores/store_mocks_test.py deleted file mode 100644 index 3ee70ce44b0..00000000000 --- a/packages/syft/tests/syft/stores/store_mocks_test.py +++ /dev/null @@ -1,70 +0,0 @@ -# stdlib -from typing import Any - -# syft absolute -from syft.serde.serializable import serializable -from syft.store.document_store import DocumentStore -from syft.store.document_store import PartitionSettings -from syft.store.document_store import StoreConfig -from syft.store.kv_document_store import KeyValueBackingStore -from syft.types.syft_object import SyftObject -from syft.types.uid import UID - - -@serializable() -class MockKeyValueBackingStore(dict, KeyValueBackingStore): - def __init__( - self, - index_name: str, - settings: PartitionSettings, - store_config: StoreConfig, - **kwargs: Any, - ) -> None: - super(dict).__init__() - self._ddtype = kwargs.get("ddtype", None) - self.is_crashed = store_config.is_crashed - - def _check_if_crashed(self) -> None: - if self.is_crashed: - raise RuntimeError("The backend is down") - - def __setitem__(self, key: Any, value: Any) -> None: - self._check_if_crashed() - value = super().__setitem__(key, value) - return value - - def __getitem__(self, key: Any) -> Any: - try: - self._check_if_crashed() - value = super().__getitem__(key) - return value - except KeyError as e: - if self._ddtype: - return self._ddtype() - raise e - - -@serializable() -class MockObjectType(SyftObject): - __canonical_name__ = "mock_type" - - -@serializable() -class MockStore(DocumentStore): - __canonical_name__ = "MockStore" - pass - - -@serializable() -class MockSyftObject(SyftObject): - __canonical_name__ = str(UID()) - data: Any - - -@serializable() -class MockStoreConfig(StoreConfig): - __canonical_name__ = "MockStoreConfig" - store_type: 
type[DocumentStore] = MockStore - db_name: str = "testing" - backing_store: type[KeyValueBackingStore] = MockKeyValueBackingStore - is_crashed: bool = False diff --git a/packages/syft/tests/syft/transforms/transform_methods_test.py b/packages/syft/tests/syft/transforms/transform_methods_test.py index 6cd3e9a750e..93a521e297f 100644 --- a/packages/syft/tests/syft/transforms/transform_methods_test.py +++ b/packages/syft/tests/syft/transforms/transform_methods_test.py @@ -12,7 +12,7 @@ from syft.types.transforms import NotNone from syft.types.transforms import TransformContext from syft.types.transforms import add_credentials_for_key -from syft.types.transforms import add_node_uid_for_key +from syft.types.transforms import add_server_uid_for_key from syft.types.transforms import drop from syft.types.transforms import generate_id from syft.types.transforms import geteitherattr @@ -28,7 +28,7 @@ "syft_obj, context", [ ("admin_user", "authed_context"), - ("guest_user", "node_context"), + ("guest_user", "server_context"), ], ) def test_transformcontext(syft_obj, context, request): @@ -45,12 +45,12 @@ def test_transformcontext(syft_obj, context, request): if hasattr(context, "credentials"): assert transform_context.credentials == context.credentials - if hasattr(context, "node"): - assert transform_context.node == context.node + if hasattr(context, "server"): + assert transform_context.server == context.server - node_context = transform_context.to_node_context() + server_context = transform_context.to_server_context() - assert node_context == context + assert server_context == context @pytest.mark.parametrize( @@ -91,7 +91,7 @@ class MockObject: ("no_key", "no_value"), ], ) -def test_make_set_default(faker, key, value, node_context): +def test_make_set_default(faker, key, value, server_context): result = make_set_default(key, value) assert isinstance(result, FunctionType) assert isinstance(result, Callable) @@ -106,7 +106,7 @@ def __iter__(self): mock_obj = MockObject(obj_key=faker.name()) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) resultant_context = result(transform_context) @@ -123,7 +123,7 @@ def __iter__(self): assert resultant_context.output[key] == mock_obj.obj_key -def test_drop(faker, node_context): +def test_drop(faker, server_context): @dataclass class MockObject: name: str @@ -146,7 +146,7 @@ def __iter__(self): assert isinstance(result, Callable) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) expected_output = dict(mock_obj).copy() @@ -164,7 +164,7 @@ def __iter__(self): assert resultant_context.output == expected_output -def test_keep(faker, node_context): +def test_keep(faker, server_context): @dataclass class MockObject: name: str @@ -187,7 +187,7 @@ def __iter__(self): assert isinstance(result, Callable) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) mock_obj_dict = dict(mock_obj) @@ -206,7 +206,7 @@ def __iter__(self): assert resultant_context.output == expected_output -def test_rename(faker, node_context): +def test_rename(faker, server_context): @dataclass class MockObject: name: str @@ -230,7 +230,7 @@ def __iter__(self): assert isinstance(result, Callable) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) mock_obj_dict = dict(mock_obj) @@ -245,7 +245,7 
@@ def __iter__(self): assert resultant_context.output == expected_output -def test_generate_id(faker, node_context): +def test_generate_id(faker, server_context): @dataclass class MockObject: name: str @@ -272,7 +272,7 @@ def __iter__(self): ) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = generate_id(context=transform_context) @@ -288,7 +288,7 @@ def __iter__(self): ) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = generate_id(context=transform_context) @@ -305,7 +305,7 @@ def __iter__(self): ) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = generate_id(context=transform_context) @@ -340,7 +340,7 @@ def __iter__(self): assert result.output[key] == authed_context.credentials -def test_add_node_uid_for_key(faker, node_context): +def test_add_server_uid_for_key(faker, server_context): @dataclass class MockObject: name: str @@ -353,20 +353,20 @@ def __iter__(self): ) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) key = "random_uid_key" - result_func = add_node_uid_for_key(key=key) + result_func = add_server_uid_for_key(key=key) assert isinstance(result_func, FunctionType) result = result_func(context=transform_context) assert isinstance(result, TransformContext) assert key in result.output - assert result.output[key] == node_context.node.id + assert result.output[key] == server_context.server.id -def test_validate_url(faker, node_context): +def test_validate_url(faker, server_context): @dataclass class MockObject: url: str | None @@ -377,7 +377,7 @@ def __iter__(self): mock_obj = MockObject(url=None) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) # no change in context if url is None @@ -390,7 +390,7 @@ def __iter__(self): mock_obj = MockObject(url=url_with_port) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = validate_url(transform_context) @@ -398,7 +398,7 @@ def __iter__(self): assert result.output["url"] == url -def test_validate_email(faker, node_context): +def test_validate_email(faker, server_context): @dataclass class MockObject: email: str @@ -408,7 +408,7 @@ def __iter__(self): mock_obj = MockObject(email=None) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = validate_email(transform_context) assert isinstance(result, TransformContext) @@ -416,7 +416,7 @@ def __iter__(self): mock_obj = MockObject(email=faker.email()) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = validate_email(transform_context) assert isinstance(result, TransformContext) @@ -425,7 +425,7 @@ def __iter__(self): mock_obj = MockObject(email=faker.name()) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) with pytest.raises(PydanticCustomError): diff --git a/packages/syft/tests/syft/transforms/transforms_test.py b/packages/syft/tests/syft/transforms/transforms_test.py index d6555dc8657..140854d6c1d 100644 --- 
a/packages/syft/tests/syft/transforms/transforms_test.py +++ b/packages/syft/tests/syft/transforms/transforms_test.py @@ -8,7 +8,7 @@ # syft absolute from syft.types import transforms from syft.types.syft_object import SyftBaseObject -from syft.types.syft_object import SyftObjectRegistry +from syft.types.syft_object_registry import SyftObjectRegistry from syft.types.transforms import TransformContext from syft.types.transforms import validate_klass_and_version @@ -75,7 +75,7 @@ def test_validate_klass_and_version( assert result == expected_result -def test_generate_transform_wrapper(faker, monkeypatch, node_context): +def test_generate_transform_wrapper(faker, monkeypatch, server_context): mock_value = faker.random_int() def mock_transform_method(context: TransformContext) -> TransformContext: @@ -95,7 +95,7 @@ def mock_transform_method(context: TransformContext) -> TransformContext: output = resultant_wrapper( MockObjectFromSyftBaseObj(), - node_context, + server_context, ) assert isinstance(output, MockObjectToSyftBaseObj) assert output.value == mock_value diff --git a/packages/syft/tests/syft/types/dicttuple_test.py b/packages/syft/tests/syft/types/dicttuple_test.py index de32f2545bc..43beb3116c2 100644 --- a/packages/syft/tests/syft/types/dicttuple_test.py +++ b/packages/syft/tests/syft/types/dicttuple_test.py @@ -41,7 +41,7 @@ def test_dict_tuple_not_subclassing_mapping(): def test_should_iter_over_value(dict_tuple: DictTuple) -> None: values = [] for v in dict_tuple: - values.append(v) + values.append(v) # noqa: PERF402 assert values == [1, 2] diff --git a/packages/syft/tests/syft/types/errors_test.py b/packages/syft/tests/syft/types/errors_test.py new file mode 100644 index 00000000000..ca8e557ef11 --- /dev/null +++ b/packages/syft/tests/syft/types/errors_test.py @@ -0,0 +1,76 @@ +# stdlib +from unittest.mock import Mock + +# third party +import pytest + +# syft absolute +import syft +from syft.service.context import AuthedServiceContext +from syft.service.user.user_roles import ServiceRole +from syft.types.errors import SyftException + +default_public_message = SyftException.public_message +public_message = "An error occurred. Contact the admin for more information." +private_message = "Private admin error." 
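# A minimal sketch of the behaviour exercised by the tests below, assuming the
# SyftException API used in this file: a private message as the positional argument,
# an optional `public_message` keyword, and `.public` falling back to the class-level
# default when no public message is given. The example messages are illustrative only.

from syft.types.errors import SyftException

exc = SyftException("db credentials rejected")       # private detail
assert exc.public == SyftException.public_message    # generic class-level default

exc = SyftException(
    "db credentials rejected",
    public_message="An error occurred. Contact the admin for more information.",
)
assert exc.public == "An error occurred. Contact the admin for more information."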
+ + +def test_default_public_message(): + default_public_message = SyftException.public_message + exception = SyftException(private_message) + + assert exception.public == default_public_message + assert exception._private_message == private_message + + +def test_custom_public_message(): + exception = SyftException(private_message, public_message=public_message) + + assert exception.public == public_message + assert exception._private_message == private_message + + +def test_public_message_property(): + default_public_message = SyftException.public_message + exception = SyftException(private_message) + + assert exception.public == default_public_message + + +@pytest.mark.parametrize( + "role,private_msg,public_msg,expected_message", + [ + (ServiceRole.NONE, private_message, None, default_public_message), + (ServiceRole.GUEST, private_message, None, default_public_message), + (ServiceRole.DATA_SCIENTIST, private_message, None, default_public_message), + (ServiceRole.DATA_OWNER, private_message, None, private_message), + (ServiceRole.ADMIN, private_message, None, private_message), + ], +) +def test_get_message(role, private_msg, public_msg, expected_message): + mock_context = Mock(AuthedServiceContext) + mock_context.role = role + mock_context.dev_mode = False + exception = SyftException(private_msg, public_message=public_msg) + assert exception.get_message(mock_context) == expected_message + + +def test_syfterror_raise_works_in_pytest(): + """ + SyftError has own exception handler that wasnt working in notebook testing environments, + this is just a sanity check to make sure it works in pytest. + """ + with pytest.raises(SyftException): + raise SyftException(public_message="-") + + with syft.raises(SyftException(public_message="-")): + raise SyftException(public_message="-") + + # syft.raises works with wildcard + with syft.raises(SyftException(public_message="*test message*")): + raise SyftException(public_message="longer test message") + + # syft.raises with different public message should raise + with pytest.raises(AssertionError): + with syft.raises(SyftException(public_message="*different message*")): + raise SyftException(public_message="longer test message") diff --git a/packages/syft/tests/syft/types/exc_filter_test.py b/packages/syft/tests/syft/types/exc_filter_test.py new file mode 100644 index 00000000000..284c58a4e7e --- /dev/null +++ b/packages/syft/tests/syft/types/exc_filter_test.py @@ -0,0 +1,73 @@ +# stdlib +from types import ModuleType + +# syft absolute +from syft.types.errors import ExceptionFilter + + +def create_empty_module(module_name: str): + code = """ +class NonExceptionClass: ... + """.strip() + return create_module(module_name, code=code) + + +def create_module(module_name: str, code: str | None = None): + # stdlib + import sys + + created_module = ModuleType(module_name) + + module_code = ( + code + or """ +class CustomException(Exception): ... +class AnotherCustomException(Exception): ... +class InheritedException(CustomException): ... +class NonExceptionClass: ... 
+ """ + ) + + exec(module_code, created_module.__dict__) + + sys.modules[module_name] = created_module + + return created_module + + +def test_exception_filter_init(): + instance = ExceptionFilter("pydantic") + + assert isinstance(instance, ExceptionFilter) + assert isinstance(instance, tuple) + assert instance.module == "pydantic" + assert instance + + +def test_exception_filter_exceptions(): + module_name = "test_module" + module = create_module(module_name) + + instance = ExceptionFilter(module_name) + + # classes are sorted by name + assert instance == ( + module.AnotherCustomException, + module.CustomException, + module.InheritedException, + ) + + +def test_not_found_module_doesnt_crash(): + instance = ExceptionFilter("fake_syft_module") + + assert instance == () + + +def test_exception_filter_no_exceptions(): + module_name = "syft_test_empty_module" + + create_empty_module(module_name) + instance = ExceptionFilter(module=module_name) + + assert tuple(instance) == () diff --git a/packages/syft/tests/syft/types/result_test.py b/packages/syft/tests/syft/types/result_test.py new file mode 100644 index 00000000000..a6eeb263490 --- /dev/null +++ b/packages/syft/tests/syft/types/result_test.py @@ -0,0 +1,216 @@ +# third party +import pytest + +# syft absolute +from syft.service.action.action_object import ActionObject +from syft.types.result import Err +from syft.types.result import Ok +from syft.types.result import as_result + + +def test_ok(): + good = Ok("om") + + assert good.is_ok() is True + assert good.is_err() is False + assert good.ok() == "om" + + +def test_ok_is_not_err(): + good = Ok("om") + + assert good.is_err() is False + assert good.err() is None + assert good.unwrap() == "om" + + +def test_ok_value_property(): + good = Ok("om") + + assert good.ok_value == "om" + + +def test_ok_match(): + matched = Ok(True) + + match matched: + case Ok(x): + assert x is True + + +def test_error(): + bad = Err(OSError("some exception")) + + assert bad.is_ok() is False + assert bad.is_err() is True + assert type(bad.err()) is OSError + + +def test_err_is_not_ok(): + bad = Err(OSError("some exception")) + + assert bad.is_ok() is False + assert bad.ok() is None + + +def test_err_value_property(): + bad = Err(OSError("some exception")) + + assert type(bad.error_value) is OSError + assert bad.error_value.args == ("some exception",) + + +def test_err_match(): + matched = Err(OSError("some exception")) + + match matched: + case Err(e): + assert type(e) is OSError + assert e.args == ("some exception",) + + +def test_unwrap_ok(): + obj = ActionObject.from_obj("om") + result = Ok(obj) + + same_obj = result.unwrap() + assert same_obj == obj + + +def test_unwrap_error(): + result = Err(ValueError("some exception")) + + with pytest.raises(ValueError): + result.unwrap() + + +def test_unwrap_error_not_exception(): + str_ = "some_exception" + result = Err(str_) # type: ignore + + with pytest.raises(TypeError): + result.unwrap() + + +def test_as_result_decorator_good(): + @as_result(ValueError) + def good() -> str: + return "om" + + result = good() + + assert result.is_ok() is True + assert result.is_err() is False + assert result.ok() == "om" + assert result.unwrap() == "om" + + +def test_as_result_decorator_bad(): + @as_result(ValueError) + def bad() -> str: + raise ValueError("some exception") + + result = bad() + + assert result.is_err() is True + assert result.is_ok() is False + + e = result.err() + assert type(e) is ValueError + assert e.args == ("some exception",) + + with pytest.raises(ValueError): + 
result.unwrap() + + +def test_as_result_decorator(): + @as_result(ValueError) + def create_object(valid: bool) -> ActionObject: + if valid: + return ActionObject.from_obj("om") + else: + raise ValueError("some exception") + + result = create_object(True) + + assert result.is_ok() is True + assert result.is_err() is False + + obj = result.unwrap() + assert isinstance(obj, ActionObject) + assert obj.syft_action_data == "om" + + result = create_object(False) + + assert result.is_err() is True + assert result.is_ok() is False + assert type(result.err()) is ValueError + + with pytest.raises(ValueError): + result.unwrap() + + +def test_as_result_decorator_bubble_up(): + @as_result(ValueError, TypeError) + def more_decorators(a: int) -> str: + if a == 1: + return "om" + raise OSError("some exception") + + result = more_decorators(1) + assert result.is_ok() is True + assert result.ok() == "om" + + with pytest.raises(OSError): + more_decorators(0) + + +def test_as_result_decorator_multiple_exceptions(): + @as_result(ValueError, TypeError, OSError) + def multiple_exceptions(a: int) -> str: + if a == 1: + return "om" + if a == 2: + raise TypeError + if a == 3: + raise ValueError + if a == 4: + raise OSError + raise ArithmeticError + + result = multiple_exceptions(1) + assert result.ok() == "om" + + result_type = multiple_exceptions(2) + assert type(result_type.err()) is TypeError + + result_value = multiple_exceptions(3) + assert type(result_value.err()) is ValueError + + result_os = multiple_exceptions(4) + assert type(result_os.err()) is OSError + + with pytest.raises(ArithmeticError): + multiple_exceptions(5) + + +def test_as_result_decorator_sub(): + class TestException(Exception): + pass + + class SubException(TestException): + pass + + @as_result(TestException) + def subclassed() -> str: + raise SubException("some exception") + + result = subclassed() + + assert result.is_err() is True + assert result.is_ok() is False + + assert type(result.err()) is SubException + + with pytest.raises(SubException): + result.unwrap() diff --git a/packages/syft/tests/syft/users/fixtures.py b/packages/syft/tests/syft/users/fixtures.py index 14c671d348e..46319b46704 100644 --- a/packages/syft/tests/syft/users/fixtures.py +++ b/packages/syft/tests/syft/users/fixtures.py @@ -2,10 +2,10 @@ import pytest # syft absolute -from syft.node.credentials import UserLoginCredentials -from syft.node.worker import Worker +from syft.server.credentials import UserLoginCredentials +from syft.server.worker import Worker from syft.service.context import AuthedServiceContext -from syft.service.context import NodeServiceContext +from syft.service.context import ServerServiceContext from syft.service.context import UnauthedServiceContext from syft.service.user.user import User from syft.service.user.user import UserCreate @@ -16,7 +16,6 @@ from syft.service.user.user_roles import ServiceRole from syft.service.user.user_service import UserService from syft.service.user.user_stash import UserStash -from syft.store.document_store import DocumentStore @pytest.fixture @@ -107,23 +106,23 @@ def guest_user_search(guest_user) -> UserSearch: @pytest.fixture -def user_stash(document_store: DocumentStore) -> UserStash: +def user_stash(document_store) -> UserStash: yield UserStash(store=document_store) @pytest.fixture -def user_service(document_store: DocumentStore): +def user_service(document_store): yield UserService(store=document_store) @pytest.fixture def authed_context(admin_user: User, worker: Worker) -> AuthedServiceContext: - yield 
AuthedServiceContext(credentials=admin_user.verify_key, node=worker) + yield AuthedServiceContext(credentials=admin_user.verify_key, server=worker) @pytest.fixture -def node_context(worker: Worker) -> NodeServiceContext: - yield NodeServiceContext(node=worker) +def server_context(worker: Worker) -> ServerServiceContext: + yield ServerServiceContext(server=worker) @pytest.fixture @@ -133,4 +132,4 @@ def unauthed_context( login_credentials = UserLoginCredentials( email=guest_create_user.email, password=guest_create_user.password ) - yield UnauthedServiceContext(login_credentials=login_credentials, node=worker) + yield UnauthedServiceContext(login_credentials=login_credentials, server=worker) diff --git a/packages/syft/tests/syft/users/local_execution_test.py b/packages/syft/tests/syft/users/local_execution_test.py index 9bf3e6d0d31..966bc215a4a 100644 --- a/packages/syft/tests/syft/users/local_execution_test.py +++ b/packages/syft/tests/syft/users/local_execution_test.py @@ -1,17 +1,19 @@ # stdlib from collections import OrderedDict -from textwrap import dedent +import sys # third party import numpy as np +import pytest # syft absolute import syft as sy from syft.client.api import APIRegistry +@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") def test_local_execution(worker): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client dataset = sy.Dataset( name="local_test", asset_list=[ @@ -22,15 +24,15 @@ def test_local_execution(worker): ) ], ) - root_domain_client.upload_dataset(dataset) - asset = root_domain_client.datasets[0].assets[0] + root_datasite_client.upload_dataset(dataset) + asset = root_datasite_client.datasets[0].assets[0] APIRegistry.__api_registry__ = OrderedDict() APIRegistry.set_api_for( - node_uid=worker.id, - user_verify_key=root_domain_client.verify_key, - api=root_domain_client.api, + server_uid=worker.id, + user_verify_key=root_datasite_client.verify_key, + api=root_datasite_client.api, ) @sy.syft_function( @@ -40,8 +42,9 @@ def test_local_execution(worker): def my_func(x): return x + 1 - my_func.code = dedent(my_func.code) - # time.sleep(10) - local_res = my_func(x=asset, time_alive=1) + local_res = my_func( + x=asset, + time_alive=1, + ) assert (local_res == np.array([2, 2, 2])).all() diff --git a/packages/syft/tests/syft/users/user_code_test.py b/packages/syft/tests/syft/users/user_code_test.py index 20d7bc50df4..5de444197ba 100644 --- a/packages/syft/tests/syft/users/user_code_test.py +++ b/packages/syft/tests/syft/users/user_code_test.py @@ -1,18 +1,29 @@ # stdlib -from textwrap import dedent import uuid # third party from faker import Faker import numpy as np +import pytest # syft absolute import syft as sy +from syft.client.datasite_client import DatasiteClient +from syft.server.worker import Worker from syft.service.action.action_object import ActionObject from syft.service.request.request import Request from syft.service.request.request import UserCodeStatusChange from syft.service.response import SyftError +from syft.service.response import SyftSuccess from syft.service.user.user import User +from syft.service.user.user import UserView +from syft.service.user.user_roles import ServiceRole +from syft.types.errors import SyftException + +# relative +from .user_test import ds_client as ds_client_fixture + +ds_client = ds_client_fixture # work around some ruff quirks @sy.syft_function( @@ -29,30 +40,67 @@ def mock_syft_func_2(): return 1 +def test_repr_markdown_not_throwing_error(guest_client: 
DatasiteClient) -> None: + guest_client.code.submit(mock_syft_func) + result = guest_client.code.get_by_service_func_name("mock_syft_func") + assert len(result) == 1 + assert result[0]._repr_markdown_() + + +def test_new_admin_can_list_user_code( + worker: Worker, + ds_client: DatasiteClient, + faker: Faker, +) -> None: + root_client = worker.root_client + + project = sy.Project(name="", members=[ds_client]) + project.create_code_request(mock_syft_func, ds_client) + + email = faker.email() + pw = uuid.uuid4().hex + root_client.register( + name=faker.name(), email=email, password=pw, password_verify=pw + ) + + admin = root_client.login(email=email, password=pw) + + result: UserView = root_client.api.services.user.update( + uid=admin.account.id, role=ServiceRole.ADMIN + ) + assert result.role == ServiceRole.ADMIN + + user_code_stash = worker.services.user_code.stash + user_codes = user_code_stash._data + + assert 1 == len(admin.code.get_all()) + assert {c.id for c in user_codes} == {c.id for c in admin.code} + + def test_user_code(worker) -> None: - root_domain_client = worker.root_client - root_domain_client.register( + root_datasite_client = worker.root_client + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - guest_client = root_domain_client.login( + guest_client = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) - users = root_domain_client.users.get_all() + users = root_datasite_client.users.get_all() users[-1].allow_mock_execution() guest_client.api.services.code.request_code_execution(mock_syft_func) - root_domain_client = worker.root_client - message = root_domain_client.notifications[-1] + root_datasite_client = worker.root_client + message = root_datasite_client.notifications[-1] request = message.link user_code = request.changes[0].code - result = user_code.unsafe_function() - request.accept_by_depositing_result(result) + result = user_code.run() + request.approve() result = guest_client.api.services.code.mock_syft_func() assert isinstance(result, ActionObject) @@ -67,22 +115,51 @@ def test_user_code(worker) -> None: assert multi_call_res.get() == result.get() -def test_duplicated_user_code(worker, guest_client: User) -> None: +def test_duplicated_user_code(worker) -> None: + worker.root_client.register( + name="Jane Doe", + email="jane@caltech.edu", + password="abc123", + password_verify="abc123", + institution="Caltech", + website="https://www.caltech.edu/", + ) + ds_client = worker.root_client.login( + email="jane@caltech.edu", + password="abc123", + ) + # mock_syft_func() - result = guest_client.api.services.code.request_code_execution(mock_syft_func) + result = ds_client.api.services.code.request_code_execution(mock_syft_func) assert isinstance(result, Request) - assert len(guest_client.code.get_all()) == 1 + assert len(ds_client.code.get_all()) == 1 # request the exact same code should return an error - result = guest_client.api.services.code.request_code_execution(mock_syft_func) - assert isinstance(result, SyftError) - assert len(guest_client.code.get_all()) == 1 + with pytest.raises(SyftException): + result = ds_client.api.services.code.request_code_execution(mock_syft_func) + + assert len(ds_client.code.get_all()) == 1 # request the a different function name but same content will also succeed - mock_syft_func_2() - result = guest_client.api.services.code.request_code_execution(mock_syft_func_2) + # flaky if not blocking + 
mock_syft_func_2(syft_no_server=True) + result = ds_client.api.services.code.request_code_execution(mock_syft_func_2) assert isinstance(result, Request) - assert len(guest_client.code.get_all()) == 2 + assert len(ds_client.code.get_all()) == 2 + + code_history = ds_client.code_history + assert code_history.code_versions, "No code version found." + + code_histories = worker.root_client.code_histories + user_code_history = code_histories[ds_client.logged_in_user] + assert not isinstance(code_histories, SyftError) + assert not isinstance(user_code_history, SyftError) + assert user_code_history.code_versions, "No code version found." + assert user_code_history.mock_syft_func.user_code_history[0].status is not None + assert user_code_history.mock_syft_func[0]._repr_markdown_(), "repr markdown failed" + + result = user_code_history.mock_syft_func_2[0]() + assert result.get() == 1 def random_hash() -> str: @@ -121,8 +198,6 @@ def test_scientist_can_list_code_assets(worker: sy.Worker, faker: Faker) -> None def func(asset): return 0 - func.code = dedent(func.code) - request = guest_client.code.request_code_execution(func) assert not isinstance(request, sy.SyftError) @@ -143,8 +218,8 @@ def mock_inner_func(): @sy.syft_function( input_policy=sy.ExactMatch(), output_policy=sy.SingleExecutionExactOutput() ) -def mock_outer_func(domain): - job = domain.launch_job(mock_inner_func) +def mock_outer_func(datasite): + job = datasite.launch_job(mock_inner_func) return job @@ -152,19 +227,21 @@ def test_nested_requests(worker, guest_client: User): guest_client.api.services.code.submit(mock_inner_func) guest_client.api.services.code.request_code_execution(mock_outer_func) - root_domain_client = worker.root_client - request = root_domain_client.requests[-1] + root_datasite_client = worker.root_client + request = root_datasite_client.requests[-1] - root_domain_client.api.services.request.apply(request.id) - request = root_domain_client.requests[-1] + root_datasite_client.api.services.request.apply(request.id) + request = root_datasite_client.requests[-1] - codes = root_domain_client.code + codes = root_datasite_client.code inner = codes[0] if codes[0].service_func_name == "mock_inner_func" else codes[1] outer = codes[0] if codes[0].service_func_name == "mock_outer_func" else codes[1] assert list(request.code.nested_codes.keys()) == ["mock_inner_func"] - (linked_obj, node) = request.code.nested_codes["mock_inner_func"] - assert node == {} - resolved = root_domain_client.api.services.notifications.resolve_object(linked_obj) + (linked_obj, server) = request.code.nested_codes["mock_inner_func"] + assert server == {} + resolved = root_datasite_client.api.services.notifications.resolve_object( + linked_obj + ) assert resolved.id == inner.id assert outer.status.approved assert not inner.status.approved @@ -172,16 +249,16 @@ def test_nested_requests(worker, guest_client: User): def test_user_code_mock_execution(worker) -> None: # Setup - root_domain_client = worker.root_client + root_datasite_client = worker.root_client - # TODO guest_client fixture is not in root_domain_client.users - root_domain_client.register( + # TODO guest_client fixture is not in root_datasite_client.users + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - ds_client = root_domain_client.login( + ds_client = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) @@ -196,7 +273,8 @@ def test_user_code_mock_execution(worker) -> None: ) ], ) - 
root_domain_client.upload_dataset(dataset) + + root_datasite_client.upload_dataset(dataset) # DS requests code execution data = ds_client.datasets[0].assets[0] @@ -205,20 +283,25 @@ def test_user_code_mock_execution(worker) -> None: def compute_mean(data): return data.mean() - compute_mean.code = dedent(compute_mean.code) ds_client.api.services.code.request_code_execution(compute_mean) # Guest attempts to set own permissions guest_user = ds_client.users.get_current_user() - res = guest_user.allow_mock_execution() - assert isinstance(res, SyftError) + with pytest.raises(SyftException) as exc: + guest_user.allow_mock_execution() + + assert "You are not permitted to perform this action." in exc.value.public_message # Mock execution fails, no permissions - result = ds_client.api.services.code.compute_mean(data=data.mock) - assert isinstance(result, SyftError) + with pytest.raises(SyftException) as exc: + result = ds_client.api.services.code.compute_mean(data=data.mock) + + assert ( + "You do not have the permissions for mock execution" in exc.value.public_message + ) # DO grants permissions - users = root_domain_client.users.get_all() + users = root_datasite_client.users.get_all() guest_user = [u for u in users if u.id == guest_user.id][0] guest_user.allow_mock_execution() @@ -229,15 +312,15 @@ def compute_mean(data): def test_mock_multiple_arguments(worker) -> None: # Setup - root_domain_client = worker.root_client + root_datasite_client = worker.root_client - root_domain_client.register( + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - ds_client = root_domain_client.login( + ds_client = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) @@ -252,8 +335,9 @@ def test_mock_multiple_arguments(worker) -> None: ) ], ) - root_domain_client.upload_dataset(dataset) - users = root_domain_client.users.get_all() + + root_datasite_client.upload_dataset(dataset) + users = root_datasite_client.users.get_all() users[-1].allow_mock_execution() # DS requests code execution @@ -263,22 +347,243 @@ def test_mock_multiple_arguments(worker) -> None: def compute_sum(data1, data2): return data1 + data2 - compute_sum.code = dedent(compute_sum.code) ds_client.api.services.code.request_code_execution(compute_sum) - root_domain_client.requests[-1].approve() + root_datasite_client.requests[-1].approve() # Mock execution succeeds, result not cached result = ds_client.api.services.code.compute_sum(data1=1, data2=1) assert result.get() == 2 # Mixed execution fails on input policy - result = ds_client.api.services.code.compute_sum(data1=1, data2=data) - assert isinstance(result, SyftError) + with pytest.raises(SyftException) as exc: + ds_client.api.services.code.compute_sum(data1=1, data2=data) + + assert exc.type is SyftException + assert "not in allowed inputs" in exc.value.public_message # Real execution succeeds result = ds_client.api.services.code.compute_sum(data1=data, data2=data) - assert np.equal(result.get(), np.array([0, 2, 4, 6, 8])).all() # Mixed execution fails, no result from cache - result = ds_client.api.services.code.compute_sum(data1=1, data2=data) - assert isinstance(result, SyftError) + with pytest.raises(SyftException): + result = ds_client.api.services.code.compute_sum(data1=1, data2=data) + + +def test_mock_no_arguments(worker) -> None: + root_datasite_client = worker.root_client + + root_datasite_client.register( + name="data-scientist", + email="test_user@openmined.org", + 
password="0000", + password_verify="0000", + ) + ds_client = root_datasite_client.login( + email="test_user@openmined.org", + password="0000", + ) + + users = root_datasite_client.users.get_all() + + @sy.syft_function_single_use() + def compute_sum(): + return 1 + + ds_client.api.services.code.request_code_execution(compute_sum) + + # not approved, no mock execution + with pytest.raises(SyftException): + ds_client.api.services.code.compute_sum() + + # not approved, mock execution + users[-1].allow_mock_execution() + result = ds_client.api.services.code.compute_sum() + assert result, result + assert result == 1 + + # approved, no mock execution + users[-1].allow_mock_execution(allow=False) + message = root_datasite_client.notifications[-1] + request = message.link + user_code = request.changes[0].code + result = user_code.run() + request.approve() + + result = ds_client.api.services.code.compute_sum() + # remove once we fix syft_action_saved_to_blob_store variable + result = result.get() + # uncomment once we fix syft_action_saved_to_blob_store + # assert not isinstance(result.syft_action_data_cache, ActionDataEmpty) + assert result == 1 + + +def test_submit_invalid_name(worker) -> None: + client = worker.root_client + + @sy.syft_function_single_use() + def valid_name(): + pass + + res = client.code.submit(valid_name) + assert isinstance(res, SyftSuccess) + + # reserved name + with pytest.raises(SyftException): + + @sy.syft_function_single_use() + def get_all(): + pass + + # no anonymous + with pytest.raises(SyftException): + + @sy.syft_function_single_use() + def _(): + pass + + # overwrite valid function name before submit, fail on serde + @sy.syft_function_single_use() + def valid_name_2(): + pass + + valid_name_2.func_name = "get_all" + + with pytest.raises(SyftException): + client.code.submit(valid_name_2) + + +def test_submit_code_with_global_var(guest_client: DatasiteClient) -> None: + with pytest.raises(SyftException) as exc: + + @sy.syft_function( + input_policy=sy.ExactMatch(), output_policy=sy.SingleExecutionExactOutput() + ) + def mock_syft_func_with_global(): + global x + return x + + assert "Your code contains (a) global variable(s)" in exc.value.public_message + + with pytest.raises(SyftException) as exc: + + @sy.syft_function_single_use() + def mock_syft_func_single_use_with_global(): + global x + return x + + assert "Your code contains (a) global variable(s)" in exc.value.public_message + + +def test_request_existing_usercodesubmit(worker) -> None: + root_datasite_client = worker.root_client + + root_datasite_client.register( + name="data-scientist", + email="test_user@openmined.org", + password="0000", + password_verify="0000", + ) + ds_client = root_datasite_client.login( + email="test_user@openmined.org", + password="0000", + ) + + @sy.syft_function_single_use() + def my_func(): + return 42 + + res_submit = ds_client.api.services.code.submit(my_func) + assert isinstance(res_submit, SyftSuccess) + res_request = ds_client.api.services.code.request_code_execution(my_func) + assert isinstance(res_request, Request) + + # Second request fails, cannot have multiple requests for the same code + with pytest.raises(SyftException): + res_request = ds_client.api.services.code.request_code_execution(my_func) + + assert len(ds_client.code.get_all()) == 1 + assert len(ds_client.requests.get_all()) == 1 + + +def test_request_existing_usercode(worker) -> None: + root_datasite_client = worker.root_client + + root_datasite_client.register( + name="data-scientist", + 
email="test_user@openmined.org", + password="0000", + password_verify="0000", + ) + ds_client = root_datasite_client.login( + email="test_user@openmined.org", + password="0000", + ) + + @sy.syft_function_single_use() + def my_func(): + return 42 + + res_submit = ds_client.api.services.code.submit(my_func) + assert isinstance(res_submit, SyftSuccess) + + code = ds_client.code.get_all()[0] + res_request = ds_client.api.services.code.request_code_execution(my_func) + assert isinstance(res_request, Request) + + # Second request fails, cannot have multiple requests for the same code + with pytest.raises(SyftException): + res_request = ds_client.api.services.code.request_code_execution(code) + + assert len(ds_client.code.get_all()) == 1 + assert len(ds_client.requests.get_all()) == 1 + + +def test_submit_existing_code_different_user(worker): + root_datasite_client = worker.root_client + + root_datasite_client.register( + name="data-scientist", + email="test_user@openmined.org", + password="0000", + password_verify="0000", + ) + ds_client_1 = root_datasite_client.login( + email="test_user@openmined.org", + password="0000", + ) + + root_datasite_client.register( + name="data-scientist-2", + email="test_user_2@openmined.org", + password="0000", + password_verify="0000", + ) + ds_client_2 = root_datasite_client.login( + email="test_user_2@openmined.org", + password="0000", + ) + + @sy.syft_function_single_use() + def my_func(): + return 42 + + res_submit = ds_client_1.api.services.code.submit(my_func) + assert isinstance(res_submit, SyftSuccess) + + with pytest.raises(SyftException) as exc: + ds_client_1.api.services.code.submit(my_func) + + assert "already exists" in exc.value.public_message + + # Resubmit with different user + res_submit = ds_client_2.api.services.code.submit(my_func) + assert isinstance(res_submit, SyftSuccess) + + with pytest.raises(SyftException) as exc: + ds_client_2.api.services.code.submit(my_func) + + assert "already exists" in exc.value.public_message + + assert len(ds_client_1.code.get_all()) == 1 + assert len(ds_client_2.code.get_all()) == 1 + assert len(root_datasite_client.code.get_all()) == 2 diff --git a/packages/syft/tests/syft/users/user_service_test.py b/packages/syft/tests/syft/users/user_service_test.py index 1377bbfafc1..59e905ee657 100644 --- a/packages/syft/tests/syft/users/user_service_test.py +++ b/packages/syft/tests/syft/users/user_service_test.py @@ -1,20 +1,24 @@ # stdlib +from typing import Literal +from typing import NoReturn from unittest import mock # third party from faker import Faker +import pytest from pytest import MonkeyPatch -from result import Err -from result import Ok # syft absolute -from syft.node.credentials import SyftVerifyKey -from syft.node.worker import Worker +import syft as sy +from syft import orchestra +from syft.client.client import SyftClient +from syft.server.credentials import SyftVerifyKey +from syft.server.worker import Worker from syft.service.context import AuthedServiceContext -from syft.service.context import NodeServiceContext +from syft.service.context import ServerServiceContext from syft.service.context import UnauthedServiceContext -from syft.service.response import SyftError from syft.service.response import SyftSuccess +from syft.service.user import errors as user_errors from syft.service.user.user import User from syft.service.user.user import UserCreate from syft.service.user.user import UserPrivateKey @@ -22,6 +26,11 @@ from syft.service.user.user import UserView from syft.service.user.user_roles import 
ServiceRole from syft.service.user.user_service import UserService +from syft.store.document_store_errors import NotFoundException +from syft.store.document_store_errors import StashException +from syft.types.errors import SyftException +from syft.types.result import Ok +from syft.types.result import as_result from syft.types.uid import UID @@ -38,16 +47,14 @@ def test_userservice_create_when_user_exists( authed_context: AuthedServiceContext, guest_create_user: UserCreate, ) -> None: - def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Ok: - return Ok(guest_create_user.to(User)) + @as_result(NotFoundException) + def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> User: + return guest_create_user.to(User) monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) - response = user_service.create(authed_context, guest_create_user) - assert isinstance(response, SyftError) - expected_error_message = ( - f"User already exists with email: {guest_create_user.email}" - ) - assert expected_error_message == response.message + + with pytest.raises(SyftException): + user_service.create(authed_context, **guest_create_user) def test_userservice_create_error_on_get_by_email( @@ -56,14 +63,16 @@ def test_userservice_create_error_on_get_by_email( authed_context: AuthedServiceContext, guest_create_user: UserCreate, ) -> None: - def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Err: - return Err(f"No user exists with given email: {email}") + @as_result(NotFoundException) + def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> User: + return guest_create_user.to(User) monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) - response = user_service.create(authed_context, guest_create_user) - assert isinstance(response, SyftError) - expected_error_message = mock_get_by_email(None, guest_create_user.email).err() - assert response.message == expected_error_message + + with pytest.raises(SyftException) as exc: + user_service.create(authed_context, **guest_create_user) + + assert exc.value.public_message == f"User {guest_create_user.email} already exists" def test_userservice_create_success( @@ -72,25 +81,30 @@ def test_userservice_create_success( authed_context: AuthedServiceContext, guest_create_user: UserCreate, ) -> None: - def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Ok: - return Ok(None) + @as_result(NotFoundException) + def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> User: + raise NotFoundException expected_user = guest_create_user.to(User) - expected_output = expected_user.to(UserView) + expected_output: UserView = expected_user.to(UserView) + expected_output.syft_client_verify_key = authed_context.credentials + expected_output.syft_server_location = authed_context.server.id + @as_result(StashException) def mock_set( credentials: SyftVerifyKey, - user: User, + obj: User, has_permission: bool = False, add_permissions=None, - ) -> Ok: - return Ok(expected_user) + ) -> User: + return expected_user monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) monkeypatch.setattr(user_service.stash, "set", mock_set) - response = user_service.create(authed_context, guest_create_user) + + response = user_service.create(authed_context, **guest_create_user) assert isinstance(response, UserView) - assert response.to_dict() == expected_output.to_dict() + assert response.model_dump() == expected_output.model_dump() def test_userservice_create_error_on_set( @@ -99,24 +113,26 @@ def 
test_userservice_create_error_on_set( authed_context: AuthedServiceContext, guest_create_user: UserCreate, ) -> None: - def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Ok: - return Ok(None) - - expected_error_msg = "Failed to set user." + @as_result(NotFoundException) + def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> NoReturn: + raise NotFoundException + @as_result(StashException) def mock_set( credentials: SyftVerifyKey, - user: User, + obj: User, has_permission: bool = False, add_permissions=None, - ) -> Err: - return Err(expected_error_msg) + ) -> NoReturn: + raise StashException monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) monkeypatch.setattr(user_service.stash, "set", mock_set) - response = user_service.create(authed_context, guest_create_user) - assert isinstance(response, SyftError) - assert response.message == expected_error_msg + + with pytest.raises(StashException) as exc: + user_service.create(authed_context, **guest_create_user) + + assert exc.type == StashException def test_userservice_view_error_on_get_by_uid( @@ -125,15 +141,18 @@ def test_userservice_view_error_on_get_by_uid( authed_context: AuthedServiceContext, ) -> None: uid_to_view = UID() - expected_error_msg = f"Failed to get uid: {uid_to_view}" + expected_error_msg = f"Item {uid_to_view} not found" - def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> Err: - return Err(expected_error_msg) + @as_result(NotFoundException) + def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> NoReturn: + raise NotFoundException(public_message=expected_error_msg) monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid) - response = user_service.view(authed_context, uid_to_view) - assert isinstance(response, SyftError) - assert response.message == expected_error_msg + + with pytest.raises(NotFoundException) as exc: + user_service.view(authed_context, uid_to_view) + assert exc.type == NotFoundException + assert exc.value.public_message == expected_error_msg def test_userservice_view_user_not_exists( @@ -142,15 +161,20 @@ def test_userservice_view_user_not_exists( authed_context: AuthedServiceContext, ) -> None: uid_to_view = UID() - expected_error_msg = f"No user exists for given: {uid_to_view}" - def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> Ok: - return Ok(None) + expected_error_msg = f"User {uid_to_view} not found" + + @as_result(NotFoundException) + def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> NoReturn: + raise NotFoundException(public_message=expected_error_msg) monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid) - response = user_service.view(authed_context, uid_to_view) - assert isinstance(response, SyftError) - assert response.message == expected_error_msg + + with pytest.raises(NotFoundException) as exc: + user_service.view(authed_context, uid_to_view) + + assert exc.type == NotFoundException + assert exc.value.public == expected_error_msg def test_userservice_view_user_success( @@ -160,13 +184,17 @@ def test_userservice_view_user_success( guest_user: User, ) -> None: uid_to_view = guest_user.id + expected_output = guest_user.to(UserView) - def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> Ok: - return Ok(guest_user) + @as_result(NotFoundException) + def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> User: + return guest_user monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid) - response = user_service.view(authed_context, uid_to_view) + + response 
= user_service.view(authed_context, uid=uid_to_view) + assert isinstance(response, UserView) assert response.model_dump() == expected_output.model_dump() @@ -181,10 +209,12 @@ def test_userservice_get_all_success( mock_get_all_output = [guest_user, admin_user] expected_output = [x.to(UserView) for x in mock_get_all_output] - def mock_get_all(credentials: SyftVerifyKey) -> Ok: - return Ok(mock_get_all_output) + @as_result(StashException) + def mock_get_all(credentials: SyftVerifyKey, **kwargs) -> list[User]: + return mock_get_all_output monkeypatch.setattr(user_service.stash, "get_all", mock_get_all) + response = user_service.get_all(authed_context) assert isinstance(response, list) assert len(response) == len(expected_output) @@ -194,92 +224,88 @@ def mock_get_all(credentials: SyftVerifyKey) -> Ok: ) -def test_userservice_get_all_error( - monkeypatch: MonkeyPatch, - user_service: UserService, - authed_context: AuthedServiceContext, -) -> None: - expected_output_msg = "No users exists" - - def mock_get_all(credentials: SyftVerifyKey) -> Err: - return Err("") - - monkeypatch.setattr(user_service.stash, "get_all", mock_get_all) - response = user_service.get_all(authed_context) - assert isinstance(response, SyftError) - assert response.message == expected_output_msg - - def test_userservice_search( monkeypatch: MonkeyPatch, user_service: UserService, authed_context: AuthedServiceContext, guest_user: User, ) -> None: - def mock_find_all(credentials: SyftVerifyKey, **kwargs) -> Ok | Err: - for key, _ in kwargs.items(): + @as_result(SyftException) + def get_all(credentials: SyftVerifyKey, **kwargs) -> list[User]: + for key in kwargs.keys(): if hasattr(guest_user, key): - return Ok([guest_user]) - return Err("Invalid kwargs") + return [guest_user] + return [] - monkeypatch.setattr(user_service.stash, "find_all", mock_find_all) + monkeypatch.setattr(user_service.stash, "get_all", get_all) expected_output = [guest_user.to(UserView)] # Search via id - response = user_service.search(authed_context, id=guest_user.id) + response = user_service.search(context=authed_context, id=guest_user.id) + assert isinstance(response, list) assert all( - r.model_dump() == expected.model_dump() + r.to_dict() == expected.to_dict() for r, expected in zip(response, expected_output) ) - # assert response.model_dump() == expected_output.model_dump() + # assert response.to_dict() == expected_output.to_dict() # Search via email - response = user_service.search(authed_context, email=guest_user.email) + response = user_service.search(context=authed_context, email=guest_user.email) assert isinstance(response, list) assert all( - r.model_dump() == expected.model_dump() + r.to_dict() == expected.to_dict() for r, expected in zip(response, expected_output) ) # Search via name - response = user_service.search(authed_context, name=guest_user.name) + response = user_service.search(context=authed_context, name=guest_user.name) assert isinstance(response, list) assert all( - r.model_dump() == expected.model_dump() + r.to_dict() == expected.to_dict() for r, expected in zip(response, expected_output) ) # Search via verify_key response = user_service.search( - authed_context, + context=authed_context, verify_key=guest_user.verify_key, ) assert isinstance(response, list) assert all( - r.model_dump() == expected.model_dump() + r.to_dict() == expected.to_dict() for r, expected in zip(response, expected_output) ) # Search via multiple kwargs response = user_service.search( - authed_context, name=guest_user.name, email=guest_user.email + 
context=authed_context, name=guest_user.name, email=guest_user.email
     )
     assert isinstance(response, list)
     assert all(
-        r.model_dump() == expected.model_dump()
+        r.to_dict() == expected.to_dict()
         for r, expected in zip(response, expected_output)
     )


 def test_userservice_search_with_invalid_kwargs(
-    user_service: UserService, authed_context: AuthedServiceContext
+    worker, user_service: UserService, authed_context: AuthedServiceContext
 ) -> None:
-    # Search with invalid kwargs
-    response = user_service.search(authed_context, role=ServiceRole.GUEST)
-    assert isinstance(response, SyftError)
-    assert "Invalid Search parameters" in response.message
+    # Direct calls will fail with a type error
+    with pytest.raises(TypeError) as exc:
+        user_service.search(context=authed_context, role=ServiceRole.GUEST)
+
+    assert "UserService.search() got an unexpected keyword argument 'role'" == str(
+        exc.value
+    )
+
+    root_client = worker.root_client
+    # Client calls fail at the autosplat check
+    with pytest.raises(SyftException) as exc:
+        root_client.users.search(role=ServiceRole.GUEST)
+
+    assert "Invalid parameter: `role`" in exc.value.public_message


 def test_userservice_update_get_by_uid_fails(
@@ -289,21 +315,19 @@ def test_userservice_update_get_by_uid_fails(
     monkeypatch: MonkeyPatch,
     user_service: UserService,
     authed_context: AuthedServiceContext,
     update_user: UserUpdate,
 ) -> None:
     random_uid = UID()
-    get_by_uid_err_msg = "Invalid UID"
-    expected_error_msg = (
-        f"Failed to find user with UID: {random_uid}. Error: {get_by_uid_err_msg}"
-    )
+    expected_error_msg = f"User {random_uid} not found"

-    def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> Err:
-        return Err(get_by_uid_err_msg)
+    @as_result(NotFoundException)
+    def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> NoReturn:
+        raise NotFoundException(public_message=expected_error_msg)

     monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid)

-    response = user_service.update(
-        authed_context, uid=random_uid, user_update=update_user
-    )
-    assert isinstance(response, SyftError)
-    assert response.message == expected_error_msg
+    with pytest.raises(NotFoundException) as exc:
+        user_service.update(authed_context, uid=random_uid, **update_user)
+
+    assert exc.type == NotFoundException
+    assert exc.value.public == expected_error_msg


 def test_userservice_update_no_user_exists(
@@ -313,18 +337,19 @@ def test_userservice_update_no_user_exists(
     monkeypatch: MonkeyPatch,
     user_service: UserService,
     authed_context: AuthedServiceContext,
     update_user: UserUpdate,
 ) -> None:
     random_uid = UID()
-    expected_error_msg = f"No user exists for given UID: {random_uid}"
+    expected_error_msg = f"User {random_uid} not found"

-    def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> Ok:
-        return Ok(None)
+    @as_result(NotFoundException)
+    def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> NoReturn:
+        raise NotFoundException(public_message=expected_error_msg)

     monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid)

-    response = user_service.update(
-        authed_context, uid=random_uid, user_update=update_user
-    )
-    assert isinstance(response, SyftError)
-    assert response.message == expected_error_msg
+    with pytest.raises(NotFoundException) as exc:
+        user_service.update(authed_context, uid=random_uid, **update_user)
+
+    assert exc.type == NotFoundException
+    assert exc.value.public_message == expected_error_msg


 def test_userservice_update_success(
@@ -334,24 +359,33 @@ def test_userservice_update_success(
     monkeypatch: MonkeyPatch,
     user_service: UserService,
     authed_context: AuthedServiceContext,
     guest_user: User,
     update_user: UserUpdate,
 ) -> None:
-    def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> Ok:
-        return Ok(guest_user)
-
-    def mock_update(credentials: SyftVerifyKey, user: User,
has_permission: bool) -> Ok: - guest_user.name = update_user.name - guest_user.email = update_user.email - return Ok(guest_user) + @as_result(NotFoundException) + def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> User: + return guest_user + + @as_result(NotFoundException) + def mock_update( + credentials: SyftVerifyKey, obj: User, has_permission: bool + ) -> User: + guest_user.name = obj.name + guest_user.email = obj.email + return guest_user monkeypatch.setattr(user_service.stash, "update", mock_update) monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid) + authed_context.role = ServiceRole.ADMIN + user = user_service.update(authed_context, uid=guest_user.id, **update_user) - resultant_user = user_service.update( - authed_context, uid=guest_user.id, user_update=update_user - ) - assert isinstance(resultant_user, UserView) - assert resultant_user.email == update_user.email - assert resultant_user.name == update_user.name + assert isinstance(user, UserView) + assert user.email == update_user.email + assert user.name == update_user.name + + another_update = UserUpdate(name="name", email="email@openmined.org") + user = user_service.update(authed_context, guest_user.id, **another_update) + assert isinstance(user, UserView) + assert user.name == "name" + assert user.email == "email@openmined.org" def test_userservice_update_fails( @@ -362,46 +396,78 @@ def test_userservice_update_fails( update_user: UserUpdate, ) -> None: update_error_msg = "Failed to reach server." - expected_error_msg = ( - f"Failed to update user with UID: {guest_user.id}. Error: {update_error_msg}" - ) - def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> Ok: - return Ok(guest_user) + @as_result(NotFoundException) + def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> User: + return guest_user - def mock_update(credentials: SyftVerifyKey, user, has_permission: bool) -> Err: - return Err(update_error_msg) - - authed_context.role = ServiceRole.ADMIN + @as_result(StashException) + def mock_update( + credentials: SyftVerifyKey, obj: User, has_permission: bool + ) -> NoReturn: + raise StashException(update_error_msg) monkeypatch.setattr(user_service.stash, "update", mock_update) monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid) - response = user_service.update( - authed_context, uid=guest_user.id, user_update=update_user - ) - assert isinstance(response, SyftError) - assert response.message == expected_error_msg + authed_context.role = ServiceRole.ADMIN + + with pytest.raises(StashException) as exc: + user_service.update(authed_context, uid=guest_user.id, **update_user) + + assert exc.type == StashException + assert exc.value.public == StashException.public_message + assert exc.value._private_message == update_error_msg + assert exc.value.get_message(authed_context) == update_error_msg def test_userservice_delete_failure( monkeypatch: MonkeyPatch, user_service: UserService, authed_context: AuthedServiceContext, + guest_user: User, ) -> None: id_to_delete = UID() - expected_error_msg = f"No user exists for given id: {id_to_delete}" + expected_error_msg = f"User {id_to_delete} not found" + + @as_result(NotFoundException) + def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> NoReturn: + raise NotFoundException(public_message=expected_error_msg) + + monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid) + + with pytest.raises(NotFoundException) as exc: + user_service.delete(context=authed_context, uid=id_to_delete) + + assert exc.type == 
NotFoundException + assert exc.value.public == expected_error_msg + + @as_result(NotFoundException) + def mock_get_by_uid_good(credentials: SyftVerifyKey, uid: UID) -> User: + return guest_user + + @as_result(user_errors.UserDeleteError) def mock_delete_by_uid( credentials: SyftVerifyKey, uid: UID, has_permission=False - ) -> Err: - return Err(expected_error_msg) + ) -> NoReturn: + raise user_errors.UserDeleteError(public_message=expected_error_msg) + monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid_good) monkeypatch.setattr(user_service.stash, "delete_by_uid", mock_delete_by_uid) - response = user_service.delete(context=authed_context, uid=id_to_delete) - assert isinstance(response, SyftError) - assert response.message == expected_error_msg + with pytest.raises(user_errors.UserPermissionError) as exc: + user_service.delete(context=authed_context, uid=id_to_delete) + + assert exc.type == user_errors.UserPermissionError + assert exc.value._private_message is not None + + authed_context.role = ServiceRole.ADMIN + with pytest.raises(user_errors.UserDeleteError) as exc: + user_service.delete(context=authed_context, uid=id_to_delete) + + assert exc.type == user_errors.UserDeleteError + assert exc.value.public_message == expected_error_msg def test_userservice_delete_success( @@ -410,23 +476,23 @@ def test_userservice_delete_success( authed_context: AuthedServiceContext, ) -> None: id_to_delete = UID() - expected_output = SyftSuccess(message=f"ID: {id_to_delete} deleted") + @as_result(NotFoundException) def mock_delete_by_uid( credentials: SyftVerifyKey, uid: UID, has_permission: bool = False - ) -> Ok: - return Ok(expected_output) + ) -> Literal[True]: + return True - def mock_get_target_object(credentials: SyftVerifyKey, uid): + @as_result(NotFoundException) + def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> User: return User(email=Faker().email()) monkeypatch.setattr(user_service.stash, "delete_by_uid", mock_delete_by_uid) - monkeypatch.setattr(user_service, "get_target_object", mock_get_target_object) - authed_context.role = ServiceRole.ADMIN + monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid) + authed_context.role = ServiceRole.ADMIN response = user_service.delete(context=authed_context, uid=id_to_delete) - assert isinstance(response, SyftSuccess) - assert response == expected_output + assert response def test_userservice_user_verify_key( @@ -437,7 +503,7 @@ def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Ok: monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) - response = user_service.user_verify_key(email=guest_user.email) + response = user_service.user_verify_key(email=guest_user.email).unwrap() assert response == guest_user.verify_key @@ -445,36 +511,25 @@ def test_userservice_user_verify_key_invalid_email( monkeypatch: MonkeyPatch, user_service: UserService, faker: Faker ) -> None: email = faker.email() - expected_output = SyftError(message=f"No user with email: {email}") + expected_output = f"User {email} not found" - def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Err: - return Err("No user found") + @as_result(NotFoundException) + def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> NoReturn: + raise NotFoundException(public_message=expected_output) monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) - response = user_service.user_verify_key(email=email) - assert response == expected_output + with pytest.raises(NotFoundException) as exc: + 
user_service.user_verify_key(email=email) - -def test_userservice_admin_verify_key_error( - monkeypatch: MonkeyPatch, user_service: UserService -) -> None: - expected_output = "failed to get admin verify_key" - - def mock_admin_verify_key() -> Err: - return Err(expected_output) - - monkeypatch.setattr(user_service.stash, "admin_verify_key", mock_admin_verify_key) - - response = user_service.admin_verify_key() - assert isinstance(response, SyftError) - assert response.message == expected_output + assert exc.type == NotFoundException + assert exc.value.public_message == expected_output def test_userservice_admin_verify_key_success( monkeypatch: MonkeyPatch, user_service: UserService, worker ) -> None: - response = user_service.admin_verify_key() + response = user_service.root_verify_key assert isinstance(response, SyftVerifyKey) assert response == worker.root_client.credentials.verify_key @@ -485,11 +540,13 @@ def test_userservice_register_user_exists( worker: Worker, guest_create_user: UserCreate, ) -> None: - def mock_get_by_email(credentials: SyftVerifyKey, email): - return Ok(guest_create_user) + @as_result(NotFoundException) + def mock_get_by_email(credentials: SyftVerifyKey, email) -> User: + return guest_create_user.to(User) monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) - expected_error_msg = f"User already exists with email: {guest_create_user.email}" + + expected_error_msg = f"User {guest_create_user.email} already exists" # Patch Worker settings to enable signup with mock.patch( @@ -497,12 +554,14 @@ def mock_get_by_email(credentials: SyftVerifyKey, email): new_callable=mock.PropertyMock, return_value=settings_with_signup_enabled(worker), ): - mock_worker = Worker.named(name="mock-node") - node_context = NodeServiceContext(node=mock_worker) + mock_worker = Worker.named(name="mock-server", db_url="sqlite://") + server_context = ServerServiceContext(server=mock_worker) - response = user_service.register(node_context, guest_create_user) - assert isinstance(response, SyftError) - assert response.message == expected_error_msg + with pytest.raises(SyftException) as exc: + user_service.register(server_context, guest_create_user) + + assert exc.type == SyftException + assert exc.value.public_message == expected_error_msg def test_userservice_register_error_on_get_email( @@ -511,10 +570,11 @@ def test_userservice_register_error_on_get_email( guest_create_user: UserCreate, worker: Worker, ) -> None: - expected_error_msg = "Failed to get email" + error_msg = "There was an error retrieving data. Contact your admin." 
- def mock_get_by_email(credentials: SyftVerifyKey, email): - return Err(expected_error_msg) + @as_result(StashException) + def mock_get_by_email(credentials: SyftVerifyKey, email) -> NoReturn: + raise StashException monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) @@ -524,12 +584,13 @@ def mock_get_by_email(credentials: SyftVerifyKey, email): new_callable=mock.PropertyMock, return_value=settings_with_signup_enabled(worker), ): - mock_worker = Worker.named(name="mock-node") - node_context = NodeServiceContext(node=mock_worker) + mock_worker = Worker.named(name="mock-server", db_url="sqlite://") + server_context = ServerServiceContext(server=mock_worker) + + with pytest.raises(StashException) as exc: + user_service.register(server_context, guest_create_user) - response = user_service.register(node_context, guest_create_user) - assert isinstance(response, SyftError) - assert response.message == expected_error_msg + assert exc.value.public == error_msg def test_userservice_register_success( @@ -539,35 +600,30 @@ def test_userservice_register_success( guest_create_user: UserCreate, guest_user: User, ) -> None: - def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Ok: - return Ok(None) - - def mock_set(*args, **kwargs) -> Ok: - return Ok(guest_user) + @as_result(NotFoundException) + def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> NoReturn: + raise NotFoundException - # Patch Worker settings to enable signup + @as_result(StashException) + def mock_set(*args, **kwargs) -> User: + return guest_user with mock.patch( "syft.Worker.settings", new_callable=mock.PropertyMock, return_value=settings_with_signup_enabled(worker), ): - mock_worker = Worker.named(name="mock-node") - node_context = NodeServiceContext(node=mock_worker) + mock_worker = Worker.named(name="mock-server", db_url="sqlite://") + server_context = ServerServiceContext(server=mock_worker) monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) monkeypatch.setattr(user_service.stash, "set", mock_set) - expected_msg = f"User '{guest_create_user.name}' successfully registered!" expected_private_key = guest_user.to(UserPrivateKey) + response = user_service.register(server_context, guest_create_user) - response = user_service.register(node_context, guest_create_user) - assert isinstance(response, tuple) - - syft_success_response, user_private_key = response - assert isinstance(syft_success_response, SyftSuccess) - assert syft_success_response.message == expected_msg - + assert isinstance(response, SyftSuccess) + user_private_key = response.value assert isinstance(user_private_key, UserPrivateKey) assert user_private_key == expected_private_key @@ -578,33 +634,38 @@ def test_userservice_register_set_fail( worker: Worker, guest_create_user: UserCreate, ) -> None: - def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Ok: - return Ok(None) - - expected_error_msg = "Failed to connect to server." 
+ @as_result(NotFoundException) + def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> NoReturn: + raise NotFoundException + @as_result(StashException) def mock_set( credentials: SyftVerifyKey, - user: User, + obj: User, add_permissions=None, has_permission: bool = False, - ) -> Err: - return Err(expected_error_msg) + ) -> NoReturn: + raise StashException with mock.patch( "syft.Worker.settings", new_callable=mock.PropertyMock, return_value=settings_with_signup_enabled(worker), ): - mock_worker = Worker.named(name="mock-node") - node_context = NodeServiceContext(node=mock_worker) + mock_worker = Worker.named(name="mock-server", db_url="sqlite://") + server_context = ServerServiceContext(server=mock_worker) monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) monkeypatch.setattr(user_service.stash, "set", mock_set) - response = user_service.register(node_context, guest_create_user) - assert isinstance(response, SyftError) - assert response.message == expected_error_msg + with pytest.raises(StashException) as exc: + user_service.register(server_context, guest_create_user) + + assert exc.type is StashException + assert ( + exc.value.public_message + == f"Failed to create user {guest_create_user.email}" + ) def test_userservice_exchange_credentials( @@ -613,15 +674,16 @@ def test_userservice_exchange_credentials( unauthed_context: UnauthedServiceContext, guest_user: User, ) -> None: - def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Ok: - return Ok(guest_user) + @as_result(NotFoundException) + def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> User: + return guest_user monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) expected_user_private_key = guest_user.to(UserPrivateKey) response = user_service.exchange_credentials(unauthed_context) - assert isinstance(response, UserPrivateKey) - assert response == expected_user_private_key + assert isinstance(response.value, UserPrivateKey) + assert response.value == expected_user_private_key def test_userservice_exchange_credentials_invalid_user( @@ -630,33 +692,109 @@ def test_userservice_exchange_credentials_invalid_user( unauthed_context: UnauthedServiceContext, guest_user: User, ) -> None: - def mock_get_by_email(credentials: SyftVerifyKey, email): - return Ok(None) + expected_error_msg = f"User {guest_user.email} not found" + + @as_result(NotFoundException) + def mock_get_by_email(credentials: SyftVerifyKey, email) -> NoReturn: + raise NotFoundException(public_message=expected_error_msg) monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) - expected_error_msg = ( - f"No user exists with {guest_user.email} and supplied password." - ) - response = user_service.exchange_credentials(unauthed_context) - assert isinstance(response, SyftError) - assert response.message == expected_error_msg + with pytest.raises(NotFoundException) as exc: + user_service.exchange_credentials(unauthed_context) + + assert exc.type == NotFoundException + assert exc.value.public_message == expected_error_msg def test_userservice_exchange_credentials_get_email_fails( monkeypatch: MonkeyPatch, user_service: UserService, unauthed_context: UnauthedServiceContext, - guest_user: User, ) -> None: get_by_email_error = "Failed to connect to server." 
-    def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Err:
-        return Err(get_by_email_error)
+    @as_result(StashException)
+    def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> NoReturn:
+        raise StashException(public_message=get_by_email_error)

     monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email)

-    expected_error_msg = f"Failed to retrieve user with {guest_user.email} with error: {get_by_email_error}"
-    response = user_service.exchange_credentials(unauthed_context)
-    assert isinstance(response, SyftError)
-    assert response.message == expected_error_msg
+    with pytest.raises(StashException) as exc:
+        user_service.exchange_credentials(unauthed_context)
+
+    assert exc.type == StashException
+    assert exc.value.public_message == get_by_email_error
+
+
+def test_userservice_update_via_client_with_mixed_args():
+    server = orchestra.launch(name="datasite-test", reset=True)
+
+    root_client = server.login(email="info@openmined.org", password="changethis")
+    root_client.register(
+        name="New user",
+        email="new_user@openmined.org",
+        password="password",
+        password_verify="password",
+    )
+    assert len(root_client.users.get_all()) == 2
+
+    user_list = root_client.users.search(email="new_user@openmined.org")
+    assert len(user_list) == 1
+
+    user = user_list[0]
+    assert user.name == "New user"
+
+    root_client.users.update(uid=user.id, name="Updated user name")
+    user = root_client.users.search(email="new_user@openmined.org")[0]
+    assert user.name == "Updated user name"
+
+    root_client.users.update(user.id, name="User name")
+    user = root_client.users.search(email="new_user@openmined.org")[0]
+    assert user.name == "User name"
+
+    root_client.users.update(user.id, password="newpassword")
+    user_client = root_client.login(
+        email="new_user@openmined.org", password="newpassword"
+    )
+    assert user_client.account.name == "User name"
+
+
+def test_reset_password():
+    server = orchestra.launch(name="datasite-test", reset=True)
+
+    datasite_client = server.login(email="info@openmined.org", password="changethis")
+    datasite_client.register(
+        email="new_syft_user@openmined.org",
+        password="verysecurepassword",
+        password_verify="verysecurepassword",
+        name="New User",
+    )
+    guest_client: SyftClient = server.login_as_guest()
+    guest_client.forgot_password(email="new_syft_user@openmined.org")
+    temp_token = datasite_client.users.request_password_reset(
+        datasite_client.notifications[-1].linked_obj.resolve.id
+    )
+    guest_client.reset_password(token=temp_token, new_password="Password123")
+    server.login(email="new_syft_user@openmined.org", password="Password123")
+
+
+def test_root_cannot_be_deleted():
+    server = orchestra.launch(name="datasite-test", reset=True)
+    datasite_client = server.login(email="info@openmined.org", password="changethis")
+
+    new_admin_email = "admin@openmined.org"
+    new_admin_pass = "changethis2"
+    datasite_client.register(
+        name="second admin",
+        email=new_admin_email,
+        password=new_admin_pass,
+        password_verify=new_admin_pass,
+    )
+    # update role
+    new_user_id = datasite_client.users.search(email=new_admin_email)[0].id
+    datasite_client.users.update(uid=new_user_id, role="admin")
+
+    new_admin_client = server.login(email=new_admin_email, password=new_admin_pass)
+    with sy.raises(sy.SyftException):
+        new_admin_client.users.delete(datasite_client.account.id)
diff --git a/packages/syft/tests/syft/users/user_stash_test.py b/packages/syft/tests/syft/users/user_stash_test.py
index 2529b417b89..584e616d093 100644
--- a/packages/syft/tests/syft/users/user_stash_test.py
+++ b/packages/syft/tests/syft/users/user_stash_test.py
@@ -1,19 +1,21 @@
 # third party
 from faker import Faker
+import pytest

 # syft absolute
-from syft.node.credentials import SyftSigningKey
-from syft.service.response import SyftSuccess
+from syft.server.credentials import SyftSigningKey
 from syft.service.user.user import User
 from syft.service.user.user import UserUpdate
 from syft.service.user.user_roles import ServiceRole
 from syft.service.user.user_stash import UserStash
+from syft.store.document_store_errors import NotFoundException
+from syft.types.errors import SyftException
 from syft.types.uid import UID


-def add_mock_user(root_domain_client, user_stash: UserStash, user: User) -> User:
+def add_mock_user(root_datasite_client, user_stash: UserStash, user: User) -> User:
     # prepare: add mock data
-    result = user_stash.partition.set(root_domain_client.credentials.verify_key, user)
+    result = user_stash.set(root_datasite_client.credentials.verify_key, user)
     assert result.is_ok()
     user = result.ok()
@@ -23,41 +25,41 @@ def add_mock_user(root_domain_client, user_stash: UserStash, user: User) -> User

 def test_userstash_set(
-    root_domain_client, user_stash: UserStash, guest_user: User
+    root_datasite_client, user_stash: UserStash, guest_user: User
 ) -> None:
-    result = user_stash.set(root_domain_client.credentials.verify_key, guest_user)
-    assert result.is_ok()
-
-    created_user = result.ok()
+    created_user = user_stash.set(
+        root_datasite_client.credentials.verify_key, guest_user
+    ).unwrap()
     assert isinstance(created_user, User)
     assert guest_user == created_user
-    assert guest_user.id in user_stash.partition.data
+    assert user_stash.exists(
+        root_datasite_client.credentials.verify_key, created_user.id
+    )


 def test_userstash_set_duplicate(
-    root_domain_client, user_stash: UserStash, guest_user: User
+    root_datasite_client, user_stash: UserStash, guest_user: User
 ) -> None:
-    result = user_stash.set(root_domain_client.credentials.verify_key, guest_user)
-    assert result.is_ok()
-
-    original_count = len(user_stash.partition.data)
-
-    result = user_stash.set(root_domain_client.credentials.verify_key, guest_user)
-    assert result.is_err()
+    _ = user_stash.set(root_datasite_client.credentials.verify_key, guest_user).unwrap()
+    original_count = len(user_stash._data)

-    assert "Duplication Key Error" in result.err()
+    with pytest.raises(SyftException) as exc:
+        _ = user_stash.set(
+            root_datasite_client.credentials.verify_key, guest_user
+        ).unwrap()
+    assert exc.value.public_message

-    assert len(user_stash.partition.data) == original_count
+    assert len(user_stash._data) == original_count


 def test_userstash_get_by_uid(
-    root_domain_client, user_stash: UserStash, guest_user: User
+    root_datasite_client, user_stash: UserStash, guest_user: User
 ) -> None:
     # prepare: add mock data
-    user = add_mock_user(root_domain_client, user_stash, guest_user)
+    user = add_mock_user(root_datasite_client, user_stash, guest_user)

     result = user_stash.get_by_uid(
-        root_domain_client.credentials.verify_key, uid=user.id
+        root_datasite_client.credentials.verify_key, uid=user.id
     )
     assert result.is_ok()
@@ -67,22 +69,23 @@ def test_userstash_get_by_uid(
     random_uid = UID()
     result = user_stash.get_by_uid(
-        root_domain_client.credentials.verify_key, uid=random_uid
+        root_datasite_client.credentials.verify_key, uid=random_uid
     )
-    assert result.is_ok()
+    assert result.is_err()

-    searched_user = result.ok()
-    assert searched_user is None
+    exc = result.err()
+    assert type(exc)
== NotFoundException + assert exc.public_message def test_userstash_get_by_email( - root_domain_client, faker: Faker, user_stash: UserStash, guest_user: User + root_datasite_client, faker: Faker, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) result = user_stash.get_by_email( - root_domain_client.credentials.verify_key, email=user.email + root_datasite_client.credentials.verify_key, email=user.email ) assert result.is_ok() searched_user = result.ok() @@ -90,21 +93,23 @@ def test_userstash_get_by_email( random_email = faker.email() result = user_stash.get_by_email( - root_domain_client.credentials.verify_key, email=random_email + root_datasite_client.credentials.verify_key, email=random_email ) - searched_user = result.ok() - assert result.is_ok() - assert searched_user is None + + exc = result.err() + assert result.is_err() + assert type(exc) == NotFoundException + assert "not found" in exc.public_message def test_userstash_get_by_signing_key( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) result = user_stash.get_by_signing_key( - root_domain_client.credentials.verify_key, signing_key=user.signing_key + root_datasite_client.credentials.verify_key, signing_key=user.signing_key ) assert result.is_ok() searched_user = result.ok() @@ -112,7 +117,7 @@ def test_userstash_get_by_signing_key( signing_key_as_str = str(user.signing_key) result = user_stash.get_by_signing_key( - root_domain_client.credentials.verify_key, signing_key=signing_key_as_str + root_datasite_client.credentials.verify_key, signing_key=signing_key_as_str ) assert result.is_ok() searched_user = result.ok() @@ -120,21 +125,23 @@ def test_userstash_get_by_signing_key( random_singing_key = SyftSigningKey.generate() result = user_stash.get_by_signing_key( - root_domain_client.credentials.verify_key, signing_key=random_singing_key + root_datasite_client.credentials.verify_key, signing_key=random_singing_key ) - searched_user = result.ok() - assert result.is_ok() - assert searched_user is None + + exc = result.err() + assert result.is_err() + assert type(exc) == NotFoundException + assert exc.public_message def test_userstash_get_by_verify_key( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) result = user_stash.get_by_verify_key( - root_domain_client.credentials.verify_key, verify_key=user.verify_key + root_datasite_client.credentials.verify_key, verify_key=user.verify_key ) assert result.is_ok() searched_user = result.ok() @@ -142,7 +149,7 @@ def test_userstash_get_by_verify_key( verify_key_as_str = str(user.verify_key) result = user_stash.get_by_verify_key( - root_domain_client.credentials.verify_key, verify_key=verify_key_as_str + root_datasite_client.credentials.verify_key, verify_key=verify_key_as_str ) assert result.is_ok() searched_user = result.ok() @@ -150,61 +157,64 @@ def test_userstash_get_by_verify_key( random_verify_key = SyftSigningKey.generate().verify_key result = 
user_stash.get_by_verify_key( - root_domain_client.credentials.verify_key, verify_key=random_verify_key + root_datasite_client.credentials.verify_key, verify_key=random_verify_key ) - searched_user = result.ok() - assert result.is_ok() - assert searched_user is None + searched_user = result.err() + assert result.is_err() + assert type(searched_user) == NotFoundException + assert searched_user.public_message def test_userstash_get_by_role( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) - result = user_stash.get_by_role( - root_domain_client.credentials.verify_key, role=ServiceRole.GUEST - ) - assert result.is_ok() - searched_user = result.ok() + searched_user = user_stash.get_by_role( + root_datasite_client.credentials.verify_key, role=ServiceRole.GUEST + ).unwrap() assert user == searched_user def test_userstash_delete_by_uid( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) result = user_stash.delete_by_uid( - root_domain_client.credentials.verify_key, uid=user.id + root_datasite_client.credentials.verify_key, uid=user.id ) assert result.is_ok() response = result.ok() - assert isinstance(response, SyftSuccess) - assert str(user.id) in response.message + assert isinstance(response, UID) + assert user.id == response result = user_stash.get_by_uid( - root_domain_client.credentials.verify_key, uid=user.id + root_datasite_client.credentials.verify_key, uid=user.id ) - assert result.is_ok() - searched_user = result.ok() - assert searched_user is None + assert result.is_err() + searched_user = result.err() + assert type(searched_user) is NotFoundException def test_userstash_update( - root_domain_client, user_stash: UserStash, guest_user: User, update_user: UserUpdate + root_datasite_client, + user_stash: UserStash, + guest_user: User, + update_user: UserUpdate, ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) update_kwargs = update_user.to_dict(exclude_empty=True).items() for field_name, value in update_kwargs: setattr(user, field_name, value) - result = user_stash.update(root_domain_client.credentials.verify_key, user=user) + result = user_stash.update(root_datasite_client.credentials.verify_key, obj=user) + assert result.is_ok() updated_user = result.ok() assert isinstance(updated_user, User) diff --git a/packages/syft/tests/syft/users/user_test.py b/packages/syft/tests/syft/users/user_test.py index 9566a8e1c1e..4c727b4f1fe 100644 --- a/packages/syft/tests/syft/users/user_test.py +++ b/packages/syft/tests/syft/users/user_test.py @@ -1,23 +1,24 @@ # stdlib from secrets import token_hex +import time # third party from faker import Faker +import pydantic import pytest # syft absolute import syft as sy -from syft import SyftError from syft import SyftSuccess from syft.client.api import SyftAPICall -from syft.client.domain_client import DomainClient -from syft.node.node import get_default_root_email -from syft.node.worker import Worker +from syft.client.datasite_client import 
DatasiteClient +from syft.server.server import get_default_root_email +from syft.server.worker import Worker from syft.service.context import AuthedServiceContext from syft.service.user.user import ServiceRole from syft.service.user.user import UserCreate -from syft.service.user.user import UserUpdate from syft.service.user.user import UserView +from syft.types.errors import SyftException GUEST_ROLES = [ServiceRole.GUEST] DS_ROLES = [ServiceRole.GUEST, ServiceRole.DATA_SCIENTIST] @@ -31,30 +32,34 @@ def get_users(worker): - return worker.get_service("UserService").get_all( - AuthedServiceContext(node=worker, credentials=worker.signing_key.verify_key) + return worker.services.user.get_all( + AuthedServiceContext(server=worker, credentials=worker.signing_key.verify_key) ) -def get_mock_client(root_client, role) -> DomainClient: - worker = root_client.api.connection.node +def get_mock_client(root_client, role) -> DatasiteClient: + worker = root_client.api.connection.server client = worker.guest_client mail = Faker().email() name = Faker().name() password = "pw" + user = root_client.register( name=name, email=mail, password=password, password_verify=password ) + assert user + user_id = [u for u in get_users(worker) if u.email == mail][0].id - assert worker.root_client.api.services.user.update( - user_id, UserUpdate(user_id=user_id, role=role) - ) + assert worker.root_client.api.services.user.update(uid=user_id, role=role) + client = client.login(email=mail, password=password) client._fetch_api(client.credentials) + # hacky, but useful for testing: patch user id and role on client client.user_id = user_id client.role = role + return client @@ -63,7 +68,7 @@ def manually_call_service(worker, client, service, args=None, kwargs=None): # while we mostly want to validate the server side permissions. 
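# Illustrative sketch of the manual signing flow this helper exercises, using only
# calls that appear elsewhere in this diff; "user.get_all" is an example path, and
# `worker` / `client` stand for the fixtures defined in this file. Note the rename:
# SyftAPICall now takes `server_uid` instead of `node_uid`.
api_call = SyftAPICall(server_uid=worker.id, path="user.get_all", args=[], kwargs={})
signed_call = api_call.sign(client.api.signing_key)  # unsigned or tampered calls now raise SyftException
result = client.api.connection.make_call(signed_call).message.data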
args = args if args is not None else [] kwargs = kwargs if kwargs is not None else {} - api_call = SyftAPICall(node_uid=worker.id, path=service, args=args, kwargs=kwargs) + api_call = SyftAPICall(server_uid=worker.id, path=service, args=args, kwargs=kwargs) signed_call = api_call.sign(client.api.signing_key) signed_result = client.api.connection.make_call(signed_call) result = signed_result.message.data @@ -71,17 +76,17 @@ def manually_call_service(worker, client, service, args=None, kwargs=None): @pytest.fixture -def guest_client(worker) -> DomainClient: +def guest_client(worker) -> DatasiteClient: return get_mock_client(worker.root_client, ServiceRole.GUEST) @pytest.fixture -def ds_client(worker) -> DomainClient: +def ds_client(worker) -> DatasiteClient: return get_mock_client(worker.root_client, ServiceRole.DATA_SCIENTIST) @pytest.fixture -def do_client(worker) -> DomainClient: +def do_client(worker) -> DatasiteClient: return get_mock_client(worker.root_client, ServiceRole.DATA_OWNER) @@ -94,7 +99,8 @@ def root_client(worker): def test_read_user(worker, root_client, do_client, ds_client, guest_client): for client in [ds_client, guest_client]: - assert not manually_call_service(worker, client, "user.get_all") + with pytest.raises(SyftException): + manually_call_service(worker, client, "user.get_all") for client in [do_client, root_client]: assert manually_call_service(worker, client, "user.get_all") @@ -111,19 +117,18 @@ def test_read_returns_view(root_client): def test_user_create(worker, do_client, guest_client, ds_client, root_client): for client in [ds_client, guest_client]: - assert not manually_call_service(worker, client, "user.create") + with pytest.raises(SyftException): + manually_call_service(worker, client, "user.create") + for client in [do_client, root_client]: + user_create = UserCreate( + email=Faker().email(), name="z", password="pw", password_verify="pw" + ) res = manually_call_service( - worker, - client, - "user.create", - args=[ - UserCreate( - email=Faker().email(), name="z", password="pw", password_verify="pw" - ) - ], + worker, client, "user.create", args=[], kwargs={**user_create} ) - assert isinstance(res, UserView) + assert isinstance(res, SyftSuccess) + assert isinstance(res.value, UserView) def test_user_delete(do_client, guest_client, ds_client, worker, root_client): @@ -145,76 +150,85 @@ def test_user_delete(do_client, guest_client, ds_client, worker, root_client): clients = [get_mock_client(root_client, role) for role in DS_ROLES] for c in clients: assert do_client.api.services.user.delete(c.user_id) + # but not higher or same roles clients = [ get_mock_client(root_client, role) for role in [ServiceRole.DATA_OWNER, ServiceRole.ADMIN] ] + for c in clients: - assert not do_client.api.services.user.delete(c.user_id) + with pytest.raises(SyftException) as exc: + do_client.api.services.user.delete(c.user_id) + assert exc.type == SyftException # DS cannot delete anything clients = [get_mock_client(root_client, role) for role in ADMIN_ROLES] for c in clients: - assert not ds_client.api.services.user.delete(c.user_id) + with pytest.raises(SyftException) as exc: + ds_client.api.services.user.delete(c.user_id) + assert exc.type == SyftException # Guests cannot delete anything clients = [get_mock_client(root_client, role) for role in ADMIN_ROLES] for c in clients: - assert not guest_client.api.services.user.delete(c.user_id) + with pytest.raises(SyftException) as exc: + guest_client.api.services.user.delete(c.user_id) + assert exc.type == SyftException def 
test_user_update_roles(do_client, guest_client, ds_client, root_client, worker): # admins can update the roles of lower roles clients = [get_mock_client(root_client, role) for role in DO_ROLES] - for c in clients: + for _c in clients: assert worker.root_client.api.services.user.update( - c.user_id, UserUpdate(role=ServiceRole.ADMIN) + uid=_c.user_id, role=ServiceRole.ADMIN ) # DOs can update the roles of lower roles clients = [get_mock_client(root_client, role) for role in DS_ROLES] - for c in clients: + for _c in clients: assert do_client.api.services.user.update( - c.user_id, UserUpdate(role=ServiceRole.DATA_SCIENTIST) + uid=_c.user_id, role=ServiceRole.DATA_SCIENTIST ) clients = [get_mock_client(root_client, role) for role in ADMIN_ROLES] # DOs cannot update roles to greater than / equal to own role - for c in clients: + for _c in clients: for target_role in [ServiceRole.DATA_OWNER, ServiceRole.ADMIN]: - assert not do_client.api.services.user.update( - c.user_id, UserUpdate(role=target_role) - ) + with pytest.raises(SyftException) as exc: + do_client.api.services.user.update(uid=_c.user_id, role=target_role) + assert exc.type == SyftException + assert exc.value.public_message # DOs cannot downgrade higher roles to lower levels clients = [ get_mock_client(root_client, role) for role in [ServiceRole.ADMIN, ServiceRole.DATA_OWNER] ] - for c in clients: + for _c in clients: for target_role in DO_ROLES: - if target_role < c.role: - assert not do_client.api.services.user.update( - c.user_id, UserUpdate(role=target_role) - ) + if target_role < _c.role: + with pytest.raises(SyftException) as exc: + do_client.api.services.user.update(uid=_c.user_id, role=target_role) + assert exc.type == SyftException # DSs cannot update any roles clients = [get_mock_client(root_client, role) for role in ADMIN_ROLES] - for c in clients: + for _c in clients: for target_role in ADMIN_ROLES: - assert not ds_client.api.services.user.update( - c.user_id, UserUpdate(role=target_role) - ) + with pytest.raises(SyftException) as exc: + ds_client.api.services.user.update(uid=_c.user_id, role=target_role) + assert exc.type == SyftException # Guests cannot update any roles clients = [get_mock_client(root_client, role) for role in ADMIN_ROLES] - for c in clients: + for _c in clients: for target_role in ADMIN_ROLES: - assert not guest_client.api.services.user.update( - c.user_id, UserUpdate(role=target_role) - ) + with pytest.raises(SyftException) as exc: + guest_client.api.services.user.update(uid=_c.user_id, role=target_role) + assert exc.type == SyftException def test_user_update(root_client): @@ -224,45 +238,56 @@ def test_user_update(root_client): for executing_client in executing_clients: for target_client in target_clients: if executing_client.role != ServiceRole.ADMIN: - assert not executing_client.api.services.user.update( - target_client.user_id, UserUpdate(name="abc") - ) + with pytest.raises(SyftException) as _: + assert not executing_client.api.services.user.update( + uid=target_client.user_id, name="abc" + ) else: assert executing_client.api.services.user.update( - target_client.user_id, UserUpdate(name="abc") + uid=target_client.user_id, name="abc" ) # you can update yourself assert executing_client.api.services.user.update( - executing_client.user_id, UserUpdate(name=Faker().name()) + uid=executing_client.user_id, name=Faker().name() ) def test_guest_user_update_to_root_email_failed( - root_client: DomainClient, - do_client: DomainClient, - guest_client: DomainClient, - ds_client: DomainClient, + root_client: 
DatasiteClient, + do_client: DatasiteClient, + guest_client: DatasiteClient, + ds_client: DatasiteClient, ) -> None: default_root_email: str = get_default_root_email() - user_update_to_root_email = UserUpdate(email=default_root_email) + for client in [root_client, do_client, guest_client, ds_client]: - res = client.api.services.user.update( - uid=client.me.id, user_update=user_update_to_root_email - ) - assert isinstance(res, SyftError) - assert res.message == "User already exists" + with pytest.raises(SyftException) as exc: + client.api.services.user.update( + uid=client.account.id, email=default_root_email + ) + + assert exc.type == SyftException + assert f"User {default_root_email} already exists" in exc.value.public_message + + +def test_user_view_set_password(worker: Worker, root_client: DatasiteClient) -> None: + change_ok = root_client.account.set_password("123", confirm=False) + assert type(change_ok) == SyftSuccess + assert "Successfully" in change_ok.message + email = root_client.account.email -def test_user_view_set_password(worker: Worker, root_client: DomainClient) -> None: - root_client.me.set_password("123", confirm=False) - email = root_client.me.email # log in again with the wrong password - root_client_c = worker.root_client.login(email=email, password="1234") - assert isinstance(root_client_c, SyftError) + with pytest.raises(SyftException) as exc: + worker.root_client.login(email=email, password="1234") + + assert exc.type == SyftException + assert exc.value.public_message == "Invalid credentials." + # log in again with the right password root_client_b = worker.root_client.login(email=email, password="123") - assert root_client_b.me == root_client.me + assert root_client_b.account == root_client.account @pytest.mark.parametrize( @@ -270,10 +295,13 @@ def test_user_view_set_password(worker: Worker, root_client: DomainClient) -> No ["syft", "syft.com", "syft@.com"], ) def test_user_view_set_invalid_email( - root_client: DomainClient, invalid_email: str + root_client: DatasiteClient, invalid_email: str ) -> None: - result = root_client.me.set_email(invalid_email) - assert isinstance(result, SyftError) + with pytest.raises(SyftException) as exc: + root_client.account.set_email(invalid_email) + + assert exc.type == SyftException + assert "Invalid email" in exc.value.public_message @pytest.mark.parametrize( @@ -284,72 +312,86 @@ def test_user_view_set_invalid_email( ], ) def test_user_view_set_email_success( - root_client: DomainClient, - ds_client: DomainClient, + root_client: DatasiteClient, + ds_client: DatasiteClient, valid_email_root: str, valid_email_ds: str, ) -> None: - result = root_client.me.set_email(valid_email_root) + result = root_client.account.set_email(valid_email_root) assert isinstance(result, SyftSuccess) - result2 = ds_client.me.set_email(valid_email_ds) + + result2 = ds_client.account.set_email(valid_email_ds) assert isinstance(result2, SyftSuccess) def test_user_view_set_default_admin_email_failed( - ds_client: DomainClient, guest_client: DomainClient + ds_client: DatasiteClient, guest_client: DatasiteClient ) -> None: default_root_email = get_default_root_email() - result = ds_client.me.set_email(default_root_email) - assert isinstance(result, SyftError) - assert result.message == "User already exists" + error_msg = f"User {default_root_email} already exists" - result_2 = guest_client.me.set_email(default_root_email) - assert isinstance(result_2, SyftError) - assert result_2.message == "User already exists" + with pytest.raises(SyftException) as exc: + 
ds_client.account.set_email(default_root_email) + + assert exc.type == SyftException + assert exc.value.public_message == error_msg def test_user_view_set_duplicated_email( - root_client: DomainClient, ds_client: DomainClient, guest_client: DomainClient + root_client: DatasiteClient, ds_client: DatasiteClient, guest_client: DatasiteClient ) -> None: - result = ds_client.me.set_email(root_client.me.email) - result2 = guest_client.me.set_email(root_client.me.email) + email = root_client.account.email + error_msg = f"User {email} already exists" + + with pytest.raises(SyftException) as exc: + ds_client.account.set_email(email) + + assert exc.type == SyftException + assert exc.value.public_message == error_msg + + with pytest.raises(SyftException) as exc: + guest_client.account.set_email(email) + + assert exc.type == SyftException + assert exc.value.public_message == error_msg - assert isinstance(result, SyftError) - assert result.message == "User already exists" - assert isinstance(result2, SyftError) - assert result2.message == "User already exists" + email = ds_client.account.email + error_msg = f"User {email} already exists" - result3 = guest_client.me.set_email(ds_client.me.email) - assert isinstance(result3, SyftError) - assert result3.message == "User already exists" + with pytest.raises(SyftException) as exc: + guest_client.account.set_email(email) + + assert exc.type == SyftException + assert exc.value.public_message == error_msg def test_user_view_update_name_institution_website( - root_client: DomainClient, - ds_client: DomainClient, - guest_client: DomainClient, + root_client: DatasiteClient, + ds_client: DatasiteClient, + guest_client: DatasiteClient, ) -> None: - result = root_client.me.update( + root_client.account.update( name="syft", institution="OpenMined", website="https://syft.org" ) - assert isinstance(result, SyftSuccess) - assert root_client.me.name == "syft" - assert root_client.me.institution == "OpenMined" - assert root_client.me.website == "https://syft.org" + assert root_client.account.name == "syft" + assert root_client.account.institution == "OpenMined" + assert root_client.account.website == "https://syft.org" - result2 = ds_client.me.update(name="syft2", institution="OpenMined") - assert isinstance(result2, SyftSuccess) - assert ds_client.me.name == "syft2" - assert ds_client.me.institution == "OpenMined" + ds_client.account.update(name="syft2", institution="OpenMined") + assert ds_client.account.name == "syft2" + assert ds_client.account.institution == "OpenMined" - result3 = guest_client.me.update(name="syft3") - assert isinstance(result3, SyftSuccess) - assert guest_client.me.name == "syft3" + guest_client.account.update(name="syft3") + assert guest_client.account.name == "syft3" -def test_user_view_set_role(worker: Worker, guest_client: DomainClient) -> None: +def test_user_view_set_role(worker: Worker, guest_client: DatasiteClient) -> None: admin_client = get_mock_client(worker.root_client, ServiceRole.ADMIN) - assert admin_client.me.role == ServiceRole.ADMIN + assert admin_client.account.role == ServiceRole.ADMIN + + # wait for the user to be created for sorting purposes + time.sleep(0.01) + admin_client.register( name="Sheldon Cooper", email="sheldon@caltech.edu", @@ -358,42 +400,53 @@ def test_user_view_set_role(worker: Worker, guest_client: DomainClient) -> None: institution="Caltech", website="https://www.caltech.edu/", ) + sheldon = admin_client.users[-1] assert ( sheldon.syft_client_verify_key - == admin_client.me.syft_client_verify_key + == 
admin_client.account.syft_client_verify_key == admin_client.verify_key ) assert sheldon.role == ServiceRole.DATA_SCIENTIST + sheldon.update(role="guest") assert sheldon.role == ServiceRole.GUEST + sheldon.update(role="data_owner") assert sheldon.role == ServiceRole.DATA_OWNER - # the data scientist (Sheldon) log in the domain, he should not + + # the data scientist (Sheldon) log in the datasite, he should not # be able to change his role, even if he is a data owner now ds_client = guest_client.login(email="sheldon@caltech.edu", password="changethis") assert ( - ds_client.me.syft_client_verify_key + ds_client.account.syft_client_verify_key == ds_client.verify_key != admin_client.verify_key ) - assert ds_client.me.role == sheldon.role - assert ds_client.me.role == ServiceRole.DATA_OWNER - assert isinstance(ds_client.me.update(role="guest"), SyftError) - assert isinstance(ds_client.me.update(role="data_scientist"), SyftError) + assert ds_client.account.role == sheldon.role + assert ds_client.account.role == ServiceRole.DATA_OWNER + + with pytest.raises(SyftException): + ds_client.account.update(role="guest") + with pytest.raises(SyftException): + ds_client.account.update(role="data_scientist") + # now we set sheldon's role to admin. Only now he can change his role sheldon.update(role="admin") assert sheldon.role == ServiceRole.ADMIN + # QA: this is different than when running in the notebook assert len(ds_client.users.get_all()) == len(admin_client.users.get_all()) - assert isinstance(ds_client.me.update(role="guest"), SyftSuccess) - assert isinstance(ds_client.me.update(role="admin"), SyftError) + assert isinstance(ds_client.account.update(role="guest"), SyftSuccess) + + with pytest.raises(SyftException): + ds_client.account.update(role="admin") def test_user_view_set_role_admin(faker: Faker) -> None: - node = sy.orchestra.launch(name=token_hex(8), reset=True) - domain_client = node.login(email="info@openmined.org", password="changethis") - domain_client.register( + server = sy.orchestra.launch(name=token_hex(8), reset=True) + datasite_client = server.login(email="info@openmined.org", password="changethis") + datasite_client.register( name="Sheldon Cooper", email="sheldon@caltech.edu", password="changethis", @@ -401,7 +454,7 @@ def test_user_view_set_role_admin(faker: Faker) -> None: institution="Caltech", website="https://www.caltech.edu/", ) - domain_client.register( + datasite_client.register( name="Sheldon Cooper", email="sheldon2@caltech.edu", password="changethis", @@ -409,17 +462,56 @@ def test_user_view_set_role_admin(faker: Faker) -> None: institution="Caltech", website="https://www.caltech.edu/", ) - assert len(domain_client.users.get_all()) == 3 - domain_client.users[1].update(role="admin") - ds_client = node.login(email="sheldon@caltech.edu", password="changethis") - assert ds_client.me.role == ServiceRole.ADMIN - assert len(ds_client.users.get_all()) == len(domain_client.users.get_all()) + assert len(datasite_client.users.get_all()) == 3 + + datasite_client.users[1].update(role="admin") + ds_client = server.login(email="sheldon@caltech.edu", password="changethis") + assert ds_client.account.role == ServiceRole.ADMIN + assert len(ds_client.users.get_all()) == len(datasite_client.users.get_all()) + + datasite_client.users[2].update(role="admin") + ds_client_2 = server.login(email="sheldon2@caltech.edu", password="changethis") + assert ds_client_2.account.role == ServiceRole.ADMIN + assert len(ds_client_2.users.get_all()) == len(datasite_client.users.get_all()) + + 
server.python_server.cleanup() + server.land() + + +@pytest.mark.parametrize( + "search_param", + [ + ("email", "logged_in_user"), + ("name", "logged_in_username"), + ], +) +def test_user_search( + root_client: DatasiteClient, + ds_client: DatasiteClient, + search_param: tuple[str, str], +) -> None: + k, attr = search_param + v = getattr(ds_client, attr) + users = root_client.api.services.user.search(**{k: v}) + + for user in users: + assert getattr(user, k) == v - domain_client.users[2].update(role="admin") - ds_client_2 = node.login(email="sheldon2@caltech.edu", password="changethis") - assert ds_client_2.me.role == ServiceRole.ADMIN - assert len(ds_client_2.users.get_all()) == len(domain_client.users.get_all()) - node.python_node.cleanup() - node.land() +class M(pydantic.BaseModel): + role: ServiceRole + + +@pytest.mark.parametrize("role", [x.name for x in ServiceRole]) +class TestServiceRole: + @staticmethod + def test_accept_str_in_base_model(role: str) -> None: + m = M(role=role) + assert m.role is getattr(ServiceRole, role) + + @staticmethod + def test_accept_str(role: str) -> None: + assert pydantic.TypeAdapter(ServiceRole).validate_python(role) is getattr( + ServiceRole, role + ) diff --git a/packages/syft/tests/syft/worker_image/image_identifier_test.py b/packages/syft/tests/syft/worker_image/image_identifier_test.py index 4b5e843fd93..55b8139884c 100644 --- a/packages/syft/tests/syft/worker_image/image_identifier_test.py +++ b/packages/syft/tests/syft/worker_image/image_identifier_test.py @@ -34,6 +34,23 @@ def test_image_id_with_registry(): assert image_id.full_name_with_tag == tag +def test_image_id_with_gcp_registry(): + tag = "openmined/test-image:1.0" + + gcp_url = "us-central1-docker.pkg.dev/project-12345/registry-name" + registry = SyftImageRegistry.from_url(gcp_url) + image_id = SyftWorkerImageIdentifier.with_registry(tag, registry) + + assert ( + image_id.registry_host + == "us-central1-docker.pkg.dev/project-12345/registry-name" + ) + assert image_id.repo == "openmined/test-image" + assert image_id.tag == "1.0" + assert image_id.repo_with_tag == "openmined/test-image:1.0" + assert image_id.full_name_with_tag == f"{gcp_url}/{tag}" + + def test_image_id_with_incorrect_registry(): with pytest.raises(ValueError): tag = "docker.io/openmined/test-nginx:0.7.8" diff --git a/packages/syft/tests/syft/worker_pool/worker_pool_service_test.py b/packages/syft/tests/syft/worker_pool/worker_pool_service_test.py index 8aad9f5a27e..a14cdef3f8b 100644 --- a/packages/syft/tests/syft/worker_pool/worker_pool_service_test.py +++ b/packages/syft/tests/syft/worker_pool/worker_pool_service_test.py @@ -1,46 +1,68 @@ # third party -from faker import Faker +import pytest # syft absolute import syft as sy +from syft.client.client import SyftClient from syft.custom_worker.config import DockerWorkerConfig -from syft.node.worker import Worker +from syft.custom_worker.config import PrebuiltWorkerConfig +from syft.custom_worker.config import WorkerConfig +from syft.server.worker import Worker from syft.service.request.request import CreateCustomWorkerPoolChange from syft.service.response import SyftSuccess from syft.service.worker.worker_image import SyftWorkerImage from syft.service.worker.worker_pool import WorkerPool -# relative -from ..request.request_code_accept_deny_test import get_ds_client +PREBUILT_IMAGE_TAG = f"docker.io/openmined/syft-backend:{sy.__version__}" +CUSTOM_DOCKERFILE = f""" +FROM {PREBUILT_IMAGE_TAG} -def test_create_image_and_pool_request_accept(faker: Faker, worker: Worker): 
+RUN pip install recordlinkage +""" + +CUSTOM_IMAGE_TAG = "docker.io/openmined/custom-worker-recordlinkage:latest" + +WORKER_CONFIG_TEST_CASES_WITH_N_IMAGES = [ + ( + CUSTOM_IMAGE_TAG, + DockerWorkerConfig(dockerfile=CUSTOM_DOCKERFILE), + 2, # total number of images. + # 2 since we pull a pre-built image (1) as the base image to build a custom image (2) + ), + (None, PrebuiltWorkerConfig(tag=PREBUILT_IMAGE_TAG), 2), +] + +WORKER_CONFIG_TEST_CASES = [ + test_case[:2] for test_case in WORKER_CONFIG_TEST_CASES_WITH_N_IMAGES +] + + +@pytest.mark.parametrize("docker_tag,worker_config", WORKER_CONFIG_TEST_CASES) +def test_create_image_and_pool_request_accept( + worker: Worker, + docker_tag: str, + worker_config: WorkerConfig, + ds_client: SyftClient, +) -> None: """ Test the functionality of `SyftWorkerPoolService.create_image_and_pool_request` when the request is accepted """ - # construct a root client and data scientist client for a domain + # construct a root client and data scientist client for a datasite root_client = worker.root_client - ds_client = get_ds_client(faker, root_client, worker.guest_client) assert root_client.credentials != ds_client.credentials # the DS makes a request to create an image and a pool based on the image - custom_dockerfile = f""" - FROM openmined/grid-backend:{sy.__version__} - - RUN pip install recordlinkage - """ - docker_config = DockerWorkerConfig(dockerfile=custom_dockerfile) - docker_tag = "openmined/custom-worker-recordlinkage:latest" request = ds_client.api.services.worker_pool.create_image_and_pool_request( pool_name="recordlinkage-pool", num_workers=2, tag=docker_tag, - config=docker_config, + config=worker_config, reason="I want to do some more cool data science with PySyft and Recordlinkage", ) assert len(request.changes) == 2 - assert request.changes[0].config == docker_config + assert request.changes[0].config == worker_config assert request.changes[1].num_workers == 2 assert request.changes[1].pool_name == "recordlinkage-pool" @@ -51,55 +73,68 @@ def test_create_image_and_pool_request_accept(faker: Faker, worker: Worker): assert root_client.requests[-1].status.value == 2 all_image_tags = [ - im.image_identifier.repo_with_tag + im.image_identifier.full_name_with_tag for im in root_client.images.get_all() if im.image_identifier ] - assert docker_tag in all_image_tags + tag = ( + worker_config.tag + if isinstance(worker_config, PrebuiltWorkerConfig) + else docker_tag + ) + assert tag in all_image_tags launched_pool = root_client.worker_pools["recordlinkage-pool"] assert isinstance(launched_pool, WorkerPool) assert len(launched_pool.worker_list) == 2 -def test_create_pool_request_accept(faker: Faker, worker: Worker): +@pytest.mark.parametrize( + "docker_tag,worker_config,n_images", + WORKER_CONFIG_TEST_CASES_WITH_N_IMAGES, +) +def test_create_pool_request_accept( + worker: Worker, + docker_tag: str, + worker_config: WorkerConfig, + n_images: int, + ds_client: SyftClient, +) -> None: """ Test the functionality of `SyftWorkerPoolService.create_pool_request` when the request is accepted """ - # construct a root client and data scientist client for a domain + # construct a root client and data scientist client for a datasite root_client = worker.root_client - ds_client = get_ds_client(faker, root_client, worker.guest_client) assert root_client.credentials != ds_client.credentials # the DO submits the docker config to build an image - custom_dockerfile_str = f""" - FROM openmined/grid-backend:{sy.__version__} - - RUN pip install opendp - """ - 
docker_config = DockerWorkerConfig(dockerfile=custom_dockerfile_str) - submit_result = root_client.api.services.worker_image.submit_dockerfile( - docker_config=docker_config + submit_result = root_client.api.services.worker_image.submit( + worker_config=worker_config ) assert isinstance(submit_result, SyftSuccess) - assert len(root_client.images.get_all()) == 2 + assert len(root_client.images.get_all()) == n_images # The root client builds the image - worker_image: SyftWorkerImage = root_client.images[1] - docker_tag = "openmined/custom-worker-opendp:latest" - docker_build_result = root_client.api.services.worker_image.build( - image_uid=worker_image.id, - tag=docker_tag, + worker_image: SyftWorkerImage = root_client.api.services.worker_image.get_by_config( + worker_config ) - # update the worker image variable after the image was built - worker_image: SyftWorkerImage = root_client.images[1] - assert isinstance(docker_build_result, SyftSuccess) - assert worker_image.image_identifier.repo_with_tag == docker_tag + if not worker_image.is_prebuilt: + docker_build_result = root_client.api.services.worker_image.build( + image_uid=worker_image.id, + tag=docker_tag, + ) + # update the worker image variable after the image was built + worker_image: SyftWorkerImage = ( + root_client.api.services.worker_image.get_by_config(worker_config) + ) + assert isinstance(docker_build_result, SyftSuccess) + assert worker_image.image_identifier.full_name_with_tag == docker_tag # The DS client submits a request to create a pool from an existing image request = ds_client.api.services.worker_pool.pool_creation_request( pool_name="opendp-pool", num_workers=3, image_uid=worker_image.id ) + assert len(request.changes) == 1 change = request.changes[0] assert isinstance(change, CreateCustomWorkerPoolChange) @@ -110,6 +145,23 @@ def test_create_pool_request_accept(faker: Faker, worker: Worker): # the root client approves the request, and the worker pool should be launched req_result = root_client.requests[-1].approve() assert isinstance(req_result, SyftSuccess) + launched_pool = root_client.worker_pools["opendp-pool"] assert isinstance(launched_pool, WorkerPool) assert len(launched_pool.worker_list) == 3 + + +WORKER_CONFIGS = [test_case[1] for test_case in WORKER_CONFIG_TEST_CASES] + + +@pytest.mark.parametrize("worker_config", WORKER_CONFIGS) +def test_get_by_worker_config( + worker: Worker, + worker_config: WorkerConfig, +) -> None: + root_client = worker.root_client + for config in WORKER_CONFIGS: + root_client.api.services.worker_image.submit(worker_config=config) + + worker_image = root_client.api.services.worker_image.get_by_config(worker_config) + assert worker_image.config == worker_config diff --git a/packages/syft/tests/syft/worker_pool/worker_test.py b/packages/syft/tests/syft/worker_pool/worker_test.py index 6503e51eb66..4da24cb315b 100644 --- a/packages/syft/tests/syft/worker_pool/worker_test.py +++ b/packages/syft/tests/syft/worker_pool/worker_test.py @@ -1,7 +1,7 @@ # syft absolute import syft as sy from syft.custom_worker.config import DockerWorkerConfig -from syft.node.worker import Worker +from syft.server.worker import Worker from syft.service.response import SyftSuccess from syft.service.worker.worker_image import SyftWorkerImage from syft.types.datetime import DateTime @@ -10,7 +10,7 @@ def get_docker_config(): # the DS makes a request to create an image and a pool based on the image custom_dockerfile = f""" - FROM openmined/grid-backend:{sy.__version__} + FROM openmined/syft-backend:{sy.__version__} 
RUN pip install recordlinkage """ return DockerWorkerConfig(dockerfile=custom_dockerfile) @@ -23,8 +23,8 @@ def test_syft_worker(worker: Worker): """ root_client = worker.root_client docker_config = get_docker_config() - submit_result = root_client.api.services.worker_image.submit_dockerfile( - docker_config=docker_config + submit_result = root_client.api.services.worker_image.submit( + worker_config=docker_config ) assert isinstance(submit_result, SyftSuccess) @@ -43,7 +43,7 @@ def test_syft_worker(worker: Worker): pool_name = "custom-worker-pool" num_workers = 3 worker_pool_res = root_client.api.services.worker_pool.launch( - name=pool_name, + pool_name=pool_name, image_uid=worker_image.id, num_workers=num_workers, ) diff --git a/packages/syft/tests/syft/worker_test.py b/packages/syft/tests/syft/worker_test.py index 46ca54963c0..f52772038cf 100644 --- a/packages/syft/tests/syft/worker_test.py +++ b/packages/syft/tests/syft/worker_test.py @@ -6,27 +6,27 @@ from nacl.exceptions import BadSignatureError import numpy as np import pytest -from result import Ok # syft absolute import syft as sy from syft.client.api import SignedSyftAPICall from syft.client.api import SyftAPICall -from syft.node.credentials import SIGNING_KEY_FOR -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey -from syft.node.worker import Worker +from syft.server.credentials import SIGNING_KEY_FOR +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey +from syft.server.worker import Worker from syft.service.action.action_object import ActionObject -from syft.service.action.action_store import DictActionStore +from syft.service.action.action_store import ActionObjectStash from syft.service.context import AuthedServiceContext from syft.service.queue.queue_stash import QueueItem -from syft.service.response import SyftAttributeError from syft.service.response import SyftError from syft.service.user.user import User from syft.service.user.user import UserCreate from syft.service.user.user import UserView -from syft.service.user.user_service import UserService -from syft.types.uid import UID +from syft.service.user.user_stash import UserStash +from syft.store.db.sqlite import SQLiteDBManager +from syft.types.errors import SyftException +from syft.types.result import Ok test_signing_key_string = ( "b7803e90a6f3f4330afbd943cef3451c716b338b17a9cf40a0a309bc38bc366d" @@ -76,28 +76,42 @@ def test_signing_key() -> None: assert test_verify_key == test_verify_key_2 -def test_action_store() -> None: +@pytest.fixture( + scope="function", + params=[ + "tODOsqlite_address", + # "TODOpostgres_address", # will be used when we have a postgres CI tests + ], +) +def action_object_stash() -> ActionObjectStash: + root_verify_key = SyftVerifyKey.from_string(test_verify_key_string) + db_manager = SQLiteDBManager.random(root_verify_key=root_verify_key) + stash = ActionObjectStash(store=db_manager) + _ = UserStash(store=db_manager) + stash.db.init_tables() + yield stash + + +def test_action_store(action_object_stash: ActionObjectStash) -> None: test_signing_key = SyftSigningKey.from_string(test_signing_key_string) - action_store = DictActionStore(node_uid=UID()) - uid = UID() + test_verify_key = test_signing_key.verify_key raw_data = np.array([1, 2, 3]) test_object = ActionObject.from_obj(raw_data) + uid = test_object.id - set_result = action_store.set( + action_object_stash.set_or_update( uid=uid, - credentials=test_signing_key, + 
credentials=test_verify_key, syft_object=test_object, has_result_read_permission=True, - ) - assert set_result.is_ok() - test_object_result = action_store.get(uid=uid, credentials=test_signing_key) - assert test_object_result.is_ok() - assert (test_object == test_object_result.ok()).all() + ).unwrap() + from_stash = action_object_stash.get(uid=uid, credentials=test_verify_key).unwrap() + assert (test_object == from_stash).all() test_verift_key_2 = SyftVerifyKey.from_string(test_verify_key_string_2) - test_object_result_fail = action_store.get(uid=uid, credentials=test_verift_key_2) - assert test_object_result_fail.is_err() - assert "denied" in test_object_result_fail.err() + with pytest.raises(SyftException) as exc: + action_object_stash.get(uid=uid, credentials=test_verift_key_2).unwrap() + assert "denied" in exc.public_message def test_user_transform() -> None: @@ -131,7 +145,7 @@ def test_user_transform() -> None: def test_user_service(worker) -> None: test_signing_key = SyftSigningKey.from_string(test_signing_key_string) - user_service = worker.get_service(UserService) + user_service = worker.services.user # create a user new_user = UserCreate( @@ -142,10 +156,12 @@ def test_user_service(worker) -> None: ) # create a context - context = AuthedServiceContext(node=worker, credentials=test_signing_key.verify_key) + context = AuthedServiceContext( + server=worker, credentials=test_signing_key.verify_key + ) # call the create function - user_view = user_service.create(context=context, user_create=new_user) + user_view = user_service.create(context=context, **new_user) # get the result assert user_view is not None @@ -220,14 +236,6 @@ def post_add(context: Any, name: str, new_result: Any) -> Any: action_object.syft_post_hooks__["__add__"] = [] -def test_worker_serde(worker) -> None: - ser = sy.serialize(worker, to_bytes=True) - de = sy.deserialize(ser, from_bytes=True) - - assert de.signing_key == worker.signing_key - assert de.id == worker.id - - @pytest.fixture(params=[0]) def worker_with_proc(request): worker = Worker( @@ -243,7 +251,7 @@ def worker_with_proc(request): "path, kwargs", [ ("data_subject.get_all", {}), - ("data_subject.get_by_name", {"name": "test"}), + ("user.get_all", {}), ("dataset.get_all", {}), ("dataset.search", {"name": "test"}), ("metadata", {}), @@ -256,7 +264,8 @@ def test_worker_handle_api_request( kwargs: dict, blocking: bool, ) -> None: - node_uid = worker_with_proc.id + print(f"run: blocking: {blocking} path: {path} kwargs: {kwargs}") + server_uid = worker_with_proc.id root_client = worker_with_proc.root_client assert root_client.api is not None @@ -266,36 +275,34 @@ def test_worker_handle_api_request( root_client = worker_with_proc.root_client api_call = SyftAPICall( - node_uid=node_uid, path=path, args=[], kwargs=kwargs, blocking=blocking + server_uid=server_uid, path=path, args=[], kwargs=kwargs, blocking=blocking ) # should fail on unsigned requests - result = worker_with_proc.handle_api_call(api_call).message.data - assert isinstance(result, SyftError) + with pytest.raises(SyftException): + _ = worker_with_proc.handle_api_call(api_call).message.data signed_api_call = api_call.sign(root_client.api.signing_key) # should work on signed api calls - result = worker_with_proc.handle_api_call(signed_api_call).message.data - assert not isinstance(result, SyftError) + _ = worker_with_proc.handle_api_call(signed_api_call).message.data # Guest client should not have access to the APIs guest_signed_api_call = api_call.sign(root_client.api.signing_key) - result = 
worker_with_proc.handle_api_call(guest_signed_api_call).message - assert not isinstance(result, SyftAttributeError) + _ = worker_with_proc.handle_api_call(guest_signed_api_call).message # should fail on altered requests bogus_api_call = signed_api_call bogus_api_call.serialized_message += b"hacked" - result = worker_with_proc.handle_api_call(bogus_api_call).message.data - assert isinstance(result, SyftError) + with pytest.raises(SyftException): + _ = worker_with_proc.handle_api_call(bogus_api_call).message.data @pytest.mark.parametrize( "path, kwargs", [ ("data_subject.get_all", {}), - ("data_subject.get_by_name", {"name": "test"}), + ("user.get_all", {}), ("dataset.get_all", {}), ("dataset.search", {"name": "test"}), ("metadata", {}), @@ -308,9 +315,11 @@ def test_worker_handle_api_response( kwargs: dict, blocking: bool, ) -> None: - node_uid = worker_with_proc.id + server_uid = worker_with_proc.id n_processes = worker_with_proc.processes root_client = worker_with_proc.root_client + + assert root_client.settings.allow_guest_signup(enable=True) assert root_client.api is not None guest_client = root_client.guest() @@ -326,7 +335,7 @@ def test_worker_handle_api_response( root_client = worker_with_proc.root_client call = SyftAPICall( - node_uid=node_uid, path=path, args=[], kwargs=kwargs, blocking=blocking + server_uid=server_uid, path=path, args=[], kwargs=kwargs, blocking=blocking ) signed_api_call = call.sign(root_client.credentials) diff --git a/packages/syft/tests/syft/zmq_queue_test.py b/packages/syft/tests/syft/zmq_queue_test.py index 9b22ac7d260..a995d09dced 100644 --- a/packages/syft/tests/syft/zmq_queue_test.py +++ b/packages/syft/tests/syft/zmq_queue_test.py @@ -13,17 +13,15 @@ import syft from syft.service.queue.base_queue import AbstractMessageHandler from syft.service.queue.queue import QueueManager -from syft.service.queue.zmq_queue import ZMQClient -from syft.service.queue.zmq_queue import ZMQClientConfig -from syft.service.queue.zmq_queue import ZMQConsumer -from syft.service.queue.zmq_queue import ZMQProducer -from syft.service.queue.zmq_queue import ZMQQueueConfig -from syft.service.response import SyftError +from syft.service.queue.zmq_client import ZMQClient +from syft.service.queue.zmq_client import ZMQClientConfig +from syft.service.queue.zmq_client import ZMQQueueConfig +from syft.service.queue.zmq_consumer import ZMQConsumer +from syft.service.queue.zmq_producer import ZMQProducer from syft.service.response import SyftSuccess +from syft.types.errors import SyftException from syft.util.util import get_queue_address - -# relative -from ..utils.random_port import get_random_port +from syft.util.util import get_random_available_port @pytest.fixture @@ -36,7 +34,7 @@ def client(): client.close() -@pytest.mark.flaky(reruns=3, reruns_delay=3) +# @pytest.mark.flaky(reruns=3, reruns_delay=3) @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") def test_zmq_client(client): hostname = "127.0.0.1" @@ -107,8 +105,8 @@ def handle_message(message: bytes, *args, **kwargs): assert len(received_message) == 1 msg = b"My Message" - response = client.send_message(message=msg, queue_name="random queue") - assert isinstance(response, SyftError) + with pytest.raises(SyftException): + response = client.send_message(message=msg, queue_name="random queue") assert isinstance(client.close(), SyftSuccess) sleep(0.5) @@ -118,7 +116,7 @@ def handle_message(message: bytes, *args, **kwargs): @pytest.fixture def producer(): - pub_port = get_random_port() + pub_port = 
get_random_available_port() QueueName = token_hex(8) # Create a producer diff --git a/packages/syft/tests/utils/mongodb.py b/packages/syft/tests/utils/mongodb.py deleted file mode 100644 index ec2a0c4256a..00000000000 --- a/packages/syft/tests/utils/mongodb.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -NOTE: - -At the moment testing using container is the easiest way to test MongoDB. - ->> `mockmongo` does not support CodecOptions+TypeRegistry. It also doesn't sort on custom types. ->> Mongo binaries are no longer compiled for generic linux. -There's no guarantee that interpolated download URL will work with latest version of the OS, especially on Github CI. -""" - -# stdlib -from pathlib import Path -import platform -from shutil import copyfileobj -import subprocess -from tarfile import TarFile -from tempfile import gettempdir -from time import sleep -import zipfile - -# third party -import distro -import docker -import psutil -import requests - -# relative -from .random_port import get_random_port - -MONGO_CONTAINER_PREFIX = "pytest_mongo" -MONGO_VERSION = "7.0" -MONGO_FULL_VERSION = f"{MONGO_VERSION}.6" -PLATFORM_ARCH = platform.machine() -PLATFORM_SYS = platform.system() -DISTRO_MONIKER = distro.id() + distro.major_version() + distro.minor_version() - -MONGOD_PIDFILE = "mongod.pid" - -MONGO_BINARIES = { - "Darwin": f"https://fastdl.mongodb.org/osx/mongodb-macos-{PLATFORM_ARCH}-{MONGO_FULL_VERSION}.tgz", - "Linux": f"https://fastdl.mongodb.org/linux/mongodb-linux-{PLATFORM_ARCH}-{DISTRO_MONIKER}-{MONGO_FULL_VERSION}.tgz", - "Windows": f"https://fastdl.mongodb.org/windows/mongodb-windows-x86_64-{MONGO_FULL_VERSION}.zip", -} - - -def start_mongo_server(name, dbname="syft"): - port = get_random_port() - - try: - __start_mongo_proc(name, port) - except Exception: - __start_mongo_container(name, port) - - return f"mongodb://127.0.0.1:{port}/{dbname}" - - -def stop_mongo_server(name): - if PLATFORM_SYS in MONGO_BINARIES.keys(): - __kill_mongo_proc(name) - else: - __kill_mongo_container(name) - - -def __start_mongo_proc(name, port): - download_dir = Path(gettempdir(), "mongodb") - exec_path = __download_mongo(download_dir) - if not exec_path: - raise Exception("Failed to download MongoDB binaries") - - root_dir = Path(gettempdir(), name) - - db_path = Path(root_dir, "db") - db_path.mkdir(parents=True, exist_ok=True) - - proc = subprocess.Popen( - [ - str(exec_path), - "--port", - str(port), - "--dbpath", - str(db_path), - ], - stdout=subprocess.DEVNULL, - stderr=subprocess.STDOUT, - ) - - pid_path = root_dir / MONGOD_PIDFILE - pid_path.write_text(str(proc.pid)) - - return proc.pid - - -def __kill_mongo_proc(name): - root_dir = Path(gettempdir(), name) - pid_path = root_dir / MONGOD_PIDFILE - pid = int(pid_path.read_text()) - - mongod_proc = psutil.Process(pid) - mongod_proc.terminate() - sleep(1) - - -def __download_mongo(download_dir): - url = MONGO_BINARIES.get(PLATFORM_SYS) - if url is None: - raise NotImplementedError(f"Unsupported platform: {PLATFORM_SYS}") - - download_path = Path(download_dir, f"mongodb_{MONGO_FULL_VERSION}.archive") - download_path.parent.mkdir(parents=True, exist_ok=True) - - if not download_path.exists(): - # download the archive - with requests.get(url, stream=True) as r: - r.raise_for_status() - with open(download_path, "wb") as f: - copyfileobj(r.raw, f) - - # extract it - if url.endswith(".zip"): - archive = zipfile.ZipFile(download_path, "r") - else: - archive = TarFile.open(download_path, "r") - - archive.extractall(download_dir) - archive.close() - - for path in 
download_dir.glob(f"**/*{MONGO_FULL_VERSION}*/bin/mongod*"): - if path.suffix not in (".exe", ""): - continue - return path - - -def __start_mongo_container(name, port=27017): - client = docker.from_env() - container_name = f"{MONGO_CONTAINER_PREFIX}_{name}" - - try: - return client.containers.get(container_name) - except docker.errors.NotFound: - return client.containers.run( - name=container_name, - image=f"mongo:{MONGO_VERSION}", - ports={"27017/tcp": port}, - detach=True, - remove=True, - auto_remove=True, - labels={"name": "pytest-syft"}, - ) - - -def __kill_mongo_container(name): - client = docker.from_env() - container_name = f"{MONGO_CONTAINER_PREFIX}_{name}" - - try: - container = client.containers.get(container_name) - container.stop() - except docker.errors.NotFound: - pass diff --git a/packages/syft/tests/utils/random_port.py b/packages/syft/tests/utils/random_port.py deleted file mode 100644 index c3370694afb..00000000000 --- a/packages/syft/tests/utils/random_port.py +++ /dev/null @@ -1,8 +0,0 @@ -# stdlib -import socket - - -def get_random_port(): - soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - soc.bind(("", 0)) - return soc.getsockname()[1] diff --git a/packages/syft/tests/utils/xdist_state.py b/packages/syft/tests/utils/xdist_state.py index 02601aef0ba..e69de29bb2d 100644 --- a/packages/syft/tests/utils/xdist_state.py +++ b/packages/syft/tests/utils/xdist_state.py @@ -1,50 +0,0 @@ -# stdlib -import json -from pathlib import Path -from tempfile import gettempdir - -# third party -from filelock import FileLock - - -class SharedState: - """A simple class to manage a file-backed shared state between multiple processes, particulary for pytest-xdist.""" - - def __init__(self, name: str): - self._dir = Path(gettempdir(), name) - self._dir.mkdir(parents=True, exist_ok=True) - - self._statefile = Path(self._dir, "state.json") - self._statefile.touch() - - self._lock = FileLock(str(self._statefile) + ".lock") - - @property - def lock(self): - return self._lock - - def set(self, key, value): - with self._lock: - state = self.read_state() - state[key] = value - self.write_state(state) - return value - - def get(self, key, default=None): - with self._lock: - state = self.read_state() - return state.get(key, default) - - def read_state(self) -> dict: - return json.loads(self._statefile.read_text() or "{}") - - def write_state(self, state): - self._statefile.write_text(json.dumps(state)) - - def purge(self): - if self._statefile: - self._statefile.unlink() - - lock_file = Path(self._lock.lock_file) - if lock_file.exists(): - lock_file.unlink(missing_ok=True) diff --git a/packages/syftbox/.bumpversion.cfg b/packages/syftbox/.bumpversion.cfg new file mode 100644 index 00000000000..dae0c794451 --- /dev/null +++ b/packages/syftbox/.bumpversion.cfg @@ -0,0 +1,11 @@ +[bumpversion] +current_version = 0.3.5 +parse = (?P\d+)\.(?P\d+)\.(?P\d+) +serialize = + {major}.{minor}.{patch} + +[bumpversion:file:pyproject.toml] + +[bumpversion:file:syftbox/__init__.py] + +[bumpversion:file:docker/syftbox.dockerfile] diff --git a/packages/syftbox/.dockerignore b/packages/syftbox/.dockerignore new file mode 100644 index 00000000000..ded50c79cc1 --- /dev/null +++ b/packages/syftbox/.dockerignore @@ -0,0 +1,8 @@ +.git +data +default_apps +dist +docker +notebooks +projects +tests diff --git a/packages/syftbox/.github/workflows/apps-tests.yaml b/packages/syftbox/.github/workflows/apps-tests.yaml new file mode 100644 index 00000000000..7a98224c54f --- /dev/null +++ 
b/packages/syftbox/.github/workflows/apps-tests.yaml @@ -0,0 +1,75 @@ +name: Test - Apps Standalone + +on: + workflow_dispatch: + + workflow_call: + +jobs: + apps-test: + strategy: + max-parallel: 99 + matrix: + apps: + - ring@main + # - tutorial-apps@basic_aggregator + # - tutorial-apps@pretrained_model_aggregator + # - tutorial-apps@pretrained_model_local + python-version: ["3.9"] + os: [ubuntu-latest, macos-latest] + # runner: [syftbox-sh-linux-x64, scaleway-macOS-arm64] + fail-fast: false + + # runs-on: ${{ matrix.runner }} + runs-on: ${{ matrix.os }} + steps: + - name: Parse app repository info + id: apprepo + run: | + REPO=$(echo ${{ matrix.apps }} | cut -d'@' -f1) + REF=$(echo ${{ matrix.apps }} | cut -d'@' -f2) + echo "REPO=$REPO" >> $GITHUB_OUTPUT + echo "REF=$REF" >> $GITHUB_OUTPUT + + - name: Checkout + uses: actions/checkout@v4 + with: + repository: OpenMined/${{ steps.apprepo.outputs.REPO }} + ref: ${{ steps.apprepo.outputs.REF }} + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "0.4.25" + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Provisioning dummy config + run: | + CONFIG_PATH="$(pwd)/config.json" + SYNC_FOLDER="$(pwd)/sync/" + echo '{ + "config_path": "'"$CONFIG_PATH"'", + "sync_folder": "'"$SYNC_FOLDER"'", + "port": 8011, + "email": "alice@openmined.org", + "token": null, + "server_url": "http://localhost:5001", + "email_token": null, + "autorun_plugins": [ + "init", + "create_datasite", + "sync", + "apps" + ] + }' > $CONFIG_PATH + cat $CONFIG_PATH + + - name: Run the test + run: | + chmod +x ./run.sh + export SYFTBOX_CLIENT_CONFIG_PATH="$(pwd)/config.json" + sh ./run.sh diff --git a/packages/syftbox/.github/workflows/cd-deploy-stage.yaml b/packages/syftbox/.github/workflows/cd-deploy-stage.yaml new file mode 100644 index 00000000000..a1b1ec95b66 --- /dev/null +++ b/packages/syftbox/.github/workflows/cd-deploy-stage.yaml @@ -0,0 +1,91 @@ +name: Deploy Stage + +on: + workflow_dispatch: + inputs: + build: + description: Deploy build from + type: choice + default: local + options: + - local + - pypi + + version: + description: SyftBox version to deploy if above is "pypi" + type: string + default: 0.1.12 + + dryrun: + description: Dry Run. Will not deploy to server. 
+ type: boolean + default: false + + push: + branches: + - main # adjust this to match your main branch name + paths: + - "syftbox/**" # Python package files + - "default_apps/**" # Default Apps + - "pyproject.toml" # Project configuration + - "uv.lock" # Project lock + - "MANIFEST.in" # Wheel manifest + - "bumpversion.cfg" # Version + +# Prevents concurrent runs of the same workflow +# while the previous run is still in progress +concurrency: + group: deploy-syftbox-stage + cancel-in-progress: false + +jobs: + deploy-syftbox-stage: + # runs-on: ubuntu-latest + runs-on: syftbox-sh-linux-x64 + + steps: + - name: Install Git & SSH + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install git openssh-client -y + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "0.4.25" + + - name: Checkout SyftBox repo + uses: actions/checkout@v4 + + - name: Install Just + uses: extractions/setup-just@v2 + with: + just-version: "1.36.0" + + - name: Set up SSH + run: | + mkdir -p ~/.ssh + echo "${{ secrets.SYFTBOX_STAGE_PRIVATE_KEY }}" > ~/.ssh/cert.pem + chmod 600 ~/.ssh/cert.pem + ssh-keyscan -H "4.227.144.171" >> ~/.ssh/known_hosts + + - name: Deploy SyftBox (Local Wheel Build) + # allow local deployment only on workflow_dispatch and non-PR push + if: | + (github.event_name == 'workflow_dispatch' && inputs.dryrun == false && inputs.build == 'local') || + (github.event_name == 'push' && github.event.pull_request == null) + run: | + just upload-dev ~/.ssh/cert.pem azureuser@4.227.144.171 + + - name: Deploy SyftBox (PyPI ${{ inputs.version }}) + # allow pypi deployment only on workflow_dispatch + if: | + (github.event_name == 'workflow_dispatch' && inputs.dryrun == false && inputs.build == 'pypi') || false + run: | + just upload-pip ${{ inputs.version }} ~/.ssh/cert.pem azureuser@4.227.144.171 + + - name: Delete cert.pem + if: always() + run: | + rm -f ~/.ssh/cert.pem diff --git a/packages/syftbox/.github/workflows/cd-deploy.yaml b/packages/syftbox/.github/workflows/cd-deploy.yaml new file mode 100644 index 00000000000..db022ada6dd --- /dev/null +++ b/packages/syftbox/.github/workflows/cd-deploy.yaml @@ -0,0 +1,57 @@ +name: Deploy Prod + +on: + workflow_dispatch: + inputs: + version: + description: "SyftBox Version to deploy" + type: string + default: 0.1.12 + + dryrun: + description: Dry Run. Will not deploy to server. 
+ type: boolean + default: false + +# Prevents concurrent runs of the same workflow +# while the previous run is still in progress +concurrency: + group: deploy-syftbox-prod + cancel-in-progress: false + +jobs: + deploy-syftbox-prod: + # runs-on: ubuntu-latest + runs-on: syftbox-sh-linux-x64 + + steps: + - name: Install Git & SSH + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install git openssh-client -y + + - name: Checkout SyftBox repo + uses: actions/checkout@v4 + + - name: Install Just + uses: extractions/setup-just@v2 + with: + just-version: "1.36.0" + + - name: Set up SSH + run: | + mkdir -p ~/.ssh + echo "${{ secrets.SYFTBOX_SERVER_PRIVATE_KEY }}" > ~/.ssh/cert.pem + chmod 600 ~/.ssh/cert.pem + ssh-keyscan -H "172.210.40.183" >> ~/.ssh/known_hosts + + - name: Deploy SyftBox Server + if: ${{ inputs.dryrun == false && github.event_name != 'pull_request' }} + run: | + just upload-pip ${{ inputs.version }} ~/.ssh/cert.pem azureuser@172.210.40.183 + + - name: Delete cert.pem + if: always() + run: | + rm -f ~/.ssh/cert.pem diff --git a/packages/syftbox/.github/workflows/cd-release.yaml b/packages/syftbox/.github/workflows/cd-release.yaml new file mode 100644 index 00000000000..9dec9d4c4c2 --- /dev/null +++ b/packages/syftbox/.github/workflows/cd-release.yaml @@ -0,0 +1,104 @@ +name: SyftBox PyPI Release + +on: + workflow_dispatch: + inputs: + bump_type: + description: Bump version by + type: choice + default: patch + options: + - patch + - minor + - major + + breaking_changes: + description: Use this if minor release and not compatible with previous versions + type: boolean + default: false + + run_tests: + description: Run pre-release tests + type: boolean + default: true + + dryrun: + description: Dry Run. Will not push to PyPI.
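For orientation, a rough Python sketch (purely illustrative, not the repository's actual `just bump-version` recipe) of what the `bump_type` choice above selects on a MAJOR.MINOR.PATCH version string such as the 0.3.5 tracked in the syftbox .bumpversion.cfg:

def bump(version: str, bump_type: str) -> str:
    # split "MAJOR.MINOR.PATCH" and bump the selected component, resetting the lower ones
    major, minor, patch = (int(part) for part in version.split("."))
    if bump_type == "major":
        return f"{major + 1}.0.0"
    if bump_type == "minor":
        return f"{major}.{minor + 1}.0"
    return f"{major}.{minor}.{patch + 1}"

# e.g. bump("0.3.5", "patch") -> "0.3.6", bump("0.3.5", "minor") -> "0.4.0"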
+ type: boolean + default: false + +# Prevents concurrent runs of the same workflow +# while the previous run is still in progress +concurrency: + group: release-syftbox + cancel-in-progress: false + +jobs: + call-pr-tests: + if: ${{ inputs.run_tests == true }} + uses: ./.github/workflows/pr-tests.yaml + + deploy-syftbox: + needs: [call-pr-tests] + if: | + always() && + (needs.call-pr-tests.result == 'success' || needs.call-pr-tests.result == 'skipped') + + # runs-on: ubuntu-latest + runs-on: syftbox-sh-linux-x64 + + steps: + - name: Install Git + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install git -y + + - name: Checkout SyftBox repo with github token + uses: actions/checkout@v4 + with: + token: ${{ secrets.SYFTBOX_BOT_COMMIT_TOKEN }} + + - name: Configure git user + run: | + git config user.name "${{ secrets.OM_BOT_NAME }}" + git config user.email "${{ secrets.OM_BOT_EMAIL }}" + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "0.4.25" + + - name: Install Just + uses: extractions/setup-just@v2 + with: + just-version: "1.36.0" + + - name: Install dependencies + run: | + uv --version + uv tool install twine + twine --version + + - name: Bump the Version + run: | + just bump-version ${{ inputs.bump_type }} ${{ inputs.breaking_changes }} + + - name: Build syftbox + run: | + just build + + - name: Push to pypi + if: ${{ inputs.dryrun == false && github.event_name != 'pull_request' }} + run: | + twine upload -u __token__ -p ${{ secrets.OM_SYFTBOX_PYPI_TOKEN }} dist/* + + - name: Push changes to SyftBox repo + if: ${{ inputs.dryrun == false && github.event_name != 'pull_request' }} + run: | + git push origin --follow-tags diff --git a/packages/syftbox/.github/workflows/e2e-tests.yaml b/packages/syftbox/.github/workflows/e2e-tests.yaml new file mode 100644 index 00000000000..980432ca062 --- /dev/null +++ b/packages/syftbox/.github/workflows/e2e-tests.yaml @@ -0,0 +1,136 @@ +name: Tests - E2E + +on: + push: + branches: + - main # adjust this to match your main branch name + + workflow_dispatch: + + workflow_call: + +concurrency: + group: e2e-tests + cancel-in-progress: true + +jobs: + build-test: + strategy: + max-parallel: 99 + matrix: + os: [ubuntu-latest, macos-latest] + python-version: ["3.9", "3.12"] + + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "0.4.25" + + - name: Install Just + uses: extractions/setup-just@v2 + with: + just-version: "1.36.0" + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Pre-test checks + run: | + uv --version + jq --version + just --version + curl --version + + - name: Run Build & Install + run: | + just install + + - name: Check installed tools + run: | + uv tool list + + - name: Run SyftBox Debug + run: | + syftbox version + syftbox debug + syftbox client --help + syftbox server --help + + e2e-test: + strategy: + max-parallel: 99 + matrix: + e2e-test: [ + "basic_aggregator", + "model_aggregator", + "aggregator_with_local_training", + # "fl_model_training", + ] + python-version: ["3.9", "3.12"] + os: [ubuntu-latest, macos-latest] + # runner: [syftbox-sh-linux-x64, scaleway-macOS-arm64] + fail-fast: false + + # runs-on: ${{ matrix.runner }} + runs-on: ${{ matrix.os }} + steps: + - name: Free Disk Space + run: | + sudo rm -rf 
/Users/runner/Library/Android/sdk || true + sudo rm -rf /usr/local/lib/android || true + sudo rm -rf /usr/share/dotnet || true + sudo rm -rf /opt/ghc || true + sudo rm -rf /usr/local/.ghcup || true + + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "0.4.25" + + - name: Install Just + uses: extractions/setup-just@v2 + with: + just-version: "1.36.0" + + - name: "Setup jq" + uses: dcarbone/install-jq-action@v3 + with: + version: "1.7" + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Pre-test checks + run: | + uv --version + jq --version + just --version + curl --version + + - name: Run E2E Test for '${{ matrix.e2e-test }}' + run: | + just reset + just test-e2e ${{ matrix.e2e-test }} + + - name: Cleanup unnecessary files + if: ${{ failure() }} + run: | + find . -type f -name "Icon*" -exec rm -f {} \; + find . -type f -name "syftbox.pid" -exec rm -f {} \; + + - name: Upload logs & client/server state + if: failure() + uses: actions/upload-artifact@v4 + with: + name: e2e-${{ matrix.e2e-test }}-${{ runner.os }}-${{ matrix.python-version }} + path: .e2e/${{ matrix.e2e-test }} diff --git a/packages/syftbox/.github/workflows/nightlies.yaml b/packages/syftbox/.github/workflows/nightlies.yaml new file mode 100644 index 00000000000..fade43b26b3 --- /dev/null +++ b/packages/syftbox/.github/workflows/nightlies.yaml @@ -0,0 +1,16 @@ +name: Tests - Nightlies + +on: + schedule: + - cron: "0 00 * * *" # 12am UTC, 5:30pm Indian, 9pm Brazil, 11am AEDT + + workflow_dispatch: + +jobs: + apps: + if: github.repository == 'OpenMined/syft' # don't run on forks + uses: OpenMined/syft/.github/workflows/apps-tests.yaml@main + + e2e: + if: github.repository == 'OpenMined/syft' # don't run on forks + uses: OpenMined/syft/.github/workflows/e2e-tests.yaml@main diff --git a/packages/syftbox/.github/workflows/pr-tests.yaml b/packages/syftbox/.github/workflows/pr-tests.yaml new file mode 100644 index 00000000000..ebed4cf47d9 --- /dev/null +++ b/packages/syftbox/.github/workflows/pr-tests.yaml @@ -0,0 +1,126 @@ +name: Tests - PR + +on: + workflow_dispatch: + + workflow_call: + + pull_request: + branches: + - main + +concurrency: + group: syft-${{ github.event_name == 'pull_request' && format('{0}-{1}', github.workflow, github.event.pull_request.number) || github.workflow_ref }} + cancel-in-progress: true + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - uses: pre-commit/action@v3.0.1 + + unit: + strategy: + max-parallel: 99 + matrix: + os: [ubuntu-latest, macos-latest] + # runner: [syftbox-sh-linux-x64, scaleway-macOS-arm64] + python-version: ["3.12", "3.11", "3.10", "3.9"] + fail-fast: false + + # runs-on: ${{ matrix.runner }} + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "0.4.25" + + - name: Get uv cache dir + id: pip-cache + shell: bash + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + - name: Load github cache + uses: actions/cache@v4 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: pr-uv-${{ runner.os }}-py${{ matrix.python-version }}-${{ hashFiles('pyproject.toml') }} + restore-keys: | + pr-uv-${{ runner.os }}-py${{ matrix.python-version }} + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ 
matrix.python-version }} + + - name: Install tox + run: | + # explicitly define which python version to use + # else we may end up picking system default which is not the same as the actions/setup-python + uv tool install tox --with tox-uv --python ${{ matrix.python-version }} + uv run tox --version + + - name: Run unit tests + env: + TOX_PYTHON: python${{ matrix.python-version }} + run: | + uv run tox -e syft.test.unit + + integration: + strategy: + max-parallel: 99 + matrix: + os: [ubuntu-latest, macos-latest] + # runner: [syftbox-sh-linux-x64, scaleway-macOS-arm64] + python-version: ["3.12", "3.9"] + fail-fast: false + + # runs-on: ${{ matrix.runner }} + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "0.4.25" + + - name: Get uv cache dir + id: pip-cache + shell: bash + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + + - name: Load github cache + uses: actions/cache@v4 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: pr-uv-${{ runner.os }}-py${{ matrix.python-version }}-${{ hashFiles('pyproject.toml') }} + restore-keys: | + pr-uv-${{ runner.os }}-py${{ matrix.python-version }} + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install tox + run: | + # explicitly define which python version to use + # else we may end up picking system default which is not the same as the actions/setup-python + uv tool install tox --with tox-uv --python ${{ matrix.python-version }} + uv run tox --version + + - name: Run Integration tests + env: + TOX_PYTHON: python${{ matrix.python-version }} + run: | + uv run tox -e syft.test.integration diff --git a/packages/syftbox/.gitignore b/packages/syftbox/.gitignore new file mode 100644 index 00000000000..04917ff8d52 --- /dev/null +++ b/packages/syftbox/.gitignore @@ -0,0 +1,191 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. 
+#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# os garbage +**/.DS_Store +**/Thumbs.db + +# syft dirs +data/ +./users/ +.clients/ +.server/ +.e2e/ +keys/ +backup/ +netflix_data/ +notebooks/crypto + +# syft files +scheduler.lock +jobs.sqlite + +.vscode +experimental/ +*.db +notebooks/ +dev_space/ + +*.env +!tests/**/.env + +syftbox/assets/icon/* diff --git a/packages/syftbox/.pre-commit-config.yaml b/packages/syftbox/.pre-commit-config.yaml new file mode 100644 index 00000000000..0c059bfa3c9 --- /dev/null +++ b/packages/syftbox/.pre-commit-config.yaml @@ -0,0 +1,74 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-ast + always_run: true + - id: trailing-whitespace + always_run: true + exclude: .bumpversion.cfg + - id: check-docstring-first + always_run: true + - id: check-json + always_run: true + - id: check-added-large-files + always_run: true + exclude: '.*Pyfhel-3\.4\.2-cp311-cp311-macosx_13_0_arm64\.whl|.*syftbox-0.1.0-py3-none-any\.whl' + - id: check-yaml + always_run: true + - id: check-merge-conflict + always_run: true + args: ["--assume-in-merge"] + - id: check-executables-have-shebangs + always_run: true + - id: debug-statements + always_run: true + - id: name-tests-test + always_run: true + exclude: "tests/.*(e2e|stress|fixtures)" + - id: requirements-txt-fixer + always_run: true + - id: mixed-line-ending + args: ["--fix=lf"] + + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: "v0.6.5" + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix, --show-fixes] + types_or: [python, pyi, jupyter] + - id: ruff-format + types_or: [python, pyi, jupyter] + + - repo: https://github.com/kynan/nbstripout + rev: 0.7.1 + hooks: + - id: nbstripout + + - repo: https://github.com/pre-commit/mirrors-prettier # This repository has been archived by the owner on Apr 11, 2024. It is now read-only. 
+ rev: "v3.0.0-alpha.9-for-vscode" + hooks: + - id: prettier + + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.10.0 + hooks: + - id: mypy + name: "mypy" + always_run: true + files: ^syftbox + args: ["--config-file=tox.ini", "--install-types", "--non-interactive"] + + # Very SLOW! + # - repo: https://github.com/renovatebot/pre-commit-hooks + # rev: 39.60.0 + # hooks: + # - id: renovate-config-validator + # args: [--strict] + + # - repo: meta + # hooks: + # - id: identity + # always_run: true + # files: "notebooks/api/*" diff --git a/packages/syftbox/CHANGELOG.md b/packages/syftbox/CHANGELOG.md new file mode 100644 index 00000000000..d7c8d46d53c --- /dev/null +++ b/packages/syftbox/CHANGELOG.md @@ -0,0 +1,17 @@ +# Change Log + +Changes to SyftBox + +## [Unreleased] - yyyy-mm-dd + +### Added + +### Changed + +### Fixed + +- [Sync] - First Example + +## [0.1.11] - 2024-10-19 + +First Change Log version. diff --git a/packages/syftbox/LICENSE b/packages/syftbox/LICENSE new file mode 100644 index 00000000000..3cbca1a91c0 --- /dev/null +++ b/packages/syftbox/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2024] OpenMined Foundation 501(c)(3) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/packages/syftbox/README.md b/packages/syftbox/README.md
new file mode 100644
index 00000000000..aa5f692810b
--- /dev/null
+++ b/packages/syftbox/README.md
@@ -0,0 +1,110 @@
+```
+ ____ __ _ ____
+/ ___| _ _ / _| |_| __ ) _____ __
+\___ \| | | | |_| __| _ \ / _ \ \/ /
+ ___) | |_| | _| |_| |_) | (_) > <
+|____/ \__, |_| \__|____/ \___/_/\_\
+ |___/
+```
+
+# Quickstart User Installation
+
+## SyftBox one-liner
+
+```
+curl -LsSf https://syftbox.openmined.org/install.sh | sh -s -- run
+```
+
+## Manual install
+
+### Install uv
+
+```
+curl -LsSf https://astral.sh/uv/install.sh | sh
+```
+
+### Create a virtualenv somewhere
+
+```
+uv venv .venv
+```
+
+### Install SyftBox
+
+```
+uv pip install -U syftbox
+```
+
+### Run the client
+
+```
+uv run syftbox client
+```
+
+# Quickstart Client Developer Installation
+
+### Step 0: Open your terminal to the root of this GitHub repository
+
+Begin by opening your terminal and navigating to the root directory of this GitHub repository (so when you run `ls` it should show folders like "syftbox", "server", "tests", etc.). Then run the commands in steps 1-5:
+
+### Step 1: Install Homebrew
+
+```
+/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+```
+
+### Step 2: Install uv (using Homebrew, which is better for this than pip)
+
+```
+brew install uv
+```
+
+### Step 3: Create a virtual environment using uv
+
+```
+uv venv
+```
+
+### Step 4: Install SyftBox in editable mode
+
+```
+uv pip install -e .
+```
+
+### Step 5: Run the client
+
+```
+uv run syftbox/client/client.py
+```
+
+# Alternative Options
+
+### Run Client
+
+```
+syftbox client --config_path=./config.json --sync_folder=~/Desktop/SyftBox --email=your@email.org --port=8082 --server=https://syftbox.openmined.org
+```
+
+### Staging Server
+
+If you have issues or want to use a bleeding-edge server, try `--server=https://syftboxstage.openmined.org`
+
+### Deploy
+
+This builds the latest source into a wheel, then deploys it and restarts the server at
+https://syftbox.openmined.org
+
+```
+./scripts/deploy.sh
+```
+
+### Dev Mode
+
+Run the server and clients locally in editable mode with the following scripts.
+
+Server:
+
+```
+./scripts/server.sh
+```
+
+Client 1:
+
+```
+./scripts/madhava.sh
+```
+
+Client 2:
+
+```
+./scripts/andrew.sh
+```
diff --git a/packages/syftbox/config/prod/syftbox.service b/packages/syftbox/config/prod/syftbox.service
new file mode 100644
index 00000000000..c779c0cb65a
--- /dev/null
+++ b/packages/syftbox/config/prod/syftbox.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Syftbox Prod Server
+After=network.target
+
+[Service]
+LimitNOFILE=262144
+User=azureuser
+WorkingDirectory=/home/azureuser
+ExecStartPre=uv run syftbox server migrate
+ExecStart=uv run uvicorn syftbox.server.server:app --host 0.0.0.0 --port 8443 --workers=4 --timeout-graceful-shutdown=5 --ssl-keyfile /etc/letsencrypt/live/syftbox.openmined.org/privkey.pem --ssl-certfile /etc/letsencrypt/live/syftbox.openmined.org/fullchain.pem
+Restart=always
+RestartSec=5
+Environment=OTEL_RESOURCE_ATTRIBUTES=service.name=syftbox-prod
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packages/syftbox/config/stage/syftbox.service b/packages/syftbox/config/stage/syftbox.service
new file mode 100644
index 00000000000..007122bbe6a
--- /dev/null
+++ b/packages/syftbox/config/stage/syftbox.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Syftbox Stage Server
+After=network.target
+
+[Service]
+LimitNOFILE=262144
+User=azureuser
+WorkingDirectory=/home/azureuser
+ExecStartPre=uv run syftbox server migrate
+ExecStart=uv run gunicorn syftbox.server.server:app -w 5 -k uvicorn.workers.UvicornWorker --bind 0.0.0.0:8080 --graceful-timeout 5 --keep-alive 5 --max-requests 500 --max-requests-jitter 50 --forwarded-allow-ips * --worker-tmp-dir /dev/shm --keyfile /etc/letsencrypt/live/syftboxstage.openmined.org/privkey.pem --certfile /etc/letsencrypt/live/syftboxstage.openmined.org/fullchain.pem +Environment=OTEL_RESOURCE_ATTRIBUTES=service.name=syftbox-stage +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target diff --git a/packages/syftbox/default_apps/starter_app/main.py b/packages/syftbox/default_apps/starter_app/main.py new file mode 100644 index 00000000000..98c5a879bb9 --- /dev/null +++ b/packages/syftbox/default_apps/starter_app/main.py @@ -0,0 +1,67 @@ +import os +import shutil +import tempfile +import urllib.request +import zipfile +from pathlib import Path + +DEFAULT_APPS = [ + "https://github.com/OpenMined/logged_in", + "https://github.com/OpenMined/inbox", + "https://github.com/OpenMined/cpu_tracker_member", + "https://github.com/OpenMined/DatasetLoader", +] + + +def download_github_repo(url: str, target_dir: str = None) -> Path: + """Downloads and extracts a GitHub repository without git.""" + if not url.startswith(("http://", "https://")): + raise ValueError("Invalid GitHub URL") + + repo_name = url.rstrip("/").split("/")[-1] + target_dir = Path(target_dir or os.getcwd()) / repo_name + + with tempfile.TemporaryDirectory() as tmp: + tmp_path = Path(tmp) + try: + zip_path = tmp_path / "repo.zip" + urllib.request.urlretrieve(f"{url}/archive/main.zip", zip_path) + + with zipfile.ZipFile(zip_path) as zip_ref: + extracted = tmp_path / zip_ref.namelist()[0].split("/")[0] + zip_ref.extractall(tmp_path) + if target_dir.exists(): + shutil.rmtree(target_dir) + shutil.move(str(extracted), str(target_dir)) + + return target_dir + except Exception as e: + print(f"Failed to download or extract {url}: {e}") + + +def clone_apps(): + apps = DEFAULT_APPS + + # this is needed for E2E or integration testing to only install only select apps + # DO NOT MERGE IT WITH DEFAULT_APPS + env_apps = os.getenv("SYFTBOX_DEFAULT_APPS", None) + if env_apps: + print(f"SYFTBOX_DEFAULT_APPS={env_apps}") + apps = env_apps.strip().split(",") + + print("Installing", apps) + + # Iterate over the list and clone each repository + for url in apps: + download_github_repo(url) + + print("Done") + + +if __name__ == "__main__": + current_directory = Path(os.getcwd()) + + apps_directory = current_directory.parent + os.chdir(apps_directory) + clone_apps() + shutil.rmtree(current_directory) diff --git a/packages/syftbox/default_apps/starter_app/run.sh b/packages/syftbox/default_apps/starter_app/run.sh new file mode 100755 index 00000000000..e1ff693b93f --- /dev/null +++ b/packages/syftbox/default_apps/starter_app/run.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +set -e + +if [ ! -d .venv ]; then + uv venv +fi +. 
.venv/bin/activate
+
+echo "Running 'starter_app' with $(python3 --version) at '$(which python3)'"
+python3 main.py
+deactivate
diff --git a/packages/syftbox/docker/syftbox.dockerfile b/packages/syftbox/docker/syftbox.dockerfile
new file mode 100644
index 00000000000..2a278be5708
--- /dev/null
+++ b/packages/syftbox/docker/syftbox.dockerfile
@@ -0,0 +1,17 @@
+FROM cgr.dev/chainguard/wolfi-base
+
+ARG PYTHON_VERSION="3.12"
+ARG UV_VERSION="0.4.20-r0"
+ARG SYFT_VERSION="0.3.5"
+
+RUN apk update && apk upgrade && \
+ apk add --no-cache python-$PYTHON_VERSION uv=$UV_VERSION
+
+WORKDIR /app
+
+RUN uv venv
+RUN uv pip install --no-cache syftbox==${SYFT_VERSION}
+
+EXPOSE 8000
+
+CMD ["uv", "run", "gunicorn", "syftbox.server.server:app", "--bind=0.0.0.0:8000"]
diff --git a/packages/syftbox/docs/assets/tui_example.png b/packages/syftbox/docs/assets/tui_example.png
new file mode 100644
index 00000000000..ffbe08ac28c
Binary files /dev/null and b/packages/syftbox/docs/assets/tui_example.png differ
diff --git a/packages/syftbox/docs/auth.md b/packages/syftbox/docs/auth.md
new file mode 100644
index 00000000000..4504a4257a4
--- /dev/null
+++ b/packages/syftbox/docs/auth.md
@@ -0,0 +1,11 @@
+# Authentication
+
+When using SyftBox for the first time, users are asked to fill in their email address, which receives a registration token. The registration token can be pasted into the terminal, which results in an access token that is stored in `/config.json` and used when logging in.
+
+## Password reset
+
+If users lose their config.json, they can regain access to their account by going through the registration flow again. Users receive a new email and are asked to copy the new registration token into the terminal.
+
+## Dev
+
+When you launch a syftbox caching server for development with `just run-server`, by default it starts without authentication. During registration, the client receives a response indicating that auth is turned off, and during login the client just passes a base64-encoded JSON of your email address. The server skips any JWT validation.
diff --git a/packages/syftbox/docs/permissions.md b/packages/syftbox/docs/permissions.md
new file mode 100644
index 00000000000..9ed049bb256
--- /dev/null
+++ b/packages/syftbox/docs/permissions.md
@@ -0,0 +1,52 @@
+# Permissions
+
+Users can define permissions for paths (files or folders) in their syftbox. Permissions define which other users can read, create, update or delete specific paths. Users can also invite other users to set permissions for specific paths.
+
+## Permission types and syftperm.yaml files
+
+Permissions are defined by creating a set of `syftperm.yaml` files in the file tree of your datasite. A `syftperm.yaml` file can define permissions for all paths lower in the directory structure by defining a set of rules. There are 4 permission `bits`:
+
+- `read`: Can read the file
+- `create`: Can create a new file for a particular path
+- `write`: Can update an existing file for a particular path
+- `admin`: Can change the contents of `syftperm.yaml` files for a particular path
+
+### syftperm.yaml file format
+
+An example of such a set of rules in a `syftperm.yaml` file is:
+
+```
+- permission: read
+ path: x.txt
+ user: user@example.org
+
+- permission: write
+ user: *
+ type: disallow
+```
+
+### Rule arguments
+
+A rule has the following arguments (see the sketch after this list):
+
+- `permission`: either a single permission, e.g. `read`, or a list of permissions, e.g. `["read", "write"]`. Accepted permissions are: `read`, `create`, `write`, `admin`
+- `user`: Can be a specific user email, e.g. `user@example.org`, or `*`
+- `type`: either `allow` or `disallow`. The default is `allow`
+- `path`: A unix-style glob pattern. You can only use patterns for paths in the current directory or lower, not parent directories. It currently does not support `[]` or `{}` syntax. It also accepts `{useremail}` as a value, which resolves to a specific user. Valid examples of `path` values are: `*` (all paths in the current directory), `*.txt` (all txt files in the current directory), `**` (all paths recursively), and `{useremail}/*.txt` (all txt files in a specific user's folder).
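+
+Below is a minimal, hypothetical sketch of loading and normalizing such a rule file. It assumes PyYAML is available; the field names mirror the arguments described above, the defaults for `type` and `path` are illustrative assumptions, and this is not SyftBox's actual parser. Note that a bare `*` (as in the example above) may need to be quoted as `"*"` for a strict YAML parser.
+
+```python
+# Hypothetical loader sketch; not the SyftBox implementation.
+import yaml
+
+VALID_PERMISSIONS = {"read", "create", "write", "admin"}
+
+
+def load_rules(path: str) -> list:
+    # Read the rule list from a syftperm.yaml-style file.
+    with open(path) as f:
+        rules = yaml.safe_load(f) or []
+    normalized = []
+    for rule in rules:
+        perms = rule["permission"]
+        if isinstance(perms, str):
+            perms = [perms]  # a single permission is allowed as shorthand
+        unknown = set(perms) - VALID_PERMISSIONS
+        if unknown:
+            raise ValueError(f"unknown permissions: {unknown}")
+        normalized.append(
+            {
+                "permission": perms,
+                "user": rule.get("user", "*"),
+                "type": rule.get("type", "allow"),  # default is allow
+                "path": rule.get("path", "**"),  # assumed default: everything below
+            }
+        )
+    return normalized
+```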
+
+## Combining rules
+
+The final set of `read`, `create`, `write`, `admin` permissions is computed by combining the rules. First we sort all the rules by file depth and rule number. The rules are then combined by overriding earlier rules top to bottom as follows:
+
+- By default, the datasite owner has all permissions to everything in their datasite. This cannot be overridden. By default, any other user has no permissions to anything in the datasite.
+- For each permission in a rule, that permission is added to or removed from the final set (depending on the `type` argument) for all users specified by the `user` argument and all paths specified by the `path` argument.
+
+Permission `bits` (`read`, `create`, `write`, `admin`) are stored independently. In general this means that having one permission does not imply that you have the others. There are three exceptions to this:
+
+- If you have `admin` permissions, you automatically have all other permissions.
+- You only have effective `write` or `create` permissions if you also have `read` permissions, because with syncing, writing to files becomes challenging without `read` access.
+- Any datasite owner can `read`, `create`, `write`, or change any permissions for any path.
+
+## Aliases (future)
+
+Currently not supported: we plan to add aliases like `creator`, which implies `read+create`, and `updater`, which implies `read+write`.
diff --git a/packages/syftbox/docs/syncing.md b/packages/syftbox/docs/syncing.md
new file mode 100644
index 00000000000..9d6a7c11247
--- /dev/null
+++ b/packages/syftbox/docs/syncing.md
@@ -0,0 +1,19 @@
+# Syncing
+
+To move file changes from Box A to Box B, SyftBox uses a syncing component that uploads file changes from A to a caching server and downloads them to B from the caching server. These file changes are subject to permissions, which are checked on both the client and the server.
+
+## Components & flow
+
+At a high level, the client implements a producer and a consumer. The producer compares the hashes of local files against the hashes of the files on the server, and if there is a difference it pushes this change to the consumer. Then, the consumer compares three hashes:
+
+1. The hash of the local file the last time it was synced
+2. The current hash of the local file
+3. The current hash of the remote file
+
+Based on this information, the client determines the location of the change (local vs remote) and the type of modification (create/delete/modify). Based on that, the consumer takes action to sync the file. Syncing the file may entail a download, an upload, a request to apply a diff, a local remove, or a request to remove on the server.
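+
+A minimal sketch of that decision, assuming the three hashes above are available (with `None` standing for "file absent"); the names are illustrative and do not correspond to SyftBox's internal API:
+
+```python
+def decide_sync_action(last_synced_hash, local_hash, remote_hash):
+    """Return the action the consumer would take for one file."""
+    if local_hash == remote_hash:
+        return "noop"  # both sides already agree
+    if remote_hash == last_synced_hash:
+        # only the local side changed since the last sync
+        return "delete_remote" if local_hash is None else "upload"
+    if local_hash == last_synced_hash:
+        # only the remote side changed since the last sync
+        return "delete_local" if remote_hash is None else "download"
+    return "conflict"  # both sides changed; needs a resolution strategy
+```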
+The logic on the server, by contrast, is very lightweight: it just checks whether the user is allowed to make the change based on the permissions, and applies it.
+
+When you start a new syftbox, there are a lot of files to sync. Therefore, the initial set of files is downloaded as a batch with a single API call.
+
+## Data structures
+
+For performance reasons, file metadata (hashes, path, etc.) is stored in a database, so that it can be retrieved quickly when needed.
diff --git a/packages/syftbox/fetchers/netflix/.gitignore b/packages/syftbox/fetchers/netflix/.gitignore
new file mode 100644
index 00000000000..04bd1fda871
--- /dev/null
+++ b/packages/syftbox/fetchers/netflix/.gitignore
@@ -0,0 +1,2 @@
+inputs/*
+output
\ No newline at end of file
diff --git a/packages/syftbox/fetchers/netflix/README.md b/packages/syftbox/fetchers/netflix/README.md
new file mode 100644
index 00000000000..9687c67c5ad
--- /dev/null
+++ b/packages/syftbox/fetchers/netflix/README.md
@@ -0,0 +1,22 @@
+# Netflix Fetcher
+
+This will run periodically and download your Netflix data so you can keep your stats up to date.
+
+## Instructions
+
+Add your email, password and profile ID to text files in ./inputs
+
+```
+├── inputs
+│   ├── NETFLIX_EMAIL.txt
+│   ├── NETFLIX_PASSWORD.txt
+│   └── NETFLIX_PROFILE.txt
+```
+
+## Profile ID
+
+To get your profile ID, go to the Profile Gate:
+https://www.netflix.com/ProfilesGate
+
+Right-click and copy the URL for your profile, then take the part after:
+https://www.netflix.com/SwitchProfile?tkn=
diff --git a/packages/syftbox/fetchers/netflix/main.py b/packages/syftbox/fetchers/netflix/main.py
new file mode 100644
index 00000000000..fb87aeab765
--- /dev/null
+++ b/packages/syftbox/fetchers/netflix/main.py
@@ -0,0 +1,68 @@
+import os
+import time
+
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium.webdriver.common.keys import Keys
+
+chrome_driver_path = os.environ["CHROMEDRIVER_PATH"]
+email = os.environ["NETFLIX_EMAIL"]
+password = os.environ["NETFLIX_PASSWORD"]
+profile = os.environ["NETFLIX_PROFILE"]
+output_dir = os.environ["OUTPUT_DIR"]
+
+print(f"🍿 Downloading Netflix Activity for: {email} Profile {profile}")
+
+# Set up WebDriver (for Chrome)
+chrome_options = Options()
+prefs = {
+ "download.default_directory": output_dir,
+ "download.prompt_for_download": False,
+}
+chrome_options.add_experimental_option("prefs", prefs)
+chrome_options.add_argument("--headless") # Run in headless mode; comment this out to see the browser window
+chrome_service = Service(chrome_driver_path) # Set the path to your ChromeDriver
+
+driver = webdriver.Chrome(service=chrome_service, options=chrome_options)
+
+# Get the login page
+driver.get("https://www.netflix.com/login")
+
+
+# Find the email and password input fields
+email_input = driver.find_element(By.NAME, "userLoginId")
+password_input = driver.find_element(By.NAME, "password")
+# Enter email and password
+email_input.send_keys(email)
+password_input.send_keys(password)
+
+# Submit the login form
+print("Logging In")
+password_input.send_keys(Keys.ENTER)
+
+# Wait for the login to complete
+time.sleep(3)
+
+print("Switching Profiles")
+# Switch to the selected profile
+driver.get(f"https://www.netflix.com/SwitchProfile?tkn={profile}")
+
+# Wait for the profile switch to complete
+time.sleep(3)
+
+print("Getting Viewing Activity")
+# Navigate to the Viewing Activity page
+driver.get("https://www.netflix.com/viewingactivity") + +time.sleep(3) + +print("Clicking Download all") +# Navigate to a page and download a file +element = driver.find_element(By.LINK_TEXT, "Download all").click() + +print("Sleeping just in case") +time.sleep(10) + +driver.quit() diff --git a/packages/syftbox/fetchers/netflix/requirements.txt b/packages/syftbox/fetchers/netflix/requirements.txt new file mode 100644 index 00000000000..7cb6656b279 --- /dev/null +++ b/packages/syftbox/fetchers/netflix/requirements.txt @@ -0,0 +1 @@ +selenium diff --git a/packages/syftbox/fetchers/netflix/run.sh b/packages/syftbox/fetchers/netflix/run.sh new file mode 100755 index 00000000000..466a1510010 --- /dev/null +++ b/packages/syftbox/fetchers/netflix/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# Check if chromedriver is in the PATH +if ! command -v chromedriver &> /dev/null +then + echo "chromedriver is not installed. Installing with brew..." + brew install chromedriver +else + echo "chromedriver is already installed." +fi + +export CHROMEDRIVER_PATH=$(which chromedriver) +echo $CHROMEDRIVER_PATH + +mkdir -p inputs +mkdir -p output + +export NETFLIX_EMAIL=$(cat inputs/NETFLIX_EMAIL.txt) +export NETFLIX_PASSWORD=$(cat inputs/NETFLIX_PASSWORD.txt) +export NETFLIX_PROFILE=$(cat inputs/NETFLIX_PROFILE.txt) +export OUTPUT_DIR=$(realpath ./output) + +uv pip install -r requirements.txt +uv run main.py diff --git a/packages/syftbox/justfile b/packages/syftbox/justfile new file mode 100644 index 00000000000..d90d80558a7 --- /dev/null +++ b/packages/syftbox/justfile @@ -0,0 +1,261 @@ +# Guidelines for new commands +# - Start with a verb +# - Keep it short (max. 3 words in a command) +# - Group commands by context. Include group name in the command name. +# - Mark things private that are util functions with [private] or _var +# - Don't over-engineer, keep it simple. 
+# - Don't break existing commands +# - Run just --fmt --unstable after adding new commands + +set dotenv-load := true + +# --------------------------------------------------------------------------------------------------------------------- +# Private vars + +_red := '\033[1;31m' +_cyan := '\033[1;36m' +_green := '\033[1;32m' +_yellow := '\033[1;33m' +_nc := '\033[0m' + +# --------------------------------------------------------------------------------------------------------------------- +# Aliases + +alias rs := run-server +alias rsu := run-server-uvicorn +alias rc := run-client +alias rj := run-jupyter +alias b := build + +# --------------------------------------------------------------------------------------------------------------------- + +@default: + just --list + +# --------------------------------------------------------------------------------------------------------------------- + +# Run a local syftbox server on port 5001 +[group('server')] +run-server port="5001" gunicorn_args="": + #!/bin/bash + set -eou pipefail + + export SYFTBOX_DATA_FOLDER=${SYFTBOX_DATA_FOLDER:-.server/data} + uv run syftbox server migrate + uv run gunicorn syftbox.server.server:app -k uvicorn.workers.UvicornWorker --bind 127.0.0.1:{{ port }} --reload {{ gunicorn_args }} + +migrate: + #!/bin/bash + set -eou pipefail + + uv run syftbox server migrate + +[group('server')] +run-server-uvicorn port="5001" uvicorn_args="": + #!/bin/bash + set -eou pipefail + + export SYFTBOX_DATA_FOLDER=${SYFTBOX_DATA_FOLDER:-.server/data} + uv run syftbox server migrate + uv run uvicorn syftbox.server.server:app --host 127.0.0.1 --port {{ port }} --reload --reload-dir ./syftbox {{ uvicorn_args }} + +# --------------------------------------------------------------------------------------------------------------------- + +# Run a local syftbox client on any available port between 8080-9000 +[group('client')] +run-client name port="auto" server="http://localhost:5001": + #!/bin/bash + set -eou pipefail + + # generate a local email from name, but if it looks like an email, then use it as is + EMAIL="{{ name }}@openmined.org" + if [[ "{{ name }}" == *@*.* ]]; then EMAIL="{{ name }}"; fi + + # if port is auto, then generate a random port between 8000-8090, else use the provided port + PORT="{{ port }}" + if [[ "$PORT" == "auto" ]]; then PORT="0"; fi + + # Working directory for client is .clients/ + DATA_DIR=.clients/$EMAIL + mkdir -p $DATA_DIR + + echo -e "Email : {{ _green }}$EMAIL{{ _nc }}" + echo -e "Client : {{ _cyan }}http://localhost:$PORT{{ _nc }}" + echo -e "Server : {{ _cyan }}{{ server }}{{ _nc }}" + echo -e "Data Dir : $DATA_DIR" + + uv run syftbox client --config=$DATA_DIR/config.json --data-dir=$DATA_DIR --email=$EMAIL --port=$PORT --server={{ server }} --no-open-dir + +# --------------------------------------------------------------------------------------------------------------------- + +[group('client')] +run-live-client server="https://syftbox.openmined.org/": + uv run syftbox client --server={{ server }} + +# --------------------------------------------------------------------------------------------------------------------- + +# Run a local syftbox app command +[group('app')] +run-app name command subcommand="": + #!/bin/bash + set -eou pipefail + + # generate a local email from name, but if it looks like an email, then use it as is + EMAIL="{{ name }}@openmined.org" + if [[ "{{ name }}" == *@*.* ]]; then EMAIL="{{ name }}"; fi + + # Working directory for client is .clients/ + DATA_DIR=$(pwd)/.clients/$EMAIL 
+ mkdir -p $DATA_DIR + echo -e "Data Dir : $DATA_DIR" + + uv run syftbox/main.py app {{ command }} {{ subcommand }} --config=$DATA_DIR/config.json + +# --------------------------------------------------------------------------------------------------------------------- + +# Build syftbox wheel +[group('build')] +build: + rm -rf dist + uv build + + +# Build syftbox wheel +[group('install')] +install: + rm -rf dist + uv build + uv tool install $(ls ./dist/*.whl) --reinstall + +# Bump version, commit and tag +[group('build')] +bump-version level="patch" breaking_changes="false": + #!/bin/bash + # We need to uv.lock before we can commit the whole thing in the repo. + # DO not bump the version on the uv.lock file, else other packages with same version might get updated + + set -eou pipefail + + # sync dev dependencies for bump2version + uv sync --frozen + + # get the current and new version + BUMPVERS_CHANGES=$(uv run bump2version --dry-run --allow-dirty --list {{ level }}) + CURRENT_VERSION=$(echo "$BUMPVERS_CHANGES" | grep current_version | cut -d'=' -f2) + NEW_VERSION=$(echo "$BUMPVERS_CHANGES" | grep new_version | cut -d'=' -f2) + echo "Bumping version from $CURRENT_VERSION to $NEW_VERSION" + + # first bump version + uv run bump2version {{ level }} + + # upgrade version compatibility matrix + cd scripts + BREAKING_CHANGES="" + if [[ '{{ breaking_changes }}' == true ]]; then BREAKING_CHANGES="--breaking_changes"; fi + uv run upgrade_version_matrix.py {{ level }} $BREAKING_CHANGES + cd .. + # update uv.lock file to reflect new package version + uv lock + + # commit the changes + git commit -am "Bump version $CURRENT_VERSION -> $NEW_VERSION" + git tag -a $NEW_VERSION -m "Release $NEW_VERSION" + +# --------------------------------------------------------------------------------------------------------------------- + +[group('test')] +test-e2e-old test_name: + @echo "Using SyftBox from {{ _green }}'$(which syftbox)'{{ _nc }}" + chmod +x ./tests/e2e/{{ test_name }}/run.bash + bash ./tests/e2e.old/{{ test_name }}/run.bash + +[group('test')] +test-e2e test_name: + #!/bin/sh + uv sync --frozen + . .venv/bin/activate + echo "Using SyftBox from {{ _green }}'$(which syftbox)'{{ _nc }}" + pytest -sq --color=yes ./tests/e2e/test_{{ test_name }}.py + +# --------------------------------------------------------------------------------------------------------------------- + +# Build & Deploy syftbox to a remote server using SSH +[group('deploy')] +upload-dev keyfile remote="user@0.0.0.0": build + #!/bin/bash + set -eou pipefail + + # there will be only one wheel file in the dist directory, but you never know... 
+ LOCAL_WHEEL=$(ls dist/*.whl | grep syftbox | head -n 1) + + # Remote paths to copy the wheel to + REMOTE_DIR="~" + REMOTE_WHEEL="$REMOTE_DIR/$(basename $LOCAL_WHEEL)" + + echo -e "Deploying {{ _cyan }}$LOCAL_WHEEL{{ _nc }} to {{ _green }}{{ remote }}:$REMOTE_WHEEL{{ _nc }}" + + # change permissions to comply with ssh/scp + chmod 600 {{ keyfile }} + + # Use scp to transfer the file to the remote server + scp -i {{ keyfile }} "$LOCAL_WHEEL" "{{ remote }}:$REMOTE_DIR" + + # install pip package + ssh -i {{ keyfile }} {{ remote }} "uv venv && uv pip install $REMOTE_WHEEL" + + # restart service + # NOTE - syftbox service is created manually on the remote server + ssh -i {{ keyfile }} {{ remote }} "sudo systemctl daemon-reload && sudo systemctl restart syftbox" + echo -e "{{ _green }}Deployed SyftBox local wheel to {{ remote }}{{ _nc }}" + +# Deploy syftbox from pypi to a remote server using SSH +[group('deploy')] +upload-pip version keyfile remote="user@0.0.0.0": + #!/bin/bash + set -eou pipefail + + # change permissions to comply with ssh/scp + chmod 600 {{ keyfile }} + + echo -e "Deploying syftbox version {{ version }} to {{ remote }}..." + + # install pip package + ssh -i {{ keyfile }} {{ remote }} "uv venv && uv pip install syftbox=={{ version }}" + + # restart service + ssh -i {{ keyfile }} {{ remote }} "sudo systemctl daemon-reload && sudo systemctl restart syftbox" + + echo -e "{{ _green }}Deployed SyftBox {{ version }} to {{ remote }}{{ _nc }}" + +# --------------------------------------------------------------------------------------------------------------------- + +[group('utils')] +ssh keyfile remote="user@0.0.0.0": + ssh -i {{ keyfile }} {{ remote }} + +# remove all local files & directories +[group('utils')] +reset: + rm -rf ./.clients ./.server ./dist ./.e2e + +[group('utils')] +run-jupyter jupyter_args="": + uv run --frozen --with "jupyterlab" \ + jupyter lab {{ jupyter_args }} + +auth email server="http://127.0.0.1:5001": + # get access token from dev server + + EMAIL={{ email }} && \ + EMAIL_TOKEN=$( \ + curl -s -X 'POST' '{{server}}/auth/request_email_token' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d "{\"email\": \"${EMAIL}\"}" \ + | jq -r '.email_token' \ + ) && \ + curl -s -X 'POST' "{{server}}/auth/validate_email_token?email=${EMAIL}" \ + -H 'accept: application/json' \ + -H "Authorization: Bearer ${EMAIL_TOKEN}" \ + -d '' \ + | jq -r '.access_token' \ No newline at end of file diff --git a/packages/syftbox/notebooks/01-trade-create.ipynb b/packages/syftbox/notebooks/01-trade-create.ipynb new file mode 100644 index 00000000000..3cd728c97e7 --- /dev/null +++ b/packages/syftbox/notebooks/01-trade-create.ipynb @@ -0,0 +1,347 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# !uv pip install pandas" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "# Create a Dataset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "from syftbox.lib import SyftVault, TabularDataset, autocache, config_for_user" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# client_config = ClientConfig.load(\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "client_config = 
config_for_user(\"andrew@openmined.org\")\n", + "client_config" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "client_config" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "manifest = client_config.manifest" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "manifest" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "canada_dataset_url = \"https://github.com/OpenMined/datasets/blob/main/trade_flow/ca%20-%20feb%202021.csv?raw=True\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "df = pd.read_csv(autocache(canada_dataset_url))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "# private data samples\n", + "ca_data = df[0:10]\n", + "ca_data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# Mock data samples\n", + "mock_ca_data = df[10:20]\n", + "mock_ca_data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "mock_ca_data" + ] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": [ + "# Save where ever you like" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "# create a folder with the manifest object" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "datasets_path = manifest.create_public_folder(\"datasets\")\n", + "datasets_path" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "dataset_path = datasets_path / \"trade_data\"\n", + "dataset_path" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "os.makedirs(dataset_path, exist_ok=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "csv_file = dataset_path / \"trade_mock.csv\"\n", + "csv_file" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "mock_ca_data.to_csv(csv_file)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "dataset = TabularDataset.from_csv(csv_file, name=\"Trade Data\", has_private=True)\n", + "dataset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "dataset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "dataset.readme_link" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "dataset.publish(manifest, overwrite=True)" + 
] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "26", + "metadata": {}, + "source": [ + "# Link Private Data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "private_path = os.path.abspath(\"trade_private.csv\")\n", + "ca_data.to_csv(private_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "SyftVault.link_private(csv_file, private_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30", + "metadata": {}, + "outputs": [], + "source": [ + "SyftVault.load_vault()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/syftbox/notebooks/02-trade-code.ipynb b/packages/syftbox/notebooks/02-trade-code.ipynb new file mode 100644 index 00000000000..20bfb1a32fa --- /dev/null +++ b/packages/syftbox/notebooks/02-trade-code.ipynb @@ -0,0 +1,233 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# !uv pip install opendp" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "from syftbox.lib import config_for_user, syftbox_code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "client_config = config_for_user(\"andrew@openmined.org\")\n", + "client_config" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "client_config.use()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "manifest = client_config.manifest\n", + "manifest" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "datasets = client_config.get_datasets()\n", + "datasets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "trade_data = datasets[0]\n", + "trade_data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "trade_data.file_path" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "trade_data.import_string" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "# Decorator Style" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "from syftbox.lib.andrew.at.openmined.org.datasets import trade_data\n", + "\n", + "trade_data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "!uv pip install opendp" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "@syftbox_code\n", + "def myanalysis(trade_data):\n", + " import opendp.prelude as dp\n", + "\n", + " dp.enable_features(\"contrib\")\n", + "\n", + " aggregate = 0.0\n", + " base_lap = dp.m.make_laplace(\n", + " dp.atom_domain(T=float),\n", + " dp.absolute_distance(T=float),\n", + " scale=5.0,\n", + " )\n", + " noise = base_lap(aggregate)\n", + "\n", + " total = trade_data[\"Trade Value (US$)\"].sum()\n", + " result = (float(total / 1_000_000), float(noise))\n", + "\n", + " print(result)\n", + " if result[0] > 3:\n", + " print(\"Got mock\")\n", + " else:\n", + " print(\"Got private\")\n", + " return result\n", + "\n", + "\n", + "myanalysis(trade_data.load())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# TEMP bug where we cant use theirs_with_my_read because the parent write is ignored but allowing the perm file to set its own\n", + "# rules wont work either so we need to solve the permissioning of files themselves\n", + "path = myanalysis.to_flow(client_config=client_config, inputs={\"trade_data\": trade_data})\n", + "path" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "# also publish live" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "myanalysis.publish(manifest=client_config.manifest)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/syftbox/notebooks/03-netflix-code.ipynb b/packages/syftbox/notebooks/03-netflix-code.ipynb new file mode 100644 index 00000000000..81ea0604e22 --- /dev/null +++ b/packages/syftbox/notebooks/03-netflix-code.ipynb @@ -0,0 +1,450 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "from syftbox.lib import config_for_user, syftbox_code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "client_config = config_for_user(\"madhava@openmined.org\")\n", + "client_config" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "client_config.use()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "datasets = client_config.get_datasets()\n", + "datasets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + 
"metadata": {}, + "outputs": [], + "source": [ + "datasets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "netflix = datasets[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "netflix" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "from syftbox.lib.david.rolle.at.gmail.com.datasets import netflix_tmdb_imdb\n", + "\n", + "netflix_tmdb_imdb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "from syftbox.lib.me.at.madhavajay.com.datasets import netflix_tmdb_imdb\n", + "\n", + "netflix_tmdb_imdb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "def reset_folder():\n", + " import shutil\n", + "\n", + " try:\n", + " shutil.rmtree(\"./crypto/data\")\n", + " except Exception:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "# private\n", + "def create_and_get_he_context():\n", + " import os\n", + "\n", + " from Pyfhel import Pyfhel\n", + "\n", + " crypto_folder = \"./crypto\"\n", + " os.makedirs(crypto_folder, exist_ok=True)\n", + " HE = Pyfhel()\n", + " if os.path.exists(\"crypto/pyfhel.secret\"):\n", + " print(\"Loading HE keys\")\n", + " HE.load_context(f\"{crypto_folder}/pyfhel.context\")\n", + " HE.load_secret_key(f\"{crypto_folder}/pyfhel.secret\")\n", + " HE.load_public_key(f\"{crypto_folder}/pyfhel.pk\")\n", + " else:\n", + " print(\"Generating new HE keys\")\n", + " HE.contextGen(scheme=\"bfv\", n=2**15, t_bits=20)\n", + " HE.keyGen()\n", + " HE.save_secret_key(\"crypto/pyfhel.secret\")\n", + " HE.save_public_key(\"crypto/pyfhel.pk\")\n", + " HE.save_context(\"crypto/pyfhel.context\")\n", + "\n", + " return HE\n", + "\n", + "\n", + "HE = create_and_get_he_context()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "# private\n", + "def create_he_data(HE):\n", + " import os\n", + "\n", + " import numpy as np\n", + "\n", + " crypto_folder = \"./crypto\"\n", + " stats_keys = [\n", + " \"total_time\",\n", + " \"total_views\",\n", + " \"total_unique_show_views\",\n", + " # \"year_fav_day\"\n", + " ]\n", + "\n", + " stat_folder = f\"./{crypto_folder}/data\"\n", + " part_path = f\"{stat_folder}/totals\"\n", + " slice_folder = f\"{stat_folder}/view_counts\"\n", + " os.makedirs(stat_folder, exist_ok=True)\n", + " os.makedirs(slice_folder, exist_ok=True)\n", + "\n", + " # create totals\n", + " stats_array = np.zeros(len(stats_keys)).astype(int)\n", + " value = HE.encryptInt(stats_array)\n", + " value.save(part_path)\n", + "\n", + " max_tv_id = 300_000 # just a guess\n", + " slice_size = 30_000 # max size of the above HE context\n", + "\n", + " # create imdb_id slices\n", + " counter = 0\n", + " for i in range(0, max_tv_id + 1, slice_size):\n", + " tv_count_array = np.zeros(slice_size).astype(int)\n", + " tv_count_slice = HE.encryptInt(tv_count_array)\n", + " part_path = f\"{slice_folder}/tmdb_id_{counter:02}\"\n", + " tv_count_slice.save(part_path)\n", + " counter += 1\n", + " print(\"HE Data Created\")\n", + "\n", + "\n", + "reset_folder()\n", + "create_he_data(HE)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"id": "12", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "# public federated code\n", + "@syftbox_code\n", + "def netflix_stats(datasite, df):\n", + " import datetime\n", + " import os\n", + "\n", + " import numpy as np\n", + " import pandas as pd\n", + " from Pyfhel import PyCtxt, Pyfhel\n", + "\n", + " crypto_folder = \"./crypto\"\n", + " completed_sentinel = f\"{crypto_folder}/{datasite}\"\n", + " if os.path.exists(completed_sentinel):\n", + " print(\"✅ Already generated 🔐 Homomorphically Encrypted Stats\")\n", + " return\n", + "\n", + " HE = Pyfhel()\n", + " HE.load_context(f\"{crypto_folder}/pyfhel.context\")\n", + " HE.load_secret_key(f\"{crypto_folder}/pyfhel.secret\")\n", + " HE.load_public_key(f\"{crypto_folder}/pyfhel.pk\")\n", + "\n", + " current_year = datetime.datetime.now().year\n", + " df[\"netflix_date\"] = pd.to_datetime(df[\"netflix_date\"])\n", + " year_df = df[df[\"netflix_date\"].dt.year == current_year]\n", + " year_tv_df = year_df[year_df[\"tmdb_media_type\"] == \"tv\"]\n", + " year_tv_df[\"day_of_week\"] = year_tv_df[\"netflix_date\"].dt.day_name()\n", + " total_time = year_tv_df[\"imdb_runtime_minutes\"].sum()\n", + " total_views = len(year_tv_df)\n", + " total_unique_show_views = year_tv_df[\"imdb_id\"].nunique()\n", + " # day_counts = year_tv_df[\"day_of_week\"].value_counts()\n", + " # favorite_day = list(day_counts.to_dict().keys())[0]\n", + " # year_tv_df[\"day_of_week\"] = year_tv_df[\"netflix_date\"].dt.weekday\n", + " # change to an int as a numpy array so we can add them\n", + "\n", + " value_counts = year_tv_df[\"tmdb_id\"].value_counts().astype(int)\n", + "\n", + " stats = {\n", + " \"total_time\": int(total_time),\n", + " \"total_views\": int(total_views),\n", + " \"total_unique_show_views\": int(total_unique_show_views),\n", + " # \"year_fav_day\": str(favorite_day),\n", + " }\n", + "\n", + " stat_folder = f\"./{crypto_folder}/data\"\n", + " part_path = f\"{stat_folder}/totals\"\n", + " slice_folder = f\"{stat_folder}/view_counts\"\n", + " exists_files_folders = [stat_folder, part_path, slice_folder]\n", + "\n", + " for path in exists_files_folders:\n", + " if not os.path.abspath(path):\n", + " raise Exception(f\"Requires {stat_folder} to finish syncing\")\n", + "\n", + " imdb_id_files = os.listdir(slice_folder)\n", + " if len(imdb_id_files) < 10:\n", + " raise Exception(f\"Requires {slice_folder} to finish syncing\")\n", + "\n", + " # write stats to encrypted array\n", + " stats_array = np.zeros(len(stats)).astype(int)\n", + " for i, value in enumerate(stats.values()):\n", + " stats_array[i] = int(value)\n", + "\n", + " value = PyCtxt(pyfhel=HE)\n", + " value.load(part_path)\n", + " value += stats_array\n", + " value.save(part_path)\n", + "\n", + " slice_size = 30_000 # max size of the above HE context\n", + "\n", + " # write imdb_id value counts to chunked arrays\n", + " for k, v in value_counts.items():\n", + " imdb_id = int(k)\n", + " index = imdb_id // slice_size\n", + " sub_index = imdb_id % slice_size\n", + " tv_count_slice = PyCtxt(pyfhel=HE)\n", + " part_path = f\"{slice_folder}/tmdb_id_{index:02}\"\n", + " empty_array = np.zeros(slice_size).astype(int)\n", + " empty_array[sub_index] += int(v)\n", + " tv_count_slice.load(part_path)\n", + " tv_count_slice += empty_array\n", + " tv_count_slice.save(part_path)\n", + "\n", + " with open(f\"{crypto_folder}/{datasite}\", \"w\") as f:\n", + " print(\"✅ Writing 🔐 
Homomorphically Encrypted Stats\")\n", + " f.write(str(datetime.datetime.now()))\n", + "\n", + "\n", + "# netflix_stats(\"me@madhavajay.com\", netflix_tmdb_imdb.load())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "def decode_results(HE, stat_keys, path):\n", + " import numpy as np\n", + " from Pyfhel import PyCtxt\n", + "\n", + " crypto_folder = path + \"/crypto\"\n", + " stat_folder = f\"./{crypto_folder}/data\"\n", + " part_path = f\"{stat_folder}/totals\"\n", + " slice_folder = f\"{stat_folder}/view_counts\"\n", + "\n", + " # decode stats\n", + " value = PyCtxt(pyfhel=HE)\n", + " part_path = f\"{stat_folder}/totals\"\n", + " value.load(part_path)\n", + " value_array = HE.decryptInt(value)\n", + " stats = {}\n", + " for i, key in enumerate(stats_keys):\n", + " stats[key] = int(value_array[i])\n", + "\n", + " tmdb_id_value_counts = {}\n", + " max_tv_id = 300_000 # just a guess\n", + " slice_size = 30_000 # max size of the above HE context\n", + " counter = 0\n", + " for i in range(0, max_tv_id + 1, slice_size):\n", + " part_path = f\"{slice_folder}/tmdb_id_{counter:02}\"\n", + " tv_count_slice = PyCtxt(pyfhel=HE)\n", + " tv_count_slice.load(part_path)\n", + " tv_count_array = HE.decryptInt(tv_count_slice)\n", + "\n", + " non_zero_indices = np.nonzero(tv_count_array)[0].astype(int)\n", + " non_zero_values = tv_count_array[non_zero_indices].astype(int)\n", + " outer_part = counter * slice_size\n", + " non_zero_dict = {int(k + outer_part): int(v) for k, v in dict(zip(non_zero_indices, non_zero_values)).items()}\n", + " tmdb_id_value_counts.update(non_zero_dict)\n", + " counter += 1\n", + " stats[\"value_counts\"] = dict(sorted(tmdb_id_value_counts.items(), key=lambda item: item[1], reverse=True))\n", + " return stats\n", + "\n", + "\n", + "stats_keys = [\n", + " \"total_time\",\n", + " \"total_views\",\n", + " \"total_unique_show_views\",\n", + " # \"year_fav_day\"\n", + "]\n", + "all_results = decode_results(HE, stats_keys, \"./\")\n", + "all_results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "def top_k_summary(all_results, num_parties, top_k=5):\n", + " top_5_summary = {}\n", + " top_5_summary[\"avg_time\"] = round(all_results[\"total_time\"] / num_parties)\n", + " top_5_summary[\"avg_views\"] = round(all_results[\"total_views\"] / num_parties)\n", + " top_5_summary[\"avg_unique_show_views\"] = round(all_results[\"total_unique_show_views\"] / num_parties)\n", + " top_5_summary[\"top_5\"] = dict(\n", + " sorted(all_results[\"value_counts\"].items(), key=lambda item: item[1], reverse=True)[:top_k]\n", + " )\n", + " return top_5_summary\n", + "\n", + "\n", + "top_k_summary(all_results, 1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "dataset_list = [dataset for dataset in datasets]\n", + "type(dataset_list)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "dataset_list[1].syft_link.datasite" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "path = netflix_stats.to_flow(client_config=client_config, inputs={\"dfs\": dataset_list})\n", + "path" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/syftbox/notebooks/rsync.ipynb b/packages/syftbox/notebooks/rsync.ipynb new file mode 100644 index 00000000000..a92593b667c --- /dev/null +++ b/packages/syftbox/notebooks/rsync.ipynb @@ -0,0 +1,168 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "import hashlib\n", + "import sys\n", + "from pathlib import Path\n", + "\n", + "import py_fast_rsync\n", + "from faker import Faker\n", + "\n", + "fake = Faker()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "data_folder = Path(\"./data\")\n", + "data_folder.mkdir(exist_ok=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "def create_txt(size_in_kb):\n", + " content = \"\"\n", + " while len(content.encode(\"utf-8\")) < size_in_kb * 1024:\n", + " content += fake.text() + \"\\n\"\n", + " return content\n", + "\n", + "\n", + "for size in [1, 10, 100, 1000]:\n", + " file_content = create_txt(size)\n", + " file_path = data_folder / f\"{size}KB.txt\"\n", + " file_path.write_text(file_content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "from py_fast_rsync import signature" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "with open(\"./data/1000KB.txt\", \"rb\") as f:\n", + " data_server = f.read()\n", + "\n", + "data_local = data_server[:-100]\n", + "\n", + "# data_server has 100 more characters\n", + "len(data_server), len(data_local)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# Scenario 1: client pulls changes from server\n", + "\n", + "# 1. client sends signature to server\n", + "# POST /rsync/get_diff {path: str, signature: blob}\n", + "sig_local = signature.calculate(data_local)\n", + "print(f\"sending {sys.getsizeof(sig_local)} bytes to server\")\n", + "\n", + "# 2. server calculates diff and hash for verification\n", + "diff = py_fast_rsync.diff(sig_local, data_server)\n", + "hash_server = hashlib.sha256(data_server).digest()\n", + "\n", + "# 3. server returns diff to client\n", + "print(f\"sending {sys.getsizeof(diff)} + {sys.getsizeof(hash_server)} bytes to client\")\n", + "\n", + "# 4. client applies diff\n", + "result = py_fast_rsync.apply(data_local, diff)\n", + "\n", + "# 5. 
client verifies the result\n", + "hash_result = hashlib.sha256(result).digest()\n", + "assert hash_result == hash_server\n", + "assert data_server == result\n", + "\n", + "# Calculate bytes saved\n", + "bytes_saved = sys.getsizeof(data_server) - sys.getsizeof(diff) - sys.getsizeof(hash_server)\n", + "print(\"bytes saved\", bytes_saved)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "# Scenario 2: client pushes changes to server\n", + "\n", + "# 1. client requests signature from server\n", + "# GET /rsync/get_signature?path={path}\n", + "sig_server = signature.calculate(data_server)\n", + "print(f\"sending {sys.getsizeof(sig_server)} bytes to client\")\n", + "\n", + "# 2. client calculates diff and hash for verification\n", + "diff = py_fast_rsync.diff(sig_server, data_local)\n", + "hash_local = hashlib.sha256(data_local).digest()\n", + "\n", + "# 3. client sends diff to server\n", + "# POST /rsync/apply_diff\n", + "print(f\"sending {sys.getsizeof(diff)} + {sys.getsizeof(hash_local)} bytes to server\")\n", + "\n", + "# 4. server applies diff\n", + "result = py_fast_rsync.apply(data_server, diff)\n", + "\n", + "# 5. server verifies the result\n", + "hash_result = hashlib.sha256(result).digest()\n", + "assert hash_result == hash_local\n", + "assert data_local == result\n", + "\n", + "# Calculate bytes saved\n", + "bytes_saved = sys.getsizeof(data_server) - sys.getsizeof(diff) - sys.getsizeof(hash_local)\n", + "print(\"bytes saved\", bytes_saved)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/syftbox/notebooks/trade_private.csv b/packages/syftbox/notebooks/trade_private.csv new file mode 100644 index 00000000000..8a95dea154d --- /dev/null +++ b/packages/syftbox/notebooks/trade_private.csv @@ -0,0 +1,11 @@ +,Classification,Year,Period,Period Desc.,Aggregate Level,Is Leaf Code,Trade Flow Code,Trade Flow,Reporter Code,Reporter,Reporter ISO,Partner Code,Partner,Partner ISO,Commodity Code,Commodity,Qty Unit Code,Qty Unit,Qty,Netweight (kg),Trade Value (US$),Flag +0,HS,2021,202102,February 2021,4,0,1,Imports,124,Canada,,490,"Other Asia, nes",,6117,"Clothing accessories; made up, knitted or crocheted, knitted or crocheted parts of garments or of clothing accessories",0,,,,9285,0 +1,HS,2021,202102,February 2021,2,0,1,Imports,124,Canada,,818,Egypt,,18,Cocoa and cocoa preparations,0,,,0.0,116604,0 +2,HS,2021,202102,February 2021,2,0,1,Imports,124,Canada,,826,United Kingdom,,18,Cocoa and cocoa preparations,0,,,0.0,1495175,0 +3,HS,2021,202102,February 2021,2,0,1,Imports,124,Canada,,834,United Rep. 
of Tanzania,,18,Cocoa and cocoa preparations,0,,,0.0,2248,0 +4,HS,2021,202102,February 2021,2,0,1,Imports,124,Canada,,702,Singapore,,18,Cocoa and cocoa preparations,0,,,0.0,47840,0 +5,HS,2021,202102,February 2021,2,0,1,Imports,124,Canada,,704,Viet Nam,,18,Cocoa and cocoa preparations,0,,,0.0,3526,0 +6,HS,2021,202102,February 2021,2,0,1,Imports,124,Canada,,710,South Africa,,18,Cocoa and cocoa preparations,0,,,0.0,5462,0 +7,HS,2021,202102,February 2021,2,0,1,Imports,124,Canada,,724,Spain,,18,Cocoa and cocoa preparations,0,,,0.0,311425,0 +8,HS,2021,202102,February 2021,2,0,1,Imports,124,Canada,,752,Sweden,,18,Cocoa and cocoa preparations,0,,,0.0,11786,0 +9,HS,2021,202102,February 2021,2,0,1,Imports,124,Canada,,862,Venezuela,,18,Cocoa and cocoa preparations,0,,,0.0,33715,0 diff --git a/packages/syftbox/projects/.gitignore b/packages/syftbox/projects/.gitignore new file mode 100644 index 00000000000..d669fecda25 --- /dev/null +++ b/packages/syftbox/projects/.gitignore @@ -0,0 +1 @@ +netflix_stats/inputs/* \ No newline at end of file diff --git a/packages/syftbox/projects/myanalysis/inputs/trade_data/trade_mock.csv b/packages/syftbox/projects/myanalysis/inputs/trade_data/trade_mock.csv new file mode 120000 index 00000000000..5f41d2aa26b --- /dev/null +++ b/packages/syftbox/projects/myanalysis/inputs/trade_data/trade_mock.csv @@ -0,0 +1 @@ +/Users/madhavajay/dev/syft/users/davo/me@madhavajay.com/public/datasets/trade_data/trade_mock.csv \ No newline at end of file diff --git a/packages/syftbox/projects/myanalysis/inputs/trade_data/trade_mock.csv.private b/packages/syftbox/projects/myanalysis/inputs/trade_data/trade_mock.csv.private new file mode 120000 index 00000000000..15b09aefdb4 --- /dev/null +++ b/packages/syftbox/projects/myanalysis/inputs/trade_data/trade_mock.csv.private @@ -0,0 +1 @@ +/Users/madhavajay/dev/syft/users/davo/me@madhavajay.com/public/datasets/trade_data/trade_mock.csv.private \ No newline at end of file diff --git a/packages/syftbox/projects/myanalysis/main.py b/packages/syftbox/projects/myanalysis/main.py new file mode 100644 index 00000000000..349e4d7aab2 --- /dev/null +++ b/packages/syftbox/projects/myanalysis/main.py @@ -0,0 +1,84 @@ +# /// script +# dependencies = [ +# "opendp==0.11.1", +# "syftbox==0.1.0", +# "pandas==2.2.3", +# ] +# +# [tool.uv.sources] +# syftbox = { path = "/Users/madhavajay/dev/syft", editable = true } +# /// + +__name__ = "myanalysis" +__author__ = "david.rolle@gmail.com" + + +def input_reader(private: bool = False): + import pandas as pd + + from syftbox.lib import sy_path + + inputs = {} + inputs["trade_data"] = pd.read_csv(sy_path("./inputs/trade_data/trade_mock.csv", resolve_private=private)) + return inputs + + +def output_writer(result, private: bool = False): + import json + + output_path = "./output/result/result.json" + if not private: + output_path = output_path.replace(".json", ".mock.json") + with open(output_path, "w") as f: + f.write(json.dumps(result)) + + +# START YOUR CODE + + +def myanalysis(trade_data): + import opendp.prelude as dp + + dp.enable_features("contrib") + + aggregate = 0.0 + base_lap = dp.m.make_laplace( + dp.atom_domain(T=float), + dp.absolute_distance(T=float), + scale=5.0, + ) + noise = base_lap(aggregate) + + total = trade_data["Trade Value (US$)"].sum() + result = (float(total / 1_000_000), float(noise)) + + print(result) + if result[0] > 3: + print("Got mock") + else: + print("Got private") + return result + + +# END YOUR CODE + + +def main(): + import argparse + + parser = 
argparse.ArgumentParser(description="Process some input.") + parser.add_argument("--private", action="store_true", help="Run in private mode") + args = parser.parse_args() + + print(f"Running: {__name__} from {__author__}") + inputs = input_reader(private=args.private) + print("> Reading Inputs", inputs) + + output = myanalysis(**inputs) + + print("> Writing Outputs", output) + output_writer(output, private=args.private) + print(f"> ✅ Running {__name__} Complete!") + + +main() diff --git a/packages/syftbox/projects/myanalysis/manifest.json b/packages/syftbox/projects/myanalysis/manifest.json new file mode 100644 index 00000000000..1cb603ed937 --- /dev/null +++ b/packages/syftbox/projects/myanalysis/manifest.json @@ -0,0 +1,7 @@ +{ + "author": "david.rolle@gmail.com", + "execution_datasite": "me@madhavajay.com", + "result_datasite": "david.rolle@gmail.com", + "write_back_approved_path": "results/2_approved", + "write_back_denied_path": "results/3_denied" +} diff --git a/packages/syftbox/projects/myanalysis/output/result/_.syftperm b/packages/syftbox/projects/myanalysis/output/result/_.syftperm new file mode 100644 index 00000000000..8c6778d41bf --- /dev/null +++ b/packages/syftbox/projects/myanalysis/output/result/_.syftperm @@ -0,0 +1 @@ +{"admin": ["me@madhavajay.com"], "read": ["me@madhavajay.com", "david.rolle@gmail.com"], "write": ["me@madhavajay.com", "david.rolle@gmail.com"], "filepath": "/Users/madhavajay/dev/syft/users/davo/staging/myanalysis/output/result/_.syftperm", "terminal": false} \ No newline at end of file diff --git a/packages/syftbox/projects/myanalysis/run.sh b/packages/syftbox/projects/myanalysis/run.sh new file mode 100755 index 00000000000..b86c44c3542 --- /dev/null +++ b/packages/syftbox/projects/myanalysis/run.sh @@ -0,0 +1,2 @@ +#!/bin/sh +uv run main.py $( [ "$1" = "--private" ] && echo '--private' ) diff --git a/packages/syftbox/projects/netflix_stats/Pyfhel-3.4.2-cp311-cp311-macosx_13_0_arm64.whl b/packages/syftbox/projects/netflix_stats/Pyfhel-3.4.2-cp311-cp311-macosx_13_0_arm64.whl new file mode 100644 index 00000000000..fc389190165 Binary files /dev/null and b/packages/syftbox/projects/netflix_stats/Pyfhel-3.4.2-cp311-cp311-macosx_13_0_arm64.whl differ diff --git a/packages/syftbox/projects/netflix_stats/main.py b/packages/syftbox/projects/netflix_stats/main.py new file mode 100644 index 00000000000..7e7225a7ef9 --- /dev/null +++ b/packages/syftbox/projects/netflix_stats/main.py @@ -0,0 +1,224 @@ +## amke stats +# stat_folder = "./crypto/data" +# os.makedirs(stat_folder, exist_ok=True) +# stats_array = np.zeros(len(stats.keys())).astype(int) +# value = HE.encryptInt(stats_array) +# part_path = f"{stat_folder}/totals" +# value.save(part_path) + +# # encode +# max_tv_id = 300_000 # random guess looking at their website +# tv_count_array = np.zeros(max_tv_id) + +# tmdb_ids = np.array(list(value_counts.keys())) +# counts = np.array(value_counts.values) +# tv_count_array[tmdb_ids] += counts + +# # decode +# non_zero_indices = np.nonzero(tv_count_array)[0].astype(int) +# non_zero_values = tv_count_array[non_zero_indices].astype(int) +# non_zero_dict = {int(k): int(v) for k, v in dict(zip(non_zero_indices, non_zero_values)).items()} +# print(non_zero_dict) +# return non_zero_dict +# HE = Pyfhel() +# HE.contextGen(scheme='bfv', n=2**15, t_bits=20) +# HE.keyGen() +# import os +# os.makedirs("crypto", exist_ok=True) +# HE.save_secret_key("crypto/pyfhel.secret") +# HE.save_public_key("crypto/pyfhel.pk") +# HE.save_context("crypto/pyfhel.context") + +# brew install 
cmake zlib llvm libomp +# export CC=/opt/homebrew/opt/llvm/bin/clang +# uv pip install Pyfhel + +# LDFLAGS="-L/opt/homebrew/opt/llvm/lib/c++ -L/opt/homebrew/opt/llvm/lib -lunwind" +# If you need to have llvm first in your PATH, run: +# echo 'export PATH="/opt/homebrew/opt/llvm/bin:$PATH"' >> ~/.zshrc + +# For compilers to find llvm you may need to set: +# export LDFLAGS="-L/opt/homebrew/opt/llvm/lib" +# export CPPFLAGS="-I/opt/homebrew/opt/llvm/include" + +# ln -s /Users/madhavajay/dev/syft/users/madhava/me@madhavajay.com/public/datasets/netflix_tmdb_imdb/NetflixViewingHistory_TMDB_IMDB.csv ./dfs_0.csv +# /// script +# dependencies = [ +# "pandas==2.2.3", +# "syftbox==0.1.0", +# "phe==1.5.0", +# "pyfhel==3.4.2", +# ] +# +# [tool.uv.sources] +# syftbox = { path = "/Users/madhavajay/dev/syft", editable = true } +# /// +from typing_extensions import Optional + +__name__ = "netflix_stats" +__author__ = "madhava@openmined.org" + + +def input_reader(private: bool = False, datasite: Optional[str] = None): + import os + + import pandas as pd + + from syftbox.lib import extract_leftmost_email, sy_path + + df = None + files = os.listdir("./inputs/dfs") + for file in files: + if file.startswith("dfs_"): + full_path = f"./inputs/dfs/{file}" + target_path = os.readlink(full_path) + link_datasite = extract_leftmost_email(target_path) + if link_datasite == datasite: + df = pd.read_csv(sy_path(full_path, resolve_private=private)) + break + inputs = {"datasite": datasite, "df": df} + return inputs + + +def output_writer(result, private: bool = False): + import json + + output_path = "./output/result/result.json" + if not private: + output_path = output_path.replace(".json", ".mock.json") + with open(output_path, "w") as f: + f.write(json.dumps(result)) + + +# START YOUR CODE + + +def netflix_stats(datasite, df): + import os + + crypto_folder = "./crypto" + completed_sentinel = f"{crypto_folder}/{datasite}" + if os.path.exists(completed_sentinel): + print("✅ Already generated 🔐 Homomorphically Encrypted Stats") + return + + import datetime + import os + + import numpy as np + import pandas as pd + from Pyfhel import Pyfhel + from Pyfhel.PyCtxt import PyCtxt + + HE = Pyfhel() + HE.load_context(f"{crypto_folder}/pyfhel.context") + HE.load_secret_key(f"{crypto_folder}/pyfhel.secret") + HE.load_public_key(f"{crypto_folder}/pyfhel.pk") + + current_year = datetime.datetime.now().year + df["netflix_date"] = pd.to_datetime(df["netflix_date"]) + year_df = df[df["netflix_date"].dt.year == current_year] + year_tv_df = year_df[year_df["tmdb_media_type"] == "tv"] + year_tv_df["day_of_week"] = year_tv_df["netflix_date"].dt.day_name() + total_time = year_tv_df["imdb_runtime_minutes"].sum() + total_views = len(year_tv_df) + total_unique_show_views = year_tv_df["imdb_id"].nunique() + # day_counts = year_tv_df["day_of_week"].value_counts() + # favorite_day = list(day_counts.to_dict().keys())[0] + # year_tv_df["day_of_week"] = year_tv_df["netflix_date"].dt.weekday + # change to an int as a numpy array so we can add them + + value_counts = year_tv_df["tmdb_id"].value_counts().astype(int) + # top_5_value_counts = {k: int(v) for k, v in value_counts.sort_values(ascending=False)[0:5].items()} + + stats = { + "total_time": int(total_time), + "total_views": int(total_views), + "total_unique_show_views": int(total_unique_show_views), + # "year_fav_day": str(favorite_day), + } + + stat_folder = f"./{crypto_folder}/data" + part_path = f"{stat_folder}/totals" + slice_folder = f"{stat_folder}/view_counts" + exists_files_folders = 
[stat_folder, part_path, slice_folder] + os.makedirs(stat_folder, exist_ok=True) + os.makedirs(slice_folder, exist_ok=True) + + for path in exists_files_folders: + if not os.path.abspath(path): + raise Exception(f"Requires {stat_folder} to finish syncing") + + imdb_id_files = os.listdir(slice_folder) + if len(imdb_id_files) < 10: + raise Exception(f"Requires {slice_folder} to finish syncing") + + # create totals + stats_array = np.zeros(len(stats.keys())).astype(int) + value = HE.encryptInt(stats_array) + value.save(part_path) + + # write stats to encrypted array + stats_array = np.zeros(len(stats)).astype(int) + for i, value in enumerate(stats.values()): + stats_array[i] = int(value) + + value = PyCtxt(pyfhel=HE) + + value.load(part_path) + value += stats_array + value.save(part_path) + + max_tv_id = 300_000 # just a guess + slice_size = 30_000 # max size of the above HE context + + # create imdb_id slices + counter = 0 + for i in range(0, max_tv_id + 1, slice_size): + tv_count_array = np.zeros(slice_size).astype(int) + tv_count_slice = HE.encryptInt(tv_count_array) + part_path = f"{slice_folder}/tmdb_id_{counter:02}" + tv_count_slice.save(part_path) + counter += 1 + + # write imdb_id value counts to chunked arrays + for k, v in value_counts.items(): + imdb_id = int(k) + index = imdb_id // slice_size + sub_index = imdb_id % slice_size + tv_count_slice = PyCtxt(pyfhel=HE) + part_path = f"{slice_folder}/tmdb_id_{index:02}" + empty_array = np.zeros(slice_size).astype(int) + empty_array[sub_index] += int(v) + tv_count_slice.load(part_path) + tv_count_slice += empty_array + tv_count_slice.save(part_path) + + with open(f"{crypto_folder}/{datasite}", "w") as f: + print("✅ Writing 🔐 Homomorphically Encrypted Stats") + f.write(str(datetime.datetime.now())) + + +# END YOUR CODE + + +def main(): + import argparse + + parser = argparse.ArgumentParser(description="Process some input.") + parser.add_argument("--private", action="store_true", help="Run in private mode") + parser.add_argument("--datasite", help="Datasite running operation") + args = parser.parse_args() + + print(f"Running: {__name__} from {__author__}") + inputs = input_reader(private=args.private, datasite=args.datasite) + print("> Reading Inputs", inputs) + + output = netflix_stats(**inputs) + + print("> Writing Outputs", output) + output_writer(output, private=args.private) + print(f"> ✅ Running {__name__} Complete!") + + +main() diff --git a/packages/syftbox/projects/netflix_stats/manifest.json b/packages/syftbox/projects/netflix_stats/manifest.json new file mode 100644 index 00000000000..d1c0d7cf8ec --- /dev/null +++ b/packages/syftbox/projects/netflix_stats/manifest.json @@ -0,0 +1,7 @@ +{ + "author": "madhava@openmined.org", + "execution_datasite": "me@madhavajay.com", + "result_datasite": "madhava@openmined.org", + "write_back_approved_path": "results/2_approved", + "write_back_denied_path": "results/3_denied" +} diff --git a/packages/syftbox/projects/netflix_stats/output/result/_.syftperm b/packages/syftbox/projects/netflix_stats/output/result/_.syftperm new file mode 100644 index 00000000000..24f1e2fe8bb --- /dev/null +++ b/packages/syftbox/projects/netflix_stats/output/result/_.syftperm @@ -0,0 +1 @@ +{"admin": ["madhava@openmined.org", "me@madhavajay.com"], "read": ["madhava@openmined.org", "me@madhavajay.com"], "write": ["madhava@openmined.org", "me@madhavajay.com"], "filepath": "/Users/madhavajay/dev/syft/users/madhava/staging/netflix_stats/output/result/_.syftperm", "terminal": false} \ No newline at end of file diff --git 
a/packages/syftbox/projects/netflix_stats/run.sh b/packages/syftbox/projects/netflix_stats/run.sh new file mode 100755 index 00000000000..35adf81ecfb --- /dev/null +++ b/packages/syftbox/projects/netflix_stats/run.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +# Detect if macOS or something else (e.g., Linux) +if [[ "$OSTYPE" == "darwin"* ]]; then + uv run --python 3.11 -- uv pip install ./Pyfhel-3.4.2-cp311-cp311-macosx_13_0_arm64.whl +else + uv run --python 3.11 -- uv pip install pyfhel==3.4.2 +fi + +uv run --python 3.11 -- uv run main.py "$@" diff --git a/packages/syftbox/pyproject.toml b/packages/syftbox/pyproject.toml new file mode 100644 index 00000000000..c46d45395c1 --- /dev/null +++ b/packages/syftbox/pyproject.toml @@ -0,0 +1,98 @@ +[project] +name = "syftbox" +version = "0.3.5" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.9" +dependencies = [ + "fastapi==0.115.7", + "uvicorn==0.34.0", + "gunicorn==23.0.0", + "jinja2==3.1.5", + "typing-extensions==4.12.2", + "pydantic-settings==2.7.1", + "httpx==0.28.1", + "pyyaml==6.0.2", + "psutil==6.1.1", + "loguru==0.7.3", + "py-fast-rsync==0.1.0", + "pathspec==0.12.1", + "python-multipart==0.0.20", + "rich==13.9.4", + "croniter==6.0.0", + "typer==0.15.1", + "pid==3.0.4", + "pydantic[email]==2.10.6", + "pyjwt==2.10.1", + "wcmatch==10.0", + "curl-cffi>=0.7.4", + "opentelemetry-instrumentation-fastapi==0.50b0", + "opentelemetry-instrumentation-sqlite3==0.50b0", + "opentelemetry-exporter-otlp-proto-grpc==1.29.0", + "distro==1.9.0", + "textual>=1.0.0", + "msgpack>=1.1.0", + "tqdm>=4.67.1", + "aiofiles>=24.1.0", + "requests>=2.32.3", +] + +# Local dependencies for development +# Add using `uv add --group ` +[dependency-groups] + +# Published optional dependencies, or "extras". 
Will be referenced in the built wheel +# add using `uv add --optional ` +[project.optional-dependencies] + +[project.scripts] +syftbox = "syftbox.main:main" + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.sdist] +include = ["syftbox", "default_apps", "./uv.lock", "./README.md"] +exclude = ["syftbox/assets/icon/*"] + +[tool.uv] +# add using `uv add --dev ` +# this will be completely ignored in the built wheel +dev-dependencies = [ + "bump2version>=1.0.1", + "faker>=30.3.5", + "ipykernel>=6.29.5", + "locust>=2.32.0", + "pre-commit>=4.0.1", + "pytest-asyncio>=0.24.0", + "pytest-cov>=5.0.0", + "pytest-httpx>=0.35.0", + "pytest-timeout>=2.3.1", + "pytest-xdist[psutil]>=3.6.1", + "pytest>=8.3.3", + "textual-dev>=1.7.0", +] + +[tool.pytest.ini_options] +pythonpath = ["."] +asyncio_default_fixture_loop_scope = "function" + +[tool.ruff] +line-length = 120 +exclude = [".venv", "dist", ".clients", ".e2e", ".server"] + +[tool.ruff.lint] +extend-select = ["I"] + +[tool.ruff.lint.per-file-ignores] +"**/__init__.py" = ["F401"] + +[tool.coverage.report] +skip_empty = true + +[tool.setuptools] +include-package-data = true + +[tool.setuptools.package-data] +'syftbox' = ['syftbox/server2client_version.json'] diff --git a/packages/syftbox/renovate.json b/packages/syftbox/renovate.json new file mode 100644 index 00000000000..35b4eae91cc --- /dev/null +++ b/packages/syftbox/renovate.json @@ -0,0 +1,12 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended", + ":maintainLockFilesWeekly", + ":prHourlyLimitNone" + ], + "prConcurrentLimit": 0, + "lockFileMaintenance": { + "enabled": true + } +} diff --git a/packages/syftbox/scripts/upgrade_version_matrix.py b/packages/syftbox/scripts/upgrade_version_matrix.py new file mode 100644 index 00000000000..17c9ec7ef14 --- /dev/null +++ b/packages/syftbox/scripts/upgrade_version_matrix.py @@ -0,0 +1,59 @@ +import argparse +import json + +from packaging.version import Version + +parser = argparse.ArgumentParser("upgrade_version_matrix") +parser.add_argument("upgrade_type", choices=["major", "minor", "patch"]) +parser.add_argument("--breaking_changes", action="store_true") + +args = parser.parse_args() +print(args.upgrade_type) +print(args.breaking_changes) + +with open("../syftbox/server2client_version.json") as json_file: + version_matrix = json.load(json_file) + +versions = list(version_matrix.keys()) +versions.sort(key=Version) +last_version = versions[-1] +version_numbers = last_version.split(".") + +if args.upgrade_type == "patch": + if args.breaking_changes: + raise Exception( + "Patch upgrades imply no breaking changes. 
If you have breaking changes please consider a minor version upgrade" + ) + version_numbers[2] = str(int(version_numbers[2]) + 1) + new_version = ".".join(version_numbers) + # new_version = last_version + version_matrix[new_version] = version_matrix[last_version] +elif args.upgrade_type == "minor": + version_numbers[1] = str(int(version_numbers[1]) + 1) + version_numbers[2] = "0" + new_version = ".".join(version_numbers) + if args.breaking_changes: + version_matrix[new_version] = [new_version, ""] + for version in versions: + version_range = version_matrix[version] + if version_range[1] == "": + version_range[1] = new_version + version_matrix[version] = version_range + else: + version_matrix[new_version] = version_matrix[last_version] + +elif args.upgrade_type == "major": + raise NotImplementedError + +with open("../syftbox/server2client_version.json", "w") as json_file: + # json.dump(version_matrix, json_file, indent=4) + json_file.write("{\n") + json_file.write( + ",\n".join( + [ + f""" "{key}": ["{version_range[0]}", "{version_range[1]}"]""" + for key, version_range in version_matrix.items() + ] + ) + ) + json_file.write("\n}\n") diff --git a/packages/syftbox/syftbox/__init__.py b/packages/syftbox/syftbox/__init__.py new file mode 100644 index 00000000000..612801daff8 --- /dev/null +++ b/packages/syftbox/syftbox/__init__.py @@ -0,0 +1,8 @@ +# ____ __ _ ____ +# / ___| _ _ / _| |_| __ ) _____ __ +# \___ \| | | | |_| __| _ \ / _ \ \/ / +# ___) | |_| | _| |_| |_) | (_) > < +# |____/ \__, |_| \__|____/ \___/_/\_\ +# |___/ + +__version__ = "0.3.5" diff --git a/packages/syftbox/syftbox/app/__init__.py b/packages/syftbox/syftbox/app/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/app/cli.py b/packages/syftbox/syftbox/app/cli.py new file mode 100644 index 00000000000..83c8a361755 --- /dev/null +++ b/packages/syftbox/syftbox/app/cli.py @@ -0,0 +1,173 @@ +import sys +from pathlib import Path +from typing import List + +from loguru import logger +from rich import print as rprint +from typer import Argument, Exit, Option, Typer +from typing_extensions import Annotated + +from syftbox import __version__ +from syftbox.app.manager import install_app, list_app, uninstall_app +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.core import SyftBoxRunner +from syftbox.client.plugins.apps import find_and_run_script +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.constants import DEFAULT_CONFIG_PATH +from syftbox.lib.exceptions import ClientConfigException +from syftbox.lib.workspace import SyftWorkspace + +app = Typer( + name="SyftBox Apps", + help="Manage SyftBox apps", + no_args_is_help=True, + pretty_exceptions_enable=False, + context_settings={"help_option_names": ["-h", "--help"]}, +) + +CONFIG_OPTS = Option("-c", "--config", "--config_path", help="Path to the SyftBox config") +CALLED_BY_OPTS = Option("--called-by", help="The context from which the command is called", hidden=True) +REPO_ARGS = Argument(..., show_default=False, help="SyftBox App git repo URL") +BRANCH_OPTS = Option("-b", "--branch", help="git branch name") +UNINSTALL_ARGS = Argument(..., show_default=False, help="Name of the SyftBox App to uninstall") +APP_ENV_SCRIPT = """ +if [ ! -d .venv ]; then + uv venv .venv +fi +. 
.venv/bin/activate +""" + + +@app.command() +def list(config_path: Annotated[Path, CONFIG_OPTS] = DEFAULT_CONFIG_PATH) -> None: + """List all installed Syftbox apps""" + workspace = get_workspace(config_path) + result = list_app(workspace) + + if len(result.apps) == 0: + rprint(f"No apps installed in '{result.apps_dir}'") + sys.exit(0) + + rprint(f"Apps installed in '{result.apps_dir}'") + for app in result.apps: + rprint(f"- [bold cyan]{app.name}[/bold cyan]") + + +@app.command() +def install( + repository: Annotated[str, REPO_ARGS], + branch: Annotated[str, BRANCH_OPTS] = "main", + config_path: Annotated[Path, CONFIG_OPTS] = DEFAULT_CONFIG_PATH, + called_by: Annotated[str, CALLED_BY_OPTS] = "user", +) -> None: + """Install a new Syftbox app""" + context = get_syftbox_context(config_path) + result = install_app(context.workspace, repository, branch) + if result.error: + rprint(f"[bold red]Error:[/bold red] {result.error}") + raise Exit(1) + + try: + context.client.log_analytics_event( + event_name="app_install", + app_name=result.app_name, + called_by=called_by, + ) + except Exception as e: + logger.debug(f"Failed to log analytics event: {e}") + + rprint(f"Installed app [bold]'{result.app_name}'[/bold]\nLocation: '{result.app_path}'") + + +@app.command() +def uninstall( + app_name: Annotated[str, UNINSTALL_ARGS], + config_path: Annotated[Path, CONFIG_OPTS] = DEFAULT_CONFIG_PATH, +) -> None: + """Uninstall a Syftbox app""" + workspace = get_workspace(config_path) + result = uninstall_app(app_name, workspace) + if not result: + rprint(f"[bold red]Error:[/bold red] '{app_name}' app not found") + raise Exit(1) + + rprint(f"Uninstalled app [bold]'{app_name}'[/bold] from '{result}'") + + +@app.command() +def run( + app_name: str, + config_path: Annotated[Path, CONFIG_OPTS] = DEFAULT_CONFIG_PATH, +) -> None: + """Run a Syftbox app""" + workspace = get_workspace(config_path) + + extra_args: List[str] = [] + try: + rprint(f"Running [bold]'{app_name}'[/bold]\nLocation: '{workspace.apps}'\n") + result = find_and_run_script(workspace.apps / app_name, extra_args, config_path) + rprint("[bold yellow]stdout:[/bold yellow]") + print(result.stdout) + rprint("[bold yellow]stderr:[/bold yellow]") + print(result.stderr) + except Exception as e: + rprint("[bold red]Error:[/bold red]", e) + raise Exit(1) + + +@app.command(rich_help_panel="General Options") +def env(with_syftbox: bool = False) -> None: + """Setup virtual env for app. 
With option to install syftbox matching client version""" + + script = APP_ENV_SCRIPT + if with_syftbox: + script += f"\nuv pip install -U syftbox=={__version__}" + print(script) + + +# @app.command() +# def update( +# app_name: Annotated[str, Argument(help="Name of the app to uninstall")], +# config_path: Annotated[Path, CONFIG_OPTS] = DEFAULT_CONFIG_PATH, +# ): +# """Update a Syftbox app""" +# pass + + +def get_syftbox_context(config_path: Path) -> SyftBoxContextInterface: + try: + conf = SyftClientConfig.load(config_path) + context = SyftBoxRunner(conf).context + return context + except ClientConfigException: + msg = ( + f"[bold red]Error:[/bold red] Couldn't load config at: [yellow]'{config_path}'[/yellow]\n" + "Please ensure that:\n" + " - The configuration file exists at the specified path.\n" + " - You've run SyftBox at least once.\n" + f" - For custom configs, provide the proper path using [cyan]--config[/cyan] flag" + ) + rprint(msg) + raise Exit(1) + except Exception as e: + rprint(f"[bold red]Error:[/bold red] {e}") + raise Exit(1) + + +def get_workspace(config_path: Path) -> SyftWorkspace: + try: + conf = SyftClientConfig.load(config_path) + return SyftWorkspace(conf.data_dir) + except ClientConfigException: + msg = ( + f"[bold red]Error:[/bold red] Couldn't load config at: [yellow]'{config_path}'[/yellow]\n" + "Please ensure that:\n" + " - The configuration file exists at the specified path.\n" + " - You've run SyftBox at least once.\n" + f" - For custom configs, provide the proper path using [cyan]--config[/cyan] flag" + ) + rprint(msg) + raise Exit(1) + except Exception as e: + rprint(f"[bold red]Error:[/bold red] {e}") + raise Exit(1) diff --git a/packages/syftbox/syftbox/app/install.py b/packages/syftbox/syftbox/app/install.py new file mode 100644 index 00000000000..12fbea3580f --- /dev/null +++ b/packages/syftbox/syftbox/app/install.py @@ -0,0 +1,773 @@ +import json +import os +import platform +import re +import shutil +import subprocess +from dataclasses import dataclass +from pathlib import Path +from tempfile import mkdtemp +from types import SimpleNamespace + +from typing_extensions import Any, Optional + +from syftbox.lib.types import PathLike + + +def is_git_installed() -> bool: + """ + Checks if Git is installed on the system. + + Returns: + bool: `True` if Git is installed, `False` otherwise. + + Functionality: + - Runs the `git --version` command to check if Git is installed. + - If the command runs successfully, returns `True`. + - If the command fails (e.g., Git is not installed), returns `False`. + + Example: + ```python + if is_git_installed(): + print("Git is installed on this system.") + else: + print("Git is not installed. Please install Git to proceed.") + ``` + This will print a message indicating whether Git is installed or not. + """ + try: + subprocess.run( + ["git", "--version"], + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + return True + except subprocess.CalledProcessError: + return False + + +def sanitize_git_path(path: str) -> str: + """ + Validates and sanitizes a Git repository path, ensuring it matches the required format. + + Args: + path (str): The Git repository path to validate. + + Returns: + str: The sanitized Git repository path if it matches the valid pattern. + + Raises: + ValueError: If the provided path does not match the expected format for a Git repository. + + Functionality: + - Uses a regular expression pattern to ensure that the given path follows the format `owner/repository`. 
+ - If the path matches the pattern, returns it as a valid Git path. + - If the path does not match the pattern, raises a `ValueError` with a descriptive message. + + Example: + Suppose you have a GitHub path like `OpenMined/logged_in` and want to validate it: + ```python + try: + sanitized_path = sanitize_git_path("OpenMined/logged_in") + except ValueError as e: + print(e) + ``` + If the path is valid, `sanitized_path` will contain the validated GitHub path. If it is not valid, the error message + "Invalid Git repository path format. (eg: OpenMined/logged_in)" will be printed. + """ + + if path.startswith("http://"): + path = path.replace("http://", "") + + if path.startswith("https://"): + path = path.replace("https://", "") + + if path.startswith("github.com/"): + path = path.replace("github.com/", "") + + # Define a regex pattern for a valid GitHub path + pattern = r"^[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+$" + + # Check if the path matches the pattern + if re.match(pattern, path): + return path + else: + raise ValueError("Invalid Git repository path format. (eg: OpenMined/logged_in)") + + +def delete_folder_if_exists(folder_path: PathLike) -> None: + """ + Deletes a folder if it exists at the specified path. + + Args: + folder_path (PathLike): The path to the folder to be deleted. + + Returns: + None: This function does not return any value. + + Functionality: + - Checks if the folder exists at the given path. + - If the folder exists and is a directory, deletes it and all of its contents using `shutil.rmtree()`. + + Example: + Suppose you want to delete a folder located at `/tmp/old_clone` if it exists: + ```python + delete_folder_if_exists("/tmp/old_clone") + ``` + This will delete the folder and all of its contents if it exists. + """ + if os.path.exists(folder_path) and os.path.isdir(folder_path): + shutil.rmtree(folder_path) + + +def is_repo_accessible(repo_url: str) -> bool: + """ + Checks if the specified Git repository is accessible. + + Args: + repo_url (str): The URL of the Git repository to check. + + Returns: + bool: `True` if the repository is accessible, `False` otherwise. + + Functionality: + - Uses the `git ls-remote` command to check if the Git repository is accessible. + - If the command succeeds, returns `True`. + - If the command fails or times out, returns `False`. + + Example: + Suppose you want to check if a repository located at `https://github.com/example/repo.git` is accessible. + You can call the function like this: + ```python + is_accessible = is_repo_accessible("https://github.com/example/repo.git") + ``` + This will return `True` if the repository is accessible, or `False` if it is not. + """ + try: + env = os.environ.copy() + env["GIT_TERMINAL_PROMPT"] = "0" + subprocess.run( + ["git", "ls-remote", repo_url], + check=True, + env=env, + capture_output=True, + ) + return True + except subprocess.CalledProcessError: + return False + + +def clone_repository(sanitized_git_path: str, branch: str) -> PathLike: + """ + Clones a Git repository from GitHub to a temporary directory. + + Args: + sanitized_git_path (str): The Git repository path in the format `owner/repository`. + + Returns: + str: The path to the cloned repository. + + Raises: + Exception: If Git is not installed on the system. + ValueError: If the provided repository path is not accessible. + CalledProcessError: If there is an error during the cloning process. + + Functionality: + - Checks if Git is installed on the system by calling `is_git_installed()`. 
+ - Forms the GitHub repository URL from the provided `sanitized_git_path`. + - Checks if the repository is accessible by calling `is_repo_accessible()`. + - Clones the repository to a temporary directory (`/tmp`). + - Deletes any existing folder in `/tmp` with the same name before cloning. + - If cloning is successful, returns the path to the cloned repository. + - If any error occurs during cloning, raises the corresponding exception. + + Example: + Suppose you want to clone a repository located at `OpenMined/PySyft` to a temporary directory. + You can call the function like this: + ```python + try: + clone_path = clone_repository("OpenMined/PySyft") + print(f"Repository cloned to: {clone_path}") + except Exception as e: + print(e) + ``` + This will clone the repository to `/tmp/PySyft` if successful, or print an error message if any issues occur. + """ + if not is_git_installed(): + raise Exception( + "git cli isn't installed. Please, follow the instructions" + + " to install git according to your OS. (eg. brew install git)" + ) + repo_url = f"https://github.com/{sanitized_git_path}.git" + if not is_repo_accessible(repo_url): + raise ValueError(f"Cannot access repository {repo_url}") + + # Clone repository in /tmp + tmp_path = mkdtemp(prefix="syftbox_app_") + temp_clone_path = Path(tmp_path, sanitized_git_path.split("/")[-1]) + + # Delete if there's already an existent repository folder in /tmp path. + delete_folder_if_exists(temp_clone_path) + + try: + subprocess.run( + [ + "git", + "clone", + "-b", + branch, + "--single-branch", + repo_url, + temp_clone_path, + ], + check=True, + text=True, + capture_output=True, + ) + return temp_clone_path + except subprocess.CalledProcessError as e: + raise RuntimeError(e.stderr) + + +def dict_to_namespace(data: Any) -> Any: + """ + Converts a dictionary (or nested dictionary) to a SimpleNamespace object. + + Args: + data (dict or list): The data to convert. Can be a dictionary, list of dictionaries, or other types. + + Returns: + SimpleNamespace or list: A SimpleNamespace object representing the dictionary data, + or a list of SimpleNamespace objects if the input is a list. + If the input is not a dictionary or list, returns the input as-is. + + Functionality: + - Recursively converts dictionaries to SimpleNamespace objects. + - If the data is a list, each item in the list is recursively converted. + - If the data is neither a dictionary nor a list, returns the data unchanged. + + Example: + Suppose you have a dictionary with nested data: + ```python + data = { + "user": { + "name": "Alice", + "age": 30, + "address": { + "city": "Wonderland", + "zipcode": "12345" + } + }, + "active": True + } + namespace_data = dict_to_namespace(data) + print(namespace_data.user.name) # Output: Alice + print(namespace_data.user.address.city) # Output: Wonderland + ``` + This will allow you to access dictionary values using dot notation like attributes. + """ + if isinstance(data, dict): + return SimpleNamespace(**{key: dict_to_namespace(value) for key, value in data.items()}) + elif isinstance(data, list): + return [dict_to_namespace(item) for item in data] + else: + return data + + +def load_config(path: PathLike) -> SimpleNamespace: + """ + Loads a JSON configuration file and converts it to a SimpleNamespace object. + + Args: + path (str): The file path to the JSON configuration file. + + Returns: + SimpleNamespace: A SimpleNamespace object representing the configuration data. 
+ + Raises: + ValueError: If the file does not exist, is not in JSON format, or does not contain a dictionary. + + Functionality: + - Checks if the provided file path exists. If not, raises a `ValueError` indicating the file is not found. + - Opens and reads the JSON file. If the file cannot be decoded or does not contain a dictionary, raises a `ValueError`. + - Converts the loaded dictionary to a SimpleNamespace object for easy attribute-based access. + + Example: + Suppose you have a JSON configuration file at `/path/to/config.json` with the following content: + ```json + { + "version": "0.1.0", + "app": { + "version": "1.0", + "env": { + "TEST_ENV": "testing" + } + } + } + ``` + You can load the configuration and access its fields using dot notation: + ```python + try: + config = load_config("/path/to/config.json") + print(config.app.version) # Output: 1.0 + print(config.app.env.TEST_ENV) # Output: testing + except ValueError as e: + print(e) + ``` + This will load the configuration and allow access to its values using attribute access. + """ + if not os.path.exists(path): + raise ValueError(f"config not found - {path}") + try: + error_msg = "File isn't in JSON format." + with open(path, "r") as f: + data = json.load(f) + if not isinstance(data, dict): + raise ValueError(error_msg) + except json.JSONDecodeError: + raise ValueError(error_msg) + return dict_to_namespace(data) + + +def create_symbolic_link(apps_dir: PathLike, sanitized_path: PathLike) -> str: + """ + Creates a symbolic link from the application directory in the Syftbox directory to the user's sync folder. + + Args: + apps_dir (Path): The path to the `apps` directory in the Syftbox configuration folder. + sanitized_path (str): The sanitized Git repository path in the format `owner/repository`. + + Returns: + str: The path of the created symbolic link. + + Functionality: + - Constructs the symbolic link path within the user's sync folder (`apps` folder). + - If a symlink already exists at the target location, deletes it to avoid conflicts. + - Creates a new symbolic link pointing from the sync folder to the application directory. + + Example: + Suppose you want to create a symbolic link for an application located at `/home/user/.syftbox/apps/PySyft`: + ```python + create_symbolic_link( + apps_dir=SyftWorkspace.apps, # ex "/home/user/SyftBox/apis", + sanitized_path="OpenMined/PySyft" + ) + ``` + This will create a symbolic link at `/apps/PySyft` pointing to the application directory. + """ + # TODO: Create a Symlink function + # - Handles the case where the path doesn't exist. + target_symlink_path = f"{apps_dir}/{str(sanitized_path).split('/')[-1]}" + + # Create the symlink + if os.path.exists(target_symlink_path) and os.path.islink(target_symlink_path): + os.unlink(target_symlink_path) + + if not os.path.exists(target_symlink_path): + os.symlink(sanitized_path, target_symlink_path) + else: + raise Exception(f"Path exists and isn't a symlink: {target_symlink_path}") + return target_symlink_path + + +def move_repository_to_syftbox(apps_dir: Path, tmp_clone_path: PathLike, sanitized_path: PathLike) -> str: + """ + Moves a cloned Git repository to the Syftbox directory. + + Args: + apps_dir (Path): The path to the `apps` directory in the Syftbox configuration folder. + tmp_clone_path (str): The file path to the temporarily cloned Git repository. + sanitized_path (str): The sanitized Git repository path in the format `owner/repository`. + + Returns: + str: The final destination path of the moved repository. 
+ + Functionality: + - Constructs the destination path within the Syftbox configuration directory (`apps` folder). + - Deletes any existing folder at the destination path to avoid conflicts. + - Moves the repository from the temporary clone path to the destination path. + - Returns the new path of the moved repository. + + Example: + Suppose you have cloned a repository to a temporary path `/tmp/syftbox` and want to move it to the Syftbox directory: + ```python + output_path = move_repository_to_syftbox("/tmp/PySyft", "OpenMined/PySyft") + print(output_path) # Output: /path/to/config/apps/PySyft + ``` + This will move the cloned repository to the Syftbox `apps` directory and return the final destination path. + """ + output_path = f"{apps_dir}/{str(sanitized_path).split('/')[-1]}" + delete_folder_if_exists(output_path) + shutil.move(tmp_clone_path, output_path) + return output_path + + +def run_pre_install(app_config: SimpleNamespace, app_path: str) -> None: + """ + Runs pre-installation commands specified in the application configuration. + + Args: + app_config (SimpleNamespace): The configuration object for the application, which is expected to have an `app` + attribute with a `pre_install` attribute containing a list of commands to run. + app_path (string): The file path to the app folder. + + Returns: + None: This function does not return any value. + + Functionality: + - Checks if the `pre_install` attribute exists and contains commands in the application configuration. + - If the `pre_install` attribute is empty or does not exist, the function returns without executing any command. + - If there are pre-installation commands, runs them using `subprocess.run()`. + + Example: + Suppose you have an application configuration that specifies a pre-installation command to install dependencies: + ```python + app_config = SimpleNamespace( + app=SimpleNamespace(pre_install=["echo", "Installing dependencies..."]) + ) + run_pre_install(app_config) + ``` + This will run the specified pre-installation command using `subprocess.run()`. + """ + if len(getattr(app_config.app, "pre_install", [])) == 0: + return + + try: + subprocess.run( + app_config.app.pre_install, + cwd=app_path, + capture_output=True, + check=True, + text=True, + ) + except subprocess.CalledProcessError as e: + raise RuntimeError(e.stderr) + + +def run_post_install(app_config: SimpleNamespace, app_path: str) -> None: + """ + Runs post-installation commands specified in the application configuration. + + Args: + app_config (SimpleNamespace): The configuration object for the application, which is expected to have an `app` + attribute with a `post_install` attribute containing a list of commands to run. + + Returns: + None: This function does not return any value. + + Functionality: + - Checks if the `post_install` attribute exists and contains commands in the application configuration. + - If the `post_install` attribute is empty or does not exist, the function returns without executing any command. + - If there are post-installation commands, runs them using `subprocess.run()`. + + Example: + Suppose you have an application configuration that specifies a post-installation command to perform cleanup: + ```python + app_config = SimpleNamespace( + app=SimpleNamespace(post_install=["echo", "Performing post-installation cleanup..."]) + ) + run_post_install(app_config) + ``` + This will run the specified post-installation command using `subprocess.run()`. 
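+        A fuller invocation (illustrative only; the command list and path below are assumptions, not values defined by this module) also passes the app folder, since the commands run with `cwd` set to `app_path`:
+        ```python
+        app_config = SimpleNamespace(
+            app=SimpleNamespace(post_install=["python", "cleanup.py"])
+        )
+        # relative paths in the command resolve inside the app folder
+        run_post_install(app_config, "/home/user/.syftbox/apps/PySyft")
+        ```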
+ """ + if len(getattr(app_config.app, "post_install", [])) == 0: + return + + try: + subprocess.run( + app_config.app.post_install, + cwd=app_path, + capture_output=True, + check=True, + text=True, + ) + except subprocess.CalledProcessError as e: + raise RuntimeError(e.stderr) + + +def check_os_compatibility(app_config: SimpleNamespace) -> None: + """ + Checks whether the current operating system is compatible with the application based on the configuration. + + Args: + app_config: The configuration object for the application, which is expected to have an `app` attribute + with a `platforms` attribute containing a list of supported operating systems. + + Returns: + None: This function does not return any value. + + Raises: + OSError: If the current operating system is not supported by the application. + + Functionality: + - Uses the `platform.system()` function to determine the current operating system. + - Checks the application's configuration (`app_config`) for a list of supported operating systems. + - If no platforms are defined in the configuration, the function simply returns without doing anything. + - If the current operating system is not in the list of supported platforms, raises an `OSError`. + + Example: + Suppose you have an application configuration that specifies supported platforms as `['Windows', 'Linux']`. + The function will determine the current operating system and raise an `OSError` if it is not supported: + ```python + try: + check_os_compatibility(app_config) + except OSError as e: + print(e) + ``` + If the current OS is not in the supported platforms list, the message "Your OS isn't supported by this app." will be printed. + """ + os_name = platform.system().lower() + supported_os = getattr(app_config.app, "platforms", []) + + # If there's no platforms field in config.json, just ignore it. + if len(supported_os) == 0: + return + + is_compatible = False + for operational_system in supported_os: + if operational_system.lower() == os_name: + is_compatible = True + + if not is_compatible: + raise OSError("Your OS isn't supported by this app.") + + +def get_current_commit(app_path: str) -> str: + """ + Retrieves the current commit hash for a Git repository located at the specified path. + + Args: + app_path (str): The file path to the Git repository. + + Returns: + str: The current commit hash of the repository if the command is successful. + If an error occurs, returns an error message describing the failure. + + Functionality: + - Uses the `git rev-parse HEAD` command to get the current commit hash. + - If the command succeeds, returns the commit hash as a string. + - If the command fails (e.g., if the provided path is not a valid Git repository), + returns an error message detailing what went wrong. + + Example: + Suppose you have a Git repository at `/path/to/repo` and want to retrieve its current commit hash. + You can call the function like this: + ```python + commit_hash = get_current_commit("/path/to/repo") + ``` + This will return the commit hash if the repository exists and the command runs successfully, + or an error message if there is an issue with the command. 
+ """ + try: + # Navigate to the repository path and get the current commit hash + commit_hash = ( + subprocess.check_output(["git", "-C", app_path, "rev-parse", "HEAD"], stderr=subprocess.STDOUT) + .strip() + .decode("utf-8") + ) + return commit_hash + except subprocess.CalledProcessError: + return "local" + + +def update_app_config_file(app_path: str, sanitized_git_path: str, app_config: SimpleNamespace) -> None: + """ + Updates the `app.json` configuration file with the current commit and version information of an application. + + Args: + app_path (str): The file path of the application. + sanitized_git_path (str): The sanitized path representing the Git repository. + app_config: The configuration object for the application, which is expected to have an `app` attribute + with a `version` attribute, if available. + + Returns: + None: This function modifies the `app.json` configuration file in place and returns nothing. + + Functionality: + - Normalizes the provided application path. + - Determines the configuration directory by navigating two levels up from the application path. + - Checks if an `app.json` file exists in the configuration directory. + - If it exists, loads its contents into a dictionary. + - If it does not exist, creates an empty dictionary for new configuration entries. + - Retrieves the current commit information of the application using the `get_current_commit` function. + - If the application version is available from the `app_config` object, includes it in the configuration. + - Updates the `app.json` configuration file with the new commit and version information under the key + specified by `sanitized_git_path`. + - Writes the updated configuration back to the `app.json` file with indentation for readability. + + Example: + Suppose you have an application located at `/path/to/app` and you want to update the `app.json` file + with the latest commit and version. You can call the function like this: + ```python + update_app_config_file("/path/to/app", "my_sanitized_git_path", app_config) + ``` + This will update or create entries in `app.json` for the given Git path, storing commit and version details. + """ + normalized_app_path = os.path.normpath(app_path) + + conf_path = os.path.dirname(os.path.dirname(normalized_app_path)) + + app_json_path = conf_path + "/app.json" + app_json_config = {} + + if os.path.exists(app_json_path): + # Read from it. 
+ with open(app_json_path, "r") as app_json_file: + app_json_config = json.load(app_json_file) + + app_version = None + if getattr(app_config.app, "version", None) is not None: + app_version = app_config.app.version + + current_commit = get_current_commit(normalized_app_path) + if current_commit == "local": + app_version = "dev" + + app_json_config[sanitized_git_path] = { + "commit": current_commit, + "version": app_version, + "path": app_path, + } + + with open(app_json_path, "w") as json_file: + json.dump(app_json_config, json_file, indent=4) + + +def check_app_config(tmp_clone_path: PathLike) -> Optional[SimpleNamespace]: + app_config_path = Path(tmp_clone_path) / "config.json" + if os.path.exists(app_config_path): + app_config = load_config(app_config_path) + check_os_compatibility(app_config) + return app_config + return None + + +@dataclass +class InstallResult: + app_name: str + app_path: Path + error: Optional[Exception] + details: Optional[str] + + +def install(apps_dir: Path, repository: str, branch: str) -> InstallResult: + """ + Installs an application by cloning the repository, checking compatibility, and running installation scripts. + + Args: + apps_dir (Path): Path where app will be installed. + + Returns: + None: If the installation is successful. + Tuple[str, Exception]: If an error occurs during any installation step, returns a tuple with the step description and the exception raised. + + Functionality: + - Parses command-line arguments to get the Git repository to install. + - Performs a series of steps to install the application, including: + 1. Sanitizing the Git repository path. + 2. Cloning the repository to a temporary directory. + 3. Loading the application's configuration (`config.json`). + 4. Checking platform compatibility. + 5. Moving the repository to the Syftbox directory. + 6. Creating a symbolic link on the user's desktop. + 7. Running pre-installation commands. + 8. Running post-installation commands. + 9. Updating the `apps.json` file to include the installed application. + - If any step fails, returns the step description and the exception raised. + + Example: + Suppose you have a client configuration and want to install an application from a repository: + ```python + result = install(Path("~/.syftbox/apps"), "OpenMined/PySyft", "main") + if result.error: + print(f"Error installing {result.app_name}: {result.error}") + print(f"Failed at step: {result.details}") + else: + print(f"Successfully installed {result.app_name} at {result.app_path}") + ``` + This will install the application, and if an error occurs, it will indicate the step where the failure happened. + """ + step = "" + try: + # NOTE: + # Sanitize git repository path + # Handles: bad format repository path. + # Returns: Sanitized repository path. + step = "checking app name" + + sanitized_path = repository + if not os.path.exists(repository): + sanitized_path = sanitize_git_path(repository) + + # NOTE: + # Clones the app repository + # Handles: Git cli tool not installed. + # Handles: Repository path doesn't exits / isn't public. + # Handles: If /tmp/apps/ already exists (replaces it) + # Returns: Path where the repository folder was cloned temporarily. + step = "pulling App" + tmp_clone_path = clone_repository(sanitized_path, branch) + + # NOTE: + # Load config.json + # Handles: config.json doesn't exist in the pulled repository + # Handles: config.json version is different from syftbox config version. + # Returns: Loaded app config as SimpleNamespace instance. 
+ else: + tmp_clone_path = os.path.abspath(repository) + + # make optional + app_config: Optional[SimpleNamespace] = None + try: + check_app_config(tmp_clone_path) + except Exception: + # this function is run in cli context + # dont loguru here, either rprint or bubble up the error + app_config = None + + # NOTE: + # Moves the repository from /tmp to ~/.syftbox/apps/ + # Handles: If ~/.syftbox/apps/ already exists (replaces it) + if not os.path.exists(repository): + app_config_path = move_repository_to_syftbox( + apps_dir, + tmp_clone_path=tmp_clone_path, + sanitized_path=sanitized_path, + ) + else: + # Creates a Symbolic Link ( ~/Desktop/Syftbox/app/ -> ~/.syftbox/apps/) + # Handles: If ~/.syftbox/apps/ already exists (replaces it) + step = "creating Symbolic Link" + app_config_path = create_symbolic_link( + apps_dir=apps_dir, + sanitized_path=tmp_clone_path, + ) + + # NOTE: + # Executes config.json pre-install command list + # Handles: Exceptions from pre-install command execution + if app_config: + step = "running pre-install commands" + run_pre_install(app_config, app_config_path) + + # NOTE: + # Executes config.json post-install command list + # Handles: Exceptions from post-install command execution + if app_config: + step = "running post-install commands" + run_post_install(app_config, app_config_path) + + # NOTE: + # Updates the apps.json file + # Handles: If apps.json file doesn't exist yet. + # Handles: If apps.json already have the repository_name app listed. + # Handles: If apps.json exists but doesn't have the repository_name app listed. + if app_config: + step = "updating apps.json config" + update_app_config_file(app_config_path, sanitized_path, app_config) + + app_dir = Path(app_config_path) + return InstallResult(app_name=app_dir.name, app_path=app_dir, error=None, details=None) + except Exception as e: + return InstallResult(app_name="", app_path=Path(""), error=e, details=step) diff --git a/packages/syftbox/syftbox/app/manager.py b/packages/syftbox/syftbox/app/manager.py new file mode 100644 index 00000000000..1c3092bde31 --- /dev/null +++ b/packages/syftbox/syftbox/app/manager.py @@ -0,0 +1,41 @@ +import shutil +from dataclasses import dataclass +from pathlib import Path +from typing import List, Optional + +from syftbox.app.install import InstallResult, install +from syftbox.lib.workspace import SyftWorkspace + + +@dataclass +class InstalledApps: + apps_dir: Path + apps: List[Path] + + +def install_app(workspace: SyftWorkspace, repository: str, branch: str = "main") -> InstallResult: + return install(workspace.apps, repository, branch) + + +def list_app(workspace: SyftWorkspace) -> InstalledApps: + apps = [] + if workspace.apps.exists() and workspace.apps.is_dir(): + apps = sorted([app for app in workspace.apps.iterdir() if app.is_dir()]) + return InstalledApps(workspace.apps, apps) + + +def uninstall_app(app_name: str, workspace: SyftWorkspace) -> Optional[Path]: + app_dir = Path(workspace.apps, app_name) + # first check for symlink + if app_dir.exists() and app_dir.is_symlink(): + app_dir.unlink() + return app_dir + elif app_dir.exists() and app_dir.is_dir(): + shutil.rmtree(app_dir) + return app_dir + else: + return None + + +def update_app(ws: SyftWorkspace) -> None: + pass diff --git a/packages/syftbox/syftbox/assets/icon.zip b/packages/syftbox/syftbox/assets/icon.zip new file mode 100644 index 00000000000..316b77fa5ae Binary files /dev/null and b/packages/syftbox/syftbox/assets/icon.zip differ diff --git 
a/packages/syftbox/syftbox/assets/templates/sync_dashboard.jinja2 b/packages/syftbox/syftbox/assets/templates/sync_dashboard.jinja2 new file mode 100644 index 00000000000..1825cb06f2e --- /dev/null +++ b/packages/syftbox/syftbox/assets/templates/sync_dashboard.jinja2 @@ -0,0 +1,331 @@ + + + + Sync Dashboard + + + +
+ [template markup/JS not captured in this extract - the page shows the notice "This dashboard is for development purposes only, and is not intended for production use.", a "Sync Dashboard" heading, filter controls, a "Total results" counter, and a results table with Path / Date / Status / Message / Action columns]
    + + + + \ No newline at end of file diff --git a/packages/syftbox/syftbox/assets/tui.tcss b/packages/syftbox/syftbox/assets/tui.tcss new file mode 100644 index 00000000000..3bbee628104 --- /dev/null +++ b/packages/syftbox/syftbox/assets/tui.tcss @@ -0,0 +1,40 @@ +.main { + width: 4fr; + padding-right: 1; +} + +.info { + height: 1fr; +} + +.syftbox-logs { + height: 1fr; + margin-top: 1; + background: $surface; +} + +.status { + width: 1fr; + height: 100%; + background: $surface; + margin-right: 1; +} + +.dim { + opacity: 0.6; +} + +.sidebar { + margin-right: 1; + width: 1fr; +} + +.api-logs { + width: 4fr; + height: 100%; + background: $surface; +} + +.padding-top { + padding-top: 1; +} diff --git a/packages/syftbox/syftbox/client/__init__.py b/packages/syftbox/syftbox/client/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/client/api.py b/packages/syftbox/syftbox/client/api.py new file mode 100644 index 00000000000..c08a3f4dd8a --- /dev/null +++ b/packages/syftbox/syftbox/client/api.py @@ -0,0 +1,68 @@ +import contextlib + +from fastapi import FastAPI, Request +from fastapi.middleware.cors import CORSMiddleware +from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint +from starlette.responses import Response +from typing_extensions import AsyncGenerator + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.routers import app_router, datasite_router, index_router, sync_router + + +class NoCacheMiddleware(BaseHTTPMiddleware): + async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: + response = await call_next(request) + response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" + response.headers["Pragma"] = "no-cache" + response.headers["Expires"] = "0" + return response + + +@contextlib.asynccontextmanager +async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: + yield + + +def create_api(context: SyftBoxContextInterface) -> FastAPI: + app = FastAPI(lifespan=lifespan) + + allow_origins = [ + "http://localhost", + "http://localhost:5001", + "http://localhost:8080", + "http://localhost:8081", + "http://localhost:8083", + "https://syftbox.openmined.org", + ] + port = context.config.client_url.port + if port: + # Allow origins for client localhost client API + allow_origins.extend( + [ + f"http://localhost:{port}", + f"http://127.0.0.1:{port}", + f"http://localhost:{port}/", + f"http://127.0.0.1:{port}/", + ] + ) + + app.add_middleware( + CORSMiddleware, + allow_origins=allow_origins, + allow_credentials=True, + allow_methods=["*"], # Allow all HTTP methods + allow_headers=["*"], # Allow all headers + ) + + app.state.context = context + + # Include routers + app.include_router(index_router.router, tags=["index"]) + app.include_router(datasite_router.router, prefix="/datasites", tags=["datasites"]) + app.include_router(app_router.router, prefix="/apps", tags=["apps"]) + app.include_router(sync_router.router, prefix="/sync", tags=["sync"]) + + app.add_middleware(NoCacheMiddleware) + + return app diff --git a/packages/syftbox/syftbox/client/auth.py b/packages/syftbox/syftbox/client/auth.py new file mode 100644 index 00000000000..9b0c458ccc5 --- /dev/null +++ b/packages/syftbox/syftbox/client/auth.py @@ -0,0 +1,97 @@ +from typing import Optional + +import httpx +import typer +from rich import print as rprint +from rich.prompt import Prompt + +from syftbox import __version__ +from syftbox.lib.client_config import SyftClientConfig 
+from syftbox.lib.http import HEADER_SYFTBOX_VERSION + + +def has_valid_access_token(conf: SyftClientConfig, auth_client: httpx.Client) -> bool: + """Returns True if conf has a valid access token that matches the email in the config.""" + if not conf.access_token: + return False + response = auth_client.post( + "/auth/whoami", headers={"Authorization": f"Bearer {conf.access_token}", HEADER_SYFTBOX_VERSION: __version__} + ) + if response.status_code == 401: + rprint("[red]Invalid access token, re-authenticating.[/red]") + return False + elif response.status_code >= 400: + rprint(f"[red]An unexpected error occurred: {response.text}, re-authenticating.[/red]") + return False + + authed_email = response.json().get("email", None) + is_valid = authed_email == conf.email + if not is_valid: + rprint( + f"[red]Invalid access token for {conf.email}, this token is for {authed_email}. re-authenticating.[/red]" + ) + return is_valid + + +def request_email_token(auth_client: httpx.Client, conf: SyftClientConfig) -> Optional[str]: + """ + if auth is enabled, send an email token to the user's email address. + if auth is disabled, the token will be returned directly in the response instead. + + Args: + auth_client (httpx.Client): httpx client + conf (SyftClientConfig): client config + + Returns: + Optional[str]: email token if auth is disabled, None if auth is enabled + """ + response = auth_client.post( + "/auth/request_email_token", json={"email": conf.email}, headers={HEADER_SYFTBOX_VERSION: __version__} + ) + response.raise_for_status() + return response.json().get("email_token", None) + + +def get_access_token( + conf: SyftClientConfig, + auth_client: httpx.Client, + email_token: Optional[str] = None, +) -> str: + """ + Validate the email token and return the access token. + + Args: + auth_client (httpx.Client): httpx client + email_token (Optional[str]): Optional email token. If not provided, + the user will be prompted to input it. + + Returns: + str: access token + """ + if not email_token: + email_token = Prompt.ask( + f"[yellow]Please enter the token sent to {conf.email}. 
Also check your spam folder[/yellow]" + ) + + response = auth_client.post( + "/auth/validate_email_token", + headers={"Authorization": f"Bearer {email_token}", HEADER_SYFTBOX_VERSION: __version__}, + params={"email": conf.email}, + ) + + if response.status_code == 200: + return response.json()["access_token"] + elif response.status_code == 401: + rprint("[red]Invalid token, please copy the full token from your email[/red]") + return get_access_token(conf, auth_client) + else: + rprint(f"[red]An unexpected error occurred: {response.text}[/red]") + raise typer.Exit(1) + + +def authenticate_user(conf: SyftClientConfig, login_client: httpx.Client) -> Optional[str]: + if has_valid_access_token(conf, login_client): + return conf.access_token + + email_token = request_email_token(login_client, conf) + return get_access_token(conf, login_client, email_token) diff --git a/packages/syftbox/syftbox/client/base.py b/packages/syftbox/syftbox/client/base.py new file mode 100644 index 00000000000..ca12b83d1ad --- /dev/null +++ b/packages/syftbox/syftbox/client/base.py @@ -0,0 +1,137 @@ +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING, Optional + +import httpx +from packaging import version +from rich import print as rprint +from typing_extensions import Protocol, Self + +from syftbox import __version__ +from syftbox.client.exceptions import SyftAuthenticationError, SyftPermissionError, SyftServerError, SyftServerTooOld +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.http import HEADER_SYFTBOX_USER, HEADER_SYFTBOX_VERSION, SYFTBOX_HEADERS +from syftbox.lib.version_utils import get_range_for_version +from syftbox.lib.workspace import SyftWorkspace + + +class PluginManagerInterface(Protocol): + """All initialized plugins.""" + + if TYPE_CHECKING: + from syftbox.client.plugins.apps import AppRunner + from syftbox.client.plugins.sync.manager import SyncManager + + @property + def sync_manager(self) -> SyncManager: + """SyncManager instance for managing synchronization tasks.""" + ... + + @property + def app_runner(self) -> AppRunner: + """AppRunner instance for managing application execution.""" + ... + + +class SyftBoxContextInterface(Protocol): + """ + Protocol defining the essential attributes required by SyftClient plugins/components. + + This interface serves two main purposes: + 1. Prevents circular dependencies by providing a minimal interface that + plugins/components can import and type hint against, instead of importing + the full SyftClient class. + 2. Enables dependency injection by defining a contract that any context + or mock implementation can fulfill for testing or modular configuration. + + Attributes: + config: Configuration settings for the Syft client + workspace: Workspace instance managing data and computation + server_client: HTTP client for server communication + """ + + if TYPE_CHECKING: + from syftbox.client.server_client import SyftBoxClient + + config: SyftClientConfig + """Configuration settings for the Syft client.""" + + workspace: SyftWorkspace + """Paths to different dirs in Syft""" + + plugins: Optional[PluginManagerInterface] + """All initialized plugins.""" + + client: "SyftBoxClient" + """Client for communicating with the SyftBox server.""" + + @property + def email(self) -> str: + """Email address of the current user.""" + ... + + @property + def my_datasite(self) -> Path: + """Path to the datasite directory for the current user.""" + ... 
# pragma: no cover + + @property + def all_datasites(self) -> list[str]: + """Path to the datasite directory for the current user.""" + ... # pragma: no cover + + +class ClientBase: + def __init__(self, conn: httpx.Client): + self.conn = conn + + def raise_for_status(self, response: httpx.Response) -> None: + endpoint = response.request.url.path + if response.status_code == 401: + raise SyftAuthenticationError() + elif response.status_code == 403: + raise SyftPermissionError(f"No permission to access this resource: {response.text}") + elif response.status_code != 200: + raise SyftServerError(f"[{endpoint}] Server returned {response.status_code}: {response.text}") + server_version = response.headers.get(HEADER_SYFTBOX_VERSION) + + version_range = get_range_for_version(server_version) + if isinstance(version_range, str): + rprint(f"[bold yellow]{version_range}[/bold yellow]") + else: + lower_bound_version = version_range[0] + upper_bound_version = version_range[1] + + if len(upper_bound_version) > 0 and version.parse(upper_bound_version) < version.parse(__version__): + raise SyftServerTooOld( + f"Server version is {server_version} and can only work with clients between \ + {lower_bound_version} and {upper_bound_version}. Your client has version {__version__}." + ) + + @staticmethod + def _make_headers(config: SyftClientConfig) -> dict: + headers = { + **SYFTBOX_HEADERS, + HEADER_SYFTBOX_USER: config.email, + "email": config.email, # legacy + } + if config.access_token is not None: + headers["Authorization"] = f"Bearer {config.access_token}" + + return headers + + @classmethod + def from_config( + cls, + config: SyftClientConfig, + transport: Optional[httpx.BaseTransport] = None, + ) -> Self: + conn = httpx.Client( + base_url=str(config.server_url), + follow_redirects=True, + headers=cls._make_headers(config), + timeout=config.client_timeout, + transport=transport, + ) + return cls(conn) diff --git a/packages/syftbox/syftbox/client/benchmark/__init__.py b/packages/syftbox/syftbox/client/benchmark/__init__.py new file mode 100644 index 00000000000..b88841a9f89 --- /dev/null +++ b/packages/syftbox/syftbox/client/benchmark/__init__.py @@ -0,0 +1,80 @@ +import json +from dataclasses import asdict, dataclass +from pathlib import Path +from statistics import mean, quantiles, stdev +from typing import TYPE_CHECKING, Any, Optional + +from typing_extensions import Protocol, TypeVar + +from syftbox.lib.client_config import SyftClientConfig + + +@dataclass +class BenchmarkResult: + """Base class for all metrics with common fields.""" + + num_runs: int + + def dict_report(self) -> dict: + return asdict(self) + + def readable_report(self) -> str: + raise NotImplementedError + + +class Benchmark(Protocol): + """ + Protocol for classes that collect performance metrics. + """ + + client_config: SyftClientConfig + + def __init__(self, config: SyftClientConfig): + self.client_config = config + + def collect_metrics(self, num_runs: int) -> BenchmarkResult: + """Calculate performance metrics.""" + ... + + +class BenchmarkReporter(Protocol): + """Protocol defining the interface for benchmark result reporters.""" + + def generate(self, metrics: dict[str, BenchmarkResult]) -> Any: + """Generate the benchmark report.""" + ... 
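+# Illustrative composition of the protocols above (a sketch only; the concrete classes
+# live elsewhere in this package and the config path is just an example):
+#
+#   config = SyftClientConfig.load(Path("~/.syftbox/config.json"))
+#   bench: Benchmark = NetworkBenchmark(config)              # provides collect_metrics()
+#   result: BenchmarkResult = bench.collect_metrics(num_runs=5)
+#   ConsoleReport().generate({"network": result})            # provides generate()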
+ + +@dataclass +class Stats: + """Common statistics structure.""" + + min: float + max: float + mean: float + stddev: float + p50: float + p95: float + p99: float + + @classmethod + def from_values(cls, values: list) -> "Stats": + assert len(values) > 1, "At least 2 values are required to calculate" + values = sorted(values) + + q = quantiles(values, n=100) + return Stats( + min=min(values), + max=max(values), + mean=mean(values), + stddev=stdev(values), + p50=q[49], # median + p95=q[94], # 95th percentile + p99=q[98], # 99th percentile + ) + + def as_list(self) -> list: + return [self.mean, self.stddev, self.min, self.p50, self.p95, self.p99, self.max] + + def __str__(self) -> str: + return f"{self.mean:.3f} ± {self.stddev:.3f} [min: {self.min:.3f}, p50: {self.p50:.3f}, p95: {self.p95:.3f}, p99: {self.p99:.3f}, max: {self.max:.3f}]" diff --git a/packages/syftbox/syftbox/client/benchmark/netstats_http.py b/packages/syftbox/syftbox/client/benchmark/netstats_http.py new file mode 100644 index 00000000000..286b1a6a35c --- /dev/null +++ b/packages/syftbox/syftbox/client/benchmark/netstats_http.py @@ -0,0 +1,150 @@ +import time +from dataclasses import dataclass +from io import BytesIO + +from curl_cffi import Curl, CurlInfo, CurlOpt +from typing_extensions import Optional + +from syftbox.client.benchmark import Stats + + +@dataclass +class HTTPTimingStats: + """Container for HTTP timing statistics.""" + + dns: Stats + """Time taken to resolve the host name""" + tcp_connect: Stats + """Time taken to establish a TCP connection""" + ssl_handshake: Stats + """Time taken to perform the SSL handshake""" + send: Stats + """Time taken to send the request""" + server_wait: Stats + """Time spent waiting for the server to send the first byte of the response""" + content: Stats + """Time taken to download the response""" + total: Stats + """Total time taken for the request""" + redirect: Stats + """Time taken for all redirection steps before the final request""" + success_rate: float + """Percentage of successful requests""" + + +@dataclass +class HTTPTimings: + dns: float + """Time taken to resolve the host name""" + tcp_connect: float + """Time taken to establish a TCP connection""" + ssl_handshake: float + """Time taken to perform the SSL handshake""" + send: float + """Time taken to send the request""" + server_wait: float + """Time spent waiting for the server to send the first byte of the response""" + content: float + """Time taken to download the response""" + total: float + """Total time taken for the request""" + redirect: float + """Time taken for all redirection steps before the final request""" + + +class HTTPPerfStats: + """Measure HTTP connection performance using curl_cffi""" + + def __init__(self, url: str): + self.url = url + self.connect_timeout: int = 30 + self.total_timeout: int = 60 + self.max_redirects: int = 5 + + def get_stats(self, n_runs: int) -> HTTPTimingStats: + """Aggregate performance stats from multiple runs""" + + measurements: list[HTTPTimings] = [] + for _ in range(n_runs): + if stats := self.__make_request(self.url): + measurements.append(stats) + time.sleep(0.5) # Small delay between requests + + if not measurements: + raise RuntimeError("No successful measurements") + + # Calculate aggregated stats + def _stats_for_measurement(metric: str) -> Stats: + values = [getattr(m, metric) for m in measurements] + return Stats.from_values(values) + + return HTTPTimingStats( + dns=_stats_for_measurement("dns"), + tcp_connect=_stats_for_measurement("tcp_connect"), + 
ssl_handshake=_stats_for_measurement("ssl_handshake"), + send=_stats_for_measurement("send"), + server_wait=_stats_for_measurement("server_wait"), + content=_stats_for_measurement("content"), + total=_stats_for_measurement("total"), + redirect=_stats_for_measurement("redirect"), + success_rate=len(measurements) / n_runs * 100, + ) + + def __make_request(self, url: str) -> Optional[HTTPTimings]: + """Get HTTP performance stats for a single request""" + + buff = BytesIO() + curl = Curl() + + opts = { + CurlOpt.URL: url.encode(), + CurlOpt.WRITEDATA: buff, + CurlOpt.FOLLOWLOCATION: 1, + CurlOpt.MAXREDIRS: self.max_redirects, + CurlOpt.CONNECTTIMEOUT: self.connect_timeout, + CurlOpt.TIMEOUT: self.total_timeout, + CurlOpt.SSL_VERIFYPEER: 1, + CurlOpt.SSL_VERIFYHOST: 2, + } + [curl.setopt(option, value) for option, value in opts.items()] + + try: + curl.perform() + + # Curl Timings https://curl.se/libcurl/c/curl_easy_getinfo.html#TIMES + # from start of request to stage (in microseconds) + namelookup_t = curl.getinfo(CurlInfo.NAMELOOKUP_TIME) # DNS lookup + connect_t = curl.getinfo(CurlInfo.CONNECT_TIME) + appconnect_t = curl.getinfo(CurlInfo.APPCONNECT_TIME) + pretransfer_t = curl.getinfo(CurlInfo.PRETRANSFER_TIME) + starttransfer_t = curl.getinfo(CurlInfo.STARTTRANSFER_TIME) # TTFB + total_t = curl.getinfo(CurlInfo.TOTAL_TIME) # total time or TTLB + redirect_t = curl.getinfo(CurlInfo.REDIRECT_TIME) + + # 1. Time spent resolving the host name + dns = namelookup_t + # 2. Time spent establishing a TCP connection + tcp_connect = connect_t - namelookup_t + # 3. Time spent performing the SSL handshake + ssl_handshake = appconnect_t - connect_t + # 4. Time spent sending the request + send = pretransfer_t - appconnect_t + # 5. Time spent waiting for server to send the first byte + server_wait = starttransfer_t - pretransfer_t + # 6. 
Time to download the response + content = total_t - starttransfer_t + + return HTTPTimings( + dns=dns * 1000, + tcp_connect=tcp_connect * 1000, + ssl_handshake=ssl_handshake * 1000, + send=send * 1000, + server_wait=server_wait * 1000, + content=content * 1000, + total=total_t * 1000, + redirect=redirect_t * 1000, + ) + except Exception as e: + raise e + finally: + curl.close() diff --git a/packages/syftbox/syftbox/client/benchmark/netstats_tcp.py b/packages/syftbox/syftbox/client/benchmark/netstats_tcp.py new file mode 100644 index 00000000000..364ded7ed74 --- /dev/null +++ b/packages/syftbox/syftbox/client/benchmark/netstats_tcp.py @@ -0,0 +1,173 @@ +import socket +import threading +import time +from collections import deque +from concurrent.futures import ThreadPoolExecutor +from contextlib import contextmanager +from dataclasses import dataclass +from datetime import datetime, timedelta + +from typing_extensions import Deque, Generator, Optional + +from syftbox.client.benchmark import Stats + + +@dataclass +class ConnectionMetadata: + timestamp: datetime + host: str + port: int + + +@dataclass +class TCPTimingStats: + """TCP performance metrics.""" + + latency_stats: Stats + jitter_stats: Stats + connection_success_rate: float + requests_per_minute: int + max_requests_per_minute: int + max_concurrent_connections: int + requests_in_last_minute: int + + +class RateLimiter: + """Manages connection rate limiting""" + + def __init__(self, max_requests_per_minute: int): + self.max_requests = max_requests_per_minute + self.requests: Deque[datetime] = deque() + self.lock = threading.Lock() + + def _clean_old_requests(self) -> None: + """Remove requests older than 1 minute""" + cutoff = datetime.now() - timedelta(minutes=1) + while self.requests and self.requests[0] < cutoff: + self.requests.popleft() + + @contextmanager + def rate_limit(self) -> Generator[None, None, None]: + """Context manager for rate limiting""" + with self.lock: + self._clean_old_requests() + while len(self.requests) >= self.max_requests: + time.sleep(0.1) + self._clean_old_requests() + self.requests.append(datetime.now()) + yield + + +@dataclass +class TCPConnection: + """Handles single TCP connection measurement""" + + host: str + port: int + timeout: float + previous_latency: Optional[float] = None + + def connect(self) -> tuple[float, float]: + """Establish TCP connection and measure performance""" + try: + start_time = time.time() + with socket.create_connection((self.host, self.port), timeout=self.timeout): + latency = (time.time() - start_time) * 1000 + + # Calculate jitter + jitter = 0.0 + if self.previous_latency is not None: + jitter = abs(latency - self.previous_latency) + + return latency, jitter + + except socket.error: + return -1.0, -1.0 + + +class TCPPerfStats: + """Measure TCP connection performance""" + + max_connections_per_minute: int = 30 + max_concurrent_connections: int = 3 + connection_timeout: float = 10.0 + min_delay_between_requests: float = 0.5 + + def __init__(self, host: str, port: int): + self.host = host + self.port = port + self.__post_init__() + + def __post_init__(self) -> None: + self.previous_latency: Optional[float] = None + self.jitter_values: list[float] = [] + self.request_history: Deque[ConnectionMetadata] = deque() + self.rate_limiter = RateLimiter(self.max_connections_per_minute) + self.connection_lock = threading.Lock() + + @contextmanager + def _connection_context(self) -> Generator[None, None, None]: + """Context manager for connection tracking""" + metadata = 
ConnectionMetadata(datetime.now(), self.host, self.port) + try: + with self.connection_lock: + self.request_history.append(metadata) + yield + finally: + # Clean old history + with self.connection_lock: + cutoff = datetime.now() - timedelta(minutes=1) + while self.request_history and self.request_history[0].timestamp < cutoff: + self.request_history.popleft() + + def measure_single_connection(self) -> tuple[float, float]: + """Measure a single TCP connection with rate limiting""" + with self.rate_limiter.rate_limit(): + with self._connection_context(): + conn = TCPConnection(self.host, self.port, self.connection_timeout, self.previous_latency) + latency, jitter = conn.connect() + + if latency >= 0: + self.previous_latency = latency + if jitter >= 0: + self.jitter_values.append(jitter) + + time.sleep(self.min_delay_between_requests) + return latency, jitter + + def get_stats(self, num_runs: int) -> TCPTimingStats: + """Perform multiple TCP connections and gather statistics.""" + latencies = [] + jitters = [] + + # Use ThreadPoolExecutor for parallel connections + with ThreadPoolExecutor(max_workers=self.max_concurrent_connections) as executor: + futures = [executor.submit(self.measure_single_connection) for _ in range(num_runs)] + + for future in futures: + try: + latency, jitter = future.result() + if latency >= 0: + latencies.append(latency) + if jitter >= 0: + jitters.append(jitter) + except Exception as e: + raise e + + if not latencies: + raise RuntimeError("No successful TCP measurements") + + return TCPTimingStats( + latency_stats=self._calculate_stats(latencies), + jitter_stats=self._calculate_stats(jitters), + connection_success_rate=len(latencies) / num_runs * 100, + requests_per_minute=len(self.request_history), + max_requests_per_minute=self.max_connections_per_minute, + max_concurrent_connections=self.max_concurrent_connections, + requests_in_last_minute=len(self.request_history), + ) + + def _calculate_stats(self, values: list[float]) -> Stats: + if not values: + return Stats(0, 0, 0, 0, 0, 0, 0) + return Stats.from_values(values) diff --git a/packages/syftbox/syftbox/client/benchmark/network.py b/packages/syftbox/syftbox/client/benchmark/network.py new file mode 100644 index 00000000000..8c3360d1bd7 --- /dev/null +++ b/packages/syftbox/syftbox/client/benchmark/network.py @@ -0,0 +1,85 @@ +from dataclasses import dataclass +from datetime import datetime, timezone +from urllib.parse import urlparse + +import requests + +from syftbox.client.benchmark import Benchmark, BenchmarkResult +from syftbox.client.benchmark.netstats_http import HTTPPerfStats, HTTPTimingStats +from syftbox.client.benchmark.netstats_tcp import TCPPerfStats, TCPTimingStats +from syftbox.lib.client_config import SyftClientConfig + + +class NetworkBenchmark(Benchmark): + """Class for collecting network performance metrics for a server.""" + + tcp_perf: TCPPerfStats + http_perf: HTTPPerfStats + + def __init__(self, config: SyftClientConfig): + self.url = str(config.server_url) + parsed = urlparse(self.url) + host = str(parsed.hostname) + port = parsed.port or (443 if parsed.scheme == "https" else 80) + self.tcp_perf = TCPPerfStats(host, port) + self.http_perf = HTTPPerfStats(self.url) + + def collect_metrics(self, num_runs: int) -> "NetworkBenchmarkResult": + """Calculate network performance metrics.""" + + # Check if the server is reachable + self.ping() + + # Collect HTTP performance stats + http_stats = self.http_perf.get_stats(num_runs) + + # Collect TCP performance stats + tcp_stats = 
self.tcp_perf.get_stats(num_runs) + + return NetworkBenchmarkResult( + timestamp=datetime.now(timezone.utc).isoformat(), + num_runs=num_runs, + url=self.url, + http_stats=http_stats, + tcp_stats=tcp_stats, + ) + + def ping(self) -> bool: + """Check if the server is reachable.""" + result = requests.get(str(self.url)) + result.raise_for_status() + return True + + +@dataclass +class NetworkBenchmarkResult(BenchmarkResult): + """Dataclass for network performance metrics.""" + + timestamp: str + url: str + http_stats: HTTPTimingStats + tcp_stats: TCPTimingStats + + def readable_report(self) -> str: + return ( + f"===== Network Benchmark =====\n" + f"Server URL : {self.url}\n" + f"Timestamp : {self.timestamp} UTC\n" + f"Runs : {self.num_runs}\n" + f"\n" + f"HTTP Timings\n" + f"DNS (ms) : {self.http_stats.dns}\n" + f"Connect (ms) : {self.http_stats.tcp_connect}\n" + f"SSL (ms) : {self.http_stats.ssl_handshake}\n" + f"Send (ms) : {self.http_stats.send}\n" + f"Server (ms) : {self.http_stats.server_wait}\n" + f"Download (ms) : {self.http_stats.content}\n" + f"Total Time (ms) : {self.http_stats.total}\n" + f"Redirects (ms) : {self.http_stats.redirect}\n" + f"Success Rate : {self.http_stats.success_rate} %\n" + "\n" + f"TCP Timings\n" + f"Latency (ms) : {self.tcp_stats.latency_stats}\n" + f"Jitter (ms) : {self.tcp_stats.jitter_stats}\n" + f"Success Rate : {self.tcp_stats.connection_success_rate} %" + ) diff --git a/packages/syftbox/syftbox/client/benchmark/report.py b/packages/syftbox/syftbox/client/benchmark/report.py new file mode 100644 index 00000000000..bbcf69bb0f1 --- /dev/null +++ b/packages/syftbox/syftbox/client/benchmark/report.py @@ -0,0 +1,38 @@ +import json +from pathlib import Path + +from syftbox.client.benchmark import BenchmarkReporter, BenchmarkResult + + +class JSONReport(BenchmarkReporter): + """JSON format benchmark report.""" + + def __init__(self, path: Path): + self.output_path = path / "benchmark_report.json" + + def generate(self, metrics: dict[str, BenchmarkResult]) -> None: + """Generate the benchmark report in JSON format.""" + + report: dict = {"result": {}} + + for name, metric in metrics.items(): + report["result"][name] = metric.dict_report() + + with open(self.output_path, "w") as fp: + json.dump(report, fp, indent=4) + + print("Benchmark result saved at: " + str(self.output_path)) + + +class ConsoleReport(BenchmarkReporter): + """Human readable format benchmark report""" + + def generate(self, metrics: dict[str, BenchmarkResult]) -> None: + """Generate the benchmark report in human readable format.""" + + report = [] + for name, metric in metrics.items(): + report.append(metric.readable_report()) + + print("\n") + print("\n\n".join(report)) diff --git a/packages/syftbox/syftbox/client/benchmark/runner.py b/packages/syftbox/syftbox/client/benchmark/runner.py new file mode 100644 index 00000000000..f2f4d0b1b6b --- /dev/null +++ b/packages/syftbox/syftbox/client/benchmark/runner.py @@ -0,0 +1,53 @@ +"""Benchmark class for Syft client.""" + +from typing import Type + +from rich.progress import Progress, SpinnerColumn + +from syftbox.client.benchmark import Benchmark, BenchmarkReporter +from syftbox.client.benchmark.network import NetworkBenchmark +from syftbox.client.benchmark.sync import SyncBenchmark +from syftbox.lib.client_config import SyftClientConfig + + +class SyftBenchmarkRunner: + """Class to run the benchmark tests for the SyftBox client.""" + + def __init__( + self, + config: SyftClientConfig, + reporter: BenchmarkReporter, + ): + self.config = config + 
self.reporter = reporter + + def get_collectors(self) -> dict[str, Type[Benchmark]]: + """Get the metric collectors for the benchmark tests.""" + return { + "network": NetworkBenchmark, + "sync": SyncBenchmark, + } + + def run(self, num_runs: int) -> None: + """Run the benchmark tests.""" + + # Get the metric collectors + collectors = self.get_collectors() + + # Collect all metrics + metrics = {} + for name, collector in collectors.items(): + collector_instance = collector(self.config) + try: + with Progress( + SpinnerColumn(), + "{task.description}", + ) as progress: + task = progress.add_task(f"Collecting {name.capitalize()} metrics", total=1) + metrics[name] = collector_instance.collect_metrics(num_runs) + progress.update(task, completed=True, description=f"Collected {name.capitalize()} metrics") + except Exception as e: + print(f"Failed to collect metrics for {name}: {e}") + + # Generate the benchmark report + self.reporter.generate(metrics) diff --git a/packages/syftbox/syftbox/client/benchmark/sync.py b/packages/syftbox/syftbox/client/benchmark/sync.py new file mode 100644 index 00000000000..efc2f4415f5 --- /dev/null +++ b/packages/syftbox/syftbox/client/benchmark/sync.py @@ -0,0 +1,63 @@ +from dataclasses import dataclass + +from syftbox.client.benchmark import Benchmark, BenchmarkResult +from syftbox.client.benchmark.syncstats import DataTransferStats, SyncDataTransferStats +from syftbox.lib.client_config import SyftClientConfig + + +@dataclass +class SyncBenchmarkResult(BenchmarkResult): + """Dataclass for sync upload/download performance metrics.""" + + url: str + """URL of the server""" + + file_size_stats: list[DataTransferStats] + """Data transfer statistics for different file sizes""" + + def readable_report(self) -> str: + """Generate a human-readable report of the sync benchmark results""" + + report = f"\n===== Sync Benchmark =====\nServer URL : {self.url}\nRuns: {self.num_runs}\n" + for stats in self.file_size_stats: + report += ( + f"\n" + f"File Size: {stats.file_size_mb} MB\n" + f"Upload Timings (ms): {stats.upload}\n" + f"Download Timings (ms): {stats.download}\n" + f"Success Rate: {100 * stats.successful_runs/stats.total_runs} %\n" + ) + + return report + + +class SyncBenchmark(Benchmark): + """Class for collecting sync performance metrics for a server""" + + BENCHMARK_FILE_SIZES = [1, 5, 9] # MB + sync_perf: SyncDataTransferStats + + def __init__(self, config: SyftClientConfig): + self.config = config + self.url = str(config.server_url) + self.sync_perf = SyncDataTransferStats( + url=self.url, + token=str(config.access_token), + email=config.email, + ) + + def collect_metrics(self, num_runs: int) -> SyncBenchmarkResult: + """Collect and compile performance metrics for different file sizes""" + + performance_results: list[DataTransferStats] = [] + + # Collect performance metrics for different file sizes + for size_mb in self.BENCHMARK_FILE_SIZES: + stats = self.sync_perf.get_stats(size_mb, num_runs) + performance_results.append(stats) + + return SyncBenchmarkResult( + url=self.url, + file_size_stats=performance_results, + num_runs=num_runs, + ) diff --git a/packages/syftbox/syftbox/client/benchmark/syncstats.py b/packages/syftbox/syftbox/client/benchmark/syncstats.py new file mode 100644 index 00000000000..3dcde68ec32 --- /dev/null +++ b/packages/syftbox/syftbox/client/benchmark/syncstats.py @@ -0,0 +1,156 @@ +import time +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Optional +from urllib.parse import urljoin +from uuid import 
uuid4 + +import requests + +from syftbox.client.benchmark import Stats + + +def generate_byte_string(size_mb: int) -> bytes: + """Generate a sample byte string of the specified size""" + return b"\0" * int(1024 * 1024 * size_mb) + + +def random_filename(size_mb: int) -> str: + """Generate a random filename based on the size""" + return f"{size_mb}mb-{uuid4().hex[:8]}.bytes" + + +@dataclass +class DataTransferStats: + """Data transfer statistics for a specific file size""" + + file_size_mb: int + """Size of the file in MB""" + upload: Stats + """Time taken to upload the file""" + download: Stats + """Time taken to download the file""" + successful_runs: int + """Number of successful runs""" + total_runs: int + """Total number of attempted runs""" + + +@dataclass +class FileTransferDuration: + """Time taken to transfer a file""" + + upload: float + """ Time taken to upload the file in milliseconds""" + download: float + """ Time taken to download the file in milliseconds""" + + +class SyncDataTransferStats: + """Measure the data transfer performance of sync operations""" + + def __init__(self, url: str, token: str, email: str): + """Initialize the server URL, token, and email""" + self.url = url + self.token = token + self.email = email + + def __make_request( + self, + path: str, + ignore_errors: bool = False, + **kwargs: Any, + ) -> float: + """Make a request to the server and measure the time taken""" + headers = {"Authorization": f"Bearer {self.token}", "email": self.email} + start_time = time.time() + url = str(urljoin(self.url, path)) + response = requests.post( + url=url, + headers=headers, + **kwargs, + ) + if not ignore_errors: + response.raise_for_status() + return (time.time() - start_time) * 1000 + + def upload_file(self, filepath: str, data: bytes) -> float: + """Upload a file to the server and measure the time taken""" + files = {"file": (filepath, data, "plain/text")} + return self.__make_request( + "/sync/create/", + files=files, + ) + + def download_file(self, filepath: str) -> float: + """Download a file from the server and measure the time taken""" + return self.__make_request("/sync/download/", json={"path": filepath}) + + def delete_file(self, filepath: str) -> float: + """Delete a file from the server and measure the time taken""" + return self.__make_request("/sync/delete/", json={"path": filepath}, ignore_errors=True) + + def measure_file_transfer(self, file_size_mb: int) -> Optional[FileTransferDuration]: + """Measure time taken to upload and download a file of the specified size""" + filepath = Path(self.email) / "benchmark" / random_filename(file_size_mb) + file_bytes = generate_byte_string(file_size_mb) + + try: + # Delete the file if it already exists + self.delete_file(str(filepath)) + + # Measure the time taken to upload the file + upload_time = self.upload_file(str(filepath), file_bytes) + + # Measure the time taken to download the file + download_time = self.download_file(str(filepath)) + + return FileTransferDuration( + upload=upload_time, + download=download_time, + ) + except Exception as e: + print(f"Error during file transfer: {str(e)}") + return None + finally: + # Delete the file after the test + try: + self.delete_file(str(filepath)) + except Exception as e: + print(f"Error deleting file: {str(e)}") + + def get_stats(self, file_size_mb: int, num_runs: int) -> DataTransferStats: + """Get data transfer statistics for a specific file size""" + # Collect measurements for each run + measurements: list[FileTransferDuration] = [] + successful_runs = 0 + + 
for run in range(num_runs): + try: + file_transfer_duration = self.measure_file_transfer(file_size_mb) + + if file_transfer_duration is not None: + measurements.append(file_transfer_duration) + successful_runs += 1 + + if run < num_runs - 1: + # Wait for a short interval between runs + time.sleep(5) + except Exception as e: + print(f"Error during run {run + 1}: {str(e)}") + continue + + if not measurements: + raise RuntimeError(f"All {num_runs} runs failed. No statistics available.") + + # Calculate statistics from the measurements + def get_values(attr: str) -> list[float]: + return [getattr(duration, attr) for duration in measurements] + + return DataTransferStats( + file_size_mb=file_size_mb, + upload=Stats.from_values(get_values("upload")), + download=Stats.from_values(get_values("download")), + successful_runs=successful_runs, + total_runs=num_runs, + ) diff --git a/packages/syftbox/syftbox/client/cli.py b/packages/syftbox/syftbox/client/cli.py new file mode 100644 index 00000000000..46165307ad0 --- /dev/null +++ b/packages/syftbox/syftbox/client/cli.py @@ -0,0 +1,186 @@ +from pathlib import Path + +from rich import print as rprint +from typer import Context, Exit, Option, Typer +from typing_extensions import Annotated, Optional + +from syftbox.lib.constants import ( + DEFAULT_BENCHMARK_RUNS, + DEFAULT_CONFIG_PATH, + DEFAULT_DATA_DIR, + DEFAULT_PORT, + DEFAULT_SERVER_URL, +) + +app = Typer( + name="SyftBox Client", + pretty_exceptions_enable=False, + add_completion=False, + context_settings={"help_option_names": ["-h", "--help"]}, +) + +# Define options separately to keep the function signature clean +# fmt: off + +# client commands opts +CLIENT_PANEL = "Client Options" +LOCAL_SERVER_PANEL = "Local Server Options" + +EMAIL_OPTS = Option( + "-e", "--email", + rich_help_panel=CLIENT_PANEL, + help="Email for the SyftBox datasite", +) +SERVER_OPTS = Option( + "-s", "--server", + rich_help_panel=CLIENT_PANEL, + help="SyftBox cache server URL", +) +DATA_DIR_OPTS = Option( + "-d", "--data-dir", "--sync_folder", + rich_help_panel=CLIENT_PANEL, + help="Directory where SyftBox stores data", +) +CONFIG_OPTS = Option( + "-c", "--config", "--config_path", + rich_help_panel=CLIENT_PANEL, + help="Path to SyftBox configuration file", +) +OPEN_OPTS = Option( + is_flag=True, + rich_help_panel=CLIENT_PANEL, + help="Open SyftBox sync/data dir folder on client start", +) +PORT_OPTS = Option( + "-p", "--port", + rich_help_panel=LOCAL_SERVER_PANEL, + help="Local port for the SyftBox client", +) +RELOAD_OPTS = Option( + rich_help_panel=LOCAL_SERVER_PANEL, + help="Run server in hot reload. 
Should not see this in production", +) +VERBOSE_OPTS = Option( + "-v", "--verbose", + is_flag=True, + help="Enable verbose mode", +) + + + +TOKEN_OPTS = Option( + "--token", + help="Token for password reset", +) + +# report command opts +REPORT_PATH_OPTS = Option( + "-o", "--output-dir", + help="Directory to save the report file", +) + +# benchmark command opts +JSON_BENCHMARK_REPORT_OPTS = Option( + "--json", "-j", + help="Path where benchmark report will be stored in JSON format", +) + +# fmt: on + + +@app.callback(invoke_without_command=True) +def client( + ctx: Context, + data_dir: Annotated[Path, DATA_DIR_OPTS] = DEFAULT_DATA_DIR, + email: Annotated[str, EMAIL_OPTS] = "", + server: Annotated[str, SERVER_OPTS] = DEFAULT_SERVER_URL, + config_path: Annotated[Path, CONFIG_OPTS] = DEFAULT_CONFIG_PATH, + port: Annotated[int, PORT_OPTS] = DEFAULT_PORT, + open_dir: Annotated[bool, OPEN_OPTS] = True, + verbose: Annotated[bool, VERBOSE_OPTS] = False, +) -> None: + """Run the SyftBox client""" + + if ctx.invoked_subcommand is not None: + # If a subcommand is being invoked, just return + return + + # lazy import to imporve cli startup speed + from syftbox.client.cli_setup import get_migration_decision, setup_config_interactive + from syftbox.client.core import run_syftbox + from syftbox.client.utils.net import get_free_port, is_port_in_use + + if port == 0: + port = get_free_port() + elif is_port_in_use(port): + # new_port = get_free_port() + # port = new_port + rprint(f"[bold red]Error:[/bold red] Client cannot start because port {port} is already in use!") + raise Exit(1) + + client_config = setup_config_interactive(config_path, email, data_dir, server, port) + + migrate_datasite = get_migration_decision(client_config.data_dir) + + log_level = "DEBUG" if verbose else "INFO" + code = run_syftbox( + client_config=client_config, + open_dir=open_dir, + log_level=log_level, + migrate_datasite=migrate_datasite, + ) + raise Exit(code) + + +@app.command() +def report( + output_path: Annotated[Path, REPORT_PATH_OPTS] = Path(".").resolve(), + config_path: Annotated[Path, CONFIG_OPTS] = DEFAULT_CONFIG_PATH, +) -> None: + """Generate a report of the SyftBox client""" + from datetime import datetime + + from syftbox.client.logger import zip_logs + from syftbox.lib.client_config import SyftClientConfig + + try: + config = SyftClientConfig.load(config_path) + name = f"syftbox_logs_{datetime.now().strftime('%Y_%m_%d_%H%M')}" + output_path = Path(output_path, name).resolve() + output_path_with_extension = zip_logs(output_path, log_dir=config.data_dir / "logs") + rprint(f"Logs from {config.data_dir} saved at {output_path_with_extension}.") + except Exception as e: + rprint(f"[red]Error[/red]: {e}") + raise Exit(1) + + +@app.command() +def benchmark( + config_path: Annotated[Path, CONFIG_OPTS] = DEFAULT_CONFIG_PATH, + json: Annotated[Optional[Path], JSON_BENCHMARK_REPORT_OPTS] = None, + num_runs: int = DEFAULT_BENCHMARK_RUNS, +) -> None: + """Run the SyftBox benchmark""" + + # Lazy import to improve cli startup speed + from syftbox.client.benchmark.report import ConsoleReport, JSONReport + from syftbox.client.benchmark.runner import SyftBenchmarkRunner + from syftbox.lib.client_config import SyftClientConfig + + try: + print("Running benchmarks") + config = SyftClientConfig.load(config_path) + benchmark_reporter = JSONReport(json) if json else ConsoleReport() + benchmark_runner = SyftBenchmarkRunner(config, benchmark_reporter) + benchmark_runner.run(num_runs) + except Exception as e: + rprint(f"[red]Error[/red]: 
{e}") + raise e + + +def main() -> None: + app() + + +if __name__ == "__main__": + main() diff --git a/packages/syftbox/syftbox/client/cli_setup.py b/packages/syftbox/syftbox/client/cli_setup.py new file mode 100644 index 00000000000..762da9fd94c --- /dev/null +++ b/packages/syftbox/syftbox/client/cli_setup.py @@ -0,0 +1,182 @@ +""" +SyftBox CLI - Setup scripts +""" + +import json +import shutil +from pathlib import Path + +import httpx +import typer +from rich import print as rprint +from rich.prompt import Confirm, Prompt +from typing_extensions import Optional + +from syftbox import __version__ +from syftbox.client.auth import authenticate_user +from syftbox.client.core import METADATA_FILENAME +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.constants import DEFAULT_DATA_DIR +from syftbox.lib.exceptions import ClientConfigException +from syftbox.lib.http import HEADER_SYFTBOX_USER, SYFTBOX_HEADERS +from syftbox.lib.validators import DIR_NOT_EMPTY, is_valid_dir, is_valid_email +from syftbox.lib.workspace import SyftWorkspace + +__all__ = ["setup_config_interactive"] + + +def is_empty(data_dir: Path) -> bool: + """True if the data_dir is empty""" + return not any(data_dir.iterdir()) + + +def has_old_syftbox_version(data_dir: Path) -> bool: + """True if the data_dir was created with an older version of SyftBox""" + metadata_file = data_dir / METADATA_FILENAME + if not metadata_file.exists(): + return True + metadata = json.loads(metadata_file.read_text()) + current_version = __version__ + old_version = metadata.get("version", None) + return old_version != current_version + + +def prompt_delete_old_data_dir(data_dir: Path) -> bool: + msg = f"[yellow]Found old SyftBox folder at {data_dir}.[/yellow]\n" + msg += "[yellow]Press Y to remove the old folder and download it from the server [bold](recommended)[/bold]. Press N to keep the old folder and migrate it.[/yellow]" + return Confirm.ask(msg) + + +def get_migration_decision(data_dir: Path) -> bool: + migrate_datasite = False + if data_dir.exists(): + if is_empty(data_dir): + migrate_datasite = False + elif has_old_syftbox_version(data_dir): + # we need this extra if because we do 2 things: + # 1. determine if we want to remove + # 2. determine if we want to migrate + if prompt_delete_old_data_dir(data_dir): + rprint("Removing old syftbox folder") + apps_dir = SyftWorkspace(data_dir).apps + paths_to_exclude = [apps_dir] + # Remove everything except the paths in paths_to_exclude + for item in data_dir.iterdir(): + if item not in paths_to_exclude: + if item.is_dir(): + shutil.rmtree(item) + else: + item.unlink() + migrate_datasite = False + else: + migrate_datasite = True + return migrate_datasite + + +def setup_config_interactive( + config_path: Path, + email: str, + data_dir: Path, + server: str, + port: int, + skip_auth: bool = False, + skip_verify_install: bool = False, +) -> SyftClientConfig: + """Setup the client configuration interactively. 
Called from CLI""" + + config_path = config_path.expanduser().resolve() + conf: Optional[SyftClientConfig] = None + if data_dir: + data_dir = data_dir.expanduser().resolve() + + # try to load the existing config + try: + conf = SyftClientConfig.load(config_path) + except ClientConfigException: + pass + + if not conf: + # first time setup + if not data_dir or data_dir == DEFAULT_DATA_DIR: + data_dir = prompt_data_dir() + + if not email: + email = prompt_email() + + # create a new config with the input params + conf = SyftClientConfig( + path=config_path, + sync_folder=data_dir, + email=email, + server_url=server, + port=port, + ) + else: + if server and server != conf.server_url: + conf.set_server_url(server) + if port != conf.client_url.port: + conf.set_port(port) + + # Short-lived client for all pre-authentication requests + login_client = httpx.Client( + base_url=str(conf.server_url), + headers={ + **SYFTBOX_HEADERS, + HEADER_SYFTBOX_USER: conf.email, + }, + transport=httpx.HTTPTransport(retries=10), + ) + if not skip_verify_install: + verify_installation(conf, login_client) + + if not skip_auth: + conf.access_token = authenticate_user(conf, login_client) + + # DO NOT SAVE THE CONFIG HERE. + # We don't know if the client will accept the config yet + return conf + + +def prompt_data_dir(default_dir: Path = DEFAULT_DATA_DIR) -> Path: + prompt_dir = "[bold]Where do you want SyftBox to store data?[/bold] [grey70]Press Enter for default[/grey70]" + prompt_overwrite = "[bold yellow]Directory '{sync_folder}' is not empty![/bold yellow] Do you want to overwrite it?" + + while True: + sync_folder = Prompt.ask(prompt_dir, default=str(default_dir)) + valid, reason = is_valid_dir(sync_folder) + if reason == DIR_NOT_EMPTY: + overwrite = Confirm.ask(prompt_overwrite.format(sync_folder=sync_folder)) + if not overwrite: + continue + valid = True + + if not valid: + rprint(f"[bold red]{reason}[/bold red] '{sync_folder}'") + continue + + path = Path(sync_folder).expanduser().resolve() + rprint(f"Selected directory [bold]'{path}'[/bold]") + return path + + +def prompt_email() -> str: + while True: + email = Prompt.ask("[bold]Enter your email address[/bold]") + if not is_valid_email(email): + rprint(f"[bold red]Invalid email[/bold red]: '{email}'") + continue + return email + + +def verify_installation(conf: SyftClientConfig, client: httpx.Client) -> None: + try: + response = client.get("/info?verify_installation=1") + + response.raise_for_status() + + except (httpx.HTTPError, KeyError): + should_continue = Confirm.ask( + "\n[bold red]Could not connect to the SyftBox server, continue anyway?[/bold red]" + ) + if not should_continue: + raise typer.Exit() diff --git a/packages/syftbox/syftbox/client/core.py b/packages/syftbox/syftbox/client/core.py new file mode 100644 index 00000000000..ab5e6faa5b5 --- /dev/null +++ b/packages/syftbox/syftbox/client/core.py @@ -0,0 +1,334 @@ +import asyncio +import json +import platform +import shutil +from pathlib import Path +from types import TracebackType + +import uvicorn +from httpx import BaseTransport +from loguru import logger +from pid import PidFile, PidFileAlreadyLockedError, PidFileAlreadyRunningError +from typing_extensions import Optional, Type + +from syftbox import __version__ +from syftbox.client.api import create_api +from syftbox.client.base import PluginManagerInterface, SyftBoxContextInterface +from syftbox.client.env import syftbox_env +from syftbox.client.exceptions import SyftBoxAlreadyRunning +from syftbox.client.logger import setup_logger +from 
syftbox.client.plugin_manager import PluginManager +from syftbox.client.server_client import SyftBoxClient +from syftbox.client.utils import error_reporting, file_manager, macos +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.datasite import create_datasite +from syftbox.lib.exceptions import SyftBoxException +from syftbox.lib.ignore import IGNORE_FILENAME +from syftbox.lib.platform import OS_NAME, OS_VERSION, PYTHON_VERSION +from syftbox.lib.workspace import SyftWorkspace + +SCRIPT_DIR = Path(__file__).parent +ASSETS_FOLDER = SCRIPT_DIR.parent / "assets" +ICON_FOLDER = ASSETS_FOLDER / "icon" +METADATA_FILENAME = ".metadata.json" + + +class SyftBoxRunner: + """The local SyftBox instance. + + This is the main SyftBox instance that handles workspace data, server + communication, and local API services. Only one instance can run + for a given workspace directory. + + Warning: + This class should not be imported directly by sub-systems. + Use the provided interfaces and context objects instead. + + Raises: + SyftBoxAlreadyRunning: If another client is already running for the same workspace + Exception: If the client fails to start due to any reason + """ + + def __init__( + self, + config: SyftClientConfig, + log_level: str = "INFO", + server_transport: Optional[BaseTransport] = None, + **kwargs: dict, + ) -> None: + self.config = config + self.log_level = log_level + + self.workspace = SyftWorkspace(self.config.data_dir) + self.pid = PidFile(pidname="syftbox.pid", piddir=self.workspace.data_dir) + self.client = SyftBoxClient.from_config(self.config, transport=server_transport) + + # create a single client context shared across components + self.__ctx = SyftBoxContext( + self.config, + self.workspace, + client=self.client, + plugins=None, + ) + self.plugins = PluginManager(self.__ctx, sync_manager=None, app_runner=None, **kwargs) + # make plugins available to the context + self.__ctx.plugins = self.plugins + + # kwargs for making customization/unit testing easier + # this will be replaced with a sophisticated plugin system + self.__local_server: uvicorn.Server = None + + @property + def is_registered(self) -> bool: + """Check if the current user is registered with the server""" + return bool(self.config.token) + + @property + def datasite(self) -> Path: + """The datasite of the current user""" + return self.workspace.datasites / self.config.email + + @property + def public_dir(self) -> Path: + """The public directory in the datasite of the current user""" + return self.datasite / "public" + + @property + def context(self) -> "SyftBoxContext": + return self.__ctx + + def start(self) -> None: + try: + self.pid.create() + except PidFileAlreadyLockedError: + raise SyftBoxAlreadyRunning(f"Another instance of SyftBox is running on {self.config.data_dir}") + self.create_metadata_file() + + logger.info("Started SyftBox") + + self.config.save() # commit config changes (like migration) to disk after PID is created + self.workspace.mkdirs() # create the workspace directories + self.register_self() # register the email with the server + self.init_datasite() # init the datasite on local machine + + # start plugins/components + self.plugins.start() + return self.__run_local_server() + + @property + def metadata_path(self) -> Path: + return self.workspace.data_dir / METADATA_FILENAME + + def create_metadata_file(self) -> None: + metadata_json = self.config.model_dump(mode="json") + metadata_json["version"] = __version__ + self.metadata_path.write_text(json.dumps(metadata_json, 
indent=2)) + + def shutdown(self) -> None: + if self.__local_server: + _result = asyncio.run(self.__local_server.shutdown()) + + self.plugins.stop() + + self.pid.close() + logger.info("SyftBox shutdown complete") + + def check_pidfile(self) -> str: + """Check if another instance of SyftBox is running""" + + try: + return self.pid.check() + except PidFileAlreadyRunningError: + raise SyftBoxAlreadyRunning(f"Another instance of SyftBox is running on {self.config.data_dir}") + + def init_datasite(self) -> None: + if self.datasite.exists(): + return + create_datasite(self.context) + + def register_self(self) -> None: + """Register the user's email with the SyftBox cache server""" + if self.is_registered: + return + try: + token = self.client.register(self.config.email) + # TODO + FIXME - once we have JWT, we should not store token in config! + # ideally in OS keychain (using keyring) or + # in a separate location under self.workspace.plugins + self.config.token = str(token) + self.config.save() + logger.info("Email registration successful") + except Exception as e: + raise SyftBoxException(f"Failed to register with the server - {e}") from e + + def __run_local_server(self) -> None: + logger.info(f"Starting local server on {self.config.client_url}") + app = create_api(self.__ctx) + self.__local_server = uvicorn.Server( + config=uvicorn.Config( + app=app, + host=self.config.client_url.host, + port=self.config.client_url.port, + log_level=self.log_level.lower(), + ) + ) + return self.__local_server.run() + + # utils + def open_datasites_dir(self) -> None: + file_manager.open_dir(str(self.workspace.datasites)) + + def copy_icons(self) -> None: + self.workspace.mkdirs() + if platform.system() == "Darwin": + macos.copy_icon_file(ICON_FOLDER, self.workspace.data_dir) + + def log_system_info(self) -> None: + self.client.log_analytics_event( + event_name="system_info", + os_name=OS_NAME, + os_version=OS_VERSION, + syftbox_version=__version__, + python_version=PYTHON_VERSION, + ) + + def __enter__(self) -> "SyftBoxRunner": + return self + + def __exit__( + self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType] + ) -> None: + self.shutdown() + + +class SyftBoxContext(SyftBoxContextInterface): + """ + Provides a light-weight context object for sub-systems to interact with. + It will be instantiated by SyftBoxRunner, but sub-systems can freely pass it around.
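+
+    A minimal usage sketch (illustrative only; the email value is hypothetical):
+
+        ctx.email          # "alice@example.com"
+        ctx.my_datasite    # <data_dir>/datasites/alice@example.com
+        ctx.all_datasites  # names of all datasite folders (directories containing "@")
+        ctx.client         # SyftBoxClient used for server communication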
+ """ + + def __init__( + self, + config: SyftClientConfig, + workspace: SyftWorkspace, + client: SyftBoxClient, + plugins: Optional[PluginManagerInterface], + ): + self.config = config + self.workspace = workspace + self.client = client + self.plugins = plugins + + @property + def email(self) -> str: + return self.config.email + + @property + def my_datasite(self) -> Path: + return self.workspace.datasites / self.config.email + + @property + def all_datasites(self) -> list[str]: + """List all datasites in the workspace""" + return [d.name for d in self.workspace.datasites.iterdir() if (d.is_dir() and "@" in d.name)] + + def __repr__(self) -> str: + return f"SyftBoxContext<{self.config.email}, {self.config.data_dir.as_posix()}>" + + +def run_apps_to_api_migration(new_ws: SyftWorkspace) -> None: + old_sync_folder = new_ws.data_dir + old_apps_dir = old_sync_folder / "apps" + new_apps_dir = new_ws.apps + + if old_apps_dir.exists(): + logger.info(f"Migrating directory apps —> {new_apps_dir.relative_to(new_ws.data_dir)}...") + if new_apps_dir.exists(): + shutil.rmtree(new_apps_dir) + shutil.move(str(old_apps_dir), str(new_apps_dir)) + + +def run_migration(config: SyftClientConfig, migrate_datasite: bool = True) -> None: + # first run config migration + config.migrate() + + # then run workspace migration + new_ws = SyftWorkspace(config.data_dir) + + # migrate workspace/apps to workspace/apis + run_apps_to_api_migration(new_ws) + + # check for old dir structure and migrate to new + # data_dir == sync_folder + old_sync_folder = new_ws.data_dir + old_datasite_path = Path(old_sync_folder, config.email) + + if not migrate_datasite: + return + + # Option 2: if syftbox folder has old structure, migrate to new + if old_datasite_path.exists(): + logger.info("Migrating to new datasite structure") + new_ws.mkdirs() + + # create the datasites directory & move all under it + for dir in old_sync_folder.glob("*@*"): + shutil.move(str(dir), str(new_ws.datasites)) + + # move syftignore file + old_ignore_file = old_sync_folder / IGNORE_FILENAME + if old_ignore_file.exists(): + shutil.move(str(old_ignore_file), str(new_ws.datasites / IGNORE_FILENAME)) + + # move old sync state file + old_sync_state = old_sync_folder / ".syft" / "local_syncstate.json" + if old_sync_state.exists(): + shutil.move(str(old_sync_state), str(new_ws.plugins / "local_syncstate.json")) + if old_sync_state.parent.exists(): + shutil.rmtree(str(old_sync_state.parent)) + + +def run_syftbox( + client_config: SyftClientConfig, + open_dir: bool = False, + log_level: str = "INFO", + migrate_datasite: bool = True, +) -> int: + """Run the SyftBox client""" + syftbox_instance = None + + setup_logger(log_level, log_dir=client_config.data_dir / "logs") + + error_config = error_reporting.make_error_report(client_config) + logger.info( + f"Client metadata\n{error_config.model_dump_json(indent=2, exclude={'client_config': {'access_token'}})}" + ) + + # a flag to disable icons + # GitHub CI needs to zip sync dir in tests and fails when it encounters Icon\r files + if syftbox_env.DISABLE_ICONS: + logger.debug("Directory icons are disabled") + + try: + syftbox_instance = SyftBoxRunner(client_config, log_level=log_level) + # we don't want to run migration if another instance of client is already running + if syftbox_instance.check_pidfile(): + run_migration(client_config, migrate_datasite=migrate_datasite) + if not syftbox_env.DISABLE_ICONS: + syftbox_instance.copy_icons() + if open_dir: + syftbox_instance.open_datasites_dir() + 
syftbox_instance.log_system_info() + syftbox_instance.start() + except SyftBoxAlreadyRunning as e: + logger.error(e) + return -1 + except KeyboardInterrupt: + logger.info("Received keyboard interrupt. Shutting down the client") + except Exception as e: + logger.exception("Unhandled exception when starting the client", e) + return -2 + finally: + if syftbox_instance is not None: + syftbox_instance.shutdown() + return 0 diff --git a/packages/syftbox/syftbox/client/env.py b/packages/syftbox/syftbox/client/env.py new file mode 100644 index 00000000000..82e27f78090 --- /dev/null +++ b/packages/syftbox/syftbox/client/env.py @@ -0,0 +1,23 @@ +from pathlib import Path + +from pydantic import Field +from pydantic_settings import BaseSettings, SettingsConfigDict + +from syftbox.lib.constants import DEFAULT_CONFIG_PATH + +__all__ = ["syftbox_env"] + + +class SyftEnvVars(BaseSettings): + """SyftBox environment variables.""" + + DISABLE_ICONS: bool = Field(default=False) + """Disable copying icons to the datasite dir.""" + + CLIENT_CONFIG_PATH: Path = Field(default=DEFAULT_CONFIG_PATH) + """Path to the client configuration file.""" + + model_config = SettingsConfigDict(env_file="client.env", env_prefix="SYFTBOX_") + + +syftbox_env = SyftEnvVars() diff --git a/packages/syftbox/syftbox/client/exceptions.py b/packages/syftbox/syftbox/client/exceptions.py new file mode 100644 index 00000000000..5984070b52b --- /dev/null +++ b/packages/syftbox/syftbox/client/exceptions.py @@ -0,0 +1,39 @@ +from typing import Optional + +from syftbox.lib.exceptions import SyftBoxException + + +class SyftPluginException(SyftBoxException): + pass + + +class SyftInitializationError(SyftBoxException): + pass + + +class SyftBoxAlreadyRunning(SyftBoxException): + pass + + +class SyftServerError(SyftBoxException): + pass + + +class SyftServerTooOld(SyftBoxException): + pass + + +class SyftAuthenticationError(SyftServerError): + default_message = "Authentication failed, please log in again." 
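+
+    # Note (summary of behaviour defined elsewhere in this changeset): the sync
+    # manager treats this error as unrecoverable. SyncManager.check_server_status()
+    # wraps it in a FatalSyncError, which breaks out of the sync loop.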
+ + def __init__(self, message: Optional[str] = None): + message = self.default_message if message is None else message + super().__init__(message) + + +class SyftNotFound(SyftServerError): + pass + + +class SyftPermissionError(SyftServerError): + pass diff --git a/packages/syftbox/syftbox/client/logger.py b/packages/syftbox/syftbox/client/logger.py new file mode 100644 index 00000000000..642c061a005 --- /dev/null +++ b/packages/syftbox/syftbox/client/logger.py @@ -0,0 +1,46 @@ +import sys +from datetime import datetime +from pathlib import Path +from shutil import make_archive +from typing import Union + +import loguru +from loguru import logger + +from syftbox.lib.constants import DEFAULT_LOGS_DIR +from syftbox.lib.types import PathLike, to_path + +LOGS_FORMAT = loguru + + +def setup_logger( + level: Union[str, int] = "DEBUG", + log_dir: PathLike = DEFAULT_LOGS_DIR, + keep_logs: int = 10, +) -> None: + logger.remove() + logger.add(level=level, sink=sys.stderr, diagnose=False, backtrace=False) + + # new file per run - no rotation needed + # always log debug level + log_file = Path(log_dir, f"syftbox_{int(datetime.now().timestamp())}.log") + logger.add( + log_file, + level="DEBUG", + rotation=None, + compression=None, + colorize=True, + ) + + # keep only the most recent `keep_logs` log files + logs_to_delete = sorted(Path(log_dir).glob("syftbox_*.log"))[:-keep_logs] + for log in logs_to_delete: + try: + log.unlink() + except Exception: + pass + + +def zip_logs(output_path: PathLike, log_dir: PathLike = DEFAULT_LOGS_DIR) -> str: + logs_folder = to_path(log_dir) + return make_archive(str(output_path), "zip", logs_folder) diff --git a/packages/syftbox/syftbox/client/plugin_manager.py b/packages/syftbox/syftbox/client/plugin_manager.py new file mode 100644 index 00000000000..49610011db8 --- /dev/null +++ b/packages/syftbox/syftbox/client/plugin_manager.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +from typing_extensions import Optional + +from syftbox.client.base import PluginManagerInterface, SyftBoxContextInterface +from syftbox.client.exceptions import SyftPluginException +from syftbox.client.plugins.apps import AppRunner +from syftbox.client.plugins.sync.manager import SyncManager + + +class PluginManager(PluginManagerInterface): + def __init__( + self, + context: SyftBoxContextInterface, + sync_manager: Optional[SyncManager] = None, + app_runner: Optional[AppRunner] = None, + **kwargs: dict, + ) -> None: + self.__context = context + self.__sync_manager = sync_manager + self.__app_runner = app_runner + + @property + def sync_manager(self) -> SyncManager: + """the sync manager. lazily initialized""" + if self.__sync_manager is None: + try: + self.__sync_manager = SyncManager(self.__context) + except Exception as e: + raise SyftPluginException(f"Failed to initialize sync manager - {e}") from e + return self.__sync_manager + + @property + def app_runner(self) -> AppRunner: + """the app runner.
lazily initialized""" + if self.__app_runner is None: + try: + self.__app_runner = AppRunner(self.__context) + except Exception as e: + raise SyftPluginException(f"Failed to initialize app runner - {e}") from e + return self.__app_runner + + def start(self) -> None: + self.sync_manager.start() + self.app_runner.start() + + def stop(self) -> None: + if self.__sync_manager is not None: + self.__sync_manager.stop() + + if self.__app_runner is not None: + self.__app_runner.stop() diff --git a/packages/syftbox/syftbox/client/plugins/apps.py b/packages/syftbox/syftbox/client/plugins/apps.py new file mode 100644 index 00000000000..6c36803687e --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/apps.py @@ -0,0 +1,399 @@ +import hashlib +import json +import logging +import os +import shutil +import subprocess +import threading +import time +from datetime import datetime +from logging.handlers import RotatingFileHandler +from pathlib import Path +from subprocess import CompletedProcess +from types import SimpleNamespace + +from croniter import croniter +from loguru import logger +from typing_extensions import Any, Optional, Union + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.lib.client_config import CONFIG_PATH_ENV +from syftbox.lib.types import PathLike + +APP_LOG_FILE_NAME_FORMAT = "{app_name}.log" +DEFAULT_INTERVAL = 10 +RUNNING_APPS: dict = {} +DEFAULT_APPS_PATH = Path(os.path.join(os.path.dirname(__file__), "..", "..", "..", "default_apps")).absolute().resolve() +EVENT = threading.Event() + + +def path_without_virtualenvs() -> str: + env_path = os.getenv("PATH", "") + if not env_path: + return env_path + + venv_hints = [ + f"env{os.sep}bin", + f"env{os.sep}Scripts", + "conda", + ".virtualenvs", + "pyenv", + ] + + # activated venv will have VIRTUAL_ENV and VIRTUAL_ENV/bin in PATH + # so axe it + env_venv = os.getenv("VIRTUAL_ENV", "") + if env_venv: + venv_hints.append(env_venv) + + cleaned_path = [ + entry for entry in env_path.split(os.pathsep) if not any(hint in entry.lower() for hint in venv_hints) + ] + + return os.pathsep.join(cleaned_path) + + +def get_clean_env() -> dict: + clean_env: dict = {} + + essential_vars = { + "PATH", + "HOME", + "USER", + "TEMP", + "TMP", + "TMPDIR", + "SHELL", + "LANG", + "LC_ALL", + "DISPLAY", # X11 specific (Linux) + "DBUS_SESSION_BUS_ADDRESS", # X11 specific (Linux) + "SYSTEMROOT", # Windows specific + } + + # Copy essential and SYFTBOX_* variables + for key, value in os.environ.items(): + if key in essential_vars or key.startswith("SYFTBOX_"): + clean_env[key] = value + + return clean_env + + +def find_and_run_script( + app_path: Path, extra_args: list[str], config_path: Path, app_log_dir: Optional[Path] = None +) -> CompletedProcess[str]: + script_path = os.path.join(app_path, "run.sh") + + clean_env = get_clean_env() + clean_env.update( + { + "PATH": path_without_virtualenvs(), + CONFIG_PATH_ENV: str(config_path), + } + ) + + # Check if the script exists + if os.path.isfile(script_path): + # Set execution bit (+x) + os.chmod(script_path, os.stat(script_path).st_mode | 0o111) + + # Prepare the command based on whether there's a shebang or not + command = ["sh", script_path] + extra_args + + result, _ = run_with_logging( + command, + app_path, + clean_env, + app_log_dir, + ) + return result + else: + raise FileNotFoundError(f"run.sh not found in {app_path}") + + +def create_app_logger(log_file: Path) -> tuple[logging.Logger, RotatingFileHandler]: + """Create an isolated logger for app runs""" + # Create a new logger 
instance + logger = logging.getLogger(f"app_logger_{log_file.name}") + logger.setLevel(logging.INFO) + + # Remove any existing handlers + logger.handlers.clear() + + # Create formatter + formatter = logging.Formatter("%(asctime)s | %(levelname)-8s | %(message)s", datefmt="%Y-%m-%d at %H:%M:%S") + + # Create and configure file handler + file_handler = RotatingFileHandler( + filename=log_file, + maxBytes=100 * 1024 * 1024, # 100Mb + backupCount=3, + encoding="utf-8", + ) + file_handler.setFormatter(formatter) + file_handler.setLevel(logging.INFO) + + # Add handler to logger + logger.addHandler(file_handler) + + return logger, file_handler + + +def run_with_logging( + command: list[str], app_path: Path, clean_env: dict, log_path: Optional[Path] = None +) -> tuple[CompletedProcess[str], Path]: + """ + Run a subprocess command and capture output to both a log file and return results. + """ + # Create logs directory if it doesn't exist + if log_path is None: + log_path = app_path / "logs" + log_path.mkdir(parents=True, exist_ok=True) + + # Create a unique log filename with timestamp and app name + app_name = app_path.name + log_file = log_path / APP_LOG_FILE_NAME_FORMAT.format(app_name=app_name) + + # Create isolated logger for this run + app_logger, file_handler = create_app_logger(log_file=log_file) + + try: + # Log run metadata + app_logger.info(f"Working directory: {app_path}") + app_logger.info(f"Command: {command}") + + # Run the subprocess + process = subprocess.run( + command, + cwd=app_path, + check=True, + capture_output=True, + text=True, + env=clean_env, + ) + + # log this in app log + app_logger.info( + ( + f"exit code: {process.returncode}\n" + f"===stdout===\n{process.stdout}" + f"===stderr===\n{process.stderr or '-'}" + ) + ) + # log this in console + logger.info(f"Process completed with exit code: {process.returncode}. 
Log file: {log_file}") + return process, log_file + + except subprocess.CalledProcessError as e: + # log this in app log + app_logger.error( + ("process output\n" f"> exit code: {e.returncode}\n" f"> stdout:\n{e.stdout}" f"> stderr:\n{e.stderr}") + ) + # log this in console + logger.error(f"Process failed with exit code: {e.returncode}") + raise e + + except Exception as e: + app_logger.error(f"Unexpected error: {str(e)}") + raise e + + finally: + app_logger.removeHandler(file_handler) + file_handler.close() + + +def copy_default_apps(apps_path: Path) -> None: + if not DEFAULT_APPS_PATH.exists(): + logger.info(f"Default apps directory not found: {DEFAULT_APPS_PATH}") + return + + for app in DEFAULT_APPS_PATH.iterdir(): + src_app_path = DEFAULT_APPS_PATH / app + dst_app_path = apps_path / app.name + + if src_app_path.is_dir(): + if dst_app_path.exists(): + logger.info(f"App already installed at: {dst_app_path}") + # shutil.rmtree(dst_app_path) + else: + shutil.copytree(src_app_path, dst_app_path) + logger.info(f"Copied default app:: {app}") + + +def dict_to_namespace(data: Union[dict, list, Any]) -> Union[SimpleNamespace, list, Any]: + if isinstance(data, dict): + return SimpleNamespace(**{key: dict_to_namespace(value) for key, value in data.items()}) + elif isinstance(data, list): + return [dict_to_namespace(item) for item in data] + else: + return data + + +def load_config(path: PathLike) -> Optional[Union[SimpleNamespace, list, Any]]: + try: + with open(path, "r") as f: + data = json.load(f) + return dict_to_namespace(data) + except Exception: + return None + + +def bootstrap(context: SyftBoxContextInterface) -> None: + # create the directory + apps_path = context.workspace.apps + + apps_path.mkdir(exist_ok=True) + + # Copy default apps if they don't exist + copy_default_apps(apps_path) + + +def run_apps(apps_path: Path, client_config: Path) -> None: + # create the directory + + for app in apps_path.iterdir(): + app_path = apps_path.absolute() / app + if app_path.is_dir(): + app_config = load_config(app_path / "config.json") + if app_config is None: + run_app(app_path, client_config) + elif RUNNING_APPS.get(app, None) is None: + logger.info("⏱ Scheduling a new app run.") + thread = threading.Thread( + target=run_custom_app_config, + args=( + app_config, + app_path, + client_config, + ), + ) + thread.start() + RUNNING_APPS[os.path.basename(app)] = thread + + +def get_file_hash(file_path: Union[str, Path], digest: str = "md5") -> str: + with open(file_path, "rb") as f: + return hashlib.file_digest(f, digest).hexdigest() + + +def output_published(app_output: Union[str, Path], published_output: Union[str, Path]) -> bool: + return ( + os.path.exists(app_output) + and os.path.exists(published_output) + and get_file_hash(app_output, "md5") == get_file_hash(published_output, "md5") + ) + + +def run_custom_app_config(app_config: SimpleNamespace, app_path: Path, client_config: Path) -> None: + app_name = os.path.basename(app_path) + clean_env = { + "PATH": path_without_virtualenvs(), + CONFIG_PATH_ENV: str(client_config), + } + # Update environment with any custom variables in app_config + app_envs = getattr(app_config.app, "env", {}) + if not isinstance(app_envs, dict): + app_envs = vars(app_envs) + clean_env.update(app_envs) + + # Retrieve the cron-style schedule from app_config + cron_iter = None + interval = None + cron_schedule = getattr(app_config.app.run, "schedule", None) + if cron_schedule is not None: + base_time = datetime.now() + cron_iter = croniter(cron_schedule, base_time) + elif 
getattr(app_config.app.run, "interval", None) is not None: + raw_interval = app_config.app.run.interval + if not isinstance(raw_interval, (int, float)): + raise ValueError(f"Invalid interval type: {type(raw_interval)}. Expected int or float.") + interval = raw_interval + else: + raise Exception("There's no schedule configuration. Please add schedule or interval in your app config.json") + + while not EVENT.is_set(): + current_time = datetime.now() + logger.info(f"👟 Running {app_name} at scheduled time {current_time.strftime('%Y-%m-%d %H:%M:%S')}") + logger.info(f"Running command: {app_config.app.run.command}") + try: + app_log_dir = app_path / "logs" + run_with_logging( + app_config.app.run.command, + app_path, + clean_env, + app_log_dir, + ) + log_file = app_log_dir / APP_LOG_FILE_NAME_FORMAT.format(app_name=app_name) + logger.info(f"App '{app_name}' ran successfully. \nDetailed logs at: {log_file.resolve()}") + except subprocess.CalledProcessError as _: + logger.error(f"Error calling subprocess for api '{app_name}'") + logger.error(f"Check {app_name}'s api logs at: {log_file.resolve()}") + except Exception as _: + logger.error(f"Error running '{app_name}'") + logger.error(f"Check {app_name} api logs at: {log_file.resolve()}") + + if cron_iter is not None: + # Schedule the next execution + next_execution = cron_iter.get_next(datetime) + time_to_wait = int((next_execution - current_time).total_seconds()) + logger.info( + f"⏲ Waiting for scheduled time. Current time: {current_time.strftime('%Y-%m-%d %H:%M:%S')}, Next execution: {next_execution.strftime('%Y-%m-%d %H:%M:%S')}" + ) + else: + if interval is not None: + time_to_wait = int(interval) + time.sleep(time_to_wait) + + +def run_app(app_path: Path, config_path: Path) -> None: + app_name = os.path.basename(app_path) + app_log_dir = app_path / "logs" + log_file = app_log_dir / APP_LOG_FILE_NAME_FORMAT.format(app_name=app_name) + + extra_args: list = [] + try: + logger.info(f"Running '{app_name}' app") + find_and_run_script(app_path, extra_args, config_path, app_log_dir) + logger.info(f"`{app_name}` App ran successfully. 
\nDetailed logs at: {log_file.resolve()}") + except FileNotFoundError as e: + logger.error(f"Error running '{app_name}'") + logger.error(f"Error: {str(e)}") + except subprocess.CalledProcessError as _: + logger.error(f"Error calling subprocess for api '{app_name}'") + logger.error(f"Check {app_name}'s api logs at: {log_file.resolve()}") + except Exception as _: + logger.error(f"Error running '{app_name}'") + logger.error(f"Check {app_name} api logs at: {log_file.resolve()}") + + +class AppRunner: + def __init__(self, context: SyftBoxContextInterface, interval: int = DEFAULT_INTERVAL): + self.context = context + self.__event = threading.Event() + self.interval = interval + self.__run_thread: Optional[threading.Thread] = None + + def start(self) -> None: + def run() -> None: + bootstrap(self.context) + while not self.__event.is_set(): + try: + run_apps( + apps_path=self.context.workspace.apps, + client_config=self.context.config.path, + ) + self.__event.wait(self.interval) + except Exception as e: + logger.error(f"Error running apps: {str(e)}") + + self.__run_thread = threading.Thread(target=run) + self.__run_thread.start() + + def stop(self, blocking: bool = False) -> None: + if not self.__run_thread: + return + + EVENT.set() + self.__event.set() + if blocking: + self.__run_thread.join() diff --git a/packages/syftbox/syftbox/client/plugins/sync/__init__.py b/packages/syftbox/syftbox/client/plugins/sync/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/client/plugins/sync/constants.py b/packages/syftbox/syftbox/client/plugins/sync/constants.py new file mode 100644 index 00000000000..2d76ebbfda1 --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/sync/constants.py @@ -0,0 +1,2 @@ +# TODO move to client config after refactor +MAX_FILE_SIZE_MB = 10 diff --git a/packages/syftbox/syftbox/client/plugins/sync/consumer.py b/packages/syftbox/syftbox/client/plugins/sync/consumer.py new file mode 100644 index 00000000000..3acdf949c1f --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/sync/consumer.py @@ -0,0 +1,145 @@ +from pathlib import Path +from typing import Optional + +import httpx +from loguru import logger + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.exceptions import SyftPermissionError, SyftServerError +from syftbox.client.plugins.sync.datasite_state import DatasiteState +from syftbox.client.plugins.sync.exceptions import ( + FatalSyncError, + SyncEnvironmentError, + SyncValidationError, +) +from syftbox.client.plugins.sync.local_state import LocalState +from syftbox.client.plugins.sync.queue import SyncQueue, SyncQueueItem +from syftbox.client.plugins.sync.sync_action import SyncAction, determine_sync_action +from syftbox.client.plugins.sync.types import SyncActionType +from syftbox.lib.hash import hash_file +from syftbox.lib.ignore import filter_ignored_paths +from syftbox.server.models.sync_models import FileMetadata, RelativePath + + +def create_local_batch(context: SyftBoxContextInterface, paths_to_download: list[Path]) -> list[RelativePath]: + try: + file_list = context.client.sync.download_files_streaming(paths_to_download, context.workspace.datasites) + except SyftServerError as e: + logger.error(e) + return [] + return file_list + + +class SyncConsumer: + def __init__(self, context: SyftBoxContextInterface, queue: SyncQueue, local_state: LocalState): + self.context = context + self.queue = queue + self.local_state = local_state + + def validate_sync_environment(self) -> None: + if 
not Path(self.context.workspace.datasites).is_dir(): + raise SyncEnvironmentError("Your sync folder has been deleted by a different process.") + if not self.local_state.path.is_file(): + raise SyncEnvironmentError("Your previous sync state has been deleted by a different process.") + + def consume_all(self) -> None: + while not self.queue.empty(): + self.validate_sync_environment() + item = self.queue.get(timeout=0.1) + try: + self.process_filechange(item) + except FatalSyncError as e: + # Fatal error, syncing should be interrupted + raise e + except Exception as e: + logger.error(f"Failed to sync file {item.data.path}, it will be retried in the next sync. Reason: {e}") + + def download_all_missing(self, datasite_states: list[DatasiteState]) -> None: + try: + missing_files: list[Path] = [] + for datasite_state in datasite_states: + if not datasite_state.remote_state: + continue + for file in datasite_state.remote_state: + path = file.path + if not self.local_state.states.get(path): + missing_files.append(path) + missing_files = filter_ignored_paths(self.context.workspace.datasites, missing_files) + + logger.info(f"Downloading {len(missing_files)} files in batch") + received_files = create_local_batch(self.context, missing_files) + for file_path in received_files: + state = self.get_current_local_metadata(Path(file_path)) + self.local_state.insert_synced_file( + path=Path(file_path), + state=state, + action=SyncActionType.CREATE_LOCAL, + save=False, + ) + self.local_state.save() + except FatalSyncError as e: + raise e + except Exception as e: + logger.error( + f"Failed to download missing files, files will be downloaded individually instead. Reason: {e}" + ) + + def determine_action(self, item: SyncQueueItem) -> SyncAction: + path = item.data.path + current_local_metadata = self.get_current_local_metadata(path) + previous_local_metadata = self.get_previous_local_metadata(path) + current_remote_metadata = self.get_current_remote_metadata(path) + + return determine_sync_action( + current_local_metadata=current_local_metadata, + previous_local_metadata=previous_local_metadata, + current_remote_metadata=current_remote_metadata, + ) + + def process_action(self, action: SyncAction) -> SyncAction: + """ + Execute an action and handle any exceptions that may occur. Actions are either: + - Executed successfully (status = SYNCED) + - Rejected by the server (status = REJECTED). Rejection behaviour is defined by the action. + For example, a rejected local deletion will be reverted by creating the file again. + - Error occurred during execution (status = ERROR), the action will be retried in the next sync. + Errors could be either validation errors (file is too large, etc.) or server errors (connection issues, etc.) + """ + try: + logger.info(action.info_message) + action.validate(self.context) + action.execute(self.context) + except SyftPermissionError as e: + action.process_rejection(self.context, reason=str(e)) + except SyncValidationError as e: + # TODO Should we reject validation errors as well? + action.error(e) + logger.warning(f"Validation error: {e}") + except (SyftServerError, httpx.RequestError) as e: + action.error(e) + logger.error(f"Failed to sync file {action.path}, it will be retried in the next sync. 
Reason: {e}") + + return action + + def process_filechange(self, item: SyncQueueItem) -> None: + action = self.determine_action(item) + if action.is_noop(): + return + + action = self.process_action(action) + self.local_state.insert_completed_action(action) + + def get_current_local_metadata(self, path: Path) -> Optional[FileMetadata]: + abs_path = self.context.workspace.datasites / path + if not abs_path.is_file(): + return None + return hash_file(abs_path, root_dir=self.context.workspace.datasites) + + def get_previous_local_metadata(self, path: Path) -> Optional[FileMetadata]: + return self.local_state.states.get(path, None) + + def get_current_remote_metadata(self, path: Path) -> Optional[FileMetadata]: + try: + return self.context.client.sync.get_metadata(path) + except SyftServerError: + return None diff --git a/packages/syftbox/syftbox/client/plugins/sync/datasite_state.py b/packages/syftbox/syftbox/client/plugins/sync/datasite_state.py new file mode 100644 index 00000000000..6e79368415d --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/sync/datasite_state.py @@ -0,0 +1,222 @@ +from dataclasses import dataclass +from pathlib import Path +from typing import List, Optional + +from loguru import logger + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.plugins.sync.types import FileChangeInfo, SyncSide +from syftbox.lib.hash import collect_files, hash_dir +from syftbox.lib.ignore import filter_ignored_paths, get_syftignore_matches +from syftbox.lib.permissions import SyftPermission +from syftbox.server.models.sync_models import FileMetadata + + +def format_paths(path_list: list[Path]) -> str: + tree = "" + folders_seen = set() + + for p in path_list: + parts = p.parts + for i in range(len(parts)): + current_path = "/".join(parts[: i + 1]) + if current_path not in folders_seen: + depth = i + name = parts[i] + is_file = i == len(parts) - 1 + prefix = " " * depth + "├── " + + if is_file: + tree += f"{prefix}{name}\n" + else: + tree += f"{prefix}{name}/\n" + folders_seen.add(current_path) + + return tree + + +@dataclass +class DatasiteChanges: + permissions: list[FileChangeInfo] + files: list[FileChangeInfo] + + +class DatasiteState: + def __init__( + self, + context: SyftBoxContextInterface, + email: str, + remote_state: Optional[list[FileMetadata]] = None, + ) -> None: + """A class to represent the state of a datasite + + Args: + ctx (SyftClientInterface): Context of the syft client + email (str): Email of the datasite + remote_state (Optional[list[FileMetadata]], optional): Remote state of the datasite. + If not provided, it will be fetched from the server. Defaults to None. 
+ """ + self.context = context + self.email: str = email + self.remote_state: Optional[list[FileMetadata]] = remote_state + + def __repr__(self) -> str: + return f"DatasiteState<{self.email}>" + + def tree_repr(self) -> str: + remote_state = self.remote_state or [] + rel_paths = sorted([file.path for file in remote_state]) + path_str = format_paths(rel_paths) + return f"""DatasiteState: +{path_str} + """ + + @property + def path(self) -> Path: + p = self.context.workspace.datasites / self.email + return p.expanduser().resolve() + + def get_current_local_state(self) -> list[FileMetadata]: + return hash_dir(self.path, root_dir=self.context.workspace.datasites) + + def get_remote_state(self) -> list[FileMetadata]: + if self.remote_state is None: + self.remote_state = self.context.client.sync.get_remote_state(Path(self.email)) + return self.remote_state + + def is_in_sync(self) -> bool: + changes = self.get_datasite_changes() + return len(changes.files) == 0 and len(changes.permissions) == 0 + + def get_syftignore_matches(self) -> List[Path]: + """ + Return the paths that are ignored by the syftignore file + + NOTE: symlinks and hidden files are ignored by default, and not added here. + This is to avoid spamming the logs with .venv and .git folders. + """ + all_paths = collect_files(self.path) + relative_paths = [file.relative_to(self.context.workspace.datasites) for file in all_paths] + return get_syftignore_matches( + datasites_dir=self.context.workspace.datasites, + relative_paths=relative_paths, + include_symlinks=False, + ) + + def get_datasite_changes( + self, + ) -> DatasiteChanges: + """ + calculate the files that are out of sync + + NOTE: we are not handling local permissions here, + they will be handled by the server and consumer + TODO: we are not handling empty folders + """ + try: + local_state = self.get_current_local_state() + except Exception as e: + logger.error(f"Failed to get local state for {self.email}: {e}") + return DatasiteChanges(permissions=[], files=[]) + + try: + remote_state = self.get_remote_state() + except Exception as e: + logger.error(f"Failed to get remote state for {self.email}: {e}") + return DatasiteChanges(permissions=[], files=[]) + + local_state_dict = {file.path: file for file in local_state} + remote_state_dict = {file.path: file for file in remote_state} + all_files = set(local_state_dict.keys()) | set(remote_state_dict.keys()) + all_files_filtered = filter_ignored_paths( + datasites_dir=self.context.workspace.datasites, + relative_paths=list(all_files), + ignore_hidden_files=True, + ignore_symlinks=True, + ) + + all_changes = [] + for afile in all_files_filtered: + local_info = local_state_dict.get(afile) + remote_info = remote_state_dict.get(afile) + + try: + change_info = compare_fileinfo(self.context.workspace.datasites, afile, local_info, remote_info) + except Exception as e: + logger.error( + f"Failed to compare file {afile.as_posix()}, it will be retried in the next sync. 
Reason: {e}" + ) + continue + + if change_info is not None: + all_changes.append(change_info) + + permission_changes, file_changes = split_permissions(all_changes) + return DatasiteChanges( + permissions=permission_changes, + files=file_changes, + ) + + +def split_permissions( + changes: list[FileChangeInfo], +) -> tuple[list[FileChangeInfo], list[FileChangeInfo]]: + permissions = [] + files = [] + for change in changes: + if SyftPermission.is_permission_file(change.path): + permissions.append(change) + else: + files.append(change) + return permissions, files + + +def compare_fileinfo( + local_sync_folder: Path, + path: Path, + local_info: Optional[FileMetadata], + remote_info: Optional[FileMetadata], +) -> Optional[FileChangeInfo]: + if local_info is None and remote_info is None: + return None + + if local_info is None and remote_info is not None: + # File only exists on remote + return FileChangeInfo( + local_sync_folder=local_sync_folder, + path=path, + side_last_modified=SyncSide.REMOTE, + date_last_modified=remote_info.last_modified, + file_size=remote_info.file_size, + ) + + if remote_info is None and local_info is not None: + # File only exists on local + return FileChangeInfo( + local_sync_folder=local_sync_folder, + path=path, + side_last_modified=SyncSide.LOCAL, + date_last_modified=local_info.last_modified, + file_size=local_info.file_size, + ) + + if local_info and remote_info and local_info.hash != remote_info.hash: + # File is different on both sides + if local_info.last_modified > remote_info.last_modified: + date_last_modified = local_info.last_modified + side_last_modified = SyncSide.LOCAL + file_size = local_info.file_size + else: + date_last_modified = remote_info.last_modified + side_last_modified = SyncSide.REMOTE + file_size = remote_info.file_size + + return FileChangeInfo( + local_sync_folder=local_sync_folder, + path=path, + side_last_modified=side_last_modified, + date_last_modified=date_last_modified, + file_size=file_size, + ) + + return None diff --git a/packages/syftbox/syftbox/client/plugins/sync/exceptions.py b/packages/syftbox/syftbox/client/plugins/sync/exceptions.py new file mode 100644 index 00000000000..990e95b5cb7 --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/sync/exceptions.py @@ -0,0 +1,17 @@ +from syftbox.lib.exceptions import SyftBoxException + + +class FatalSyncError(SyftBoxException): + """Base exception to signal sync should be interrupted.""" + + pass + + +class SyncEnvironmentError(FatalSyncError): + """the sync environment is corrupted (e.g. 
sync folder deleted), syncing cannot continue.""" + + pass + + +class SyncValidationError(SyftBoxException): + pass diff --git a/packages/syftbox/syftbox/client/plugins/sync/local_state.py b/packages/syftbox/syftbox/client/plugins/sync/local_state.py new file mode 100644 index 00000000000..6d62c8f33fe --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/sync/local_state.py @@ -0,0 +1,117 @@ +import threading +from datetime import datetime, timezone +from pathlib import Path +from typing import Optional + +from loguru import logger +from pydantic import BaseModel, Field +from typing_extensions import Self, Type + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.plugins.sync.exceptions import SyncEnvironmentError +from syftbox.client.plugins.sync.sync_action import SyncAction +from syftbox.client.plugins.sync.types import SyncActionType, SyncStatus +from syftbox.server.models.sync_models import FileMetadata + +LOCAL_STATE_FILENAME = "local_syncstate.json" + + +class SyncStatusInfo(BaseModel): + path: Path + timestamp: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) + status: SyncStatus + message: Optional[str] = None + action: Optional[SyncActionType] = None + + +class LocalState(BaseModel): + path: Path = Field(description="Path to the LocalState file") + # The state of files on last successful sync + states: dict[Path, FileMetadata] = {} + # The last sync status of each file + status_info: dict[Path, SyncStatusInfo] = {} + + @classmethod + def for_context(cls: Type[Self], context: SyftBoxContextInterface) -> Self: + return cls(path=context.workspace.plugins / LOCAL_STATE_FILENAME) + + def insert_completed_action(self, action: SyncAction) -> None: + """Insert action result into local state.""" + if action.action_type == SyncActionType.NOOP: + return + + if action.status == SyncStatus.PROCESSING: + raise ValueError("Attempted to insert an action into LocalState that is still being processed.") + + if action.status == SyncStatus.SYNCED: + self.insert_synced_file( + path=action.path, + state=action.result_local_state, + action=action.action_type, + ) + else: + self.insert_status_info( + path=action.path, + status=action.status, + message=action.message, + action=action.action_type, + ) + + def insert_synced_file( + self, path: Path, state: Optional[FileMetadata], action: "SyncActionType", save: bool = True + ) -> None: + if not isinstance(path, Path): + raise ValueError(f"path must be a Path object, got {path}") + if not self.path.is_file(): + # If the LocalState file does not exist, the sync environment is corrupted and syncing should be aborted + + # NOTE: this can occur when the user deletes the sync folder, but a different plugin re-creates it. 
+ # If the sync folder exists but the LocalState file does not, it means the sync folder was deleted + # during syncing and might cause unexpected behavior like deleting files on the remote + raise SyncEnvironmentError("Your previous sync state has been deleted by a different process.") + + if state is None: + self.states.pop(path, None) + else: + self.states[path] = state + + self.insert_status_info( + path, + SyncStatus.SYNCED, + action=action, + save=False, + ) + if save: + self.save() + + def insert_status_info( + self, + path: Path, + status: SyncStatus, + message: Optional[str] = None, + action: Optional["SyncActionType"] = None, + save: bool = True, + ) -> None: + if not isinstance(path, Path): + raise ValueError(f"path must be a Path object, got {path}") + self.status_info[path] = SyncStatusInfo(path=path, status=status, message=message, action=action) + if save: + self.save() + + def save(self) -> None: + try: + with threading.Lock(): + self.path.write_text(self.model_dump_json()) + except Exception as e: + logger.exception(f"Failed to save {self.path}: {e}") + + def load(self) -> None: + with threading.Lock(): + if self.path.exists(): + data = self.path.read_text() + loaded_state = self.model_validate_json(data) + self.states = loaded_state.states + self.status_info = loaded_state.status_info + else: + # Ensure the file exists for the next save + self.save() diff --git a/packages/syftbox/syftbox/client/plugins/sync/manager.py b/packages/syftbox/syftbox/client/plugins/sync/manager.py new file mode 100644 index 00000000000..9b4f3576483 --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/sync/manager.py @@ -0,0 +1,106 @@ +import time +from threading import Thread +from typing import Optional + +from loguru import logger + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.exceptions import SyftAuthenticationError +from syftbox.client.plugins.sync.consumer import SyncConsumer +from syftbox.client.plugins.sync.exceptions import FatalSyncError, SyncEnvironmentError +from syftbox.client.plugins.sync.local_state import LocalState +from syftbox.client.plugins.sync.producer import SyncProducer +from syftbox.client.plugins.sync.queue import SyncQueue, SyncQueueItem +from syftbox.client.plugins.sync.types import FileChangeInfo + + +class SyncManager: + def __init__(self, context: SyftBoxContextInterface, health_check_interval: int = 300): + self.context = context + self.queue = SyncQueue() + self.local_state = LocalState.for_context(context) + self.producer = SyncProducer(context=self.context, queue=self.queue, local_state=self.local_state) + self.consumer = SyncConsumer(context=self.context, queue=self.queue, local_state=self.local_state) + + self.sync_interval = 1 # seconds + self.thread: Optional[Thread] = None + self.is_stop_requested = False + self.sync_run_once = False + self.last_health_check = 0.0 + self.health_check_interval = float(health_check_interval) + + self.setup() + + def setup(self) -> None: + try: + self.local_state.load() + except Exception as e: + raise SyncEnvironmentError(f"Failed to load previous sync state: {e}") from e + + def is_alive(self) -> bool: + return self.thread is not None and self.thread.is_alive() + + def stop(self, blocking: bool = False) -> None: + self.is_stop_requested = True + if blocking and self.thread is not None: + self.thread.join() + + def start(self) -> None: + def _start(manager: SyncManager) -> None: + while not manager.is_stop_requested: + try: + if manager._should_perform_health_check(): + 
manager.check_server_status() + manager.run_single_thread() + time.sleep(manager.sync_interval) + except FatalSyncError as e: + logger.error(f"Syncing encountered a fatal error. {e}") + break + except Exception as e: + logger.error(f"Syncing encountered an error: {e}. Retrying in {manager.sync_interval} seconds.") + + self.is_stop_requested = False + t = Thread(target=_start, args=(self,), daemon=True) + t.start() + logger.info(f"Sync started, syncing every {self.sync_interval} seconds") + self.thread = t + + def enqueue(self, change: FileChangeInfo) -> None: + self.queue.put(SyncQueueItem(priority=change.get_priority(), data=change)) + + def _should_perform_health_check(self) -> bool: + return time.time() - self.last_health_check > self.health_check_interval + + def check_server_status(self) -> None: + """ + check if the server is still available for syncing, + if the user cannot authenticate, the sync will stop. + + Raises: + FatalSyncError: If the server is not available. + """ + try: + _ = self.context.client.auth.whoami() + logger.debug("Health check succeeded, server is available.") + self.last_health_check = time.time() + except SyftAuthenticationError as e: + # Auth errors will never recover, sync should be stopped + raise FatalSyncError(f"Health check failed, {e}") from e + except Exception as e: + logger.error(f"Health check failed: {e}. Retrying in {self.health_check_interval} seconds.") + + def run_single_thread(self) -> None: + datasite_states = self.producer.get_datasite_states() + logger.debug(f"Syncing {len(datasite_states)} datasites") + + if not self.sync_run_once: + # Download all missing files at the start + self.consumer.download_all_missing(datasite_states=datasite_states) + + for datasite_state in datasite_states: + self.producer.enqueue_datasite_changes(datasite_state) + + # TODO stop consumer if self.is_stop_requested + self.consumer.consume_all() + + self.sync_run_once = True diff --git a/packages/syftbox/syftbox/client/plugins/sync/producer.py b/packages/syftbox/syftbox/client/plugins/sync/producer.py new file mode 100644 index 00000000000..63e65725c07 --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/sync/producer.py @@ -0,0 +1,69 @@ +from loguru import logger + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.plugins.sync.datasite_state import DatasiteState +from syftbox.client.plugins.sync.local_state import LocalState +from syftbox.client.plugins.sync.queue import SyncQueue, SyncQueueItem +from syftbox.client.plugins.sync.types import FileChangeInfo, SyncStatus + + +class SyncProducer: + def __init__(self, context: SyftBoxContextInterface, queue: SyncQueue, local_state: LocalState): + self.context = context + self.queue = queue + self.local_state = local_state + + def get_datasite_states(self) -> list[DatasiteState]: + try: + remote_datasite_states = self.context.client.sync.get_datasite_states() + except Exception as e: + logger.error(f"Failed to retrieve datasites from server, only syncing own datasite. 
Reason: {e}") + remote_datasite_states = {} + + # Ensure we are always syncing own datasite + if self.context.email not in remote_datasite_states: + remote_datasite_states[self.context.email] = [] + + datasite_states = [ + DatasiteState(self.context, email, remote_state=remote_state) + for email, remote_state in remote_datasite_states.items() + ] + return datasite_states + + def add_ignored_to_local_state(self, datasite: DatasiteState) -> None: + """ + NOTE: to keep logic simple, we do not remove ignored files from the local state here. + Instead, they will be overwritten once the consumer processes the file. + + NOTE: To avoid spammy behaviour symlinks and hidden files are not included in the local state ignore list. + Example: the symlinked apps .venv folders can contain 10k+ files + """ + for path in datasite.get_syftignore_matches(): + prev_status_info = self.local_state.status_info.get(path, None) + # Only add to local state if it's not already ignored previously + is_ignored_previously = prev_status_info is not None and prev_status_info.status == SyncStatus.IGNORED + if not is_ignored_previously: + self.local_state.insert_status_info(path, SyncStatus.IGNORED) + + def enqueue_datasite_changes(self, datasite: DatasiteState) -> None: + """ + Enqueue all out of sync files for the datasite, + and track the ignored files in the local state. + """ + try: + datasite_changes = datasite.get_datasite_changes() + + if len(datasite_changes.permissions) or len(datasite_changes.files): + logger.debug( + f"Enqueuing {len(datasite_changes.permissions)} permissions and {len(datasite_changes.files)} files for {datasite.email}" + ) + except Exception as e: + logger.error(f"Failed to get out of sync files for {datasite.email}. Reason: {e}") + return + for change in datasite_changes.permissions + datasite_changes.files: + self.enqueue(change) + + self.add_ignored_to_local_state(datasite) + + def enqueue(self, change: FileChangeInfo) -> None: + self.queue.put(SyncQueueItem(priority=change.get_priority(), data=change)) diff --git a/packages/syftbox/syftbox/client/plugins/sync/queue.py b/packages/syftbox/syftbox/client/plugins/sync/queue.py new file mode 100644 index 00000000000..ceaecec7b1e --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/sync/queue.py @@ -0,0 +1,44 @@ +import threading +from dataclasses import dataclass, field +from datetime import datetime, timezone +from pathlib import Path +from queue import PriorityQueue +from typing import Dict, Optional + +from syftbox.client.plugins.sync.types import FileChangeInfo + + +@dataclass(order=True) +class SyncQueueItem: + priority: int + data: FileChangeInfo + enqueued_at: datetime = field(default_factory=lambda: datetime.now(tz=timezone.utc)) + + +class SyncQueue: + """ + A thread-safe priority queue that supports deduplication based on the data field. + + Adding an item to the queue that already exists will be ignored, even if the priority is different. 
+ """ + + def __init__(self, maxsize: int = 0): + self.queue: PriorityQueue[SyncQueueItem] = PriorityQueue(maxsize=maxsize) + self.all_items: Dict[Path, SyncQueueItem] = {} + + self.lock = threading.Lock() + + def put(self, item: SyncQueueItem, block: bool = False, timeout: Optional[float] = None) -> None: + with self.lock: + if item.data.path not in self.all_items: + self.queue.put(item, block=block, timeout=timeout) + self.all_items[item.data.path] = item + + def get(self, block: bool = False, timeout: Optional[float] = None) -> SyncQueueItem: + with self.lock: + item: SyncQueueItem = self.queue.get(block=block, timeout=timeout) + self.all_items.pop(item.data.path, None) + return item + + def empty(self) -> bool: + return self.queue.empty() diff --git a/packages/syftbox/syftbox/client/plugins/sync/sync_action.py b/packages/syftbox/syftbox/client/plugins/sync/sync_action.py new file mode 100644 index 00000000000..3f1330ba94d --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/sync/sync_action.py @@ -0,0 +1,367 @@ +import hashlib +import shutil +from abc import ABC, abstractmethod +from pathlib import Path +from typing import ClassVar, Optional + +import py_fast_rsync +from loguru import logger + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.exceptions import SyftPermissionError +from syftbox.client.plugins.sync.constants import MAX_FILE_SIZE_MB +from syftbox.client.plugins.sync.exceptions import SyncValidationError +from syftbox.client.plugins.sync.types import SyncActionType, SyncSide, SyncStatus +from syftbox.lib.constants import REJECTED_FILE_SUFFIX +from syftbox.lib.permissions import SyftPermission +from syftbox.server.models.sync_models import FileMetadata + + +def determine_sync_action( + current_local_metadata: Optional[FileMetadata], + previous_local_metadata: Optional[FileMetadata], + current_remote_metadata: Optional[FileMetadata], +) -> "SyncAction": + """ + Determine the action syncing should take based on the local and remote states, and the previous local state. + + Args: + current_local_metadata (Optional[FileMetadata]): Metadata of the local file, None if it does not exist. + previous_local_metadata (Optional[FileMetadata]): Metadata of the local file when it was last synced, + None if it does not exist. + current_remote_metadata (Optional[FileMetadata]): Metadata of the remote file, None if it does not exist. + + Raises: + ValueError: If the action cannot be determined. + + Returns: + SyncAction: The action to take to sync the local and remote states. 
+ """ + local_modified = current_local_metadata != previous_local_metadata + remote_modified = previous_local_metadata != current_remote_metadata + in_sync = current_remote_metadata == current_local_metadata + conflict = local_modified and remote_modified + # If the remote is modified, the local should be updated (possible conflicts are overwritten) + side_to_update = SyncSide.LOCAL if remote_modified else SyncSide.REMOTE + + local_exists = current_local_metadata is not None + remote_exists = current_remote_metadata is not None + both_exist = local_exists and remote_exists + + action: SyncAction + + if in_sync: + action = NoopAction(local_metadata=current_local_metadata, remote_metadata=current_remote_metadata) # type: ignore[arg-type] + + # Pull changes from remote + elif side_to_update == SyncSide.LOCAL and not local_exists: + action = CreateLocalAction(local_metadata=None, remote_metadata=current_remote_metadata) + elif side_to_update == SyncSide.LOCAL and both_exist: + action = ModifyLocalAction(local_metadata=current_local_metadata, remote_metadata=current_remote_metadata) + elif side_to_update == SyncSide.LOCAL and not remote_exists: + action = DeleteLocalAction(local_metadata=current_local_metadata, remote_metadata=None) + + # Push changes to remote + elif side_to_update == SyncSide.REMOTE and not remote_exists: + action = CreateRemoteAction(local_metadata=current_local_metadata, remote_metadata=None) + elif side_to_update == SyncSide.REMOTE and both_exist: + action = ModifyRemoteAction(local_metadata=current_local_metadata, remote_metadata=current_remote_metadata) + elif side_to_update == SyncSide.REMOTE and not local_exists: + action = DeleteRemoteAction(local_metadata=None, remote_metadata=current_remote_metadata) + else: + raise ValueError("Could not determine sync action") + + logger.debug( + f"path: {action.path}, " + f"local_modified: {local_modified}, " + f"remote_modified: {remote_modified}, " + f"in_sync: {in_sync}, " + f"conflict: {conflict}, " + f"action: {action.action_type.name}" + ) + return action + + +def format_rejected_path(path: Path) -> Path: + return path.with_suffix(REJECTED_FILE_SUFFIX + path.suffix) + + +class SyncAction(ABC): + action_type: ClassVar[SyncActionType] + path: Path + local_metadata: Optional[FileMetadata] + remote_metadata: Optional[FileMetadata] + status: SyncStatus + message: Optional[str] + + def __init_subclass__(cls) -> None: + if not hasattr(cls, "action_type"): + raise TypeError("SyncAction subclasses must define an action_type") + return super().__init_subclass__() + + def __init__(self, local_metadata: Optional[FileMetadata], remote_metadata: Optional[FileMetadata]): + if not local_metadata and not remote_metadata: + raise ValueError("At least one of local_metadata or remote_metadata must be provided") + self.local_metadata = local_metadata + self.remote_metadata = remote_metadata + self.path = local_metadata.path if local_metadata else remote_metadata.path # type: ignore + self.status = SyncStatus.PROCESSING + self.message = None + + @property + def side_to_update(self) -> SyncSide: + return self.action_type.target_side + + def validate(self, context: SyftBoxContextInterface) -> None: + """Raises a SyncValidationError if the action is invalid.""" + validate_sync_action(context, self) + + def is_valid(self, context: SyftBoxContextInterface) -> bool: + try: + self.validate(context) + return True + except SyncValidationError: + return False + + @abstractmethod + def execute(self, context: SyftBoxContextInterface) -> None: + pass + + 
@abstractmethod + def process_rejection(self, context: SyftBoxContextInterface, reason: Optional[str] = None) -> None: + pass + + def error(self, exception: Exception) -> None: + self.status = SyncStatus.ERROR + self.message = str(exception) + + @property + def info_message(self) -> str: + return f"Syncing {self.path} with action {self.action_type.name}" + + def is_noop(self) -> bool: + return self.action_type == SyncActionType.NOOP + + @property + def result_local_state(self) -> Optional[FileMetadata]: + """Metadata of the local file after the action is executed successfully.""" + if self.side_to_update == SyncSide.LOCAL: + return self.remote_metadata + return self.local_metadata + + +class NoopAction(SyncAction): + action_type = SyncActionType.NOOP + + def __init__(self, local_metadata: FileMetadata, remote_metadata: FileMetadata) -> None: + super().__init__(local_metadata, remote_metadata) + # noop actions are already synced + self.status = SyncStatus.SYNCED + + def execute(self, context: SyftBoxContextInterface) -> None: + pass + + def process_rejection(self, context: SyftBoxContextInterface, reason: Optional[str] = None) -> None: + pass + + +class CreateLocalAction(SyncAction): + action_type = SyncActionType.CREATE_LOCAL + + def execute(self, context: SyftBoxContextInterface) -> None: + content_bytes = context.client.sync.download(self.path) + abs_path = context.workspace.datasites / self.path + abs_path.parent.mkdir(parents=True, exist_ok=True) + abs_path.write_bytes(content_bytes) + self.status = SyncStatus.SYNCED + + def process_rejection(self, context: SyftBoxContextInterface, reason: Optional[str] = None) -> None: + # Client doesn't have permission, so no new files are downloaded. Action is a noop. + self.status = SyncStatus.REJECTED + self.message = reason + + +class ModifyLocalAction(SyncAction): + action_type = SyncActionType.MODIFY_LOCAL + + def execute(self, context: SyftBoxContextInterface) -> None: + if self.local_metadata is None: + raise ValueError("Local metadata is required for modify local action") + # Use rsync to update the local file with the remote changes + diff = context.client.sync.get_diff(self.path, self.local_metadata.signature) + + abs_path = context.workspace.datasites / self.path + local_data = abs_path.read_bytes() + new_data = py_fast_rsync.apply(local_data, diff.diff_bytes) + new_hash = hashlib.sha256(new_data).hexdigest() + + if new_hash != diff.hash: + # TODO error handling + raise ValueError("Hash mismatch after applying diff") + + # TODO implement safe write with tempfile + rename + abs_path.parent.mkdir(parents=True, exist_ok=True) + abs_path.write_bytes(new_data) + self.status = SyncStatus.SYNCED + + def process_rejection(self, context: SyftBoxContextInterface, reason: Optional[str] = None) -> None: + # Client doesn't have read permission, so we do not apply any diff + # This only happens in rare race conditions where the permission is revoked after the action is determined. 
+ self.status = SyncStatus.REJECTED + self.message = reason + + +class DeleteLocalAction(SyncAction): + action_type = SyncActionType.DELETE_LOCAL + + def execute(self, context: SyftBoxContextInterface) -> None: + abs_path = context.workspace.datasites / self.path + abs_path.unlink() + self.status = SyncStatus.SYNCED + + def process_rejection(self, context: SyftBoxContextInterface, reason: Optional[str] = None) -> None: + # A local delete cannot be rejected by the server; this is a noop + pass + + +class CreateRemoteAction(SyncAction): + action_type = SyncActionType.CREATE_REMOTE + + def execute(self, context: SyftBoxContextInterface) -> None: + abs_path = context.workspace.datasites / self.path + data = abs_path.read_bytes() + context.client.sync.create(self.path, data) + self.status = SyncStatus.SYNCED + + def process_rejection(self, context: SyftBoxContextInterface, reason: Optional[str] = None) -> None: + # Attempted upload without permission; the local file is renamed to a rejected file + abs_path = context.workspace.datasites / self.path + rejected_abs_path = format_rejected_path(abs_path) + shutil.move(abs_path, rejected_abs_path) + self.status = SyncStatus.REJECTED + self.message = reason + + +class ModifyRemoteAction(SyncAction): + action_type = SyncActionType.MODIFY_REMOTE + + def execute(self, context: SyftBoxContextInterface) -> None: + abs_path = context.workspace.datasites / self.path + local_data = abs_path.read_bytes() + if self.remote_metadata is None: + raise ValueError("Remote metadata is required for modify remote action") + diff = py_fast_rsync.diff(self.remote_metadata.signature_bytes, local_data) + if self.local_metadata is None: + raise ValueError("Local metadata is required for modify remote action") + context.client.sync.apply_diff( + relative_path=self.path, + diff=diff, + expected_hash=self.local_metadata.hash, + ) + self.status = SyncStatus.SYNCED + + def process_rejection(self, context: SyftBoxContextInterface, reason: Optional[str] = None) -> None: + # Client doesn't have write permission, so the local changes are rejected and reverted to the remote state + abs_path = context.workspace.datasites / self.path + rejected_abs_path = format_rejected_path(abs_path) + shutil.move(abs_path, rejected_abs_path) + + create_local_action = CreateLocalAction(local_metadata=None, remote_metadata=self.remote_metadata) + try: + create_local_action.execute(context) + except SyftPermissionError: + # Could not download the remote file due to lack of permission, + # so only the .rejected file is left locally + create_local_action.process_rejection(context) + self.status = SyncStatus.REJECTED + self.message = reason + + +class DeleteRemoteAction(SyncAction): + action_type = SyncActionType.DELETE_REMOTE + + def execute(self, context: SyftBoxContextInterface) -> None: + context.client.sync.delete(self.path) + self.status = SyncStatus.SYNCED + + def process_rejection(self, context: SyftBoxContextInterface, reason: Optional[str] = None) -> None: + # User does not have permission to delete the remote file; the delete is reverted + create_local_action = CreateLocalAction(local_metadata=None, remote_metadata=self.remote_metadata) + try: + create_local_action.execute(context) + except SyftPermissionError: + # Could not re-download the file due to lack of permissions, so only the .rejected file is left locally + create_local_action.process_rejection(context) + self.status = SyncStatus.REJECTED + self.message = reason + + +def _validate_local_action(context: SyftBoxContextInterface, action: SyncAction) -> None: + if action.action_type in 
{SyncActionType.DELETE_LOCAL, SyncActionType.NOOP}: + return + + abs_path = context.workspace.datasites / action.path + + # Create/modify local without remote metadata is invalid + if ( + action.action_type in {SyncActionType.CREATE_LOCAL, SyncActionType.MODIFY_LOCAL} + and action.remote_metadata is None + ): + raise SyncValidationError(f"Attempted to sync file {abs_path} to local, but remote file data is missing.") + + # Create/modify local over max file size is invalid + max_size_bytes = MAX_FILE_SIZE_MB * 1024 * 1024 + if ( + action.action_type in {SyncActionType.CREATE_LOCAL, SyncActionType.MODIFY_LOCAL} + and action.remote_metadata is not None + and action.remote_metadata.file_size > max_size_bytes + ): + raise SyncValidationError(f"File {abs_path} is larger than {MAX_FILE_SIZE_MB}MB.") + + +def _validate_remote_action(context: SyftBoxContextInterface, action: SyncAction) -> None: + # No validation needed for delete or noop actions + if action.action_type in {SyncActionType.DELETE_REMOTE, SyncActionType.NOOP}: + return + + abs_path = context.workspace.datasites / action.path + + # Create/modify remote without local metadata is invalid + if ( + action.action_type in {SyncActionType.CREATE_REMOTE, SyncActionType.MODIFY_REMOTE} + and action.local_metadata is None + ): + raise SyncValidationError(f"Attempted to sync file {abs_path} to remote, but local file data is missing.") + + # Create/modify remote over max file size is invalid + max_size_bytes = MAX_FILE_SIZE_MB * 1024 * 1024 + if ( + action.action_type in {SyncActionType.CREATE_REMOTE, SyncActionType.MODIFY_REMOTE} + and action.local_metadata is not None + and action.local_metadata.file_size > max_size_bytes + ): + raise SyncValidationError(f"File {abs_path} is larger than {MAX_FILE_SIZE_MB}MB.") + + # Create/modify with broken permissions is invalid + if ( + action.action_type in {SyncActionType.CREATE_REMOTE, SyncActionType.MODIFY_REMOTE} + and SyftPermission.is_permission_file(abs_path) + and not SyftPermission.is_valid(abs_path, abs_path.parent) # Path does not matter for validation + ): + raise SyncValidationError(f"Encountered invalid permission file {abs_path}.") + + +def validate_sync_action(context: SyftBoxContextInterface, action: SyncAction) -> None: + """ + Validate if the action can be executed. + + Args: + action (SyncAction): The action to validate. + + Raises: + SyncValidationError: If the action is invalid. 
+ """ + if action.side_to_update == SyncSide.LOCAL: + _validate_local_action(context, action) + else: + _validate_remote_action(context, action) diff --git a/packages/syftbox/syftbox/client/plugins/sync/types.py b/packages/syftbox/syftbox/client/plugins/sync/types.py new file mode 100644 index 00000000000..6ad917d71ce --- /dev/null +++ b/packages/syftbox/syftbox/client/plugins/sync/types.py @@ -0,0 +1,64 @@ +from datetime import datetime +from enum import Enum +from pathlib import Path + +from pydantic import BaseModel + +from syftbox.lib.permissions import SyftPermission + +# TODO cleanup duplicate types + + +class SyncStatus(str, Enum): + QUEUED = "queued" + PROCESSING = "processing" + SYNCED = "synced" + ERROR = "error" + REJECTED = "rejected" + IGNORED = "ignored" + + +class SyncSide(str, Enum): + LOCAL = "local" + REMOTE = "remote" + + +class FileChangeInfo(BaseModel): + local_sync_folder: Path + path: Path + side_last_modified: SyncSide + date_last_modified: datetime + file_size: int = 1 + + @property + def local_abs_path(self) -> Path: + return self.local_sync_folder / self.path + + def get_priority(self) -> int: + if SyftPermission.is_permission_file(self.path): + return 0 + else: + return max(1, self.file_size) + + def __lt__(self, other: "FileChangeInfo") -> bool: + return self.path < other.path + + +class SyncActionType(str, Enum): + NOOP = "NOOP" + CREATE_REMOTE = "CREATE_REMOTE" + CREATE_LOCAL = "CREATE_LOCAL" + DELETE_REMOTE = "DELETE_REMOTE" + DELETE_LOCAL = "DELETE_LOCAL" + MODIFY_REMOTE = "MODIFY_REMOTE" + MODIFY_LOCAL = "MODIFY_LOCAL" + + @property + def target_side(self) -> SyncSide: + if self in [ + SyncActionType.CREATE_REMOTE, + SyncActionType.MODIFY_REMOTE, + SyncActionType.DELETE_REMOTE, + ]: + return SyncSide.REMOTE + return SyncSide.LOCAL diff --git a/packages/syftbox/syftbox/client/routers/app_router.py b/packages/syftbox/syftbox/client/routers/app_router.py new file mode 100644 index 00000000000..92a545229d5 --- /dev/null +++ b/packages/syftbox/syftbox/client/routers/app_router.py @@ -0,0 +1,201 @@ +import json +import os +import subprocess +from pathlib import Path + +import yaml +from aiofiles import open as aopen +from fastapi import APIRouter, HTTPException +from fastapi.responses import JSONResponse +from pydantic import BaseModel +from typing_extensions import List + +from syftbox.client.routers.common import APIContext +from syftbox.lib.types import PathLike + +router = APIRouter() + + +def parse_frontmatter(file_path: Path) -> dict: + """ + Parses frontmatter YAML from a README.md file and returns it as a Python dictionary. + + Args: + file_path (str): Path to the README.md file. + + Returns: + dict: The parsed YAML frontmatter as a dictionary. If no frontmatter is found, returns an empty dict. + """ + with open(file_path, "r", encoding="utf-8") as file: + lines = file.readlines() + + # Check for YAML frontmatter boundaries + if lines[0].strip() == "---": + yaml_lines = [] + for line in lines[1:]: + if line.strip() == "---": + break + yaml_lines.append(line) + + # Parse the YAML content + frontmatter = yaml.safe_load("".join(yaml_lines)) + return frontmatter if frontmatter else {} + else: + return {} + + +class AppDetails(BaseModel): + name: str + version: str + source: str + home: str + icon: str + path: str + + +def get_all_apps(apps_dir: PathLike) -> List[AppDetails]: + """ + Get all apps in the given directory. + + Args: + apps_dir (str): Path to the directory containing the apps. + + Returns: + list: A list of AppDetails objects. 
+ """ + apps = [] + for app_dir in Path(apps_dir).iterdir(): + if app_dir.is_dir(): + readme_path = app_dir / "README.md" + if readme_path.exists(): + frontmatter = parse_frontmatter(readme_path) + app = AppDetails( + name=frontmatter.get("name", app_dir.name), + version=frontmatter.get("version", "0.0.1"), + source=frontmatter.get("source", ""), + home=frontmatter.get("home", ""), + icon=frontmatter.get("icon", ""), + path=str(app_dir), + ) + apps.append(app) + + return apps + + +@router.get("/") +async def index(ctx: APIContext) -> JSONResponse: + apps_dir = ctx.workspace.apps + apps = get_all_apps(apps_dir) + + return JSONResponse(content=[app.model_dump() for app in apps]) + + +@router.get("/status/{app_name}") +async def app_details(ctx: APIContext, app_name: str) -> JSONResponse: + apps_dir = ctx.workspace.apps + apps = get_all_apps(apps_dir) + for app in apps: + if app_name == app.name: + return JSONResponse(content=app.model_dump()) + return JSONResponse(status_code=404, content={"message": "App not found"}) + + +class InstallRequest(BaseModel): + source: str + version: str + + +@router.post("/install") +async def install_app(request: InstallRequest) -> JSONResponse: + command = ["syftbox", "app", "install", request.source, "--called-by", "api"] + try: + # Run the command and capture output + result = subprocess.run(command, capture_output=True, text=True, check=True) + print(result.stderr, result.stdout) + # If successful, return JSON payload indicating success + return { + "status": "success", + "message": f"App {request.source} version {request.version} installed successfully.", + "output": result.stdout, + } + + except subprocess.CalledProcessError as e: + # Handle command failure, return JSON with error details + raise HTTPException( + status_code=500, + detail={"status": "error", "message": f"Failed to install app {request.source}.", "output": e.stderr}, + ) + + +@router.post("/command/{app_name}") +async def app_command(ctx: APIContext, app_name: str, request: dict) -> JSONResponse: + apps_dir = ctx.workspace.apps + apps = get_all_apps(apps_dir) + + for app in apps: + if app_name == app.name: + # Convert request dictionary to JSON string and wrap with single quotes for shell + request_json = json.dumps(request) + json_arg = f"--input='{request_json}'" # Wrap entire JSON argument in single quotes + command = f"uv run {app.path}/command.py {json_arg}" # Complete command as a single string + print("command", command) + + # Create env dict with explicit string types + env: dict[str, str] = { + **{k: str(v) for k, v in os.environ.items()}, + "SYFTBOX_CLIENT_CONFIG_PATH": str(ctx.config.path), + } + + try: + # Execute the command with the specified environment + result = subprocess.run(command, check=True, capture_output=True, text=True, shell=True, env=env) + + # Trim the output and attempt to parse it as JSON + trimmed_output = result.stdout.strip() + try: + json_output = json.loads(trimmed_output) + return JSONResponse(content=json_output) + except json.JSONDecodeError: + # Return trimmed output as plain text if not valid JSON + return JSONResponse(content={"output": trimmed_output}) + except subprocess.CalledProcessError as e: + print("error", e) + return JSONResponse(status_code=500, content={"error": e.stderr.strip()}) + + raise HTTPException(status_code=404, detail="App not found") + + +@router.get("/logs/{app_name}") +async def app_logs( + ctx: APIContext, + app_name: str, + limit: int = 256, + offset: int = 0, +) -> JSONResponse: + apps_dir = ctx.workspace.apps + app_dir 
= Path(apps_dir) / app_name + if not app_dir.is_dir(): + raise HTTPException(status_code=404, detail="App not found") + + logs: List[str] = [] + log_file = app_dir / "logs" / f"{app_name}.log" + try: + if log_file.is_file(): + async with aopen(log_file, "r") as file: + logs = await file.readlines() + + # Calculate pagination indices + total_logs = len(logs) + start_idx = max(0, total_logs - offset - limit) + end_idx = total_logs - offset if offset > 0 else total_logs + logs = logs[start_idx:end_idx] + + return JSONResponse( + content={ + "logs": logs, + "total": total_logs, + "source": str(log_file), + } + ) + except Exception as e: + raise HTTPException(status_code=500, detail=f"Failed to retrieve logs: {str(e)}") diff --git a/packages/syftbox/syftbox/client/routers/common.py b/packages/syftbox/syftbox/client/routers/common.py new file mode 100644 index 00000000000..dfe25c496cc --- /dev/null +++ b/packages/syftbox/syftbox/client/routers/common.py @@ -0,0 +1,14 @@ +from fastapi import Depends, Request +from typing_extensions import Annotated + +from syftbox.client.base import SyftBoxContextInterface + +__all__ = ["APIContext"] + + +# Create a dependency for typed access to the client +async def get_context(request: Request) -> SyftBoxContextInterface: + return request.app.state.context + + +APIContext = Annotated[SyftBoxContextInterface, Depends(get_context)] diff --git a/packages/syftbox/syftbox/client/routers/datasite_router.py b/packages/syftbox/syftbox/client/routers/datasite_router.py new file mode 100644 index 00000000000..3c64fa1e1e1 --- /dev/null +++ b/packages/syftbox/syftbox/client/routers/datasite_router.py @@ -0,0 +1,26 @@ +# routers/datasite_router.py + +from fastapi import APIRouter, HTTPException +from loguru import logger +from pydantic import BaseModel + +from syftbox.client.routers.common import APIContext + +router = APIRouter() + + +# Don't think we require this Request model until we have +# an endpoint that allows one to create a datasite +class DatasiteRequest(BaseModel): + name: str + + +@router.get("/") +async def list_datasites(ctx: APIContext) -> dict: + """List all available datasites""" + + try: + return {"datasites": ctx.all_datasites} + except Exception as e: + logger.error(f"Error listing datasites: {e}") + raise HTTPException(status_code=500, detail="Failed to list datasites") diff --git a/packages/syftbox/syftbox/client/routers/index_router.py b/packages/syftbox/syftbox/client/routers/index_router.py new file mode 100644 index 00000000000..645d67229e1 --- /dev/null +++ b/packages/syftbox/syftbox/client/routers/index_router.py @@ -0,0 +1,67 @@ +from aiofiles import open as aopen +from fastapi import APIRouter, HTTPException +from fastapi.responses import JSONResponse, PlainTextResponse + +from syftbox import __version__ +from syftbox.client.routers.common import APIContext + +router = APIRouter() + + +@router.get("/") +async def index() -> PlainTextResponse: + return PlainTextResponse(f"SyftBox {__version__}") + + +@router.get("/version") +async def version() -> dict: + return {"version": __version__} + + +@router.get("/logs") +async def get_logs( + context: APIContext, + limit: int = 256, + offset: int = 0, +) -> JSONResponse: + """Get last log lines from the log file. 
+ + Args: + limit: Maximum number of log lines to return + offset: Number of lines to skip from the end of the log file + """ + logs_dir = context.workspace.data_dir / "logs" + + try: + log_files = sorted(logs_dir.glob("syftbox_*.log"), reverse=True) + if not log_files: + return JSONResponse( + content={ + "logs": [], + "total": 0, + } + ) + + last_log_file = log_files[0] + async with aopen(last_log_file, "r") as f: + content = await f.readlines() + + total_logs = len(content) + start_idx = max(total_logs - offset - limit, 0) + end_idx = total_logs - offset if offset > 0 else total_logs + + return JSONResponse( + content={ + "logs": content[start_idx:end_idx], + "total": total_logs, + "source": str(last_log_file), + } + ) + + except Exception as e: + raise HTTPException(status_code=500, detail=f"Failed to retrieve logs: {str(e)}") + + +@router.get("/metadata") +async def metadata(ctx: APIContext) -> dict: + return {"datasite": ctx.email} diff --git a/packages/syftbox/syftbox/client/routers/sync_router.py b/packages/syftbox/syftbox/client/routers/sync_router.py new file mode 100644 index 00000000000..ec29a400a62 --- /dev/null +++ b/packages/syftbox/syftbox/client/routers/sync_router.py @@ -0,0 +1,129 @@ +from pathlib import Path +from typing import List, Optional + +import wcmatch.glob +from fastapi import APIRouter, Depends, HTTPException +from fastapi.responses import HTMLResponse, JSONResponse +from jinja2 import Environment, FileSystemLoader + +from syftbox.client.exceptions import SyftPluginException +from syftbox.client.plugins.sync.local_state import SyncStatusInfo +from syftbox.client.plugins.sync.manager import SyncManager +from syftbox.client.plugins.sync.types import SyncStatus +from syftbox.client.routers.common import APIContext + +router = APIRouter() +jinja_env = Environment(loader=FileSystemLoader("syftbox/assets/templates")) + + +def get_sync_manager(context: APIContext) -> SyncManager: + if context.plugins is None: + raise HTTPException(status_code=500, detail="Plugin manager not initialized") + try: + return context.plugins.sync_manager + except SyftPluginException as e: + raise HTTPException(status_code=500, detail=f"Failed to get sync manager: {e}") + + +def _get_queued_items(sync_manager: SyncManager) -> List[SyncStatusInfo]: + # make copy to avoid changing size during iteration + queued_items = list(sync_manager.queue.all_items.values()) + return [ + SyncStatusInfo( + path=item.data.path, + status=SyncStatus.QUEUED, + timestamp=item.enqueued_at, + ) + for item in queued_items + ] + + +def _get_items_from_localstate(sync_manager: SyncManager) -> List[SyncStatusInfo]: + return list(sync_manager.local_state.status_info.values()) + + +def get_all_status_info(sync_manager: SyncManager) -> List[SyncStatusInfo]: + """ + Return all status info from both the queue and local state. + NOTE: the result might contain duplicates if the same path is present in both. 
+ """ + queued_items = _get_queued_items(sync_manager) + localstate_items = _get_items_from_localstate(sync_manager) + return queued_items + localstate_items + + +def deduplicate_status_info(status_info_list: List[SyncStatusInfo]) -> List[SyncStatusInfo]: + """Deduplicate status info by path, keeping the entry with latest timestamp""" + path_to_info: dict[Path, SyncStatusInfo] = {} + for info in status_info_list: + existing_info = path_to_info.get(info.path) + if not existing_info or info.timestamp > existing_info.timestamp: + path_to_info[info.path] = info + return list(path_to_info.values()) + + +def sort_status_info(status_info_list: List[SyncStatusInfo], order_by: str, order: str) -> List[SyncStatusInfo]: + if order_by.lower() not in SyncStatusInfo.model_fields: + raise HTTPException( + status_code=400, + detail=f"Invalid order_by field: {order_by}. Available fields: {list(SyncStatusInfo.model_fields.keys())}", + ) + if order.lower() not in ["asc", "desc"]: + raise HTTPException(status_code=400, detail=f"Invalid order: {order}, expected 'asc' or 'desc'") + + return list( + sorted( + status_info_list, + key=lambda x: getattr(x, order_by), + reverse=order.lower() == "desc", + ) + ) + + +def filter_by_path_glob(items: List[SyncStatusInfo], pattern: Optional[str]) -> List[SyncStatusInfo]: + if not pattern: + return items + + result = [] + for item in items: + if wcmatch.glob.globmatch(item.path.as_posix(), pattern, flags=wcmatch.glob.GLOBSTAR): + result.append(item) + return result + + +def apply_limit_offset(items: List[SyncStatusInfo], limit: Optional[int], offset: int) -> List[SyncStatusInfo]: + if offset: + items = items[offset:] + if limit: + items = items[:limit] + return items + + +@router.get("/health") +def health_check(sync_manager: SyncManager = Depends(get_sync_manager)) -> JSONResponse: + if not sync_manager.is_alive(): + raise HTTPException(status_code=503, detail="Sync service unavailable") + return JSONResponse(content={"status": "ok"}) + + +@router.get("/state") +def get_status_info( + order_by: str = "timestamp", + order: str = "desc", + path_glob: Optional[str] = None, + sync_manager: SyncManager = Depends(get_sync_manager), + limit: Optional[int] = None, + offset: int = 0, +) -> List[SyncStatusInfo]: + all_items = get_all_status_info(sync_manager) + items_deduplicated = deduplicate_status_info(all_items) + items_filtered = filter_by_path_glob(items_deduplicated, path_glob) + items_sorted = sort_status_info(items_filtered, order_by, order) + items_paginated = apply_limit_offset(items_sorted, limit, offset) + return items_paginated + + +@router.get("/") +def sync_dashboard(context: APIContext) -> HTMLResponse: + template = jinja_env.get_template("sync_dashboard.jinja2") + return HTMLResponse(template.render(base_url=context.config.client_url)) diff --git a/packages/syftbox/syftbox/client/server_client.py b/packages/syftbox/syftbox/client/server_client.py new file mode 100644 index 00000000000..9d6d8985cc2 --- /dev/null +++ b/packages/syftbox/syftbox/client/server_client.py @@ -0,0 +1,180 @@ +import base64 +from pathlib import Path +from typing import Any, Union + +import httpx +import msgpack +from pydantic import BaseModel +from tqdm import tqdm + +from syftbox.client.base import ClientBase +from syftbox.server.models.sync_models import ApplyDiffResponse, DiffResponse, FileMetadata, RelativePath + +# TODO move shared models to lib/models + + +class StreamedFile(BaseModel): + path: RelativePath + content: bytes + + def write_bytes(self, output_dir: Path) -> None: + 
file_path = output_dir / self.path + file_path.parent.mkdir(parents=True, exist_ok=True) + file_path.write_bytes(self.content) + + +class SyftBoxClient(ClientBase): + def __init__(self, conn: httpx.Client): + super().__init__(conn) + + self.auth = AuthClient(conn) + self.sync = SyncClient(conn) + + def register(self, email: str) -> str: + response = self.conn.post("/register", json={"email": email}) + self.raise_for_status(response) + return response.json().get("token") + + def info(self) -> dict: + response = self.conn.get("/info?client=1") + self.raise_for_status(response) + return response.json() + + def log_analytics_event(self, event_name: str, **kwargs: Any) -> None: + """Log an event to the server""" + event_data = { + "event_name": event_name, + **kwargs, + } + + response = self.conn.post("/log_event", json=event_data) + self.raise_for_status(response) + + +class AuthClient(ClientBase): + def whoami(self) -> Any: + response = self.conn.post("/auth/whoami") + self.raise_for_status(response) + return response.json() + + +class SyncClient(ClientBase): + def get_datasite_states(self) -> dict[str, list[FileMetadata]]: + response = self.conn.post("/sync/datasite_states") + self.raise_for_status(response) + data = response.json() + + result = {} + for email, metadata_list in data.items(): + result[email] = [FileMetadata(**item) for item in metadata_list] + + return result + + def get_remote_state(self, relative_path: Path) -> list[FileMetadata]: + response = self.conn.post("/sync/dir_state", params={"dir": relative_path.as_posix()}) + self.raise_for_status(response) + data = response.json() + return [FileMetadata(**item) for item in data] + + def get_metadata(self, path: Path) -> FileMetadata: + response = self.conn.post("/sync/get_metadata", json={"path": path.as_posix()}) + self.raise_for_status(response) + return FileMetadata(**response.json()) + + def get_diff(self, relative_path: Path, signature: Union[str, bytes]) -> DiffResponse: + """Get rsync-style diff between local and remote file. + + Args: + relative_path: Path to file relative to workspace root + signature: b85 encoded signature of the local file + + Returns: + DiffResponse containing the diff and expected hash + """ + if not isinstance(signature, str): + signature = base64.b85encode(signature).decode("utf-8") + + response = self.conn.post( + "/sync/get_diff", + json={ + "path": relative_path.as_posix(), + "signature": signature, + }, + ) + + self.raise_for_status(response) + return DiffResponse(**response.json()) + + def apply_diff(self, relative_path: Path, diff: Union[str, bytes], expected_hash: str) -> ApplyDiffResponse: + """Apply an rsync-style diff to update a remote file. + + Args: + relative_path: Path to file relative to workspace root + diff: py_fast_rsync binary diff to apply + expected_hash: Expected hash of the file after applying diff, used for verification. 
+ + Returns: + ApplyDiffResponse containing the result of applying the diff + """ + if not isinstance(diff, str): + diff = base64.b85encode(diff).decode("utf-8") + + response = self.conn.post( + "/sync/apply_diff", + json={ + "path": relative_path.as_posix(), + "diff": diff, + "expected_hash": expected_hash, + }, + ) + + self.raise_for_status(response) + return ApplyDiffResponse(**response.json()) + + def delete(self, relative_path: Path) -> None: + response = self.conn.post("/sync/delete", json={"path": relative_path.as_posix()}) + self.raise_for_status(response) + + def create(self, relative_path: Path, data: bytes) -> None: + response = self.conn.post( + "/sync/create", + files={"file": (relative_path.as_posix(), data, "text/plain")}, + ) + self.raise_for_status(response) + + def download(self, relative_path: Path) -> bytes: + response = self.conn.post("/sync/download", json={"path": relative_path.as_posix()}) + self.raise_for_status(response) + return response.content + + def download_files_streaming(self, relative_paths: list[Path], output_dir: Path) -> list[RelativePath]: + if not relative_paths: + return [] + relative_str_paths: list[str] = [Path(path).as_posix() for path in relative_paths] + + pbar = tqdm( + total=len(relative_str_paths), desc="Downloading files", unit="file", mininterval=1.0, dynamic_ncols=True + ) + extracted_files = [] + + with self.conn.stream( + "POST", + "/sync/download_bulk", + json={"paths": relative_str_paths}, + ) as response: + response.raise_for_status() + + unpacker = msgpack.Unpacker( + raw=False, + ) + + for chunk in response.iter_bytes(): + unpacker.feed(chunk) + for file_json in unpacker: + file = StreamedFile.model_validate(file_json) + file.write_bytes(output_dir) + extracted_files.append(file.path) + pbar.update(1) + + pbar.close() + return extracted_files diff --git a/packages/syftbox/syftbox/client/utils/__init__.py b/packages/syftbox/syftbox/client/utils/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/client/utils/dir_tree.py b/packages/syftbox/syftbox/client/utils/dir_tree.py new file mode 100644 index 00000000000..bd51bc6f4a9 --- /dev/null +++ b/packages/syftbox/syftbox/client/utils/dir_tree.py @@ -0,0 +1,24 @@ +from pathlib import Path +from typing import Mapping, Union + +from loguru import logger + +from syftbox.lib.permissions import SyftPermission + +DirTree = Mapping[str, "Union[str, bytes, SyftPermission, DirTree]"] + + +def create_dir_tree(base_path: Path, tree: DirTree) -> None: + logger.debug(f"creating tree at {base_path}") + for name, content in tree.items(): + local_path = base_path / name + + if isinstance(content, str): + local_path.write_text(content) + elif isinstance(content, bytes): + local_path.write_bytes(content) + elif isinstance(content, SyftPermission): + content.save(local_path) + elif isinstance(content, dict): + local_path.mkdir(parents=True, exist_ok=True) + create_dir_tree(local_path, content) diff --git a/packages/syftbox/syftbox/client/utils/display.py b/packages/syftbox/syftbox/client/utils/display.py new file mode 100644 index 00000000000..a2ecffce8d8 --- /dev/null +++ b/packages/syftbox/syftbox/client/utils/display.py @@ -0,0 +1,24 @@ +from pathlib import Path + +from rich.console import Console +from rich.tree import Tree + +from syftbox.lib.constants import PERM_FILE + + +def display_file_tree(root_dir: Path) -> None: + def add_dir(tree: Tree, path: Path) -> None: + for child in path.iterdir(): + if child.is_dir(): + sub_tree = tree.add(f"📁 
{child.name}") + add_dir(sub_tree, child) + elif child.name == PERM_FILE: + tree.add(f"🛡️ {child.name}") + else: + tree.add(f"📄 {child.name}") + + console = Console() + file_tree = Tree(f"📁 {root_dir.name}") + add_dir(file_tree, root_dir) + + console.print(file_tree) diff --git a/packages/syftbox/syftbox/client/utils/error_reporting.py b/packages/syftbox/syftbox/client/utils/error_reporting.py new file mode 100644 index 00000000000..eca046acf75 --- /dev/null +++ b/packages/syftbox/syftbox/client/utils/error_reporting.py @@ -0,0 +1,43 @@ +import datetime +import sys +from platform import platform + +import httpx +from httpx._models import Response +from pydantic import AnyHttpUrl, BaseModel, Field + +from syftbox import __version__ +from syftbox.client.env import syftbox_env +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.http import SYFTBOX_HEADERS + + +class ErrorReport(BaseModel): + client_config: SyftClientConfig + client_syftbox_version: str = __version__ + python_version: str = sys.version + platform: str = platform() + timestamp: datetime.datetime = Field(default_factory=lambda: datetime.datetime.now(datetime.timezone.utc)) + env: dict = Field(default=syftbox_env.model_dump()) + + @classmethod + def from_client_config(cls, client_config: SyftClientConfig) -> "ErrorReport": + return cls( + client_config=client_config, + server_version=try_get_server_version(client_config.server_url), + ) + + +def make_error_report(client_config: SyftClientConfig) -> ErrorReport: + return ErrorReport.from_client_config(client_config) + + +def try_get_server_version(server_url: AnyHttpUrl) -> Response: + try: + # do not use the server_client here, as it may not be in bad state + return httpx.get( + f"{server_url}/info?error_report=1", + headers=SYFTBOX_HEADERS, + ).json()["version"] + except Exception: + return None diff --git a/packages/syftbox/syftbox/client/utils/file_manager.py b/packages/syftbox/syftbox/client/utils/file_manager.py new file mode 100644 index 00000000000..30f8574849f --- /dev/null +++ b/packages/syftbox/syftbox/client/utils/file_manager.py @@ -0,0 +1,95 @@ +import os +import subprocess +from typing import Tuple + +from syftbox.lib.platform import OS_IS_WSL2, UNAME + + +def open_dir(folder_path: str) -> Tuple[bool, str]: + """ + Open the specified folder in the default file explorer. + + Args: + folder_path (str): The path to the folder to be opened. + + Returns: + Tuple[bool, str]: A tuple containing a boolean indicating success or failure, + and a message describing the result. + + Description: + This function attempts to open the folder specified by `folder_path` in the + default file explorer of the operating system. It handles different platforms + (Windows, macOS, Linux) and uses platform-specific commands to open the folder. + - On Windows, it uses `explorer`. + - On macOS, it uses `open`. + - On Linux, it uses `xdg-open` or `explorer.exe` in WSL environments. + + If the folder does not exist or an error occurs while attempting to open it, the + function returns `False` along with an appropriate error message. 
+ """ + folder_path = os.path.expanduser(folder_path) + if not os.path.exists(folder_path): + return False, f"Folder does not exist: {folder_path}" + + try: + system_name = UNAME.system + if system_name == "Darwin": + subprocess.run(["open", folder_path]) + elif system_name == "Windows": + subprocess.run(["explorer", folder_path]) + elif system_name == "Linux": + if OS_IS_WSL2: + # Convert the path to Windows format for explorer.exe + windows_path = _convert_to_windows_path(folder_path) + if windows_path: + subprocess.run(["explorer.exe", windows_path]) + else: + return False, "Failed to convert path to Windows format for WSL" + else: + # Use the default Linux file explorer + distro_explorer = _get_linux_file_explorer() + subprocess.run([distro_explorer, folder_path]) + else: + return False, f"Unsupported OS for opening folders: {system_name}" + return True, "Folder opened successfully" + except Exception as e: + return False, str(e) + + +def _convert_to_windows_path(folder_path: str) -> str: + """ + Convert a Linux path to a Windows path in WSL. + + Args: + folder_path (str): The Linux path to be converted. + + Returns: + str: The corresponding Windows path, or an empty string if the conversion fails. + + Description: + This function uses the `wslpath` command to convert a Linux path to a Windows + path when running in a WSL environment. If an error occurs during the + conversion, it returns an empty string. + """ + try: + # Use wslpath to convert the path + result = subprocess.run(["wslpath", "-w", folder_path], capture_output=True, text=True, check=True) + return result.stdout.strip() + except Exception: + return "" + + +def _get_linux_file_explorer() -> str: + """ + Get the default file explorer for Linux distributions. + + Returns: + str: The command used to open folders in the default Linux file explorer. + + Description: + This function returns the command used to open folders in Linux. By default, + it returns "xdg-open", which is commonly available on many Linux + distributions to open files and folders with the default application. 
+ """ + # implement as needed, for now just return xdg-open + return "xdg-open" diff --git a/packages/syftbox/syftbox/client/utils/macos.py b/packages/syftbox/syftbox/client/utils/macos.py new file mode 100644 index 00000000000..aaacf492950 --- /dev/null +++ b/packages/syftbox/syftbox/client/utils/macos.py @@ -0,0 +1,59 @@ +import subprocess +from pathlib import Path + +from typing_extensions import Optional + +from syftbox.lib.types import PathLike + +ASSETS_FOLDER = Path(__file__).parents[2] / "assets" +ICONS_PKG = ASSETS_FOLDER / "icon.zip" + + +# Function to search for Icon\r file +def search_icon_file(src_path: Path) -> Optional[Path]: + if not src_path.exists(): + return None + for file_path in src_path.iterdir(): + if "Icon" in file_path.name and "\r" in file_path.name: + return file_path + return None + + +# if you knew the pain of this function +def find_icon_file(src_path: Path) -> Path: + # First attempt to find the Icon\r file + icon_file = search_icon_file(src_path) + if icon_file: + return icon_file + + if not ICONS_PKG.exists(): + # If still not found, raise an error + raise FileNotFoundError(f"{ICONS_PKG} not found") + + try: + # cant use other zip tools as they don't unpack it correctly + subprocess.run( + ["ditto", "-xk", str(ICONS_PKG), str(src_path.parent)], + check=True, + ) + + # Try to find the Icon\r file again after extraction + icon_file = search_icon_file(src_path) + if icon_file: + return icon_file + + raise FileNotFoundError(f"Icon file not found for {src_path}") + except subprocess.CalledProcessError: + raise RuntimeError("Failed to unzip icon.zip using macOS CLI tool.") + + +def copy_icon_file(icon_folder: PathLike, dest_folder: PathLike) -> None: + dest_path = Path(dest_folder) + icon_path = Path(icon_folder) + src_icon_path = find_icon_file(icon_path) + if not dest_path.exists(): + raise FileNotFoundError(f"Destination folder '{dest_folder}' does not exist.") + + # shutil wont work with these special icon files + subprocess.run(["cp", "-p", src_icon_path, dest_folder], check=True) + subprocess.run(["SetFile", "-a", "C", dest_folder], check=True) diff --git a/packages/syftbox/syftbox/client/utils/net.py b/packages/syftbox/syftbox/client/utils/net.py new file mode 100644 index 00000000000..8cbb0dcba44 --- /dev/null +++ b/packages/syftbox/syftbox/client/utils/net.py @@ -0,0 +1,16 @@ +import socket + + +def is_port_in_use(port: int) -> bool: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + try: + s.bind(("", port)) + return False + except socket.error: + return True + + +def get_free_port() -> int: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("", 0)) + return s.getsockname()[1] diff --git a/packages/syftbox/syftbox/lib/__init__.py b/packages/syftbox/syftbox/lib/__init__.py new file mode 100644 index 00000000000..02e93baf37f --- /dev/null +++ b/packages/syftbox/syftbox/lib/__init__.py @@ -0,0 +1,3 @@ +from syftbox import __version__ +from syftbox.lib.client_shim import Client +from syftbox.lib.permissions import SyftPermission diff --git a/packages/syftbox/syftbox/lib/client_config.py b/packages/syftbox/syftbox/lib/client_config.py new file mode 100644 index 00000000000..640a8fa88da --- /dev/null +++ b/packages/syftbox/syftbox/lib/client_config.py @@ -0,0 +1,140 @@ +import json +import os +import shutil +from pathlib import Path +from typing import Optional, Union + +from pydantic import AliasChoices, AnyHttpUrl, BaseModel, ConfigDict, EmailStr, Field, field_validator +from pydantic.main import IncEx +from 
pydantic_core import Url +from typing_extensions import Self + +from syftbox.lib.constants import DEFAULT_CONFIG_PATH, DEFAULT_DATA_DIR, DEFAULT_SERVER_URL +from syftbox.lib.exceptions import ClientConfigException +from syftbox.lib.types import PathLike, to_path + +__all__ = ["SyftClientConfig"] + +# env or default +CONFIG_PATH_ENV = "SYFTBOX_CLIENT_CONFIG_PATH" + +# Old configuration file path for the client +LEGACY_CONFIG_NAME = "client_config.json" + + +class SyftClientConfig(BaseModel): + """SyftBox client configuration""" + + # model config + model_config = ConfigDict(extra="ignore", json_encoders={AnyHttpUrl: lambda v: str(v)}) + + data_dir: Path = Field( + validation_alias=AliasChoices("data_dir", "sync_folder"), + default=DEFAULT_DATA_DIR, + description="Local directory where client data is stored", + ) + """Local directory where client data is stored""" + + server_url: AnyHttpUrl = Field( + default=DEFAULT_SERVER_URL, + description="URL of the remote SyftBox server", + ) + """URL of the remote SyftBox server""" + + client_url: AnyHttpUrl = Field( + validation_alias=AliasChoices("client_url", "port"), + description="URL where the client is running", + ) + """URL where the client is running""" + + email: EmailStr = Field(description="Email address of the user") + """Email address of the user""" + + token: Optional[str] = Field( + default=None, description="Deprecated: Use access_token instead. API token for the user", deprecated=True + ) + """Deprecated: Use access_token instead. API token for the user""" + + access_token: Optional[str] = Field(default=None, description="Access token for the user") + """Access token for the user""" + + # WARN: we don't need `path` to be serialized, hence exclude=True + path: Path = Field(exclude=True, description="Path to the config file") + """Path to the config file""" + + client_timeout: float = Field(default=5, description="Timeout used by the client connection to the SyftBox server") + """Timeout used by the client connection to the SyftBox server""" + + @field_validator("client_url", mode="before") + def port_to_url(cls, val: Union[int, str]) -> Optional[str]: + if isinstance(val, int): + return f"http://127.0.0.1:{val}" + return val + + @field_validator("token", mode="before") + def token_to_str(cls, v: Union[int, str, None]) -> Optional[str]: + if not v: + return None + elif isinstance(v, int): + return str(v) + return v + + def set_server_url(self, server: str) -> None: + self.server_url = Url(server) + + def set_port(self, port: int) -> None: + self.client_url = Url(f"http://127.0.0.1:{port}") + + @classmethod + def load(cls, conf_path: Optional[PathLike] = None) -> Self: + try: + # args or env or default + path = conf_path or os.getenv(CONFIG_PATH_ENV, DEFAULT_CONFIG_PATH) + if path is None: + raise ClientConfigException(f"Config file path not provided or set in env '{CONFIG_PATH_ENV}'") + path = to_path(path) + data = {} + + # todo migration stuff we can remove later + legacy_path = Path(path.parent, LEGACY_CONFIG_NAME) + # prefer to load config.json instead of client_config.json + # initially config.json WILL NOT exist, so we fall back to client_config.json + if path.exists(): + data = json.loads(path.read_text()) + elif legacy_path.exists(): + data = json.loads(legacy_path.read_text()) + path = legacy_path + else: + raise FileNotFoundError(f"Config file not found at '{conf_path}'") + # todo end + + return cls(path=path, **data) + except Exception as e: + raise ClientConfigException(f"Failed to load config from '{conf_path}' - {e}") + + 
@classmethod + def exists(cls, path: PathLike) -> bool: + return to_path(path).exists() + + def migrate(self) -> Self: + """Explicit call to migrate the config file""" + + # if we loaded the legacy config, we need to move it to new config + if self.path.name == LEGACY_CONFIG_NAME: + new_path = Path(self.path.parent, DEFAULT_CONFIG_PATH.name) + shutil.move(str(self.path), str(new_path)) + self.path = new_path + self.save() + + return self + + def as_dict(self, exclude: Optional[IncEx] = None) -> dict: + return self.model_dump(exclude=exclude, exclude_none=True, warnings="none") + + def as_json(self, indent: int = 4) -> str: + return self.model_dump_json(indent=indent, exclude_none=True, warnings="none") + + def save(self) -> Self: + self.path.parent.mkdir(parents=True, exist_ok=True) + self.path.write_text(self.as_json()) + return self diff --git a/packages/syftbox/syftbox/lib/client_shim.py b/packages/syftbox/syftbox/lib/client_shim.py new file mode 100644 index 00000000000..20faa54849a --- /dev/null +++ b/packages/syftbox/syftbox/lib/client_shim.py @@ -0,0 +1,108 @@ +""" +SyftBox Client Shim for apps and external dependencies + + +NOTE: this will likely get refactored as it's own SDK. +But we need it to maintain compatibility with apps +""" + +from pathlib import Path + +from pydantic import EmailStr +from typing_extensions import Optional, Self + +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.types import PathLike, to_path +from syftbox.lib.workspace import SyftWorkspace + +# this just makes it a bit clear what the default is for the api_data() method +CURRENT_API_REQUEST_NAME = None +MY_DATASITE = None + + +class Client: + """ + Client shim for SyftBox Apps + + Minimal set of properties and methods exposed to the apps. + """ + + def __init__(self, conf: SyftClientConfig): + self.config = conf + self.workspace = SyftWorkspace(self.config.data_dir) + + @property + def email(self) -> EmailStr: + """Email of the current user""" + return self.config.email + + @property + def config_path(self) -> Path: + """Path to the config of the current user""" + return self.config.path + + @property + def my_datasite(self) -> Path: + """Path to the datasite of the current user""" + return self.workspace.datasites / self.config.email + + @property + def datasites(self) -> Path: + """Path to the datasites folder""" + return self.workspace.datasites + + @property + def sync_folder(self) -> Path: + """Deprecated property use `client.datasites` instead""" + return self.workspace.datasites + + @property + def datasite_path(self) -> Path: + """Deprecated property. Use `client.my_datasite` instead""" + return self.workspace.datasites / self.config.email + + @classmethod + def load(cls, filepath: Optional[PathLike] = None) -> Self: + """ + Load the client configuration from the given file path or env var or default location + Raises: ClientConfigException + """ + return cls(conf=SyftClientConfig.load(filepath)) + + @property + def api_request_name(self) -> str: + """Returns the name of root directory of the API request calling this property. + + Use this property instead of hardcoding your API request's directory name, + as SyftBox may dynamically change it to prevent conflicts. 
+ """ + # The below works coz we set the cwd to the app's path before executing run.sh (see find_and_run_script method) + api_path = Path.cwd() + api_name = api_path.name + return api_name + + def api_data( + self, api_request_name: Optional[str] = CURRENT_API_REQUEST_NAME, datasite: Optional[str] = MY_DATASITE + ) -> Path: + """ + Gets the filesystem path to an application's API data directory for a specific datasite. + + Args: + api_request_name (Optional[str], default=CURRENT_API_REQUEST_NAME): The name of the API request + whose API data path is needed. + If None, defaults to the name of the API request from which this method is being called. + datasite (Optional[str], default=MY_DATASITE): The datasite's email. + If None, defaults to the current user's configured email. + + Returns: + Path: A filesystem path pointing to '/datasites//api_data/'. + """ + api_request_name = api_request_name or self.api_request_name + datasite = datasite or self.config.email + return self.workspace.datasites / datasite / "api_data" / api_request_name + + def makedirs(self, *paths: PathLike) -> None: + """Create directories""" + + for path in paths: + to_path(path).mkdir(parents=True, exist_ok=True) diff --git a/packages/syftbox/syftbox/lib/constants.py b/packages/syftbox/syftbox/lib/constants.py new file mode 100644 index 00000000000..69eb3427859 --- /dev/null +++ b/packages/syftbox/syftbox/lib/constants.py @@ -0,0 +1,30 @@ +from pathlib import Path + +# Default port for the SyftBox client +DEFAULT_PORT = 8080 + +# Default SyftBox cache server URL for the client +DEFAULT_SERVER_URL = "https://syftbox.openmined.org" + +# Default configuration directory for the client +DEFAULT_CONFIG_DIR = Path(Path.home(), ".syftbox") + +# Default configuration file path for the client +DEFAULT_CONFIG_PATH = Path(DEFAULT_CONFIG_DIR, "config.json") + +# Default logs directory for the client +DEFAULT_LOGS_DIR = Path(DEFAULT_CONFIG_DIR, "logs") + +# Default data directory for the client +DEFAULT_DATA_DIR = Path(Path.home(), "SyftBox") + +# Permissions file name +PERM_FILE = "syftperm.yaml" + +# Rejected files client-side +REJECTED_FILE_SUFFIX = ".syftrejected" + +SENDGRID_API_URL = "https://api.sendgrid.com/v3/mail/send" + +# Default benchmark runs +DEFAULT_BENCHMARK_RUNS = 5 diff --git a/packages/syftbox/syftbox/lib/datasite.py b/packages/syftbox/syftbox/lib/datasite.py new file mode 100644 index 00000000000..e4ecf10e365 --- /dev/null +++ b/packages/syftbox/syftbox/lib/datasite.py @@ -0,0 +1,40 @@ +from loguru import logger + +from syftbox.lib.constants import PERM_FILE +from syftbox.lib.exceptions import SyftBoxException +from syftbox.lib.ignore import create_default_ignore_file +from syftbox.lib.lib import SyftBoxContext +from syftbox.lib.permissions import SyftPermission + +PUBLIC_DIR = "public" + + +def create_datasite(context: SyftBoxContext) -> None: + # Create workspace/datasites/.syftignore + create_default_ignore_file(context.workspace.datasites) + + user_root = context.my_datasite + user_public_dir = user_root / PUBLIC_DIR + + # Create perm file for the datasite + if not user_root.is_dir(): + try: + logger.info(f"creating datasite at {user_root}") + user_root.mkdir(parents=True, exist_ok=True) + perms = SyftPermission.datasite_default(context, user_root) + perms.save(user_root / PERM_FILE) + except Exception as e: + # this is a problematic scenario - probably because you can't setup the basic + # datasite structure. So, we should probably just exit here. 
+ raise SyftBoxException(f"Failed to initialize datasite - {e}") from e + + if not user_public_dir.is_dir(): + try: + logger.info(f"creating public dir in datasite at {user_public_dir}") + user_public_dir.mkdir(parents=True, exist_ok=True) + perms = SyftPermission.mine_with_public_read(context, dir=user_public_dir) + perms.save(user_public_dir / PERM_FILE) + except Exception as e: + # not a big deal if we can't create the public folder + # more likely that the above step fails than this + logger.exception("Failed to create folder with public perms", e) diff --git a/packages/syftbox/syftbox/lib/debug.py b/packages/syftbox/syftbox/lib/debug.py new file mode 100644 index 00000000000..8138d479ac5 --- /dev/null +++ b/packages/syftbox/syftbox/lib/debug.py @@ -0,0 +1,80 @@ +import os +import platform +import shutil +import sys +from pathlib import PurePath +from typing import Any, Optional, Union +from venv import logger + +import psutil + +from syftbox import __version__ +from syftbox.app.manager import list_app +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.exceptions import ClientConfigException +from syftbox.lib.platform import OS_ARCH, OS_NAME, OS_VERSION +from syftbox.lib.types import PathLike +from syftbox.lib.workspace import SyftWorkspace + + +def debug_report(config_path: Optional[PathLike] = None) -> dict: + client_config: Optional[Union[SyftClientConfig, dict]] = None + apps = [] + app_dir = None + try: + client_config = SyftClientConfig.load(config_path) + workspace = SyftWorkspace(client_config.data_dir) + result = list_app(workspace) + app_dir = result.apps_dir + apps = [app.name for app in result.apps] + client_config = client_config.as_dict(exclude={"token", "access_token"}) + except ClientConfigException: + pass + except Exception as e: + logger.exception("Error loading client config", e) + pass + + syftbox_path = shutil.which("syftbox") + + return { + "system": { + "os": { + "name": OS_NAME, + "version": OS_VERSION, + "arch": OS_ARCH, + }, + "python": { + "version": platform.python_version(), + "binary_location": sys.executable, + }, + "resources": { + "cpus": psutil.cpu_count(logical=True), + "architecture": platform.machine(), + "ram": f"{psutil.virtual_memory().total / (1024**3):.2f} GB", + }, + }, + "syftbox": { + "version": __version__, + "command": syftbox_path or "syftbox executable not found in PATH", + "client_config_path": config_path, + "client_config": client_config, + "apps_dir": app_dir, + "apps": apps, + }, + "syftbox_env": {key: value for key, value in os.environ.items() if key.startswith("SYFT")}, + } + + +def debug_report_yaml(config_path: Optional[PathLike] = None) -> str: + import yaml + from pydantic_core import Url + + def str_representer(dumper: yaml.Dumper, val: Any) -> yaml.ScalarNode: + return dumper.represent_scalar("tag:yaml.org,2002:str", str(val)) + + # Register the custom representers + yaml.add_multi_representer(PurePath, str_representer) + yaml.add_representer(Url, str_representer) + + report = debug_report(config_path) + return yaml.dump(report, default_flow_style=False, sort_keys=False) diff --git a/packages/syftbox/syftbox/lib/email.py b/packages/syftbox/syftbox/lib/email.py new file mode 100644 index 00000000000..6965aa84bd8 --- /dev/null +++ b/packages/syftbox/syftbox/lib/email.py @@ -0,0 +1,174 @@ +from typing import Optional + +import httpx +from jinja2 import Template +from loguru import logger + +from syftbox.lib.constants import SENDGRID_API_URL +from syftbox.server.settings import ServerSettings + 
+SENDER_EMAIL = "SyftBox " + +token_email_template = """ +Welcome! + +Use the following token in your CLI to complete your registration: + +{{ token }} + +If you did not request this, please ignore this email. +""" + +reset_password_token_email_template = """ +Hello! + +Use the following command in your CLI to reset your password: + +syftbox reset-password --email {{ email }} --token {{ token }} + +If you did not request this, please ignore this email.
    + + +""" + + +def send_token_email(server_settings: ServerSettings, user_email: str, token: str) -> None: + template = Template(token_email_template) + body = template.render(email=user_email, token=token) + send_email( + server_settings=server_settings, + receiver_email=user_email, + subject="SyftBox Token", + body=body, + mimetype="text/html", + ) + + +def send_email( + server_settings: ServerSettings, + receiver_email: str, + subject: str, + body: str, + mimetype: str = "text/html", +) -> Optional[dict]: + payload = { + "personalizations": [{"to": [{"email": receiver_email}]}], + "from": {"email": SENDER_EMAIL}, + "subject": subject, + "content": [{"type": mimetype, "value": body}], + } + + if server_settings.sendgrid_secret is None: + raise ValueError("Sendgrid secret is not configured") + + headers = { + "Authorization": f"Bearer {server_settings.sendgrid_secret.get_secret_value()}", + "Content-Type": "application/json", + } + + try: + response = httpx.post(SENDGRID_API_URL, json=payload, headers=headers, timeout=10.0) + response.raise_for_status() + logger.info(f"Email sent to {receiver_email}") + return {"success": True, "status_code": response.status_code} + except httpx.HTTPError as e: + logger.error(str(e)) + return None diff --git a/packages/syftbox/syftbox/lib/exceptions.py b/packages/syftbox/syftbox/lib/exceptions.py new file mode 100644 index 00000000000..08bb2fb74b0 --- /dev/null +++ b/packages/syftbox/syftbox/lib/exceptions.py @@ -0,0 +1,6 @@ +class SyftBoxException(Exception): + pass + + +class ClientConfigException(SyftBoxException): + pass diff --git a/packages/syftbox/syftbox/lib/hash.py b/packages/syftbox/syftbox/lib/hash.py new file mode 100644 index 00000000000..57e969f1621 --- /dev/null +++ b/packages/syftbox/syftbox/lib/hash.py @@ -0,0 +1,106 @@ +import base64 +import hashlib +from concurrent.futures import ProcessPoolExecutor +from datetime import datetime, timezone +from functools import partial +from pathlib import Path +from typing import Optional, Union + +from loguru import logger +from py_fast_rsync import signature + +from syftbox.lib.ignore import filter_ignored_paths +from syftbox.server.models.sync_models import FileMetadata + + +def hash_file(file_path: Path, root_dir: Optional[Path] = None) -> Optional[FileMetadata]: + # ignore files larger then 100MB + try: + if file_path.stat().st_size > 100_000_000: + logger.warning("File too large: %s", file_path) + return None + + with open(file_path, "rb") as f: + # not ideal for large files + # but py_fast_rsync does not support files yet. 
+ # TODO: add support for streaming hashing + data = f.read() + + if root_dir is None: + path = file_path + else: + path = file_path.relative_to(root_dir) + return FileMetadata( + path=path, + hash=hashlib.sha256(data).hexdigest(), + signature=base64.b85encode(signature.calculate(data)), + file_size=len(data), + last_modified=datetime.fromtimestamp(file_path.stat().st_mtime, timezone.utc), + ) + except Exception: + logger.error(f"Failed to hash file {file_path}") + return None + + +def hash_files_parallel(files: list[Path], root_dir: Path) -> list[FileMetadata]: + with ProcessPoolExecutor() as executor: + results = list(executor.map(partial(hash_file, root_dir=root_dir), files)) + return [r for r in results if r is not None] + + +def hash_files(files: list[Path], root_dir: Path) -> list[FileMetadata]: + result = [hash_file(file, root_dir) for file in files] + return [r for r in result if r is not None] + + +def hash_dir( + dir: Path, + root_dir: Path, + filter_ignored: bool = True, +) -> list[FileMetadata]: + """ + hash all files in dir recursively, return a list of FileMetadata. + + ignore_folders should be relative to root_dir. + returned Paths are relative to root_dir. + """ + files = collect_files(dir) + + relative_paths = [file.relative_to(root_dir) for file in files] + if filter_ignored: + relative_paths = filter_ignored_paths(root_dir, relative_paths) + + absolute_paths = [root_dir / file for file in relative_paths] + return hash_files(absolute_paths, root_dir) + + +def collect_files( + dir: Union[Path, str], + include_hidden: bool = False, + follow_symlinks: bool = False, +) -> list[Path]: + """Collect files recursively, excluding files in hidden/symlinked directories unless specified.""" + dir = Path(dir) + if not dir.is_dir(): + return [] + + files: list[Path] = [] + for entry in dir.iterdir(): + try: + # Skip hidden entries + if not include_hidden and entry.name.startswith("."): + continue + + # Skip symlinked entries + if not follow_symlinks and entry.is_symlink(): + continue + + if entry.is_file(): + files.append(entry) + elif entry.is_dir(): + files.extend(collect_files(entry, include_hidden, follow_symlinks)) + + except OSError: + continue + + return files diff --git a/packages/syftbox/syftbox/lib/http.py b/packages/syftbox/syftbox/lib/http.py new file mode 100644 index 00000000000..45356341a23 --- /dev/null +++ b/packages/syftbox/syftbox/lib/http.py @@ -0,0 +1,20 @@ +from syftbox import __version__ +from syftbox.lib.platform import OS_ARCH, OS_NAME, OS_VERSION, PYTHON_VERSION + +# keep these as bytes as otel hooks return headers as bytes +HEADER_SYFTBOX_VERSION = "x-syftbox-version" +HEADER_SYFTBOX_USER = "x-syftbox-user" +HEADER_SYFTBOX_PYTHON = "x-syftbox-python" +HEADER_OS_NAME = "x-os-name" +HEADER_OS_VERSION = "x-os-ver" +HEADER_OS_ARCH = "x-os-arch" +# HEADER_GEO_COUNTRY = "x-geo-country" # Country of the user, added by Azure Front Door + +SYFTBOX_HEADERS = { + "User-Agent": f"SyftBox/{__version__} (Python {PYTHON_VERSION}; {OS_NAME} {OS_VERSION}; {OS_ARCH})", + HEADER_SYFTBOX_VERSION: __version__, + HEADER_SYFTBOX_PYTHON: PYTHON_VERSION, + HEADER_OS_NAME: OS_NAME, + HEADER_OS_VERSION: OS_VERSION, + HEADER_OS_ARCH: OS_ARCH, +} diff --git a/packages/syftbox/syftbox/lib/ignore.py b/packages/syftbox/syftbox/lib/ignore.py new file mode 100644 index 00000000000..3812421a8ef --- /dev/null +++ b/packages/syftbox/syftbox/lib/ignore.py @@ -0,0 +1,183 @@ +from pathlib import Path +from typing import Optional + +import pathspec +from loguru import logger + +from 
syftbox.lib.constants import REJECTED_FILE_SUFFIX +from syftbox.lib.types import PathLike, to_path + +IGNORE_FILENAME = "_.syftignore" + +DEFAULT_IGNORE = """ +# Syft +/_.syftignore +/.syft* +/apps +/staging +/syft_changelog + +# Python +.ipynb_checkpoints/ +__pycache__/ +*.py[cod] +.venv/ + +# OS-specific +.DS_Store +Icon + +# IDE/Editor-specific +*.swp +*.swo +.vscode/ +.idea/ +*.iml + +# General excludes +*.tmp + +# excluded datasites +# example: +# /user_to_exclude@example.com/ +""" + + +def create_default_ignore_file(dir: Path) -> None: + """Create a default _.syftignore file in the dir""" + ignore_file = to_path(dir) / IGNORE_FILENAME + if not ignore_file.is_file(): + logger.info(f"Creating default ignore file: {ignore_file}") + ignore_file.parent.mkdir(parents=True, exist_ok=True) + ignore_file.write_text(DEFAULT_IGNORE) + + +def get_ignore_rules(dir: Path) -> Optional[pathspec.PathSpec]: + """Get the ignore rules from the _.syftignore file in the dir""" + ignore_file = to_path(dir) / IGNORE_FILENAME + if ignore_file.is_file(): + with open(ignore_file) as f: + lines = f.readlines() + return pathspec.PathSpec.from_lines("gitwildmatch", lines) + return None + + +def is_within_symlinked_path(path: Path, datasites_dir: PathLike) -> bool: + """ + Returns True if the path is within a symlinked path. + + Symlinks are checked up to the datasites_dir. + """ + base_dir = to_path(datasites_dir) + for parent in path.parents: + if parent == base_dir: + break + if parent.is_symlink(): + return True + return False + + +def is_symlinked_file(abs_path: Path, datasites_dir: PathLike) -> bool: + """True if this file is a symlink, or is inside a symlinked directory (recursive)""" + return abs_path.is_symlink() or is_within_symlinked_path(abs_path, datasites_dir) + + +def filter_symlinks(datasites_dir: Path, relative_paths: list[Path]) -> list[Path]: + result = [] + for path in relative_paths: + abs_path = datasites_dir / path + + if not is_symlinked_file(abs_path, datasites_dir): + result.append(path) + return result + + +def filter_hidden_files(relative_paths: list[Path]) -> list[Path]: + result = [] + for path in relative_paths: + if not any(part.startswith(".") for part in path.parts): + result.append(path) + return result + + +def _is_rejected_file(path: Path) -> bool: + return REJECTED_FILE_SUFFIX in path.name + + +def filter_rejected_files(relative_paths: list[Path]) -> list[Path]: + result = [] + for path in relative_paths: + if not _is_rejected_file(path): + result.append(path) + return result + + +def filter_ignored_paths( + datasites_dir: Path, + relative_paths: list[Path], + ignore_hidden_files: bool = True, + ignore_symlinks: bool = True, + ignore_rejected_files: bool = True, +) -> list[Path]: + """ + Filter out paths that are ignored. Ignore rules: + - By default hidden files, or files within hidden directories are ignored. + - By default symlinks are ignored, or files within symlinked directories are ignored. + - files that match the ignore rules in the _.syftignore file are ignored. + + Args: + datasites_dir (Path): Directory containing datasites. + relative_paths (list[Path]): List of relative paths to filter. Paths are relative to datasites_dir. + ignore_hidden_files (bool, optional): If True, all hidden files and directories are filtered. Defaults to True. + ignore_symlinks (bool, optional): if True, all symlinked files and folders are filtered. Defaults to True. + + Returns: + list[Path]: List of filtered relative paths. 
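    Illustrative call (the datasite email and file names below are hypothetical),
    given a `datasites_dir` Path:

        paths = [Path("alice@example.com/public/data.csv"), Path(".cache/tmp.bin")]
        kept = filter_ignored_paths(datasites_dir, paths)
        # ".cache/tmp.bin" is dropped as hidden; the other path is kept unless a
        # _.syftignore rule, symlink check, or rejected-file suffix filters it out.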
+ """ + + if ignore_hidden_files: + relative_paths = filter_hidden_files(relative_paths) + + if ignore_symlinks: + relative_paths = filter_symlinks(datasites_dir, relative_paths) + + if ignore_rejected_files: + relative_paths = filter_rejected_files(relative_paths) + + ignore_rules = get_ignore_rules(datasites_dir) + if ignore_rules is None: + return relative_paths + + filtered_paths = [] + for path in relative_paths: + if not ignore_rules.match_file(path): + filtered_paths.append(path) + + return filtered_paths + + +def get_syftignore_matches( + datasites_dir: Path, + relative_paths: list[Path], + include_symlinks: bool = False, +) -> list[Path]: + """ + Get the paths that match the ignore rules in the _.syftignore file. + If include_symlinks is False, symlinks are ignored. + """ + + ignore_rules = get_ignore_rules(datasites_dir) + if ignore_rules is None: + return [] + + filtered_paths = [] + for path in relative_paths: + abs_path = datasites_dir / path + if not include_symlinks and is_symlinked_file(abs_path, datasites_dir): + continue + elif ignore_rules.match_file(path): + filtered_paths.append(path) + elif _is_rejected_file(path): + filtered_paths.append(path) + + return filtered_paths diff --git a/packages/syftbox/syftbox/lib/lib.py b/packages/syftbox/syftbox/lib/lib.py new file mode 100644 index 00000000000..2779a26d133 --- /dev/null +++ b/packages/syftbox/syftbox/lib/lib.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import json +import os +from pathlib import Path +from typing import TYPE_CHECKING + +from typing_extensions import Any, Iterator, Self, Union + +if TYPE_CHECKING: + pass + +SyftBoxContext = Union["Client", "SyftClientInterface"] # type: ignore[name-defined] + +USER_GROUP_GLOBAL = "GLOBAL" + +ICON_FILE = "Icon" # special +IGNORE_FILES: list = [] + + +def is_primitive_json_serializable(obj: Any) -> bool: + if isinstance(obj, (str, int, float, bool, type(None))): + return True + return False + + +def pack(obj: Any) -> Any: + if is_primitive_json_serializable(obj): + return obj + + if hasattr(obj, "to_dict"): + return obj.to_dict() + + if isinstance(obj, list): + return [pack(val) for val in obj] + + if isinstance(obj, dict): + return {k: pack(v) for k, v in obj.items()} + + if isinstance(obj, Path): + return str(obj) + + raise Exception(f"Unable to pack type: {type(obj)} value: {obj}") + + +class Jsonable: + def to_dict(self) -> dict: + output = {} + for k, v in self.__dict__.items(): + if k.startswith("_"): + continue + output[k] = pack(v) + return output + + def __iter__(self) -> Iterator[tuple[str, Any]]: + for key, val in self.to_dict().items(): + if key.startswith("_"): + yield key, val + + def __getitem__(self, key: str) -> Any: + if key.startswith("_"): + return None + return self.to_dict()[key] + + @classmethod + def load(cls, file_or_bytes: Union[str, Path, bytes]) -> Self: + data: Union[str, bytes] + try: + if isinstance(file_or_bytes, (str, Path)): + with open(file_or_bytes) as f: + data = f.read() + else: + data = file_or_bytes + d = json.loads(data) + return cls(**d) + except Exception as e: + raise e + + def save(self, filepath: str) -> None: + d = self.to_dict() + with open(Path(filepath).expanduser(), "w") as f: + f.write(json.dumps(d)) + + +def get_datasites(sync_folder: Union[str, Path]) -> list[str]: + sync_folder = str(sync_folder.resolve()) if isinstance(sync_folder, Path) else sync_folder + datasites = [] + folders = os.listdir(sync_folder) + for folder in folders: + if "@" in folder: + datasites.append(folder) + return datasites diff 
--git a/packages/syftbox/syftbox/lib/permissions.py b/packages/syftbox/syftbox/lib/permissions.py new file mode 100644 index 00000000000..43f14053a66 --- /dev/null +++ b/packages/syftbox/syftbox/lib/permissions.py @@ -0,0 +1,460 @@ +import json +import re +import sqlite3 +import traceback +from collections import defaultdict +from enum import Enum +from pathlib import Path +from typing import List, Optional, Tuple, Union + +import wcmatch +import yaml +from loguru import logger +from pydantic import BaseModel, model_validator +from wcmatch.glob import globmatch + +from syftbox.lib.constants import PERM_FILE +from syftbox.lib.lib import SyftBoxContext +from syftbox.lib.types import PathLike +from syftbox.server.models.sync_models import RelativePath + + +# TODO "Client" naming for SDK is confusing, it is a context for the syftbox lib +# util +def issubpath(path1: RelativePath, path2: RelativePath) -> bool: + return path1 in path2.parents + + +class PermissionType(Enum): + CREATE = 1 + READ = 2 + WRITE = 3 + ADMIN = 4 + + +class PermissionParsingError(Exception): + pass + + +class PermissionRule(BaseModel): + dir_path: RelativePath # where does this permfile live + path: str # what paths does it apply to (e.g. **/*.txt) + user: str # can be *, + allow: bool = True + permissions: List[PermissionType] # read/write/create/admin + priority: int + + def __eq__(self, other: object) -> bool: + if not isinstance(other, PermissionRule): + return NotImplemented + return self.model_dump() == other.model_dump() + + @property + def permfile_path(self) -> Path: + return self.dir_path / PERM_FILE + + @property + def depth(self) -> int: + return len(self.permfile_path.parts) + + # write model validator that accepts either a single string or a list of strings as permissions when initializing + + @model_validator(mode="before") + @classmethod + def validate_permissions(cls, values: dict) -> dict: + # check if values only contains keys that are in the model + invalid_keys = set(values.keys()) - (set(cls.model_fields.keys()) | set(["type"])) + if len(invalid_keys) > 0: + raise PermissionParsingError( + f"rule yaml contains invalid keys {invalid_keys}, only {cls.model_fields.keys()} are allowed" + ) + + # add that if the type value is "disallow" we set allow to false + if values.get("type") == "disallow": + values["allow"] = False + + # if path refers to a location higher in the directory tree than the current file, raise an error + path = values.get("path") + if path and path.startswith("../"): + raise PermissionParsingError( + f"path {path} refers to a location higher in the directory tree than the current file" + ) + + # if user is not a valid email, or *, raise an error + email = values.get("user", "") + is_valid_email = re.match(r"[^@]+@[^@]+", email or "") + if email != "*" and not is_valid_email: + raise PermissionParsingError(f"user {values.get('user')} is not a valid email or *") + + # listify permissions + perms = values.get("permissions") + if isinstance(perms, str): + perms = [perms] + if isinstance(perms, list): + values["permissions"] = [PermissionType[p.upper()] if isinstance(p, str) else p for p in perms] + else: + raise ValueError(f"permissions should be a list of strings or a single string, received {type(perms)}") + + path = values.get("path") + if path and "**" in path and "{useremail}" in path and path.index("**") < path.rindex("{useremail}"): + # this would make creating the path2rule mapping more challenging to compute beforehand + raise PermissionParsingError("** can never be after 
{useremail}") + + return values + + @classmethod + def from_rule_dict(cls, dir_path: RelativePath, rule_dict: dict, priority: int) -> "PermissionRule": + # initialize from dict + return cls(dir_path=dir_path, **rule_dict, priority=priority) + + @classmethod + def from_db_row(cls, row: sqlite3.Row) -> "PermissionRule": + """Create a PermissionRule from a database row""" + permissions = [] + if row["can_read"]: + permissions.append(PermissionType.READ) + if row["can_create"]: + permissions.append(PermissionType.CREATE) + if row["can_write"]: + permissions.append(PermissionType.WRITE) + if row["admin"]: + permissions.append(PermissionType.ADMIN) + + return cls( + dir_path=Path(row["permfile_path"]).parent, + path=row["path"], + user=row["user"], # Default to all users since DB schema doesn't show user field + allow=not row["disallow"], + priority=row["priority"], + permissions=permissions, + ) + + def to_db_row(self) -> dict: + """Convert PermissionRule to a database row dictionary""" + return { + "permfile_path": str(self.permfile_path), # Reconstruct full path + "permfile_dir": str(self.dir_path), + "permfile_depth": self.depth, + "priority": self.priority, + "path": self.path, + "user": self.user, + "can_read": PermissionType.READ in self.permissions, + "can_create": PermissionType.CREATE in self.permissions, + "can_write": PermissionType.WRITE in self.permissions, + "admin": PermissionType.ADMIN in self.permissions, + "disallow": not self.allow, + } + + @property + def permission_dict(self) -> dict: + return { + "read": PermissionType.READ in self.permissions, + "create": PermissionType.CREATE in self.permissions, + "write": PermissionType.WRITE in self.permissions, + "admin": PermissionType.ADMIN in self.permissions, + } + + def as_file_json(self) -> dict: + res = { + "path": self.path, + "user": self.user, + "permissions": [p.name.lower() for p in self.permissions], + } + if not self.allow: + res["type"] = "disallow" + return res + + def filepath_matches_rule_path(self, filepath: Path) -> Tuple[bool, Optional[str]]: + if issubpath(self.dir_path, filepath): + relative_file_path = filepath.relative_to(self.dir_path) + else: + return False, None + + match_for_email = None + if self.has_email_template: + match = False + emails_in_file_path = [ + part for part in str(relative_file_path).split("/") if "@" in part + ] # todo: improve this + for email in emails_in_file_path: + if globmatch( + str(relative_file_path), + self.path.replace("{useremail}", email), + flags=wcmatch.glob.GLOBSTAR, + ): + match = True + match_for_email = email + break + else: + match = globmatch(str(relative_file_path), self.path, flags=wcmatch.glob.GLOBSTAR) + return match, match_for_email + + @property + def has_email_template(self) -> bool: + return "{useremail}" in self.path + + def resolve_path_pattern(self, email: str) -> str: + return self.path.replace("{useremail}", email) + + +class SyftPermission(BaseModel): + relative_filepath: RelativePath + rules: List[PermissionRule] + + def save(self, path: Path) -> None: + if path.is_dir(): + path = path / PERM_FILE + with open(path, "w") as f: + yaml.dump([x.as_file_json() for x in self.rules], f) + + def ensure(self, path: Path) -> bool: + """For backwards compatibility, we ensure that the permission file exists with these permissions""" + self.save(path) + return True + + @property + def depth(self) -> int: + return len(self.relative_filepath.parts) + + def to_dict(self) -> list[dict]: + return [x.as_file_json() for x in self.rules] + + @staticmethod + def 
is_permission_file(path: Path) -> bool: + return path.name == PERM_FILE + + @classmethod + def is_valid(cls, path: Path, datasite_path: Path, _print: bool = True) -> bool: + try: + cls.from_file(path, datasite_path) + return True + except Exception as e: + if _print: + print(f"Invalid permission file {path}: {e}\n{traceback.format_exc()}") + return False + + @classmethod + def create(cls, context: SyftBoxContext, dir: Path) -> "SyftPermission": + if not dir.is_absolute(): + raise ValueError("dir must be an absolute path") + + if dir.exists() and dir.is_file(): + raise ValueError("dir must be a directory") + + dir.mkdir(parents=True, exist_ok=True) + file_path = dir / PERM_FILE + + try: + relative_path = file_path.relative_to(context.workspace.datasites) + except ValueError: + raise ValueError("dir must be inside the datasites folder") + return cls(relative_filepath=relative_path, rules=[]) + + @classmethod + def datasite_default(cls, context: SyftBoxContext, dir: Path) -> "SyftPermission": + perm = cls.create(context, dir) + perm.add_rule( + path="**", + user=context.email, + permission=["admin", "create", "write", "read"], + ) + return perm + + @classmethod + def mine_with_public_read(cls, context: SyftBoxContext, dir: Path) -> "SyftPermission": + perm = cls.create(context, dir) + perm.add_rule(path="**", user=context.email, permission=["admin"]) + perm.add_rule(path="**", user="*", permission=["read"]) + return perm + + @classmethod + def mine_with_public_write(cls, context: SyftBoxContext, dir: Path) -> "SyftPermission": + # for backwards compatibility + return cls.mine_with_public_rw(context, dir) + + @classmethod + def mine_with_public_rw(cls, context: SyftBoxContext, dir: Path) -> "SyftPermission": + perm = cls.create(context, dir) + perm.add_rule(path="**", user=context.email, permission=["admin"]) + perm.add_rule(path="**", user="*", permission=["write", "read"]) + return perm + + def add_rule( + self, path: str, user: str, permission: Union[list[str], list[PermissionType]], allow: bool = True + ) -> None: + priority = len(self.rules) + if isinstance(permission, list) and isinstance(permission[0], str): + permission = [PermissionType[p.upper()] for p in permission if isinstance(p, str)] + rule = PermissionRule( + dir_path=self.dir_path, path=path, user=user, allow=allow, permissions=permission, priority=priority + ) + self.rules.append(rule) + + @property + def dir_path(self) -> Path: + return self.relative_filepath.parent + + @classmethod + def from_file(cls, path: Path, datasite_path: Path) -> "SyftPermission": + with open(path, "r") as f: + rule_dicts = yaml.safe_load(f) + relative_path = path.relative_to(datasite_path) + return cls.from_rule_dicts(relative_path, rule_dicts) + + @classmethod + def from_rule_dicts(cls, permfile_file_path: PathLike, rule_dicts: list[dict]) -> "SyftPermission": + if not isinstance(rule_dicts, list): + raise ValueError(f"rules should be passed as a list of dicts, received {type(rule_dicts)}") + rules = [] + dir_path = Path(permfile_file_path).parent + for i, rule_dict in enumerate(rule_dicts): + rule = PermissionRule.from_rule_dict(dir_path, rule_dict, priority=i) + rules.append(rule) + return cls(relative_filepath=permfile_file_path, rules=rules) + + @classmethod + def from_string(cls, s: str, path: PathLike) -> "SyftPermission": + dicts = yaml.safe_load(s) + return cls.from_rule_dicts(Path(path), dicts) + + @classmethod + def from_bytes(cls, b: bytes, path: PathLike) -> "SyftPermission": + return cls.from_string(b.decode("utf-8"), path) + + 
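As a rough sketch of the rule format these helpers produce and parse (the email and file path below are illustrative, not defaults baked into SyftBox), a syftperm.yaml equivalent to mine_with_public_read could be loaded like this:

# illustrative only:
rules_yaml = """
- path: "**"
  user: alice@example.com
  permissions: [admin]
- path: "**"
  user: "*"
  permissions: [read]
"""
perm = SyftPermission.from_string(rules_yaml, "alice@example.com/public/syftperm.yaml")
print([rule.permission_dict for rule in perm.rules])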
+class ComputedPermission(BaseModel): + user: str + file_path: RelativePath + + perms: dict[PermissionType, bool] = { + PermissionType.READ: False, + PermissionType.CREATE: False, + PermissionType.WRITE: False, + PermissionType.ADMIN: False, + } + + @classmethod + def from_user_rules_and_path(cls, rules: List[PermissionRule], user: str, path: Path) -> "ComputedPermission": + permission = cls(user=user, file_path=path) + for rule in rules: + permission.apply(rule) + return permission + + @property + def path_owner(self) -> str: + """owner of the datasite for this path""" + return str(self.file_path).split("/", 1)[0] + + def has_permission(self, permtype: PermissionType) -> bool: + # exception for owners: they can always read and write to their own datasite + if self.path_owner == self.user: + return True + # exception for admins: they can do anything for this path + if self.perms[PermissionType.ADMIN]: + return True + # exception for permfiles: any modifications to permfiles are only allowed for admins + if self.file_path.name == PERM_FILE and permtype in [PermissionType.CREATE, PermissionType.WRITE]: + return self.perms[PermissionType.ADMIN] + # exception for read/write, they are only allowed if read is also allowed + if permtype in [PermissionType.CREATE, PermissionType.WRITE]: + return self.perms[PermissionType.READ] and self.perms[permtype] + # default case + return self.perms[permtype] + + def user_matches(self, rule: PermissionRule) -> bool: + """Computes if the user in the rule""" + if rule.user == "*": + return True + elif rule.user == self.user: + return True + else: + return False + + def rule_applies_to_path(self, rule: PermissionRule) -> bool: + if rule.has_email_template: + # we fill in a/b/{useremail}/*.txt -> a/b/user@email.org/*.txt + resolved_path_pattern = rule.resolve_path_pattern(self.user) + else: + resolved_path_pattern = rule.path + + # target file path (the one that we want to check permissions for relative to the syftperm file + # we need this because the syftperm file specifies path patterns relative to its own location + + if issubpath(rule.dir_path, self.file_path): + relative_file_path = self.file_path.relative_to(rule.dir_path) + return globmatch(relative_file_path, resolved_path_pattern, flags=wcmatch.glob.GLOBSTAR) + else: + return False + + def is_invalid_permission(self, permtype: PermissionType) -> bool: + return self.file_path.name == PERM_FILE and permtype in [PermissionType.CREATE, PermissionType.WRITE] + + def apply(self, rule: PermissionRule) -> None: + if self.user_matches(rule) and self.rule_applies_to_path(rule): + for permtype in rule.permissions: + if self.is_invalid_permission(permtype): + continue + self.perms[permtype] = rule.allow + + +# migration code, can be deleted after prod migration is done + + +def map_email_to_permissions(json_data: dict) -> dict: + email_permissions = defaultdict(list) + for permission, emails in json_data.items(): + if permission not in ["read", "write", "create", "admin"]: + continue + for email in emails: + if email is None: + continue + email_permissions[email].append(permission) + return email_permissions + + +def convert_permission(old_perm_dict: dict) -> list: + old_perm_dict.pop("filepath", None) # not needed, we use the actual path of the perm file + + user_permissions = map_email_to_permissions(old_perm_dict) + output = [] + + for email in user_permissions: + new_perm_dict = { + "permissions": user_permissions[email], + "path": "**", + "user": (email if email != "GLOBAL" else "*"), # "*" is a wildcard for 
all users + } + output.append(new_perm_dict) + + return output + + +def migrate_permissions(snapshot_folder: Path) -> None: + """ + Migrate all `_.syftperm` files from old format to new format within a given snapshot folder. + This function: + - searches for files with the extension '_.syftperm' in the specified snapshot folder. + - converts their content from JSON to YAML format + - writes the converted content to new files with the name 'syftperm.yaml' in the same path + - deletes the original `_.syftperm` files + + Args: + snapshot_folder (str): The path to the snapshot folder containing the permission files. + Returns: + None + """ + old_syftperm_filename = "_.syftperm" + files = list(snapshot_folder.rglob(old_syftperm_filename)) + for file in files: + old_data = json.loads(file.read_text()) + try: + new_data = convert_permission(old_data) + except Exception as e: + logger.error(f"Failed to migrate old permission: {old_data}, {e} skipping") + continue + new_file_path = file.with_name(file.name.replace(old_syftperm_filename, PERM_FILE)) + logger.info( + f"migrating permission from {file.relative_to(snapshot_folder)} to {new_file_path.relative_to(snapshot_folder)}" + ) + new_file_path.write_text(yaml.dump(new_data)) + file.unlink() diff --git a/packages/syftbox/syftbox/lib/platform.py b/packages/syftbox/syftbox/lib/platform.py new file mode 100644 index 00000000000..b1446465044 --- /dev/null +++ b/packages/syftbox/syftbox/lib/platform.py @@ -0,0 +1,32 @@ +import platform + +__all__ = ["OS_NAME", "OS_VERSION", "OS_ARCH", "PYTHON_VERSION"] + +PYTHON_VERSION = platform.python_version() +UNAME = platform.uname() +OS_NAME = "" +OS_VERSION = "" +OS_ARCH = UNAME.machine +OS_IS_WSL2 = False + +if UNAME.system == "Darwin": + OS_NAME = "macOS" + OS_VERSION = platform.mac_ver()[0] + +elif UNAME.system == "Linux": + import distro + + OS_NAME = distro.name() + OS_VERSION = distro.version(best=True) + OS_IS_WSL2 = "wsl2" in UNAME.release.lower() + + if OS_IS_WSL2: + OS_NAME = "WSL2-" + OS_NAME + +elif UNAME.system == "Windows": + OS_NAME = UNAME.system + OS_VERSION = platform.win32_ver()[0] + +else: + OS_NAME = UNAME.system + OS_VERSION = UNAME.release diff --git a/packages/syftbox/syftbox/lib/profiling.py b/packages/syftbox/syftbox/lib/profiling.py new file mode 100644 index 00000000000..3081ddbf663 --- /dev/null +++ b/packages/syftbox/syftbox/lib/profiling.py @@ -0,0 +1,70 @@ +# stdlib +import contextlib +import os +import signal +import subprocess # nosec +import tempfile +import time +from typing import Callable, Iterator + + +@contextlib.contextmanager +def pyspy() -> Iterator[subprocess.Popen]: + """Profile a block of code using py-spy. Intended for development purposes only. + + Example: + ``` + with pyspy(): + # do some work + a = [i for i in range(1000000)] + ``` + """ + fd, fname = tempfile.mkstemp(".svg") + os.close(fd) + + command = [ + "sudo", + "-S", + "py-spy", + "record", + "-r", + "1000", + "-o", + fname, + "--pid", + str(os.getpid()), + ] + process = subprocess.Popen(command, preexec_fn=os.setsid) # nosec + + start_time = time.time() + yield process + end_time = time.time() + + print(f"Execution time: {end_time - start_time}") + try: + os.killpg(os.getpgid(process.pid), signal.SIGINT) + os.chmod(fname, 0o444) + except Exception as e: + print(f"Error: {e}") + + +class FakeThread: + """Convenience class for profiling code that should be run in a thread. + Easy to swap with Thread when profiling is not needed and we want to run in the main thread. 
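    For example (a sketch; `do_work` stands in for whatever callable you would
    normally pass to threading.Thread):

        worker = FakeThread(target=do_work, args=(42,))
        worker.start()   # runs do_work(42) synchronously in the calling thread
        worker.join()    # no-op, kept so Thread-style call sites keep working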
+ """ + + def __init__(self, target: Callable, args: tuple = (), daemon: bool = True) -> None: + self.target = target + self.args = args + self.daemon = daemon + self.is_alive_flag = False + + def start(self) -> None: + self.is_alive_flag = True + self.target(*self.args) + + def is_alive(self) -> bool: + return self.is_alive_flag + + def join(self) -> None: + pass diff --git a/packages/syftbox/syftbox/lib/types.py b/packages/syftbox/syftbox/lib/types.py new file mode 100644 index 00000000000..135405c1f02 --- /dev/null +++ b/packages/syftbox/syftbox/lib/types.py @@ -0,0 +1,14 @@ +import os +from pathlib import Path +from typing import Iterable, Union + +from typing_extensions import TypeAlias + +__all__ = ["PathLike", "UserLike", "to_path"] + +PathLike: TypeAlias = Union[str, os.PathLike, Path] +UserLike: TypeAlias = Union[str, Iterable[str]] + + +def to_path(path: PathLike) -> Path: + return Path(path).expanduser().resolve() diff --git a/packages/syftbox/syftbox/lib/validators.py b/packages/syftbox/syftbox/lib/validators.py new file mode 100644 index 00000000000..44ed6d4e2f1 --- /dev/null +++ b/packages/syftbox/syftbox/lib/validators.py @@ -0,0 +1,54 @@ +import re +import shutil +import tempfile + +from typing_extensions import Tuple + +from syftbox.lib.types import PathLike, to_path + +DIR_NOT_EMPTY = "Directory is not empty" + + +def is_valid_dir(path: PathLike, check_empty: bool = True, check_writable: bool = True) -> Tuple[bool, str]: + try: + if not path: + return False, "Empty path" + + # Convert to Path object if string + dir_path = to_path(path) + + # Must not be a reserved path + if dir_path.is_reserved(): + return False, "Reserved path" + + if dir_path.exists(): + if not dir_path.is_dir(): + return False, "Path is not a directory" + + if check_empty and any(dir_path.iterdir()): + return False, DIR_NOT_EMPTY + elif check_writable: + # Try to create a temporary file to test write permissions on parent + try: + dir_path.mkdir(parents=True, exist_ok=True) + testfile = tempfile.TemporaryFile(dir=dir_path) + testfile.close() + shutil.rmtree(dir_path) + except Exception as e: + return False, str(e) + + # all checks passed + return True, "" + except Exception as e: + return False, str(e) + + +def is_valid_email(email: str) -> bool: + # Define a regex pattern for a valid email + # from: https://stackoverflow.com/a/21608610 + email_regex = r"\w+([-+.']\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*" + + # Use the match method to check if the email fits the pattern + if re.match(email_regex, email): + return True + return False diff --git a/packages/syftbox/syftbox/lib/version_utils.py b/packages/syftbox/syftbox/lib/version_utils.py new file mode 100644 index 00000000000..d17b7241fd0 --- /dev/null +++ b/packages/syftbox/syftbox/lib/version_utils.py @@ -0,0 +1,22 @@ +import json +from os.path import dirname +from typing import Dict, List, Union + +from packaging import version + + +def get_version_dict() -> Dict[str, List[str]]: + with open(dirname(dirname(__file__)) + "/server2client_version.json") as json_file: + version_matrix = json.load(json_file) + return version_matrix + + +def get_range_for_version(version_string: str) -> Union[List[str], str]: + version_matrix = get_version_dict() + if version_string not in version_matrix: + if version.parse(version_string) > max([version.parse(version_range) for version_range in version_matrix]): + return "New version, we can't know if it is compatible, please upgrade if errors occur." 
+ else: + return "Old version not in the compatibility matrix, proceed at your own discretion." + else: + return version_matrix[version_string] diff --git a/packages/syftbox/syftbox/lib/workspace.py b/packages/syftbox/syftbox/lib/workspace.py new file mode 100644 index 00000000000..d3b6b7fbf61 --- /dev/null +++ b/packages/syftbox/syftbox/lib/workspace.py @@ -0,0 +1,40 @@ +from syftbox.lib.types import PathLike, to_path + + +class SyftWorkspace: + """ + A Syft workspace is a directory structure for everything stored by the client. + Each workspace is expected to be unique for a client. + + ```txt + data_dir/ + ├── apis/ <-- installed apis + ├── plugins/ <-- plugins data + └── datasites/ <-- synced datasites + ├── user1@openmined.org/ + │ └── api_data/ + └── user2@openmined.org/ + └── api_data/ + ``` + """ + + def __init__(self, data_dir: PathLike): + self.data_dir = to_path(data_dir) + """Path to the root directory of the workspace.""" + + # datasites dir + self.datasites = self.data_dir / "datasites" + """Path to the directory containing datasites.""" + + # plugins dir + """Path to the directory containing plugins.""" + self.plugins = self.data_dir / "plugins" + + # apps/apis dir + self.apps = self.data_dir / "apis" + """Path to the directory containing apps.""" + + def mkdirs(self) -> None: + self.datasites.mkdir(parents=True, exist_ok=True) + self.plugins.mkdir(parents=True, exist_ok=True) + self.apps.mkdir(parents=True, exist_ok=True) diff --git a/packages/syftbox/syftbox/main.py b/packages/syftbox/syftbox/main.py new file mode 100644 index 00000000000..26e9ca7eb6e --- /dev/null +++ b/packages/syftbox/syftbox/main.py @@ -0,0 +1,56 @@ +from pathlib import Path +from typing import Annotated, Optional + +from rich import print as rprint +from typer import Exit, Option, Typer + +from syftbox import __version__ +from syftbox.app.cli import app as app_cli +from syftbox.client.cli import app as client_cli +from syftbox.server.cli import app as server_cli +from syftbox.tui.cli import app as tui_cli + +app = Typer( + name="SyftBox", + help="SyftBox CLI", + no_args_is_help=True, + pretty_exceptions_enable=False, + context_settings={"help_option_names": ["-h", "--help"]}, +) + +CONFIG_OPTS = Option("-c", "--config", "--config_path", help="Path to the SyftBox config") + + +@app.command(rich_help_panel="General Options") +def version() -> None: + """Print SyftBox version""" + + print(__version__) + + +@app.command(rich_help_panel="General Options") +def debug(config_path: Annotated[Optional[Path], CONFIG_OPTS] = None) -> None: + """Print SyftBox debug data""" + + # lazy import to improve CLI startup performance + from syftbox.lib.debug import debug_report_yaml + + try: + rprint(debug_report_yaml(config_path)) + except Exception as e: + rprint(f"[red]Error[/red]: {e}") + raise Exit(1) + + +app.add_typer(client_cli, name="client") +app.add_typer(server_cli, name="server") +app.add_typer(app_cli, name="app") +app.add_typer(tui_cli, name="tui") + + +def main() -> None: + app() + + +if __name__ == "__main__": + main() diff --git a/packages/syftbox/syftbox/server/__init__.py b/packages/syftbox/syftbox/server/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/server/analytics.py b/packages/syftbox/syftbox/server/analytics.py new file mode 100644 index 00000000000..ce78e337282 --- /dev/null +++ b/packages/syftbox/syftbox/server/analytics.py @@ -0,0 +1,113 @@ +import json +import zipfile +from datetime import datetime, timezone +from pathlib import Path 
+from typing import Any, Optional + +from loguru import logger +from pydantic import BaseModel + +from syftbox.server.db.file_store import FileStore +from syftbox.server.logger import analytics_logger + + +def to_jsonable_dict(obj: dict) -> dict: + """ + Convert log record to a JSON serializable dictionary. + """ + result: dict = {} + for key, value in obj.items(): + if isinstance(value, dict): + result[key] = to_jsonable_dict(value) + elif isinstance(value, BaseModel): + result[key] = value.model_dump(mode="json") + elif isinstance(value, datetime): + result[key] = value.isoformat() + elif isinstance(value, Path): + result[key] = value.as_posix() + elif isinstance(value, (str, int, float, bool, type(None))): + result[key] = value + else: + result[key] = str(value) + + return result + + +def log_analytics_event( + endpoint: str, + email: Optional[str], + message: str = "", + **kwargs: Any, +) -> None: + """ + Log an event to the analytics logger. + """ + email = email or "anonymous" + + try: + extra = { + "email": email, + "endpoint": endpoint, + "timestamp": datetime.now(timezone.utc), + **kwargs, + } + extra = to_jsonable_dict(extra) + analytics_logger.bind(**extra).info(message) + except Exception as e: + logger.error(f"Failed to log event: {e}") + + +def log_file_change_event( + endpoint: str, + email: str, + relative_path: Path, + file_store: FileStore, +) -> None: + """ + Log a file change event to the analytics logger. + """ + try: + metadata = file_store.get_metadata(relative_path, email, skip_permission_check=True) + log_analytics_event( + endpoint=endpoint, + email=email, + file_metadata=metadata, + ) + except Exception as e: + logger.error(f"Failed to log file change event: {e}") + + +def _parse_analytics_file(file_path: Path) -> list[dict]: + if file_path.suffix == ".zip": + with zipfile.ZipFile(file_path, "r") as zfile: + with zfile.open(zfile.namelist()[0]) as f: + content = f.read().decode("utf-8") + else: + with open(file_path, "r") as f: + content = f.read() + + events = [] + for line in content.split("\n"): + if not line: + continue + + try: + event = json.loads(line) + event["timestamp"] = datetime.fromisoformat(event["timestamp"]) + events.append(event) + except Exception as e: + logger.error(f"Failed to parse event: {e}") + + return events + + +def parse_analytics_logs(logs_dir: Path) -> list[dict]: + # Load current log and all archived logs + log_files = list(logs_dir.glob("analytics.log")) + list(logs_dir.glob("analytics*.zip")) + logger.info(f"Loading logs from: {[f.as_posix() for f in log_files]}") + events = [] + for log_file in log_files: + events.extend(_parse_analytics_file(log_file)) + + events = sorted(events, key=lambda x: x["timestamp"]) + return events diff --git a/packages/syftbox/syftbox/server/api/__init__.py b/packages/syftbox/syftbox/server/api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/server/api/v1/__init__.py b/packages/syftbox/syftbox/server/api/v1/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/server/api/v1/main_router.py b/packages/syftbox/syftbox/server/api/v1/main_router.py new file mode 100644 index 00000000000..854f39734df --- /dev/null +++ b/packages/syftbox/syftbox/server/api/v1/main_router.py @@ -0,0 +1,206 @@ +import os +from datetime import datetime +from pathlib import Path + +from fastapi import APIRouter, Depends, Request +from fastapi.responses import ( + FileResponse, + HTMLResponse, + JSONResponse, + PlainTextResponse, 
+ RedirectResponse, +) +from jinja2 import Template +from loguru import logger +from typing_extensions import Any, Union + +from syftbox import __version__ +from syftbox.lib.lib import ( + get_datasites, +) +from syftbox.server.analytics import log_analytics_event +from syftbox.server.settings import ServerSettings, get_server_settings +from syftbox.server.users.auth import get_current_user + +current_dir = Path(__file__).parent + +ascii_art = rf""" + ____ __ _ ____ +/ ___| _ _ / _| |_| __ ) _____ __ +\___ \| | | | |_| __| _ \ / _ \ \/ / + ___) | |_| | _| |_| |_) | (_) > < +|____/ \__, |_| \__|____/ \___/_/\_\ + |___/ {__version__:>17} + + +# Install Syftbox (MacOS and Linux) +curl -LsSf [[SERVER_URL]]/install.sh | sh + +# Run the client +syftbox client +""" + +main_router = APIRouter() + + +@main_router.get("/", response_class=PlainTextResponse) +async def get_ascii_art(request: Request) -> str: + return ascii_art.replace("[[SERVER_URL]]", str(request.url).rstrip("/")) + + +def get_file_list(directory: Union[str, Path] = ".") -> list[dict[str, Any]]: + # TODO rewrite with pathlib + directory = str(directory) + + file_list = [] + for item in os.listdir(directory): + item_path = os.path.join(directory, item) + is_dir = os.path.isdir(item_path) + size = os.path.getsize(item_path) if not is_dir else "-" + mod_time = datetime.fromtimestamp(os.path.getmtime(item_path)).strftime("%Y-%m-%d %H:%M:%S") + + file_list.append({"name": item, "is_dir": is_dir, "size": size, "mod_time": mod_time}) + + return sorted(file_list, key=lambda x: (not x["is_dir"], x["name"].lower())) + + +@main_router.get("/datasites", response_class=HTMLResponse) +async def list_datasites( + request: Request, server_settings: ServerSettings = Depends(get_server_settings) +) -> HTMLResponse: + files = get_file_list(server_settings.snapshot_folder) + template_path = current_dir.parent.parent / "templates" / "datasites.html" + html = "" + with open(template_path) as f: + html = f.read() + template = Template(html) + + html_content = template.render( + { + "request": request, + "files": files, + "current_path": "/", + } + ) + return html_content + + +@main_router.get("/datasites/{path:path}", response_class=HTMLResponse) +async def browse_datasite( + request: Request, + path: str, + server_settings: ServerSettings = Depends(get_server_settings), +) -> HTMLResponse: + if path == "": # Check if path is empty (meaning "/datasites/") + return RedirectResponse(url="/datasites") + + snapshot_folder = str(server_settings.snapshot_folder) + datasite_part = path.split("/")[0] + datasites = get_datasites(snapshot_folder) + if datasite_part in datasites: + slug = path[len(datasite_part) :] + if slug == "": + slug = "/" + datasite_path = os.path.join(snapshot_folder, datasite_part) + datasite_public = datasite_path + "/public" + if not os.path.exists(datasite_public): + return "No public datasite" + + slug_path = os.path.abspath(datasite_public + slug) + if os.path.exists(slug_path) and os.path.isfile(slug_path): + if slug_path.endswith(".html") or slug_path.endswith(".htm"): + return FileResponse(slug_path) + elif slug_path.endswith(".md"): + with open(slug_path, "r") as file: + content = file.read() + return PlainTextResponse(content) + elif slug_path.endswith(".json") or slug_path.endswith(".jsonl"): + return FileResponse(slug_path, media_type="application/json") + elif slug_path.endswith(".yaml") or slug_path.endswith(".yml"): + return FileResponse(slug_path, media_type="application/x-yaml") + elif slug_path.endswith(".log") or 
slug_path.endswith(".txt"): + return FileResponse(slug_path, media_type="text/plain") + elif slug_path.endswith(".py"): + return FileResponse(slug_path, media_type="text/plain") + else: + return FileResponse(slug_path, media_type="application/octet-stream") + + # show directory + if not path.endswith("/") and os.path.exists(path + "/") and os.path.isdir(path + "/"): + return RedirectResponse(url=f"{path}/") + + index_file = os.path.abspath(slug_path + "/" + "index.html") + if os.path.exists(index_file): + with open(index_file, "r") as file: + html_content = file.read() + return HTMLResponse(content=html_content, status_code=200) + + if os.path.isdir(slug_path): + files = get_file_list(slug_path) + template_path = current_dir.parent.parent / "templates" / "folder.html" + html = "" + with open(template_path) as f: + html = f.read() + template = Template(html) + html_content = template.render( + { + "datasite": datasite_part, + "request": request, + "files": files, + "current_path": path, + } + ) + return html_content + else: + # return 404 + message_404 = f"No file or directory found at /datasites/{datasite_part}{slug}" + return HTMLResponse(content=message_404, status_code=404) + + return f"No Datasite {datasite_part} exists" + + +@main_router.post("/register") +async def register( + request: Request, + server_settings: ServerSettings = Depends(get_server_settings), +) -> JSONResponse: + data = await request.json() + email = data["email"] + + # create datasite snapshot folder + datasite_folder = Path(server_settings.snapshot_folder) / email + os.makedirs(datasite_folder, exist_ok=True) + + logger.info(f"> {email} registering, snapshot folder: {datasite_folder}") + log_analytics_event("/register", email) + + return JSONResponse({"status": "success", "token": "0"}, status_code=200) + + +@main_router.post("/log_event") +async def log_event( + request: Request, + email: str = Depends(get_current_user), +) -> JSONResponse: + data = await request.json() + log_analytics_event("/log_event", email, **data) + return JSONResponse({"status": "success"}, status_code=200) + + +@main_router.get("/install.sh") +async def install() -> FileResponse: + install_script = current_dir.parent.parent / "templates" / "install.sh" + return FileResponse(install_script, media_type="text/plain") + + +@main_router.get("/icon.png") +async def icon() -> FileResponse: + icon_path = current_dir.parent.parent / "assets" / "icon.png" + return FileResponse(icon_path, media_type="image/png") + + +@main_router.get("/info") +async def info() -> dict: + return { + "version": __version__, + } diff --git a/packages/syftbox/syftbox/server/api/v1/sync_router.py b/packages/syftbox/syftbox/server/api/v1/sync_router.py new file mode 100644 index 00000000000..4f04a3dfa0d --- /dev/null +++ b/packages/syftbox/syftbox/server/api/v1/sync_router.py @@ -0,0 +1,226 @@ +import base64 +import hashlib +import sqlite3 +from collections import defaultdict +from typing import Iterator, List + +import msgpack +import py_fast_rsync +from fastapi import APIRouter, Depends, HTTPException, Request, UploadFile +from fastapi.responses import FileResponse, JSONResponse, StreamingResponse +from loguru import logger +from typing_extensions import Generator + +from syftbox.lib.permissions import PermissionType +from syftbox.server.analytics import log_file_change_event +from syftbox.server.db.db import get_all_datasites +from syftbox.server.db.file_store import FileStore +from syftbox.server.db.schema import get_db +from syftbox.server.settings import 
ServerSettings, get_server_settings +from syftbox.server.users.auth import get_current_user + +from ...models.sync_models import ( + ApplyDiffRequest, + ApplyDiffResponse, + BatchFileRequest, + DiffRequest, + DiffResponse, + FileMetadata, + FileMetadataRequest, + FileRequest, + RelativePath, +) + + +def get_db_connection(request: Request) -> Generator[sqlite3.Connection, None, None]: + conn = get_db(request.state.server_settings.file_db_path) + yield conn + conn.close() + + +def get_file_store(request: Request) -> Generator[FileStore, None, None]: + store = FileStore( + server_settings=request.state.server_settings, + ) + yield store + + +router = APIRouter(prefix="/sync", tags=["sync"]) + + +@router.post("/get_diff", response_model=DiffResponse) +def get_diff( + req: DiffRequest, + file_store: FileStore = Depends(get_file_store), + email: str = Depends(get_current_user), +) -> DiffResponse: + try: + file = file_store.get(req.path, email) + except ValueError: + raise HTTPException(status_code=404, detail="file not found") + diff = py_fast_rsync.diff(req.signature_bytes, file.data) + diff_bytes = base64.b85encode(diff).decode("utf-8") + return DiffResponse( + path=file.metadata.path.as_posix(), + diff=diff_bytes, + hash=file.metadata.hash, + ) + + +@router.post("/datasite_states", response_model=dict[str, list[FileMetadata]]) +def get_datasite_states( + file_store: FileStore = Depends(get_file_store), + email: str = Depends(get_current_user), +) -> dict[str, list[FileMetadata]]: + file_metadata = file_store.list_for_user(email=email) + + datasite_states = defaultdict(list) + for metadata in file_metadata: + user_email = metadata.path.parts[0] + datasite_states[user_email].append(metadata) + + return dict(datasite_states) + + +@router.post("/dir_state", response_model=list[FileMetadata]) +def dir_state( + dir: RelativePath, + file_store: FileStore = Depends(get_file_store), + server_settings: ServerSettings = Depends(get_server_settings), + email: str = Depends(get_current_user), +) -> list[FileMetadata]: + return file_store.list_for_user(email=email, path=dir) + + +@router.post("/get_metadata", response_model=FileMetadata) +def get_metadata( + req: FileMetadataRequest, + file_store: FileStore = Depends(get_file_store), + email: str = Depends(get_current_user), +) -> FileMetadata: + try: + metadata = file_store.get_metadata(req.path, email) + return metadata + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + + +@router.post("/apply_diff", response_model=ApplyDiffResponse) +def apply_diffs( + req: ApplyDiffRequest, + file_store: FileStore = Depends(get_file_store), + email: str = Depends(get_current_user), +) -> ApplyDiffResponse: + try: + file = file_store.get(req.path, email) + except ValueError: + raise HTTPException(status_code=404, detail="file not found") + + result = py_fast_rsync.apply(file.data, req.diff_bytes) + new_hash = hashlib.sha256(result).hexdigest() + + if new_hash != req.expected_hash: + raise HTTPException(status_code=400, detail="hash mismatch, skipped writing") + + file_store.put(req.path, result, user=email, check_permission=PermissionType.WRITE) + + log_file_change_event( + "/sync/apply_diff", + email=email, + relative_path=req.path, + file_store=file_store, + ) + + return ApplyDiffResponse(path=req.path, current_hash=new_hash, previous_hash=file.metadata.hash) + + +@router.post("/delete", response_class=JSONResponse) +def delete_file( + req: FileRequest, + file_store: FileStore = Depends(get_file_store), + email: str = 
Depends(get_current_user), +) -> JSONResponse: + log_file_change_event( + "/sync/delete", + email=email, + relative_path=req.path, + file_store=file_store, + ) + + file_store.delete(req.path, email) + return JSONResponse(content={"status": "success"}) + + +@router.post("/create", response_class=JSONResponse) +def create_file( + file: UploadFile, + file_store: FileStore = Depends(get_file_store), + email: str = Depends(get_current_user), +) -> JSONResponse: + relative_path = RelativePath(file.filename) + if "%" in file.filename: + raise HTTPException(status_code=400, detail="filename cannot contain '%'") + + if file_store.exists(relative_path): + raise HTTPException(status_code=400, detail="file already exists") + + contents = file.file.read() + + file_store.put( + relative_path, + contents, + user=email, + check_permission=PermissionType.CREATE, + ) + + log_file_change_event( + "/sync/create", + email=email, + relative_path=relative_path, + file_store=file_store, + ) + return JSONResponse(content={"status": "success"}) + + +@router.post("/download", response_class=FileResponse) +def download_file( + req: FileRequest, + file_store: FileStore = Depends(get_file_store), + email: str = Depends(get_current_user), +) -> FileResponse: + try: + abs_path = file_store.get(req.path, email).absolute_path + return FileResponse(abs_path) + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + + +@router.post("/datasites", response_model=list[str]) +def get_datasites( + conn: sqlite3.Connection = Depends(get_db_connection), + email: str = Depends(get_current_user), +) -> list[str]: + return get_all_datasites(conn) + + +def file_streamer(files: List[RelativePath], file_store: FileStore, email: str) -> Iterator[bytes]: + for path in files: + try: + file = file_store.get(path, email) + metadata = { + "path": file.metadata.path.as_posix(), + "content": file.data, + } + yield msgpack.packb(metadata) + except ValueError: + logger.warning(f"File not found: {path}") + continue + + +@router.post("/download_bulk") +def get_files( + req: BatchFileRequest, + file_store: FileStore = Depends(get_file_store), + email: str = Depends(get_current_user), +) -> StreamingResponse: + return StreamingResponse(file_streamer(req.paths, file_store, email), media_type="application/x-ndjson") diff --git a/packages/syftbox/syftbox/server/assets/icon.png b/packages/syftbox/syftbox/server/assets/icon.png new file mode 100644 index 00000000000..178d4af35f7 Binary files /dev/null and b/packages/syftbox/syftbox/server/assets/icon.png differ diff --git a/packages/syftbox/syftbox/server/cli.py b/packages/syftbox/syftbox/server/cli.py new file mode 100644 index 00000000000..ca9b120f96b --- /dev/null +++ b/packages/syftbox/syftbox/server/cli.py @@ -0,0 +1,49 @@ +from loguru import logger +from typer import Exit, Option, Typer + +from syftbox.server.migrations import run_migrations +from syftbox.server.settings import ServerSettings + +app = Typer( + name="SyftBox Server", + no_args_is_help=True, + pretty_exceptions_enable=False, + add_completion=False, + context_settings={"help_option_names": ["-h", "--help"]}, +) + + +# Define options separately to keep the function signature clean +# fmt: off +SERVER_PANEL = "Server Options" +SSL_PANEL = "SSL Options" + +EXAMPLE_OPTS = Option( + "-v", "--verbose", + is_flag=True, + rich_help_panel=SERVER_PANEL, + help="Enable verbose mode", +) +# fmt: on + + +@app.command() +def migrate() -> None: + """Run database migrations""" + + try: + settings = ServerSettings() + 
run_migrations(settings) + logger.info("Migrations completed successfully") + except Exception as e: + logger.error("Migrations failed") + logger.exception(e) + raise Exit(1) + + +def main() -> None: + app() + + +if __name__ == "__main__": + main() diff --git a/packages/syftbox/syftbox/server/db/__init__.py b/packages/syftbox/syftbox/server/db/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/server/db/db.py b/packages/syftbox/syftbox/server/db/db.py new file mode 100644 index 00000000000..b476722409c --- /dev/null +++ b/packages/syftbox/syftbox/server/db/db.py @@ -0,0 +1,334 @@ +import sqlite3 +from pathlib import Path +from typing import Optional + +from syftbox.lib.permissions import PermissionRule, SyftPermission +from syftbox.server.models.sync_models import FileMetadata, RelativePath + + +def save_file_metadata(conn: sqlite3.Connection, metadata: FileMetadata) -> None: + # Insert the metadata into the database or update if a conflict on 'path' occurs + conn.execute( + """ + INSERT INTO file_metadata (path, datasite, hash, signature, file_size, last_modified) + VALUES (?, ?, ?, ?, ?, ?) + ON CONFLICT(path) DO UPDATE SET + datasite = excluded.datasite, + hash = excluded.hash, + signature = excluded.signature, + file_size = excluded.file_size, + last_modified = excluded.last_modified + """, + ( + str(metadata.path), + metadata.datasite, + metadata.hash, + metadata.signature, + metadata.file_size, + metadata.last_modified.isoformat(), + ), + ) + + +def delete_file_metadata(conn: sqlite3.Connection, path: str) -> None: + cur = conn.execute("DELETE FROM file_metadata WHERE path = ?", (path,)) + # get number of changes + if cur.rowcount != 1: + raise ValueError(f"Failed to delete metadata for {path}.") + + +def get_all_metadata(conn: sqlite3.Connection, path_like: Optional[str] = None) -> list[FileMetadata]: + query = "SELECT * FROM file_metadata" + params: tuple = () + + if path_like: + if "%" in path_like: + raise ValueError("we don't support % in paths") + path_like = path_like + "%" + escaped_path = path_like.replace("_", "\\_") + query += " WHERE path LIKE ? ESCAPE '\\' " + params = (escaped_path,) + + cursor = conn.execute(query, params) + # would be nice to paginate + return [FileMetadata.from_row(row) for row in cursor] + + +def get_one_metadata(conn: sqlite3.Connection, path: str) -> FileMetadata: + cursor = conn.execute("SELECT * FROM file_metadata WHERE path = ?", (path,)) + rows = cursor.fetchall() + if len(rows) == 0 or len(rows) > 1: + raise ValueError(f"Expected 1 metadata entry for {path}, got {len(rows)}") + row = rows[0] + return FileMetadata.from_row(row) + + +def get_all_datasites(conn: sqlite3.Connection) -> list[str]: + # INSTR(path, '/'): Finds the position of the first slash in the path. + cursor = conn.execute( + """SELECT DISTINCT SUBSTR(path, 1, INSTR(path, '/') - 1) AS root_folder + FROM file_metadata; + """ + ) + return [row[0] for row in cursor if row[0]] + + +def query_rules_for_permfile(cursor: sqlite3.Cursor, file: SyftPermission) -> list[sqlite3.Row]: + cursor.execute( + """ + SELECT * FROM rules WHERE permfile_path = ? 
ORDER BY priority + """, + (file.relative_filepath.as_posix(),), + ) + return cursor.fetchall() + + +def get_rules_for_permfile(connection: sqlite3.Connection, file: SyftPermission) -> list[PermissionRule]: + cursor = connection.cursor() + return [PermissionRule.from_db_row(row) for row in query_rules_for_permfile(cursor, file)] + + +def get_all_files(cursor: sqlite3.Cursor) -> list: + cursor.execute( + """ + SELECT * FROM file_metadata + """ + ) + return cursor.fetchall() + + +def get_all_files_under_syftperm(cursor: sqlite3.Cursor, permfile: SyftPermission) -> list[tuple[int, FileMetadata]]: + cursor.execute( + """ + SELECT * FROM file_metadata WHERE path LIKE ? + """, + (str(permfile.dir_path) + "/%",), + ) + return [ + ( + row["id"], + FileMetadata.from_row(row), + ) + for row in cursor.fetchall() + ] + + +def get_rules_for_path(connection: sqlite3.Connection, path: Path) -> list[PermissionRule]: + parents = path.parents + placeholders = ",".join("?" * len(parents)) + cursor = connection.cursor() + cursor.execute( + """ + SELECT * FROM rules WHERE permfile_dir in ({}) + """.format(placeholders), + [x.as_posix() for x in parents], + ) + return [PermissionRule.from_db_row(row) for row in cursor.fetchall()] + + +def set_rules_for_permfile(connection: sqlite3.Connection, file: SyftPermission) -> None: + """ + Atomically set the rules for a permission file. Basically its just a write operation, but + we also make sure we delete the rules that are no longer in the file. + """ + try: + cursor = connection.cursor() + + cursor.execute( + """ + DELETE FROM rules + WHERE permfile_path = ? + """, + (str(file.relative_filepath),), + ) + + # TODO + files_under_dir = get_all_files_under_syftperm(cursor, file) + + rule2files = [] + + for rule in file.rules: + for _id, file_in_dir in files_under_dir: + match, match_for_email = rule.filepath_matches_rule_path(file_in_dir.path) + if match: + rule2files.append([str(rule.permfile_path), rule.priority, _id, match_for_email]) + + rule_rows = [tuple(rule.to_db_row().values()) for rule in file.rules] + + cursor.executemany( + """ + INSERT INTO rules ( + permfile_path, permfile_dir, permfile_depth, priority, path, user, + can_read, can_create, can_write, admin, disallow + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + ON CONFLICT(permfile_path, priority) DO UPDATE SET + path = excluded.path, + user = excluded.user, + can_read = excluded.can_read, + can_create = excluded.can_create, + can_write = excluded.can_write, + admin = excluded.admin, + disallow = excluded.disallow + """, + rule_rows, + ) + + cursor.executemany( + """ + INSERT INTO rule_files (permfile_path, priority, file_id, match_for_email) VALUES (?, ?, ?, ?) 
+ ON CONFLICT(permfile_path, priority, file_id) DO UPDATE SET match_for_email = excluded.match_for_email + """, + rule2files, + ) + + except Exception as e: + connection.rollback() + raise e + + +def get_metadata_for_file(connection: sqlite3.Connection, path: Path) -> tuple[int, FileMetadata]: + cursor = connection.cursor() + cursor.execute("SELECT * FROM file_metadata WHERE path = ?", (str(path),)) + row = cursor.fetchone() + return ( + row["id"], + FileMetadata.from_row(row), + ) + + +def link_existing_rules_to_file(connection: sqlite3.Connection, path: Path) -> None: + # 1 find all rules in that branch of the tree + # 2 check which rules apply to the file + # 3 link them + + perm_rules = get_rules_for_path(connection, path) + + rule2files = [] + _id, _ = get_metadata_for_file(connection, path) + + for rule in perm_rules: + match, match_for_email = rule.filepath_matches_rule_path(path) + if match: + rule2files.append([str(rule.permfile_path), rule.priority, _id, match_for_email]) + cursor = connection.cursor() + cursor.executemany( + """ + INSERT INTO rule_files (permfile_path, priority, file_id, match_for_email) VALUES (?, ?, ?, ?) + ON CONFLICT(permfile_path, priority, file_id) DO UPDATE SET + match_for_email = excluded.match_for_email + """, + rule2files, + ) + + +def get_read_permissions_for_user( + connection: sqlite3.Connection, user: str, path_like: Optional[str] = None +) -> list[sqlite3.Row]: + """ + Get all files that the user has read access to. First we get all files, then we do a subquery for every file. + For every file, we join all the rules that apply to it for this user. As an intermediate result, we get all those + rules, which we reduce into a single value. To do this, we add extra columns to the table indicating rule priority. + For all rules, later rules overwrite earlier ones, so you only need to check the + last rule for a permission. By overwriting, we mean that if a disallow comes after an allow, you have no read + permission. The default is no read permission. + + We use these row orderings to find if the last read is either a disallow or allow + + We do the same for admin permissions. We then compute two things: + - The admin "bit" (indicating whether a user has admin permissions) + - The read "bit" (indicating whether a user has read permissions) + + These bits are combined with a final OR operation. + """ + + cursor = connection.cursor() + + params: list = [] + path_condition = "" + if path_like: + if "%" in path_like: + raise ValueError("we don't support % in paths") + path_like = path_like + "%" + escaped_path = path_like.replace("_", "\\_") + path_condition = "AND f.path LIKE ? ESCAPE '\\'" + params.append(escaped_path) + + query = """ + -- First get all rules that apply to this user, including wildcards and email matches + WITH + user_matching_rules AS ( + SELECT r.*, rf.file_id, rf.match_for_email + FROM rules r + JOIN rule_files rf + ON r.permfile_path = rf.permfile_path + AND r.priority = rf.priority + WHERE r.user = ? -- Direct user match + OR r.user = '*' -- Wildcard match + OR rf.match_for_email = ? 
-- Email pattern match + ), + + -- Then calculate effective permissions by taking the highest priority rule + -- Higher depth * 1000 + priority means more specific rules take precedence + -- Caveat: using 1000 means we can't have more than 1000 rules in the same permission file + permission_priorities AS ( + SELECT + file_id, + MAX(CASE WHEN can_read AND NOT disallow THEN permfile_depth * 1000 + priority ELSE 0 END) as read_allow_prio, + MAX(CASE WHEN can_read AND disallow THEN permfile_depth * 1000 + priority ELSE 0 END) as read_deny_prio, + MAX(CASE WHEN admin AND NOT disallow THEN permfile_depth * 1000 + priority ELSE 0 END) as admin_allow_prio, + MAX(CASE WHEN admin AND disallow THEN permfile_depth * 1000 + priority ELSE 0 END) as admin_deny_prio + FROM user_matching_rules + GROUP BY file_id + ), + + final_permissions AS ( + SELECT + file_id, + (read_allow_prio > read_deny_prio) as can_read, + (admin_allow_prio > admin_deny_prio) as is_admin + FROM permission_priorities + ) + + -- User has access if any of the following are true: + -- 1. They have an allowing rule that overrides any denying rules `can_read` + -- 2. They have admin access that overrides admin denials `is_admin` + -- 3. They own the datasite `f.datasite = user` + SELECT + f.path, + f.hash, + f.signature, + f.file_size, + f.last_modified, + COALESCE( + can_read OR is_admin, + FALSE + ) OR f.datasite = ? AS read_permission + FROM file_metadata f + LEFT JOIN final_permissions fp ON f.id = fp.file_id + WHERE 1=1 {path_condition} + """.format(path_condition=path_condition) + + # Add parameters in order: 2 user checks + 1 datasite check + optional path + query_params = [user, user, user] + params + + return cursor.execute(query, query_params).fetchall() + + +def print_table(connection: sqlite3.Connection, table: str) -> None: + """util function for debugging""" + cursor = connection.cursor() + cursor.execute(f"SELECT * FROM {table}") + rows = cursor.fetchall() + for i, row in enumerate(rows): + if i == 0: + print(" | ".join(dict(row).keys())) + print(" | ".join(str(x) for x in list(dict(row).values()))) + + +def get_filemetadata_with_read_access( + connection: sqlite3.Connection, user: str, path: Optional[RelativePath] = None +) -> list[FileMetadata]: + string_path = str(path) if path else None + rows = get_read_permissions_for_user(connection, user, string_path) + res = [FileMetadata.from_row(row) for row in rows if row["read_permission"]] + return res diff --git a/packages/syftbox/syftbox/server/db/file_store.py b/packages/syftbox/syftbox/server/db/file_store.py new file mode 100644 index 00000000000..8a22b160005 --- /dev/null +++ b/packages/syftbox/syftbox/server/db/file_store.py @@ -0,0 +1,201 @@ +import sqlite3 +from pathlib import Path +from typing import List, Optional + +import yaml +from fastapi import HTTPException +from pydantic import BaseModel + +from syftbox.lib.constants import PERM_FILE +from syftbox.lib.hash import hash_file +from syftbox.lib.permissions import ( + ComputedPermission, + PermissionRule, + PermissionType, + SyftPermission, +) +from syftbox.server.db import db +from syftbox.server.db.db import ( + get_rules_for_path, + link_existing_rules_to_file, + set_rules_for_permfile, +) +from syftbox.server.db.schema import get_db +from syftbox.server.models.sync_models import AbsolutePath, FileMetadata, RelativePath +from syftbox.server.settings import ServerSettings + + +class SyftFile(BaseModel): + metadata: FileMetadata + data: bytes + absolute_path: AbsolutePath + + +def 
computed_permission_for_user_and_path(connection: sqlite3.Connection, user: str, path: Path) -> ComputedPermission: + rules: List[PermissionRule] = get_rules_for_path(connection, path) + return ComputedPermission.from_user_rules_and_path(rules=rules, user=user, path=path) + + +class FileStore: + def __init__(self, server_settings: ServerSettings) -> None: + self.server_settings = server_settings + + @property + def db_path(self) -> AbsolutePath: + return self.server_settings.file_db_path + + def delete(self, path: RelativePath, user: str, skip_permission_check: bool = False) -> None: + with get_db(self.db_path) as conn: + if path.name.endswith(PERM_FILE) and not skip_permission_check: + # check admin permission + computed_perm = computed_permission_for_user_and_path(conn, user, path) + if not computed_perm.has_permission(PermissionType.ADMIN): + raise HTTPException( + status_code=403, + detail=f"User {user} does not have permission to edit syftperm file for {path}", + ) + + computed_perm = computed_permission_for_user_and_path(conn, user, path) + if not computed_perm.has_permission(PermissionType.WRITE): + raise HTTPException( + status_code=403, + detail=f"User {user} does not have write permission for {path}", + ) + + cursor = conn.cursor() + cursor.execute("BEGIN IMMEDIATE;") + try: + db.delete_file_metadata(conn, str(path)) + except ValueError: + pass + + if path.name.endswith(PERM_FILE): + # todo: implement delete for permfile + permfile = SyftPermission(relative_filepath=path, rules=[]) + set_rules_for_permfile(conn, permfile) + + abs_path = self.server_settings.snapshot_folder / path + abs_path.unlink(missing_ok=True) + conn.commit() + cursor.close() + + def get(self, path: RelativePath, user: str) -> SyftFile: + with get_db(self.db_path) as conn: + computed_perm = computed_permission_for_user_and_path(conn, user, path) + if not computed_perm.has_permission(PermissionType.READ): + raise HTTPException( + status_code=403, + detail=f"User {user} does not have read permission for {path}", + ) + + metadata = db.get_one_metadata(conn, path=str(path)) + abs_path = self.server_settings.snapshot_folder / metadata.path + + if not Path(abs_path).exists(): + self.delete(Path(metadata.path.as_posix()), user) + raise ValueError("File not found") + return SyftFile( + metadata=metadata, + data=self._read_bytes(abs_path), + absolute_path=abs_path, + ) + + def exists(self, path: RelativePath) -> bool: + with get_db(self.db_path) as conn: + try: + # we are skipping permission check here for now + db.get_one_metadata(conn, path=str(path)) + return True + except ValueError: + return False + + def get_metadata(self, path: RelativePath, user: str, skip_permission_check: bool = False) -> FileMetadata: + with get_db(self.db_path) as conn: + if not skip_permission_check: + computed_perm = computed_permission_for_user_and_path(conn, user, path) + if not computed_perm.has_permission(PermissionType.READ): + raise HTTPException( + status_code=403, + detail=f"User {user} does not have read permission for {path}", + ) + metadata = db.get_one_metadata(conn, path=str(path)) + return metadata + + def _read_bytes(self, path: AbsolutePath) -> bytes: + with open(path, "rb") as f: + return f.read() + + def put( + self, + path: Path, + contents: bytes, + user: str, + check_permission: Optional[PermissionType] = None, + skip_permission_check: bool = False, + ) -> None: + with get_db(self.db_path) as conn: + if path.name.endswith(PERM_FILE) and not skip_permission_check: + # check admin permission + computed_perm = 
computed_permission_for_user_and_path(conn, user, path) + if not computed_perm.has_permission(PermissionType.ADMIN): + raise HTTPException( + status_code=403, + detail=f"User {user} does not have permission to edit syftperm file for {path}", + ) + + if not skip_permission_check: + computed_perm = computed_permission_for_user_and_path(conn, user, path) + if check_permission not in [ + PermissionType.WRITE, + PermissionType.CREATE, + ]: + raise ValueError(f"check_permission must be either WRITE or CREATE, got {check_permission}") + + if not computed_perm.has_permission(check_permission): + raise HTTPException( + status_code=403, + detail=f"User {user} does not have write permission for {path}", + ) + + cursor = conn.cursor() + cursor.execute("BEGIN IMMEDIATE;") + abs_path = self.server_settings.snapshot_folder / path + abs_path.parent.mkdir(exist_ok=True, parents=True) + + abs_path.write_bytes(contents) + + # TODO: this is currently not atomic (writing the file and adding rows to db) + # but it's also somewhat challenging to do so. Especially date modified is tricky. + # Because: if we insert first and write the file later, the date modified is not known yet. + # If we write the file first and then insert, we might have to revert the file, but we need to + # set it to the old date modified. + metadata = hash_file(abs_path, root_dir=self.server_settings.snapshot_folder) + if metadata is None: + raise HTTPException( + status_code=500, + detail=f"Failed to hash file {abs_path}", + ) + db.save_file_metadata(conn, metadata) + if path.name.endswith(PERM_FILE): + try: + permfile = SyftPermission.from_bytes(contents, path) + except (yaml.YAMLError, ValueError): + raise HTTPException( + status_code=400, + detail="invalid syftpermission contents, skipped writing", + ) + set_rules_for_permfile(conn, permfile) + + link_existing_rules_to_file(conn, path) + + conn.commit() + cursor.close() + + def list_for_user( + self, + *, + email: str, + path: Optional[RelativePath] = None, + ) -> list[FileMetadata]: + with get_db(self.db_path) as conn: + return db.get_filemetadata_with_read_access(conn, email, path) diff --git a/packages/syftbox/syftbox/server/db/schema.py b/packages/syftbox/syftbox/server/db/schema.py new file mode 100644 index 00000000000..13458b1868f --- /dev/null +++ b/packages/syftbox/syftbox/server/db/schema.py @@ -0,0 +1,67 @@ +import sqlite3 +from pathlib import Path + +from syftbox.lib.types import PathLike + + +# @contextlib.contextmanager +def get_db(path: PathLike) -> sqlite3.Connection: + conn = sqlite3.connect(Path(path), check_same_thread=False) + + with conn: + conn.execute("PRAGMA cache_size=10000;") + conn.execute("PRAGMA synchronous=OFF;") + conn.execute("PRAGMA journal_mode=WAL;") + conn.execute("PRAGMA busy_timeout=5000;") + conn.execute("PRAGMA foreign_keys = ON;") + conn.row_factory = sqlite3.Row + + # Create the table if it doesn't exist + conn.execute( + """ + CREATE TABLE IF NOT EXISTS file_metadata ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + datasite TEXT NOT NULL, + path TEXT NOT NULL UNIQUE, + hash TEXT NOT NULL, + signature TEXT NOT NULL, + file_size INTEGER NOT NULL, + last_modified TEXT NOT NULL ) + """ + ) + # TODO: migrate file_metadata id? 
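+        # Schema overview (for orientation): file_metadata stores one row per synced file,
+        # keyed by its relative path. The rules table below holds the parsed rules of every
+        # permission file, keyed by (permfile_path, priority), and rule_files links each rule
+        # to the concrete files it matches, so read/admin checks can be resolved with joins
+        # (see get_read_permissions_for_user in db.py). rule_files rows are removed
+        # automatically when their rule or file row is deleted (ON DELETE CASCADE).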
+ + # Create a table for storing file information + conn.execute( + """ + CREATE TABLE IF NOT EXISTS rules ( + permfile_path varchar(1000) NOT NULL, + permfile_dir varchar(1000) NOT NULL, + permfile_depth INTEGER NOT NULL, + priority INTEGER NOT NULL, + path varchar(1000) NOT NULL, + user varchar(1000) NOT NULL, + can_read bool NOT NULL, + can_create bool NOT NULL, + can_write bool NOT NULL, + admin bool NOT NULL, + disallow bool NOT NULL, + PRIMARY KEY (permfile_path, priority) + ) + """ + ) + + conn.execute( + """ + CREATE TABLE IF NOT EXISTS rule_files ( + permfile_path varchar(1000) NOT NULL, + priority INTEGER NOT NULL, + file_id INTEGER NOT NULL, + match_for_email varchar(1000), + PRIMARY KEY (permfile_path, priority, file_id), + FOREIGN KEY (permfile_path, priority) REFERENCES rules(permfile_path, priority) ON DELETE CASCADE, + FOREIGN KEY (file_id) REFERENCES file_metadata(id) ON DELETE CASCADE + ); + """ + ) + return conn diff --git a/packages/syftbox/syftbox/server/emails/models.py b/packages/syftbox/syftbox/server/emails/models.py new file mode 100644 index 00000000000..c0396d85935 --- /dev/null +++ b/packages/syftbox/syftbox/server/emails/models.py @@ -0,0 +1,19 @@ +from typing import Union + +from pydantic import BaseModel, EmailStr, NameEmail + +FROM_EMAIL = "SyftBox " + + +class SendEmailRequest(BaseModel): + to: Union[EmailStr, NameEmail] + subject: str + html: str + + def json_for_request(self) -> dict: + return { + "personalizations": [{"to": [{"email": self.to}]}], + "from": {"email": FROM_EMAIL}, + "subject": self.subject, + "content": [{"type": "text/html", "value": self.html}], + } diff --git a/packages/syftbox/syftbox/server/emails/router.py b/packages/syftbox/syftbox/server/emails/router.py new file mode 100644 index 00000000000..ae158a62a78 --- /dev/null +++ b/packages/syftbox/syftbox/server/emails/router.py @@ -0,0 +1,38 @@ +import httpx +from fastapi import APIRouter, Depends, HTTPException, status +from loguru import logger + +from syftbox.lib.constants import SENDGRID_API_URL +from syftbox.server.emails.models import SendEmailRequest +from syftbox.server.settings import ServerSettings, get_server_settings + +router = APIRouter(prefix="/emails", tags=["email"]) + + +@router.post("/") +async def send_email( + email_request: SendEmailRequest, + server_settings: ServerSettings = Depends(get_server_settings), +) -> bool: + # TODO add some safety mechanisms (rate limiting, authorization, etc) + if not server_settings.sendgrid_secret: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Email service API key is not set", + ) + + async with httpx.AsyncClient() as client: + response = await client.post( + SENDGRID_API_URL, + headers={ + "Authorization": f"Bearer {server_settings.sendgrid_secret.get_secret_value()}", + "Content-Type": "application/json", + }, + json=email_request.json_for_request(), + ) + if response.is_success: + logger.info(f"Email sent successfully to '{email_request.to}'") + return True + else: + logger.error(f"Failed to send email: {response.text}") + return False diff --git a/packages/syftbox/syftbox/server/logger.py b/packages/syftbox/syftbox/server/logger.py new file mode 100644 index 00000000000..f1545649552 --- /dev/null +++ b/packages/syftbox/syftbox/server/logger.py @@ -0,0 +1,68 @@ +import json +import logging +import sys +from pathlib import Path +from typing import Union + +from loguru import logger + +custom_format = "{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {message}" + +ANALYTICS_EVENT = 
"analytics_event" + + +def _default_logger_filter(record: dict) -> bool: + return record["extra"].get("event_type") != ANALYTICS_EVENT + + +def _analytics_logger_filter(record: dict) -> bool: + return record["extra"].get("event_type") == ANALYTICS_EVENT + + +analytics_logger = logger.bind(event_type=ANALYTICS_EVENT) + + +def analytics_formatter(record: dict) -> str: + serialized = json.dumps(record["extra"]) + record["extra"]["serialized"] = serialized + return "{extra[serialized]}\n" + + +def setup_logger(logs_folder: Path, level: Union[str, int] = "DEBUG") -> None: + logs_folder.mkdir(parents=True, exist_ok=True) + + logger.remove() + + # Standard server logs + logger.add( + level=level, + sink=sys.stderr, + diagnose=False, + backtrace=False, + format=custom_format, + filter=_default_logger_filter, + ) + + logger.add( + logs_folder / "server.log", + rotation="100 MB", # Rotate after the log file reaches 100 MB + retention=2, # Keep only the last 1 log files + compression="zip", # Usually, 10x reduction in file size + filter=_default_logger_filter, + format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} | {message}", # matches the log format printed in the console + ) + + # Dedicated logger for analytics events + # example usage: user_event_logger.info("User logged in") + logger.add( + logs_folder / "analytics.log", + rotation="100 MB", + compression="zip", + format=analytics_formatter, + filter=_analytics_logger_filter, + ) + + uvicorn_access = logging.getLogger("uvicorn.access") + uvicorn_access.disabled = True + + logger.info(f"Logger set up. Saving logs to {logs_folder}") diff --git a/packages/syftbox/syftbox/server/middleware.py b/packages/syftbox/syftbox/server/middleware.py new file mode 100644 index 00000000000..7b8c4bcdf65 --- /dev/null +++ b/packages/syftbox/syftbox/server/middleware.py @@ -0,0 +1,82 @@ +import time +from typing import Callable + +from fastapi import Request, Response, status +from loguru import logger +from packaging import version +from starlette.middleware.base import BaseHTTPMiddleware + +from syftbox import __version__ +from syftbox.lib.http import ( + HEADER_SYFTBOX_VERSION, +) +from syftbox.lib.version_utils import get_range_for_version + + +class LoguruMiddleware(BaseHTTPMiddleware): + async def dispatch(self, request: Request, call_next: Callable) -> Response: + start_time = time.time() + response = await call_next(request) + duration = time.time() - start_time + logger.info(f"{request.method} {request.url.path} {response.status_code} {duration:.2f}s") + + return response + + +class RequestSizeLimitMiddleware(BaseHTTPMiddleware): + async def dispatch(self, request: Request, call_next: Callable) -> Response: + request_size_limit_in_mb = request.state.server_settings.request_size_limit_in_mb + request_size_limit_in_bytes = request_size_limit_in_mb * 1024 * 1024 + + content_length = request.headers.get("content-length") + + # If content-length header is present, check it first + if content_length: + if int(content_length) > request_size_limit_in_bytes: + return Response( + status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE, + content=f"Request Denied. Message size is greater than {request_size_limit_in_mb} MB", + ) + + # If content-length header is not present, read the request body and check its size. + # TODO: This is susceptible to DoS attacks like Slowloris and body flooding. We should check + # the request stream and terminate early as soon as the size exceeds the limit. 
+ request_body = await request.body() + if len(request_body) > request_size_limit_in_bytes: + return Response( + status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE, + content=f"Request Denied. Message size is greater than {request_size_limit_in_mb} MB", + ) + + response = await call_next(request) + return response + + +class VersionCheckMiddleware(BaseHTTPMiddleware): + async def dispatch(self, request: Request, call_next: Callable) -> Response: + user_agent = request.headers.get("User-Agent") + if user_agent.startswith("SyftBox"): + client_version = request.headers.get(HEADER_SYFTBOX_VERSION) + + if not client_version: + return Response( + status_code=status.HTTP_400_BAD_REQUEST, + content="Client version not provided. Please include the 'Version' header.", + ) + + version_range = get_range_for_version(client_version) + + if isinstance(version_range, str): + logger.info(version_range) + else: + lower_bound_version = version_range[0] + + if version.parse(client_version) < version.parse(lower_bound_version): + return Response( + status_code=status.HTTP_426_UPGRADE_REQUIRED, + content=f"Client version is too old. Minimum version required is {lower_bound_version}", + ) + + response = await call_next(request) + response.headers[HEADER_SYFTBOX_VERSION] = __version__ + return response diff --git a/packages/syftbox/syftbox/server/migrations.py b/packages/syftbox/syftbox/server/migrations.py new file mode 100644 index 00000000000..34e05e62313 --- /dev/null +++ b/packages/syftbox/syftbox/server/migrations.py @@ -0,0 +1,70 @@ +import os +from pathlib import Path + +import yaml +from loguru import logger +from packaging import version + +from syftbox import __version__ +from syftbox.lib.constants import PERM_FILE +from syftbox.lib.hash import collect_files, hash_files +from syftbox.lib.permissions import SyftPermission, migrate_permissions +from syftbox.server.db import db +from syftbox.server.db.schema import get_db +from syftbox.server.settings import ServerSettings + + +def create_folders(folders: list[Path]) -> None: + for folder in folders: + if not os.path.exists(folder): + os.makedirs(folder, exist_ok=True) + + +def run_migrations(settings: ServerSettings) -> None: + logger.info("Creating folders") + create_folders(settings.folders) + logger.info("Initializing DB") + init_db(settings) + + +def init_db(settings: ServerSettings) -> None: + # remove this after the upcoming release + if version.parse(__version__) > version.parse("0.2.10"): + # Delete existing DB to avoid conflicts + db_path = settings.file_db_path.absolute() + if db_path.exists(): + db_path.unlink() + migrate_permissions(settings.snapshot_folder) + + # might take very long as snapshot folder grows + logger.info(f"> Collecting Files from {settings.snapshot_folder.absolute()}") + files = collect_files(settings.snapshot_folder.absolute()) + logger.info("> Hashing files") + metadata = hash_files(files, settings.snapshot_folder) + logger.info(f"> Updating file hashes at {settings.file_db_path.absolute()}") + con = get_db(settings.file_db_path.absolute()) + cur = con.cursor() + for m in metadata: + db.save_file_metadata(con, m) + + # remove files that are not in the snapshot folder + all_metadata = db.get_all_metadata(con) + for m in all_metadata: + abs_path = settings.snapshot_folder / m.path + if not abs_path.exists(): + logger.info(f"{m.path} not found in {settings.snapshot_folder}, deleting from db") + db.delete_file_metadata(con, m.path.as_posix()) + + # fill the permission tables + for file in 
settings.snapshot_folder.rglob(PERM_FILE): + content = file.read_text() + rule_dicts = yaml.safe_load(content) + perm_file = SyftPermission.from_rule_dicts( + permfile_file_path=file.relative_to(settings.snapshot_folder), rule_dicts=rule_dicts + ) + db.set_rules_for_permfile(con, perm_file) + db.link_existing_rules_to_file(con, file.relative_to(settings.snapshot_folder)) + + cur.close() + con.commit() + con.close() diff --git a/packages/syftbox/syftbox/server/models/__init__.py b/packages/syftbox/syftbox/server/models/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/server/models/sync_models.py b/packages/syftbox/syftbox/server/models/sync_models.py new file mode 100644 index 00000000000..92a06c7a2cb --- /dev/null +++ b/packages/syftbox/syftbox/server/models/sync_models.py @@ -0,0 +1,132 @@ +import base64 +import enum +import sqlite3 +from datetime import datetime +from pathlib import Path +from typing import Annotated, Any, Optional + +from pydantic import AfterValidator, BaseModel, Field + + +def should_be_relative(v: Path) -> Path: + if v.is_absolute(): + raise ValueError("path must be relative") + return v + + +def should_be_absolute(v: Path) -> Path: + if not v.is_absolute(): + raise ValueError("path must be absolute") + return v + + +RelativePath = Annotated[Path, AfterValidator(should_be_relative)] + +AbsolutePath = Annotated[Path, AfterValidator(should_be_absolute)] + + +class DiffRequest(BaseModel): + path: RelativePath + signature: str + + @property + def signature_bytes(self) -> bytes: + return base64.b85decode(self.signature) + + +class DiffResponse(BaseModel): + path: RelativePath + diff: str + hash: str + + @property + def diff_bytes(self) -> bytes: + return base64.b85decode(self.diff) + + +class SignatureError(str, enum.Enum): + FILE_NOT_FOUND = "FILE_NOT_FOUND" + FILE_NOT_WRITEABLE = "FILE_NOT_WRITEABLE" + FILE_NOT_READABLE = "FILE_NOT_READABLE" + NOT_A_FILE = "NOT_A_FILE" + + +class SignatureResponse(BaseModel): + path: RelativePath + signature: Optional[str] = None + error: Optional[SignatureError] = None + + +class FileMetadataRequest(BaseModel): + path: RelativePath = Field(description="Path to search for files") + + +class FileRequest(BaseModel): + path: RelativePath = Field(description="Path to search for files, uses SQL LIKE syntax") + + +class BatchFileRequest(BaseModel): + paths: list[RelativePath] + + +class ApplyDiffRequest(BaseModel): + path: RelativePath + diff: str + expected_hash: str + + @property + def diff_bytes(self) -> bytes: + return base64.b85decode(self.diff) + + +class ApplyDiffResponse(BaseModel): + path: RelativePath + current_hash: str + previous_hash: str + + +class FileMetadata(BaseModel): + path: Path + hash: str + signature: str + file_size: int = 0 + last_modified: datetime + + @property + def datasite(self) -> str: + return self.path.parts[0] + + @staticmethod + def from_row(row: sqlite3.Row) -> "FileMetadata": + return FileMetadata( + path=Path(row["path"]), + hash=row["hash"], + signature=row["signature"], + file_size=row["file_size"], + last_modified=row["last_modified"], + ) + + @property + def signature_bytes(self) -> bytes: + return base64.b85decode(self.signature) + + @property + def hash_bytes(self) -> bytes: + return base64.b85decode(self.hash) + + @property + def datasite_name(self) -> str: + return self.path.parts[0] + + def __eq__(self, value: Any) -> bool: + if not isinstance(value, FileMetadata): + return False + return self.path == value.path and self.hash == value.hash + + 
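+# Illustrative example (hypothetical values): a file_metadata row with
+# path "alice@example.com/public/data.csv" is loaded via FileMetadata.from_row(row);
+# its datasite property is then the first path component, i.e. "alice@example.com".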
+class SyncLog(BaseModel): + path: Path + method: str # pull or push + status: str # success or failure + timestamp: datetime + requesting_user: str diff --git a/packages/syftbox/syftbox/server/server.py b/packages/syftbox/syftbox/server/server.py new file mode 100644 index 00000000000..4d4766c2d76 --- /dev/null +++ b/packages/syftbox/syftbox/server/server.py @@ -0,0 +1,81 @@ +import contextlib +import platform +from pathlib import Path +from typing import Any, Dict + +from fastapi import FastAPI +from fastapi.middleware.gzip import GZipMiddleware +from loguru import logger +from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor +from opentelemetry.instrumentation.sqlite3 import SQLite3Instrumentor +from typing_extensions import AsyncGenerator, Optional + +from syftbox import __version__ +from syftbox.server.api.v1.main_router import main_router +from syftbox.server.api.v1.sync_router import router as sync_router +from syftbox.server.emails.router import router as emails_router +from syftbox.server.logger import setup_logger +from syftbox.server.middleware import LoguruMiddleware, RequestSizeLimitMiddleware, VersionCheckMiddleware +from syftbox.server.settings import ServerSettings +from syftbox.server.telemetry import ( + server_request_hook, + setup_otel_exporter, +) +from syftbox.server.users.router import router as users_router + +current_dir = Path(__file__).parent + + +def _server_setup(app: FastAPI, settings: ServerSettings) -> dict[str, Any]: + setup_logger(logs_folder=settings.logs_folder) + + logger.info(f"Starting SyftBox Server {__version__}. Python {platform.python_version()}") + logger.info(settings) + + if settings.otel_enabled: + logger.info("OTel Exporter is ENABLED") + setup_otel_exporter(settings.env.value) + else: + logger.info("OTel Exporter is DISABLED") + + return { + "server_settings": settings, + } + + +def _server_shutdown(app: FastAPI) -> None: + logger.info("Shutting down server") + + +def create_server(settings: Optional[ServerSettings] = None) -> FastAPI: + settings = settings or ServerSettings() + + @contextlib.asynccontextmanager + async def lifespan(app: FastAPI) -> AsyncGenerator[Dict[str, Any], None]: + state = _server_setup(app, settings) + yield state + _server_shutdown(app) + + app = FastAPI(lifespan=lifespan) + app.include_router(main_router) + app.include_router(emails_router) + app.include_router(sync_router) + app.include_router(users_router) + + app.add_middleware(GZipMiddleware, minimum_size=1000, compresslevel=5) + app.add_middleware(LoguruMiddleware) + app.add_middleware(RequestSizeLimitMiddleware) + app.add_middleware(VersionCheckMiddleware) + + FastAPIInstrumentor.instrument_app( + app, + http_capture_headers_server_request=[".*"], + server_request_hook=server_request_hook, + ) + SQLite3Instrumentor().instrument() + + return app + + +# Global instance for backwards compatibility +app = create_server() diff --git a/packages/syftbox/syftbox/server/settings.py b/packages/syftbox/syftbox/server/settings.py new file mode 100644 index 00000000000..52bf13bd5fc --- /dev/null +++ b/packages/syftbox/syftbox/server/settings.py @@ -0,0 +1,117 @@ +from datetime import timedelta +from enum import Enum +from pathlib import Path +from typing import Optional + +from fastapi import Request +from pydantic import Field, SecretStr, field_validator, model_validator +from pydantic_settings import BaseSettings, SettingsConfigDict +from typing_extensions import Self, Union + +DEV_JWT_SECRET = "changethis" + + +class ServerEnv(Enum): + STAGE = "STAGE" + 
PROD = "PROD" + DEV = "DEV" + + +class ServerSettings(BaseSettings): + """ + Reads the server settings from the environment variables, using the prefix SYFTBOX_. + + example: + `export SYFTBOX_DATA_FOLDER=data/data_folder` + will set the server_settings.data_folder to `data/data_folder` + + see: https://docs.pydantic.dev/latest/concepts/pydantic_settings/#parsing-environment-variable-values + """ + + model_config = SettingsConfigDict(env_prefix="SYFTBOX_", env_file="server.env", extra="ignore") + + env: ServerEnv = ServerEnv.DEV + """Server environment""" + + sendgrid_secret: Optional[SecretStr] = None + """API key for sendgrid email service""" + + data_folder: Path = Field(default=Path("data").resolve()) + """Absolute path to the server data folder""" + + jwt_secret: SecretStr = DEV_JWT_SECRET + """Secret key for the JWT tokens. Dev secret is not allowed in production""" + + jwt_email_token_exp: timedelta = timedelta(hours=1) + """Expiration time for the email token""" + + jwt_access_token_exp: Optional[timedelta] = None + """Expiration time for the access token""" + + jwt_algorithm: str = "HS256" + """Algorithm used for the JWT tokens""" + + auth_enabled: bool = False + """Enable/Disable authentication""" + + otel_enabled: bool = False + """Enable/Disable OpenTelemetry tracing""" + + request_size_limit_in_mb: int = 10 + """Request size limit in MB""" + + @field_validator("data_folder", mode="after") + def data_folder_abs(cls, v: Path) -> Path: + return Path(v).expanduser().resolve() + + @model_validator(mode="after") + def auth_secret_not_empty(self) -> "ServerSettings": + secret_val = self.jwt_secret.get_secret_value() + secret_val_is_set = len(secret_val) > 0 and secret_val != DEV_JWT_SECRET + + if self.auth_enabled and not secret_val_is_set: + raise ValueError("auth is enabled, but jwt_secret is not set") + + return self + + @model_validator(mode="after") + def sendgrid_secret_not_empty(self) -> "ServerSettings": + if self.auth_enabled and self.sendgrid_secret is None: + raise ValueError("auth is enabled, but no sendgrid_secret is defined") + + return self + + @property + def folders(self) -> list[Path]: + return [self.data_folder, self.snapshot_folder] + + @property + def snapshot_folder(self) -> Path: + return self.data_folder / "snapshot" + + @property + def logs_folder(self) -> Path: + return self.data_folder / "logs" + + @property + def user_file_path(self) -> Path: + return self.data_folder / "users.json" + + @classmethod + def from_data_folder(cls, data_folder: Union[Path, str]) -> Self: + data_folder = Path(data_folder) + return cls( + data_folder=data_folder, + ) + + @property + def file_db_path(self) -> Path: + return self.data_folder / "file.db" + + def read(self, path: Path) -> bytes: + with open(self.snapshot_folder / path, "rb") as f: + return f.read() + + +def get_server_settings(request: Request) -> ServerSettings: + return request.state.server_settings diff --git a/packages/syftbox/syftbox/server/telemetry.py b/packages/syftbox/syftbox/server/telemetry.py new file mode 100644 index 00000000000..c15993e39e9 --- /dev/null +++ b/packages/syftbox/syftbox/server/telemetry.py @@ -0,0 +1,61 @@ +from loguru import logger +from opentelemetry import trace +from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk.resources import OTELResourceDetector, Resource +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from opentelemetry.trace import Span +from 
typing_extensions import Any + +from syftbox import __version__ +from syftbox.lib.http import ( + HEADER_OS_ARCH, + HEADER_OS_NAME, + HEADER_OS_VERSION, + HEADER_SYFTBOX_PYTHON, + HEADER_SYFTBOX_USER, + HEADER_SYFTBOX_VERSION, +) + +OTEL_ATTR_CLIENT_VERSION = "syftbox.client.version" +OTEL_ATTR_CLIENT_PYTHON = "syftbox.client.python" +OTEL_ATTR_CLIENT_USER = "syftbox.client.user" +OTEL_ATTR_CLIENT_USER_LOC = "syftbox.client.user.location" +OTEL_ATTR_CLIENT_OS_NAME = "syftbox.client.os.name" +OTEL_ATTR_CLIENT_OS_VER = "syftbox.client.os.version" +OTEL_ATTR_CLIENT_OS_ARCH = "syftbox.client.os.arch" +OTEL_ATTR_SERVER_VERSION = "syftbox.server.version" + + +def setup_otel_exporter(env: str) -> None: + exporter = OTLPSpanExporter() + span_processor = BatchSpanProcessor(exporter) + + resource = Resource( + { + "service.name": "syftbox-server", + "deployment.environment": env.lower(), + OTEL_ATTR_SERVER_VERSION: __version__, + } + ) + resource = resource.merge(OTELResourceDetector().detect()) + + tracer_provider = TracerProvider(resource=resource) + tracer_provider.add_span_processor(span_processor) + trace.set_tracer_provider(tracer_provider) + + logger.info(f"OTEL Exporter: {exporter._endpoint}") + logger.info(f"OTEL Resource: {tracer_provider.resource.attributes}") + + +def server_request_hook(span: Span, scope: dict[str, Any]) -> None: + if not span.is_recording(): + return + # headers k/v pairs are bytes + headers: dict[bytes, bytes] = dict(scope.get("headers", {})) + span.set_attribute(OTEL_ATTR_CLIENT_VERSION, headers.get(HEADER_SYFTBOX_VERSION.encode(), "")) + span.set_attribute(OTEL_ATTR_CLIENT_PYTHON, headers.get(HEADER_SYFTBOX_PYTHON.encode(), "")) + span.set_attribute(OTEL_ATTR_CLIENT_USER, headers.get(HEADER_SYFTBOX_USER.encode(), "")) + span.set_attribute(OTEL_ATTR_CLIENT_OS_NAME, headers.get(HEADER_OS_NAME.encode(), "")) + span.set_attribute(OTEL_ATTR_CLIENT_OS_VER, headers.get(HEADER_OS_VERSION.encode(), "")) + span.set_attribute(OTEL_ATTR_CLIENT_OS_ARCH, headers.get(HEADER_OS_ARCH.encode(), "")) diff --git a/packages/syftbox/syftbox/server/templates/datasites.html b/packages/syftbox/syftbox/server/templates/datasites.html new file mode 100644 index 00000000000..050df92882a --- /dev/null +++ b/packages/syftbox/syftbox/server/templates/datasites.html @@ -0,0 +1,60 @@ + + + + + + Index of {{ current_path.rstrip('/') }} + + + +

    Index of {{ current_path.rstrip('/') }}

    + + + + + + + + + + + + {% for file in files %} + + + + + + {% endfor %} +
    NameLast modifiedSize
    📁 Parent Directory--
    + {% if file.is_dir %} + 📁 {{ file.name }}/ + {% else %} + 📄 {{ file.name }} + {% endif %} + {{ file.mod_time }} + {% if not file.is_dir %}{{ file.size }} bytes{% else %}-{% endif %} +
    + + diff --git a/packages/syftbox/syftbox/server/templates/folder.html b/packages/syftbox/syftbox/server/templates/folder.html new file mode 100644 index 00000000000..db982be967e --- /dev/null +++ b/packages/syftbox/syftbox/server/templates/folder.html @@ -0,0 +1,60 @@ + + + + + + Index of {{ current_path.rstrip('/') }} + + + +

    Index of {{ current_path.rstrip('/') }}

    + + + + + + + + + + + + {% for file in files %} + + + + + + {% endfor %} +
    NameLast modifiedSize
    📁 Parent Directory--
    + {% if file.is_dir %} + 📁 {{ file.name }} + {% else %} + 📄 {{ file.name }} + {% endif %} + {{ file.mod_time }} + {% if not file.is_dir %}{{ file.size }} bytes{% else %}-{% endif %} +
    + + diff --git a/packages/syftbox/syftbox/server/templates/index.html b/packages/syftbox/syftbox/server/templates/index.html new file mode 100644 index 00000000000..13678670ce9 --- /dev/null +++ b/packages/syftbox/syftbox/server/templates/index.html @@ -0,0 +1,131 @@ + + + + + + Syft Cache User Management + + + +

    Syft Cache User Management

    + +

    Add User

    + + + + + + +

    Users

    +
      + {% for user in users %} +
    • + {{ user }} + + +
    • + {% endfor %} +
    + + + + + + diff --git a/packages/syftbox/syftbox/server/templates/install.sh b/packages/syftbox/syftbox/server/templates/install.sh new file mode 100755 index 00000000000..08928cceb3f --- /dev/null +++ b/packages/syftbox/syftbox/server/templates/install.sh @@ -0,0 +1,305 @@ +#!/bin/sh + +set -e + +# --no-prompt => disables the run client prompt +ASK_RUN_CLIENT=1 + +# --run => disables the prompt & runs the client +RUN_CLIENT=0 + +# --system-python => MANAGED_PYTHON=0 +# --managed-python => MANAGED_PYTHON=1 +MANAGED_PYTHON=1 +MANAGED_PYTHON_VERSION=${MANAGED_PYTHON_VERSION:-"3.12"} + +# min system python version if not using managed python +REQ_PYTHON_MAJOR="3" +REQ_PYTHON_MINOR="9" + +red='\033[1;31m' +yellow='\033[0;33m' +cyan='\033[0;36m' +green='\033[1;32m' +reset='\033[0m' + +err() { + echo "${red}ERROR${reset}: $1" >&2 + exit 1 +} + +info() { + echo "${cyan}$1${reset}" +} + +warn() { + echo "${yellow}$1${reset}" +} + +success() { + echo "${green}$1${reset}" +} + +check_cmd() { + command -v "$1" > /dev/null 2>&1 + return $? +} + +need_cmd() { + if ! check_cmd "$1" + then err "need '$1' (command not found)" + fi +} + +need_python() { + # check if either python3 or python is available + if ! check_cmd python && ! check_cmd python3 + then err "need 'python' or 'python3' (command not found)" + fi +} + +downloader() { + if check_cmd curl + then curl -sSfL "$1" + elif check_cmd wget + then wget -qO- "$1" + else need_cmd "curl or wget" + fi +} + +get_python_command() { + need_python + + # check if either python3 or python is available + # and echo the python one that works + if check_cmd python3 + then echo "python3" + elif check_cmd python + then echo "python" + fi +} + +check_home_path() { + # check if a path exists as ~/path or $HOME/path + if echo $PATH | grep -q "$HOME/$1" || echo $PATH | grep -q "~/$1" + then return 0 + else return 1 + fi +} + +write_path() { + local _path_contents="$1" + local _profile_path="$2" + # if profile exists, add the export + if [ -f "$_profile_path" ] + then + echo "export PATH=\"$_path_contents\$PATH\"" >> $_profile_path; + fi +} + +patch_path() { + local _path_expr="" + + if ! check_home_path ".cargo/bin" + then _path_expr="$HOME/.cargo/bin:" + fi + + if ! check_home_path ".local/bin" + then _path_expr="${_path_expr}$HOME/.local/bin:" + fi + + # reload env vars + export PATH="$_path_expr$PATH" + + # write to profile files + write_path $_path_expr "$HOME/.profile" + write_path $_path_expr "$HOME/.zshrc" + write_path $_path_expr "$HOME/.bashrc" + write_path $_path_expr "$HOME/.bash_profile" +} + +download_uv() { + if ! check_cmd "uv" + then downloader https://astral.sh/uv/install.sh | env INSTALLER_PRINT_QUIET=1 sh + fi +} + +install_uv() { + download_uv + patch_path +} + +install_syftbox() { + need_cmd "uv" + + python_flag="" + py_detail="" + + if [ $MANAGED_PYTHON -eq 1 ] + then + python_flag="--python $MANAGED_PYTHON_VERSION" + py_detail="managed Python $MANAGED_PYTHON_VERSION" + else + py=$(get_python_command) + python_flag="--python-preference system" + py_detail="system $($py -V)" + fi + + info "Installing SyftBox (with $py_detail)" + + exit=$(uv tool install $python_flag -Uq --force syftbox) + if ! 
$(exit) + then err "Failed to install SyftBox" + fi +} + +check_python_version() { + # Try python3, if it exists; otherwise, fall back to python + py=$(get_python_command) + + # Check if py is empty (meaning no python was found) + if [ -z "$py" ]; then + return 1 + fi + + # Check if the python version is >= 3.9 + py_valid_ver=$($py -c "import sys; print((sys.version_info[:2] >= ($REQ_PYTHON_MAJOR, $REQ_PYTHON_MINOR)))") + + # Check if Python version not is greater than or equal to 3.9 + if [ "$py_valid_ver" = "False" ]; then + err "SyftBox requires Python $REQ_PYTHON_MAJOR.$REQ_PYTHON_MINOR or higher, found $($py -V). Please upgrade your Python installation and retry." + fi +} + +show_debug_and_exit() { + need_python + py=$(get_python_command) + echo + warn "SYFTBOX INSTALLER DEBUG REPORT" + echo + info System + echo "SHELL : $SHELL" + echo "LANG : $LANG" + echo "LD_LIBRARY_PATH : $LD_LIBRARY_PATH" + echo "PATH : $PATH" + echo "PWD : $(pwd)" + echo + info "Python" + echo "Alias : $py" + echo "Version : $($py -V)" + echo "which : $(which $py)" + echo "python env vars" + env | grep -E "(PYTHON|PIP)" || echo "-" + echo + info "Python Virtual Environment" + env | grep -E "(VIRTUAL_ENV|VENV)" || echo "-" + echo + info "Conda Environment" + env | grep CONDA || echo "-" + echo + info "Python Runtime" + $py -c ' +import sys; \ +print("sys.version :", sys.version); \ +print("sys.executable :", sys.executable); \ +print("sys.prefix :", sys.prefix); \ +print("sys.base_prefix :", sys.base_prefix); \ +print("sys.path"); \ +[print("-", p) for p in sys.path];' + exit 0 +} + +pre_install() { + + # check if python version is >= 3.9, if uv is not managing python + if [ $MANAGED_PYTHON -eq 0 ] + then check_python_version + fi + + # if you see this message, you're good to go + echo " + ____ __ _ ____ +/ ___| _ _ / _| |_| __ ) _____ __ +\___ \| | | | |_| __| _ \ / _ \ \/ / + ___) | |_| | _| |_| |_) | (_) > < +|____/ \__, |_| \__|____/ \___/_/\_\\ + |___/ +" +} + +run_client() { + echo + success "Starting SyftBox client..." + exec ~/.local/bin/syftbox client < /dev/tty +} + +prompt_run_client() { + # prompt if they want to start the client + echo + prompt=$(echo "${yellow}Start the client now? [y/n] ${reset}") + while [ "$start_client" != "y" ] && [ "$start_client" != "Y" ] && [ "$start_client" != "n" ] && [ "$start_client" != "N" ] + do + read -p "$prompt" start_client < /dev/tty + done + + if [ "$start_client" = "y" ] || [ "$start_client" = "Y" ] + then run_client + else prompt_restart_shell + fi +} + +prompt_restart_shell() { + echo + warn "RESTART your shell or RELOAD shell profile" + echo " \`source ~/.zshrc\` (for zsh)" + echo " \`source ~/.bash_profile\` (for bash)" + echo " \`source ~/.profile\` (for sh)" + + success "\nAfter reloading, start the client" + echo " \`syftbox client\`" +} + +post_install() { + if [ $RUN_CLIENT -eq 1 ] + then run_client + elif [ $ASK_RUN_CLIENT -eq 1 ] + then prompt_run_client + else prompt_restart_shell + fi +} + +do_install() { + for arg in "$@"; do + case "$arg" in + -r|--run|run) + RUN_CLIENT=1 + ;; + -n|--no-prompt|no-prompt) + ASK_RUN_CLIENT=0 + ;; + --system-python|system-python) + MANAGED_PYTHON=0 + ;; + --managed-python|managed-python) + MANAGED_PYTHON=1 + ;; + --debug|debug) + show_debug_and_exit + ;; + *) + ;; + esac + done + + pre_install + + info "Installing uv" + install_uv + + install_syftbox + + success "Installation completed!" 
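+    # post_install then either starts the client right away (--run), prompts whether to
+    # start it (the default), or only prints the shell-reload instructions (--no-prompt).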
+ post_install +} + +do_install "$@" || exit 1 diff --git a/packages/syftbox/syftbox/server/users/__init__.py b/packages/syftbox/syftbox/server/users/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/server/users/auth.py b/packages/syftbox/syftbox/server/users/auth.py new file mode 100644 index 00000000000..6770b0a3285 --- /dev/null +++ b/packages/syftbox/syftbox/server/users/auth.py @@ -0,0 +1,109 @@ +from datetime import datetime, timezone +from typing import Any + +import jwt +from fastapi import Depends, HTTPException, Security +from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer +from opentelemetry import trace +from typing_extensions import Annotated + +from syftbox.server.settings import ServerSettings, get_server_settings +from syftbox.server.telemetry import OTEL_ATTR_CLIENT_USER + +bearer_scheme = HTTPBearer() + +ACCESS_TOKEN = "access_token" +EMAIL_TOKEN = "email_token" + + +def _validate_jwt(server_settings: ServerSettings, token: str) -> dict: + try: + return jwt.decode( + token, + server_settings.jwt_secret.get_secret_value(), + algorithms=[server_settings.jwt_algorithm], + ) + except jwt.ExpiredSignatureError: + raise HTTPException( + status_code=401, + detail="Token has expired", + headers={"WWW-Authenticate": "Bearer"}, + ) + except Exception: + raise HTTPException( + status_code=401, + detail="Invalid token", + headers={"WWW-Authenticate": "Bearer"}, + ) + + +def _generate_jwt(server_settings: ServerSettings, data: dict) -> str: + return jwt.encode( + data, + server_settings.jwt_secret.get_secret_value(), + algorithm=server_settings.jwt_algorithm, + ) + + +def generate_access_token(server_settings: ServerSettings, email: str) -> str: + iat: datetime = datetime.now(tz=timezone.utc) + data: dict[str, Any] = { + "email": email, + "type": ACCESS_TOKEN, + "iat": iat, + } + if server_settings.jwt_access_token_exp: + data["exp"] = iat + server_settings.jwt_access_token_exp + return _generate_jwt(server_settings, data) + + +def generate_email_token(server_settings: ServerSettings, email: str) -> str: + iat: datetime = datetime.now(tz=timezone.utc) + data: dict[str, Any] = { + "email": email, + "type": EMAIL_TOKEN, + "iat": iat, + } + if server_settings.jwt_email_token_exp: + data["exp"] = iat + server_settings.jwt_email_token_exp + return _generate_jwt(server_settings, data) + + +def validate_access_token(server_settings: ServerSettings, token: str) -> dict: + data = _validate_jwt(server_settings, token) + if data["type"] != ACCESS_TOKEN: + raise HTTPException( + status_code=401, + detail="Invalid token type", + headers={"WWW-Authenticate": "Bearer"}, + ) + return data + + +def validate_email_token(server_settings: ServerSettings, token: str) -> dict: + data = _validate_jwt(server_settings, token) + if data["type"] != EMAIL_TOKEN: + raise HTTPException( + status_code=401, + detail="Invalid token type", + headers={"WWW-Authenticate": "Bearer"}, + ) + return data + + +def get_user_from_email_token( + credentials: Annotated[HTTPAuthorizationCredentials, Security(bearer_scheme)], + server_settings: Annotated[ServerSettings, Depends(get_server_settings)], +) -> str: + payload = validate_email_token(server_settings, credentials.credentials) + trace.get_current_span().set_attribute(OTEL_ATTR_CLIENT_USER, payload["email"]) + return payload["email"] + + +def get_current_user( + credentials: Annotated[HTTPAuthorizationCredentials, Security(bearer_scheme)], + server_settings: Annotated[ServerSettings, 
Depends(get_server_settings)], +) -> str: + payload = validate_access_token(server_settings, credentials.credentials) + trace.get_current_span().set_attribute(OTEL_ATTR_CLIENT_USER, payload["email"]) + return payload["email"] diff --git a/packages/syftbox/syftbox/server/users/router.py b/packages/syftbox/syftbox/server/users/router.py new file mode 100644 index 00000000000..a9982137aa4 --- /dev/null +++ b/packages/syftbox/syftbox/server/users/router.py @@ -0,0 +1,92 @@ +from typing import Optional + +from fastapi import APIRouter, Depends, HTTPException +from pydantic import BaseModel, EmailStr + +from syftbox.lib.email import send_token_email +from syftbox.server.analytics import log_analytics_event +from syftbox.server.settings import ServerSettings, get_server_settings +from syftbox.server.users.auth import ( + generate_access_token, + generate_email_token, + get_current_user, + get_user_from_email_token, +) + +router = APIRouter(prefix="/auth", tags=["authentication"]) + + +class EmailTokenRequest(BaseModel): + email: EmailStr + + +class EmailTokenResponse(BaseModel): + email_token: Optional[str] = None + + +class AccessTokenResponse(BaseModel): + access_token: str + + +@router.post("/request_email_token") +def get_token( + req: EmailTokenRequest, server_settings: ServerSettings = Depends(get_server_settings) +) -> EmailTokenResponse: + """ + Send an email token to the user's email address + + if auth is disabled, the token will be returned in the response as a base64 encoded json string + """ + email = req.email + token = generate_email_token(server_settings, email) + + response = EmailTokenResponse() + if server_settings.auth_enabled: + send_token_email(server_settings, email, token) + else: + # Only return token if auth is disabled, it will be a base64 encoded json string + response.email_token = token + + return response + + +@router.post("/validate_email_token") +def validate_email_token( + email: str, + email_from_token: str = Depends(get_user_from_email_token), + server_settings: ServerSettings = Depends(get_server_settings), +) -> AccessTokenResponse: + """ + Validate the email token and return a matching access token + + Args: + email (str, optional): The user email, extracted from the email token. Defaults to Depends(get_user_from_email_token). + server_settings (ServerSettings, optional): server settings. Defaults to Depends(get_server_settings). + + Returns: + AccessTokenResponse: access token + """ + if email_from_token != email: + raise HTTPException(status_code=401, detail="This email token is not for this email address") + + access_token = generate_access_token(server_settings, email) + return AccessTokenResponse(access_token=access_token) + + +class WhoAmIResponse(BaseModel): + email: str + + +@router.post("/whoami") +def whoami( + email: str = Depends(get_current_user), +) -> WhoAmIResponse: + """ + Get the current users email. + If the token is not valid or outdated, get_current_user will raise 401 Unauthorized. 
+ + Returns: + str: email + """ + log_analytics_event("/auth/whoami", email=email) + return WhoAmIResponse(email=email) diff --git a/packages/syftbox/syftbox/server2client_version.json b/packages/syftbox/syftbox/server2client_version.json new file mode 100644 index 00000000000..a002aac2f72 --- /dev/null +++ b/packages/syftbox/syftbox/server2client_version.json @@ -0,0 +1,45 @@ +{ + "0.1.0": ["0.1.0", "0.2.0"], + "0.1.1": ["0.1.0", "0.2.0"], + "0.1.2": ["0.1.0", "0.2.0"], + "0.1.3": ["0.1.0", "0.2.0"], + "0.1.4": ["0.1.0", "0.2.0"], + "0.1.5": ["0.1.0", "0.2.0"], + "0.1.6": ["0.1.0", "0.2.0"], + "0.1.7": ["0.1.0", "0.2.0"], + "0.1.8": ["0.1.0", "0.2.0"], + "0.1.9": ["0.1.0", "0.2.0"], + "0.1.10": ["0.1.0", "0.2.0"], + "0.1.11": ["0.1.0", "0.2.0"], + "0.1.12": ["0.1.0", "0.2.0"], + "0.1.13": ["0.1.0", "0.2.0"], + "0.1.14": ["0.1.0", "0.2.0"], + "0.1.15": ["0.1.0", "0.2.0"], + "0.1.16": ["0.1.0", "0.2.0"], + "0.1.17": ["0.1.0", "0.2.0"], + "0.1.18": ["0.1.0", "0.2.0"], + "0.1.19": ["0.1.0", "0.2.0"], + "0.1.20": ["0.1.0", "0.2.0"], + "0.1.21": ["0.1.0", "0.2.0"], + "0.1.22": ["0.1.0", "0.2.0"], + "0.1.23": ["0.1.0", "0.2.0"], + "0.1.24": ["0.1.0", "0.2.0"], + "0.2.0": ["0.2.0", "0.3.0"], + "0.2.1": ["0.2.0", "0.3.0"], + "0.2.2": ["0.2.0", "0.3.0"], + "0.2.3": ["0.2.0", "0.3.0"], + "0.2.4": ["0.2.0", "0.3.0"], + "0.2.5": ["0.2.0", "0.3.0"], + "0.2.6": ["0.2.0", "0.3.0"], + "0.2.7": ["0.2.0", "0.3.0"], + "0.2.8": ["0.2.0", "0.3.0"], + "0.2.9": ["0.2.0", "0.3.0"], + "0.2.10": ["0.2.0", "0.3.0"], + "0.2.11": ["0.2.0", "0.3.0"], + "0.3.0": ["0.3.0", ""], + "0.3.1": ["0.3.0", ""], + "0.3.2": ["0.3.0", ""], + "0.3.3": ["0.3.0", ""], + "0.3.4": ["0.3.0", ""], + "0.3.5": ["0.3.0", ""] +} diff --git a/packages/syftbox/syftbox/tui/__init__.py b/packages/syftbox/syftbox/tui/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/tui/app.py b/packages/syftbox/syftbox/tui/app.py new file mode 100644 index 00000000000..d444bdcc816 --- /dev/null +++ b/packages/syftbox/syftbox/tui/app.py @@ -0,0 +1,56 @@ +from pathlib import Path +from typing import Any + +from textual.app import App +from textual.widgets import Footer, Header, TabbedContent, TabPane + +from syftbox.lib import Client +from syftbox.tui.widgets.api_widget import APIWidget +from syftbox.tui.widgets.datasites_widget import DatasiteSelector +from syftbox.tui.widgets.home_widget import HomeWidget +from syftbox.tui.widgets.sync_widget import SyncWidget + + +class SyftBoxTUI(App): + CSS_PATH = Path(__file__).parent.parent / "assets" / "tui.tcss" + BINDINGS = [ + ("h", "switch_tab('Home')", "Home"), + ("a", "switch_tab('APIs')", "APIs"), + ("d", "switch_tab('Datasites')", "Datasites"), + ("s", "switch_tab('Sync')", "Sync"), + ("q", "quit", "Quit"), + ] + + def __init__( + self, + syftbox_context: Client, + ): + super().__init__() + self.syftbox_context = syftbox_context + + def action_switch_tab(self, tab: str) -> None: + self.query_one(TabbedContent).active = tab + + def on_mount(self) -> None: + self.title = "SyftBox" + + def compose(self) -> Any: + yield Header(name="SyftBox") + with TabbedContent(): + with TabPane("Home", id="Home"): + yield HomeWidget(self.syftbox_context) + with TabPane("APIs", id="APIs"): + yield APIWidget(self.syftbox_context) + with TabPane("Datasites", id="Datasites"): + yield DatasiteSelector( + base_path=self.syftbox_context.workspace.datasites, + default_datasite=self.syftbox_context.email, + ) + with TabPane("Sync", id="Sync"): + yield SyncWidget(self.syftbox_context) + yield 
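# --- Editor's sketch (illustrative, not part of this patch) -------------------
# server2client_version.json introduced above maps a server version to a pair of
# client versions. The consumer of this table is not part of this hunk; the
# shape suggests a [minimum, upper-bound) range with "" meaning "no upper
# bound", which a check could interpret roughly like this (the `packaging`
# dependency and the exclusive upper bound are assumptions):
import json

from packaging.version import Version


def client_is_supported(server_version: str, client_version: str, table: dict) -> bool:
    low, high = table[server_version]
    if Version(client_version) < Version(low):
        return False
    return high == "" or Version(client_version) < Version(high)


table = json.loads(open("packages/syftbox/syftbox/server2client_version.json").read())
print(client_is_supported("0.2.3", "0.2.11", table))  # True under this reading
print(client_is_supported("0.2.3", "0.3.0", table))   # False if the upper bound is exclusive
# ------------------------------------------------------------------------------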
Footer() + + +# config = SyftClientConfig.load() +# syftbox_context = Client(config) +# app = SyftBoxTUI(syftbox_context) diff --git a/packages/syftbox/syftbox/tui/cli.py b/packages/syftbox/syftbox/tui/cli.py new file mode 100644 index 00000000000..e92e262b751 --- /dev/null +++ b/packages/syftbox/syftbox/tui/cli.py @@ -0,0 +1,57 @@ +from pathlib import Path +from typing import Annotated +from venv import logger + +from rich import print as rprint +from typer import Exit, Option, Typer + +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.client_shim import Client +from syftbox.lib.constants import DEFAULT_CONFIG_PATH +from syftbox.lib.exceptions import ClientConfigException + +app = Typer( + name="SyftBox Terminal UI", + help="Launch the SyftBox Terminal UI", + pretty_exceptions_enable=False, + context_settings={"help_option_names": ["-h", "--help"]}, +) + +CONFIG_OPTS = Option("-c", "--config", "--config_path", help="Path to the SyftBox config") + + +@app.callback(invoke_without_command=True) +def run_tui( + config_path: Annotated[Path, CONFIG_OPTS] = DEFAULT_CONFIG_PATH, +) -> None: + # Late import to avoid long startup times + from syftbox.tui.app import SyftBoxTUI + + syftbox_context = get_syftbox_context(config_path) + tui = SyftBoxTUI(syftbox_context) + logger.debug("Running SyftBox TUI") + tui.run() + + +def get_syftbox_context(config_path: Path) -> Client: + try: + conf = SyftClientConfig.load(config_path) + context = Client(conf) + return context + except ClientConfigException: + msg = ( + f"[bold red]Error:[/bold red] Couldn't load config at: [yellow]'{config_path}'[/yellow]\n" + "Please ensure that:\n" + " - The configuration file exists at the specified path.\n" + " - You've run the SyftBox atleast once.\n" + f" - For custom configs, provide the proper path using [cyan]--config[/cyan] flag" + ) + rprint(msg) + raise Exit(1) + except Exception as e: + rprint(f"[bold red]Error:[/bold red] {e}") + raise Exit(1) + + +if __name__ == "__main__": + app() diff --git a/packages/syftbox/syftbox/tui/widgets/__init__.py b/packages/syftbox/syftbox/tui/widgets/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/syftbox/tui/widgets/api_widget.py b/packages/syftbox/syftbox/tui/widgets/api_widget.py new file mode 100644 index 00000000000..1cf57f1984b --- /dev/null +++ b/packages/syftbox/syftbox/tui/widgets/api_widget.py @@ -0,0 +1,56 @@ +import urllib +import urllib.parse +from typing import Any, List + +from textual.containers import Horizontal +from textual.widget import Widget +from textual.widgets import Label, ListItem, ListView + +from syftbox.lib import Client +from syftbox.tui.widgets.logs_widget import SyftLogsWidget + + +class APIWidget(Widget): + DEFAULT_CSS = """ + APIWidget { + height: auto; + } + """ + + def __init__( + self, + syftbox_context: Client, + ): + super().__init__() + self.syftbox_context = syftbox_context + self.apps: List[str] = [] + + def compose(self) -> Any: + self.apps = self.get_installed_apps() + + with Horizontal(): + list_view = ListView(*[ListItem(Label(app), id=app) for app in self.apps], classes="sidebar") + list_view.styles.width = "20%" + yield list_view + + self.log_widget = SyftLogsWidget( + self.syftbox_context, None, title="API Logs", refresh_every=2, classes="api-logs" + ) + self.set_app_logs(self.apps[0]) + + yield self.log_widget + + def set_app_logs(self, app_name: str) -> None: + """Update the logs widget to show logs for the given app.""" + app_name = urllib.parse.quote(app_name) + 
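# --- Editor's sketch (illustrative, not part of this patch) -------------------
# The Typer CLI added above loads the client config and launches the Textual
# app. How it is mounted under the main `syftbox` command is not shown in this
# hunk; assuming the package is installed, the module can also be run directly:
#
#   python -m syftbox.tui.cli --config ~/.syftbox/config.json
#
# (the config path is an example). Programmatically, the same flow is roughly:
from pathlib import Path

from syftbox.tui.app import SyftBoxTUI
from syftbox.tui.cli import get_syftbox_context

ctx = get_syftbox_context(Path("~/.syftbox/config.json").expanduser())  # example path
SyftBoxTUI(ctx).run()
# ------------------------------------------------------------------------------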
endpoint = f"/apps/logs/{app_name}" + self.log_widget.endpoint = endpoint + self.log_widget.refresh_logs() + + def get_installed_apps(self) -> list[str]: + api_dir = self.syftbox_context.workspace.apps + return [d.name for d in api_dir.iterdir() if d.is_dir() and not d.name.startswith(".")] + + def on_list_view_highlighted(self, event: ListView.Highlighted) -> None: + app_name = event.item.id + self.set_app_logs(app_name) diff --git a/packages/syftbox/syftbox/tui/widgets/datasites_widget.py b/packages/syftbox/syftbox/tui/widgets/datasites_widget.py new file mode 100644 index 00000000000..a538c6fdf97 --- /dev/null +++ b/packages/syftbox/syftbox/tui/widgets/datasites_widget.py @@ -0,0 +1,65 @@ +from pathlib import Path +from typing import Any, List + +from textual.containers import Container +from textual.suggester import Suggester, SuggestFromList +from textual.widgets import DirectoryTree, Input, Label, Static + + +class DatasiteSuggester(Suggester): + """Autocomplete suggester for datasite input field.""" + + def __init__(self, *, base_path: Path, use_cache: bool = True, case_sensitive: bool = False): + super().__init__(use_cache=use_cache, case_sensitive=case_sensitive) + self.base_path = base_path + + async def get_suggestion(self, value: str) -> None: + paths = [p.name for p in self.base_path.iterdir() if p.is_dir()] + return await SuggestFromList( + paths, + case_sensitive=self.case_sensitive, + ).get_suggestion(value) + + +class DatasiteSelector(Static): + def __init__(self, base_path: Path, default_datasite: str) -> None: + super().__init__() + self.base_path = base_path.expanduser() + self.default_datasite = default_datasite + self.current_datasite = self.base_path / default_datasite + + def compose(self) -> Any: + yield Label("Browse Datasite:") + path_input = Input( + value=self.default_datasite, + placeholder="Enter datasite path...", + suggester=DatasiteSuggester(base_path=self.base_path), + ) + dir_tree = DirectoryTree(str(self.current_datasite)) + path_input.styles.width = "100%" + yield path_input + + yield Static("", classes="spacer") # Spacer with vertical margin + + self.files_container = Container() + with self.files_container: + yield Label("Files:") + yield dir_tree + + self.error_message = Static("", classes="error") + self.error_message.visible = False + yield self.error_message + + def _get_available_datasites(self) -> List[str]: + return [p.name for p in self.base_path.iterdir() if p.is_dir()] + + def on_input_submitted(self, event: Input.Submitted) -> None: + self.current_datasite = self.base_path / event.value + if not self.current_datasite.exists(): + self.error_message.update(f"[red]Datasite '{event.value}' does not exist[/red]") + self.error_message.visible = True + self.files_container.visible = False + else: + self.error_message.visible = False + self.files_container.visible = True + self.query_one(DirectoryTree).path = str(self.current_datasite) diff --git a/packages/syftbox/syftbox/tui/widgets/home_widget.py b/packages/syftbox/syftbox/tui/widgets/home_widget.py new file mode 100644 index 00000000000..bd57e93e8eb --- /dev/null +++ b/packages/syftbox/syftbox/tui/widgets/home_widget.py @@ -0,0 +1,119 @@ +from typing import Any, Optional + +import requests +from textual.containers import Horizontal, Vertical +from textual.widget import Widget +from textual.widgets import Label, Markdown, Static + +from syftbox import __version__ +from syftbox.lib import Client +from syftbox.tui.widgets.logs_widget import SyftLogsWidget + +INTRO_MD = """ +### Welcome to 
SyftBox! + +SyftBox is an innovative project by [OpenMined](https://openmined.org) that aims to make privacy-enhancing technologies (PETs) more accessible and user-friendly for developers. It provides a modular and intuitive framework for building PETs applications with minimal barriers, regardless of the programming language or environment. + +### Important Resources +- 📚 Check the docs at https://syftbox-documentation.openmined.org/ +- 📊 View the [Stats Dashboard](https://syftbox.openmined.org/datasites/andrew@openmined.org/stats.html) +- 🔧 View our [GitHub Repository](https://github.com/OpenMined/syft) +- 🔍 Browse [Available Datasets](https://syftbox.openmined.org/datasites/aggregator@openmined.org/data_search/) + +Need help? Join us on [Slack](https://slack.openmined.org/) 💬 +""" + + +class StatusDashboard(Widget): + DEFAULT_CSS = """ + StatusDashboard { + height: auto; + } + """ + + def __init__( + self, + syftbox_context: Client, + *, + classes: Optional[str] = None, + ): + self.syftbox_context = syftbox_context + super().__init__( + classes=classes, + ) + + def compose(self) -> Any: + yield Static("[blue]Status[/blue]\n") + server_url = f"[link={self.syftbox_context.config.server_url}]{self.syftbox_context.config.server_url}[/link]" + client_url = f"[link={self.syftbox_context.config.client_url}]{self.syftbox_context.config.client_url}[/link]" + data_dir = ( + f"[link=file://{self.syftbox_context.workspace.data_dir}]{self.syftbox_context.workspace.data_dir}[/link]" + ) + yield Static(f"Syftbox version: [green]{__version__}[/green]") + yield Static(f"User: [green]{self.syftbox_context.email}[/green]") + yield Static(f"Syftbox folder: [green]{data_dir}[/green]") + yield Static(f"Server URL: [green]{server_url}[/green]") + yield Static(f"Local URL: [green]{client_url}[/green]") + + sync_status = "🟢 [green]Active[/green]" if self._sync_is_alive() else "🔴 [red]Inactive[/red]" + yield Label(f"Sync: {sync_status}", id="sync_status") + + apps_count = self.count_apps() + apps_color = "green" if apps_count > 0 else "red" + yield Label(f"Installed APIs: [{apps_color}]{apps_count}[/{apps_color}]", id="api_count") + + self.set_interval(1, self.update_values) + + def update_values(self) -> None: + sync_status_widget = self.query_exactly_one("#sync_status", expect_type=Label) + api_count_widget = self.query_exactly_one("#api_count", expect_type=Label) + + sync_status = "🟢 [green]Active[/green]" if self._sync_is_alive() else "🔴 [red]Inactive[/red]" + sync_status_widget.content = f"Sync: {sync_status}" + + apps_count = self.count_apps() + apps_color = "green" if apps_count > 0 else "red" + api_count_widget.content = f"Installed APIs: [{apps_color}]{apps_count}[/{apps_color}]" + + sync_status_widget.refresh() + api_count_widget.refresh() + + def _sync_is_alive(self) -> bool: + try: + response = requests.get(f"{self.syftbox_context.config.client_url}/sync/health") + return response.status_code == 200 + except requests.exceptions.ConnectionError: + return False + + def count_apps(self) -> int: + api_dir = self.syftbox_context.workspace.apps + return len([d for d in api_dir.iterdir() if d.is_dir() and not d.name.startswith(".")]) + + +class HomeWidget(Widget): + DEFAULT_CSS = """ + HomeWidget { + height: auto; + } + """ + + def __init__(self, syftbox_context: Client) -> None: + super().__init__() + self.syftbox_context = syftbox_context + self.info_widget = Markdown(INTRO_MD, classes="info") + self.logs_widget = SyftLogsWidget( + syftbox_context=self.syftbox_context, + endpoint="/logs", + title="SyftBox 
Logs", + refresh_every=2, + classes="syftbox-logs", + ) + + def compose(self) -> Any: + with Horizontal(): + yield StatusDashboard(self.syftbox_context, classes="status") + yield Vertical( + self.info_widget, + self.logs_widget, + classes="main", + ) diff --git a/packages/syftbox/syftbox/tui/widgets/logs_widget.py b/packages/syftbox/syftbox/tui/widgets/logs_widget.py new file mode 100644 index 00000000000..061e5186a89 --- /dev/null +++ b/packages/syftbox/syftbox/tui/widgets/logs_widget.py @@ -0,0 +1,89 @@ +from typing import Any, Optional + +import requests +from rich.text import Text +from textual.containers import Vertical +from textual.widgets import RichLog, Static + +from syftbox.lib import Client + + +class SyftTUIError(Exception): + def __init__(self, message: str): + self.message = message + super().__init__(message) + + +class SyftLogsWidget(Static): + def __init__( + self, + syftbox_context: Client, + endpoint: Optional[str] = None, + title: Optional[str] = None, + refresh_every: int = 2, + classes: Optional[str] = None, + ) -> None: + super().__init__(classes=classes) + self.syftbox_context = syftbox_context + self.endpoint = endpoint + # Track what we polled last time to determine scrolling behavior + self._previous_source = self.endpoint + + self.title = title + self.refresh_every = refresh_every + self.logs_viewer = RichLog( + max_lines=256, + wrap=False, + markup=True, + highlight=True, + ) + self.logs_viewer.write("[dim]Fetching logs...[/dim]") + + def _get_err(self, response: requests.Response) -> str: + try: + return response.json()["detail"] + except Exception: + return response.text + + def _fetch_logs(self) -> str: + self._previous_source = self.endpoint + if self.endpoint is None: + raise SyftTUIError("No logs endpoint provided") + try: + response = requests.get(f"{self.syftbox_context.config.client_url}{self.endpoint}") + if response.status_code != 200: + raise SyftTUIError(self._get_err(response)) + logs = response.json()["logs"] + return "".join(logs) + except requests.exceptions.ConnectionError: + raise SyftTUIError("Unable to connect to SyftBox") + except Exception as e: + raise SyftTUIError(f"Failed to fetch logs: {str(e)}") + + def should_scroll_to_end(self) -> bool: + if self.endpoint != self._previous_source: + return True + return self.logs_viewer.is_vertical_scroll_end and self.logs_viewer.scroll_offset.y > 0 + + def refresh_logs(self) -> None: + should_scroll = self.should_scroll_to_end() + try: + logs = self._fetch_logs() + logs = Text.from_ansi(logs) + except SyftTUIError as e: + logs = f"[red]{e.message}[/red]\n" + self.logs_viewer.clear() + self.logs_viewer.write(logs) + + if should_scroll: + self.logs_viewer.scroll_end(animate=False) + + def compose(self) -> Any: + with Vertical(): + if self.title: + yield Static(f"[blue]{self.title}[/blue]\n") + yield self.logs_viewer + + self.refresh_logs() + if self.refresh_every > 0: + self.set_interval(self.refresh_every, self.refresh_logs) diff --git a/packages/syftbox/syftbox/tui/widgets/sync_widget.py b/packages/syftbox/syftbox/tui/widgets/sync_widget.py new file mode 100644 index 00000000000..cf91c3bee09 --- /dev/null +++ b/packages/syftbox/syftbox/tui/widgets/sync_widget.py @@ -0,0 +1,67 @@ +from typing import Any, List, Optional + +import requests +from textual.binding import Binding +from textual.containers import ScrollableContainer +from textual.widgets import DataTable, Input, Label, Static + +from syftbox.client.plugins.sync.local_state import SyncStatusInfo +from syftbox.lib import Client + + +class 
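# --- Editor's sketch (illustrative, not part of this patch) -------------------
# SyftLogsWidget above polls the local client daemon and expects a JSON body of
# the form {"logs": [...]}; HomeWidget points it at "/logs" and APIWidget at
# "/apps/logs/<app>". The same endpoints can be queried directly; the client
# URL and the "ring" app name here are example values.
import requests

client_url = "http://localhost:8080"
for endpoint in ("/logs", "/apps/logs/ring"):
    resp = requests.get(f"{client_url}{endpoint}", timeout=5)
    if resp.ok:
        print("".join(resp.json()["logs"])[-500:])  # tail of the combined log text
    else:
        print(f"{endpoint} -> HTTP {resp.status_code}")
# ------------------------------------------------------------------------------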
SyncWidget(Static): + LIMIT = 100 + BINDINGS = [ + Binding("f", "focus_search", "Search", show=True), + ] + + def __init__(self, syftbox_context: Client) -> None: + super().__init__() + self.syftbox_context = syftbox_context + + def compose(self) -> Any: + yield Label("Filter Files") + yield Input( + placeholder="Enter a glob pattern to filter files", + id="path_filter", + ) + + self.table = DataTable() + self.table.add_columns("Path", "Status", "Action", "Last Update", "Message") + yield Label("Sync Events", classes="padding-top") + yield ScrollableContainer(self.table) + yield Static(f"Showing last {self.LIMIT} sync events", classes="dim") + + def on_mount(self) -> None: + self._refresh_table() + + def _refresh_table(self, path_filter: Optional[str] = None) -> None: + self.table.clear() + + status_info = self._get_sync_state(path_filter) + for info in status_info: + self.table.add_row( + str(info.path), + info.status.value, + info.action.value if info.action else "", + info.timestamp.astimezone().strftime("%Y-%m-%d %H:%M:%S"), + info.message or "", + ) + + def _get_sync_state(self, path_glob: Optional[str] = None) -> List[SyncStatusInfo]: + try: + response = requests.get( + f"{self.syftbox_context.config.client_url}/sync/state", + params={"path_glob": path_glob, "limit": self.LIMIT}, + ) + response.raise_for_status() + return [SyncStatusInfo(**item) for item in response.json()] + except Exception: + return [] + + def on_input_changed(self, event: Input.Changed) -> None: + if event.input.id == "path_filter": + self._refresh_table(event.value) + + def action_focus_search(self) -> None: + self.query_one(Input).focus() diff --git a/packages/syftbox/tests/__init__.py b/packages/syftbox/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/tests/e2e/conftest.py b/packages/syftbox/tests/e2e/conftest.py new file mode 100644 index 00000000000..767ac17671f --- /dev/null +++ b/packages/syftbox/tests/e2e/conftest.py @@ -0,0 +1,237 @@ +import asyncio +import os +import shutil +import sys +from asyncio.subprocess import Process +from dataclasses import dataclass, field +from pathlib import Path +from typing import Dict, List + +import httpx +import pytest_asyncio +from loguru import logger + +logger.remove() +logger.add(sys.stderr, format="{level: <8} | {message}", colorize=True) +logger.level("SUCCESS", color="") +logger.level("INFO", color="") +logger.level("DEBUG", color="") + + +class E2ETestError(Exception): + pass + + +class E2ETimeoutError(E2ETestError): + pass + + +@dataclass +class Server: + port: int = 5001 + env: Dict[str, str] = field(default_factory=dict) + + +@dataclass +class Client: + name: str + port: int + server_port: int = 5001 + data_dir: Path = field(default_factory=Path.cwd) # Set by E2EContext + env: Dict[str, str] = field(default_factory=dict) + apps: List[str] = field(default_factory=list) + + @property + def email(self): + return f"{self.name}@openmined.org" + + @property + def datasite_dir(self): + """data_dir/datasites""" + return self.data_dir / "datasites" + + @property + def api_dir(self): + """data_dir/apis""" + return self.data_dir / "apis" + + @property + def private_dir(self): + """data_dir/private""" + return self.data_dir / "private" + + @property + def my_datasite(self): + """data_dir/datasites/{email}""" + return self.datasite_dir / self.email + + @property + def public_dir(self): + """data_dir/datasites/{email}/public""" + return self.my_datasite / "public" + + def api_path(self, api_name: str): + 
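# --- Editor's sketch (illustrative, not part of this patch) -------------------
# The e2e Client dataclass above derives the whole on-disk layout from data_dir
# and the synthesized <name>@openmined.org email; the temp directory below is an
# example value (the real one is assigned by E2EContext.start_client):
from pathlib import Path

from tests.e2e.conftest import Client

c = Client(name="user1", port=8081, data_dir=Path("/tmp/e2e/clients/user1"))
assert c.email == "user1@openmined.org"
assert c.datasite_dir == Path("/tmp/e2e/clients/user1/datasites")
assert c.my_datasite == c.datasite_dir / "user1@openmined.org"
assert c.public_dir == c.my_datasite / "public"
assert c.private_dir == Path("/tmp/e2e/clients/user1/private")
# ------------------------------------------------------------------------------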
"""data_dir/apis/{api_name}""" + return self.api_dir / api_name + + def api_data_dir(self, app_name: str): + """data_dir/datasites/{email}/api_data/{app_name}""" + return self.my_datasite / "api_data" / app_name + + +class E2EContext: + def __init__(self, e2e_name: str, server: Server, clients: List[Client]): + self.e2e_name = e2e_name + self.test_dir = Path.cwd() / ".e2e" / e2e_name + self.server = server + self.clients = clients + self.__procs: List[Process] = [] + + def reset_test_dir(self): + shutil.rmtree(self.test_dir.parent, ignore_errors=True) + + async def start_all(self) -> bool: + # Start server + await self.start_server(self.server) + await self.wait_for_server(self.server) + + # Start clients + await asyncio.gather(*[self.start_client(c) for c in self.clients]) + await self.wait_for_clients(self.clients) + + return True + + async def cleanup(self): + for process in self.__procs: + if process.returncode is None: + process.kill() + await process.wait() + for path in Path(".e2e").rglob("*.pid"): + try: + path.unlink() + except Exception: + pass + + async def start_server(self, server: Server) -> Process: + server_dir = self.test_dir / "server" + server_dir.mkdir(parents=True, exist_ok=True) + + logs_dir = self.test_dir / "logs" + logs_dir.mkdir(parents=True, exist_ok=True) + + env = os.environ.copy() + env["SYFTBOX_ENV"] = "DEV" + env["SYFTBOX_DATA_FOLDER"] = str(server_dir) + env["SYFTBOX_OTEL_ENABLED"] = "0" + env.update(server.env) + + process = await asyncio.create_subprocess_exec( + "gunicorn", + "syftbox.server.server:app", + "-k=uvicorn.workers.UvicornWorker", + f"--bind=127.0.0.1:{server.port}", + stdout=open(logs_dir / "server.log", "w"), + stderr=asyncio.subprocess.STDOUT, + env=env, + ) + + self.__procs.append(process) + + return process + + async def start_client(self, client: Client) -> Process: + client_dir = self.test_dir / "clients" / client.name + client_dir.mkdir(parents=True, exist_ok=True) + client.data_dir = client_dir + + logs_dir = self.test_dir / "logs" + logs_dir.mkdir(parents=True, exist_ok=True) + + env = os.environ.copy() + env["SYFTBOX_DISABLE_ICONS"] = "1" + env.update(client.env) + if len(client.apps) > 0: + env["SYFTBOX_DEFAULT_APPS"] = ",".join(client.apps) + + process = await asyncio.create_subprocess_exec( + "syftbox", + "client", + f"--config={client_dir}/config.json", + f"--data-dir={client_dir}", + f"--email={client.name}@openmined.org", + f"--server=http://localhost:{client.server_port}", + f"--port={client.port}", + "--no-open-dir", + "--verbose", + stdout=open(logs_dir / f"client.{client.name}.log", "w"), + stderr=asyncio.subprocess.STDOUT, + env=env, + ) + self.__procs.append(process) + return process + + async def wait_for_server(self, server: Server, timeout: int = 30): + logger.debug(f"Waiting for server to be ready on port {server.port} (timeout={timeout}s)") + await self.wait_for_url(f"http://localhost:{server.port}/info", timeout=timeout) + logger.success(f"Server '{server.port}' is ready") + + async def wait_for_clients(self, clients: List[Client], timeout: int = 30): + # wait for all futures with a timeout + await asyncio.gather(*[self.wait_for_client(c, timeout=timeout) for c in clients]) + + async def wait_for_client(self, client: Client, timeout: int = 30): + logger.debug(f"Waiting for client '{client.name}' to be ready on port {client.port} (timeout={timeout}s)") + await self.wait_for_url(f"http://localhost:{client.port}/info", timeout=timeout) + logger.success(f"Client '{client.name}' is ready") + + async def 
wait_for_api(self, app_name: str, client: Client, timeout: int = 30): + logger.debug(f"Waiting for API '{app_name}' to be ready (timeout={timeout}s)") + run_path = client.api_dir / app_name / "run.sh" + await self.wait_for_path(run_path, timeout=timeout) + logger.success(f"API '{app_name}' is ready on client '{client.name}'") + + async def wait_for_path(self, path: Path, timeout: int = 30, interval: float = 0.5) -> None: + logger.debug(f"Waiting for path '{path}' (timeout={timeout}s)") + start = asyncio.get_event_loop().time() + + while not path.exists(): + if asyncio.get_event_loop().time() - start > timeout: + raise E2ETimeoutError(f"Timeout after {timeout}s waiting for: {path}") + await asyncio.sleep(interval) + + elapsed = asyncio.get_event_loop().time() - start + logger.debug(f"Got {path} (after {elapsed:.1f}s)") + + async def wait_for_url(self, url: str, timeout: int = 30, interval: float = 1.0) -> None: + async with httpx.AsyncClient() as client: + start = asyncio.get_event_loop().time() + while True: + try: + await client.get(url) + break + except httpx.RequestError: + if asyncio.get_event_loop().time() - start > timeout: + raise E2ETimeoutError(f"Timeout after {timeout}s waiting for: {url}") + await asyncio.sleep(interval) + + elapsed = asyncio.get_event_loop().time() - start + logger.debug(f"Got response from {url} (after {elapsed:.1f}s)") + + +def get_random_port(): + import socket + + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("localhost", 0)) + return s.getsockname()[1] + + +@pytest_asyncio.fixture(loop_scope="function") +async def e2e_context(request): + try: + ctx = E2EContext(**request.param) + yield ctx + except: + raise + finally: + await ctx.cleanup() diff --git a/packages/syftbox/tests/e2e/test_aggregator_with_local_training.py b/packages/syftbox/tests/e2e/test_aggregator_with_local_training.py new file mode 100644 index 00000000000..aaf6a0ab543 --- /dev/null +++ b/packages/syftbox/tests/e2e/test_aggregator_with_local_training.py @@ -0,0 +1,156 @@ +import asyncio +import json +import secrets +import shutil +from pathlib import Path + +import pytest +from loguru import logger + +from tests.e2e.conftest import Client, E2EContext, E2ETimeoutError, Server + +AGGREGATOR_CONFIG = { + "participants": ["user1@openmined.org", "user2@openmined.org", "user3@openmined.org"], +} + +AGGREGATOR_API_NAME = "model_aggregator" +LOCAL_TRAINING_API_NAME = "model_local_training" +MAX_COPY_DATA_PARTS = 2 + + +def deployment_config(): + return { + "e2e_name": "aggregator_with_local_training", + "server": Server(port=5001), + "clients": [ + Client( + name="agg", + port=8080, + apps=["https://github.com/OpenMined/model_aggregator"], + ), + Client( + name="user1", + port=8081, + server_port=5001, + apps=[ + "https://github.com/OpenMined/model_local_training", + ], + ), + Client( + name="user2", + port=8082, + server_port=5001, + apps=[ + "https://github.com/OpenMined/model_local_training", + ], + ), + Client( + name="user3", + port=8083, + server_port=5001, + apps=[ + "https://github.com/OpenMined/model_local_training", + ], + ), + ], + } + + +async def copy_train_data_to_private(e2e_context: E2EContext, clients: list[Client]) -> dict[str, list[str]]: + client_data_map: dict[str, list[str]] = {} + + for client in clients: + await e2e_context.wait_for_api(LOCAL_TRAINING_API_NAME, client) + mnist_samples_dir = client.api_path(LOCAL_TRAINING_API_NAME) / "mnist_samples" + all_mnist_samples = list(mnist_samples_dir.glob("*.pt")) + client_private_dir = client.private_dir / 
LOCAL_TRAINING_API_NAME + client_private_dir.mkdir(parents=True, exist_ok=True) + + client_sample_files = [] + for _ in range(MAX_COPY_DATA_PARTS): + # random.random is not concurrency safe, using secrets.randbelow + rand_idx = secrets.randbelow(len(all_mnist_samples)) + random_mnist_sample = all_mnist_samples[rand_idx] + logger.debug(f"Copying {random_mnist_sample} to {client.email} private dir") + shutil.copy(random_mnist_sample, client_private_dir) + assert Path(client_private_dir, random_mnist_sample.name).exists() + client_sample_files.append(random_mnist_sample.name) + + client_data_map[client.email] = client_sample_files + + return client_data_map + + +async def wait_for_public_trained_models(e2e_context: E2EContext, client: Client, mnist_samples: list[str]): + await e2e_context.wait_for_api(LOCAL_TRAINING_API_NAME, client) + public_dir = client.public_dir + for mnist_sample in mnist_samples: + model_file = public_dir / f"trained_{mnist_sample}" + await e2e_context.wait_for_path(model_file, timeout=240, interval=1) + assert model_file.exists() + + +async def verify_file_sizes(src: Path, dst: Path, timeout: int = 60) -> bool: + """Compare two files using their sizes.""" + start = asyncio.get_event_loop().time() + while start + timeout > asyncio.get_event_loop().time(): + if src.stat().st_size == dst.stat().st_size: + return True + await asyncio.sleep(1) + raise E2ETimeoutError(f"Timeout after {timeout}s waiting for copying {src} to {dst}") + + +@pytest.mark.asyncio +@pytest.mark.parametrize("e2e_context", [deployment_config()], indirect=True, ids=["aggregator_with_local_training"]) +async def test_e2e_aggregator_with_local_training(e2e_context: E2EContext): + # setting up: Run 4 clients (1 agg + 3 participants) and the cache server, also installs the apps on the clients + logger.info(f"Starting E2E '{e2e_context.e2e_name}'") + e2e_context.reset_test_dir() + agg_client = e2e_context.clients[0] + + await e2e_context.start_all() + await e2e_context.wait_for_api(AGGREGATOR_API_NAME, agg_client) + + logger.info("Aggregator copies `participants.json` into the `launch` directory") + launch_dir = agg_client.api_data_dir(AGGREGATOR_API_NAME) / "launch" + launch_dir.mkdir(parents=True, exist_ok=True) + participants_file = launch_dir / "participants.json" + participants_file.write_text(json.dumps(AGGREGATOR_CONFIG)) + + logger.info("Aggregator copies test data to the private folder") + agg_private_dir = agg_client.private_dir / AGGREGATOR_API_NAME + agg_private_dir.mkdir(parents=True, exist_ok=True) + agg_priv_data_path = agg_private_dir / "mnist_dataset.pt" + test_dataset_path = agg_client.api_path(AGGREGATOR_API_NAME) / "samples" / "test_data" / "mnist_dataset.pt" + with open(test_dataset_path, "rb") as src, open(agg_priv_data_path, "wb") as dst: + shutil.copyfileobj(src, dst) + await verify_file_sizes(test_dataset_path, agg_priv_data_path) + await e2e_context.wait_for_path(agg_priv_data_path, timeout=60, interval=1) + + clients: list[Client] = e2e_context.clients[1:] + logger.info("Participants moving the MNIST data parts into private/model_local_training to train") + client_data_map = await copy_train_data_to_private(e2e_context, clients) + + logger.info("Waiting for local clients to train their models") + await asyncio.gather( + *[ + wait_for_public_trained_models(e2e_context, client, client_data_map[client.email]) + for client in e2e_context.clients[1:] + ] + ) + + logger.info("Waiting for aggregator to aggregate the models and generate results") + done_dir = 
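# --- Editor's note (illustrative, not part of this patch) ---------------------
# The aggregator apps in these e2e tests are driven purely through the
# filesystem: dropping participants.json into api_data/<app>/launch starts the
# job, and the app reports back via api_data/<app>/done/results.json. Shapes as
# used/asserted in this test; the accuracy value is illustrative:
launch_participants = {
    "participants": ["user1@openmined.org", "user2@openmined.org", "user3@openmined.org"],
}
expected_results_shape = {
    "accuracy": 87.5,        # asserted >= 10.0 further below
    "participants": ["user1@openmined.org", "user2@openmined.org", "user3@openmined.org"],
    "missing_peers": [],     # asserted empty once every participant has contributed
}
# ------------------------------------------------------------------------------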
agg_client.api_data_dir(AGGREGATOR_API_NAME) / "done" + results_file = done_dir / "results.json" + await e2e_context.wait_for_path(results_file, timeout=90, interval=1) + + result = json.loads(results_file.read_text()) + logger.info(f"Validating results\n{result}") + assert result["accuracy"] >= 10.0 + assert set(result["participants"]) == set(AGGREGATOR_CONFIG["participants"]) + assert len(result["missing_peers"]) == 0 + + logger.info("Check that the launch and running dir are empty") + assert len(list(launch_dir.iterdir())) == 0 + agg_running_dir = agg_client.api_data_dir(AGGREGATOR_API_NAME) / "running" + assert len(list(agg_running_dir.iterdir())) == 0 diff --git a/packages/syftbox/tests/e2e/test_basic_aggregator.py b/packages/syftbox/tests/e2e/test_basic_aggregator.py new file mode 100644 index 00000000000..cf4e6768bc5 --- /dev/null +++ b/packages/syftbox/tests/e2e/test_basic_aggregator.py @@ -0,0 +1,54 @@ +# Example test using the framework +import json +import random + +import pytest +from loguru import logger + +from tests.e2e.conftest import Client, E2EContext, Server + + +def deployment_config(): + return { + "e2e_name": "basic_aggregator", + "server": Server(port=5001), + "clients": [ + Client(name="agg", port=8080, apps=["https://github.com/OpenMined/basic_aggregator"]), + Client(name="user1", port=8081, server_port=5001, apps=["https://github.com/OpenMined/adder"]), + Client(name="user2", port=8082, server_port=5001, apps=["https://github.com/OpenMined/adder"]), + ], + } + + +@pytest.mark.asyncio +@pytest.mark.parametrize("e2e_context", [deployment_config()], indirect=True, ids=["basic_aggregator"]) +async def test_e2e_basic_aggregator(e2e_context: E2EContext): + logger.info(f"Starting E2E '{e2e_context.e2e_name}'") + e2e_context.reset_test_dir() + + await e2e_context.start_all() + await e2e_context.wait_for_api("basic_aggregator", e2e_context.clients[0]) + + total = 0 + + # write a file "values.txt" in the public directory of both users + logger.info("Copying values in public directories") + for client in e2e_context.clients[1:]: + values_txt = client.public_dir / "value.txt" + val = random.random() * 100 + values_txt.write_text(str(val)) + total += val + logger.debug(f"{client.name} value: {val}") + + # wait for the aggregator to finish + logger.info("Waiting for aggregator to generate results") + result_path = e2e_context.clients[0].api_data_dir("basic_aggregation") / "results.json" + await e2e_context.wait_for_path(result_path, timeout=60, interval=1) + + # check the result + result = json.loads(result_path.read_text()) + logger.info(f"Validating results\n{result}") + assert total == result["total"] + assert "agg@openmined.org" in result["missing"] + assert "user1@openmined.org" in result["participants"] + assert "user2@openmined.org" in result["participants"] diff --git a/packages/syftbox/tests/e2e/test_fl_model_training.py b/packages/syftbox/tests/e2e/test_fl_model_training.py new file mode 100644 index 00000000000..34e29d0300d --- /dev/null +++ b/packages/syftbox/tests/e2e/test_fl_model_training.py @@ -0,0 +1,254 @@ +import asyncio +import json +import secrets +import shutil +from pathlib import Path + +import pytest +from loguru import logger + +from tests.e2e.conftest import Client, E2EContext, Server + +PROJECT_NAME = "my_cool_fl_project" + +AGGREGATOR_CONFIG = { + "project_name": PROJECT_NAME, + "aggregator": "agg@openmined.org", + "participants": ["user1@openmined.org", "user2@openmined.org"], + "model_arch": "model.py", + "model_class_name": "FLModel", + 
"model_weight": "global_model_weight.pt", + "test_dataset": "mnist_test_dataset.pt", + "rounds": 3, + "epoch": 10, + "learning_rate": 0.1, +} +MAX_COPY_PVT_DATASETS = 3 + + +def deployment_config(): + return { + "e2e_name": "fl_model_training", + "server": Server(port=5001), + "clients": [ + Client( + name="agg", + port=8080, + apps=["https://github.com/OpenMined/fl_aggregator"], + ), + Client( + name="user1", + port=8081, + server_port=5001, + apps=[ + "https://github.com/OpenMined/fl_client", + ], + ), + Client( + name="user2", + port=8082, + server_port=5001, + apps=[ + "https://github.com/OpenMined/fl_client", + ], + ), + ], + } + + +async def copy_private_data(e2e_client: E2EContext, client: Client): + logger.info(f"Copying private data to FL client: {client.email}") + await e2e_client.wait_for_api("fl_client", client) + private_data_dir = client.data_dir / "private" / "fl_client" + private_data_dir.mkdir(parents=True, exist_ok=True) + + # sample private data + sample_private_data_dir = client.api_path("fl_client") / "mnist_samples" + sample_private_data_files = list(sample_private_data_dir.glob("*.pt")) + + for _ in range(MAX_COPY_PVT_DATASETS): + idx = secrets.randbelow(len(sample_private_data_files)) + mnist_data_file = sample_private_data_files[idx] + logger.debug(f"Copying {mnist_data_file.resolve()} to {client.email} private dir") + shutil.copy(mnist_data_file, private_data_dir) + assert Path(private_data_dir, mnist_data_file.name).exists() + + logger.info(f"Private data successfully added for FL client: {client.email}") + + +async def check_fl_client_installed(e2e_client: E2EContext, client: Client): + """Check if FL client is installed and running""" + logger.info(f"Checking if FL client is installed for {client.email}") + fl_client_dir = client.api_data_dir("fl_client") + + # App is installed in api_data_dir + await e2e_client.wait_for_path(fl_client_dir, timeout=30) + + # Check if request, running and done folders are created + await e2e_client.wait_for_path(fl_client_dir / "request", timeout=30) + await e2e_client.wait_for_path(fl_client_dir / "running", timeout=30) + await e2e_client.wait_for_path(fl_client_dir / "done", timeout=30) + + logger.info(f"FL client installed for {client.email}") + + +async def approve_data_request(e2e_client: E2EContext, client: Client): + """Approve data request for FL client""" + + logger.info(f"Approving data request for {client.email}") + request_dir = client.api_data_dir("fl_client") / "request" + project_dir = request_dir / PROJECT_NAME + + await e2e_client.wait_for_path(project_dir, timeout=30, interval=1) + assert project_dir.exists() + + running_dir = client.api_data_dir("fl_client") / "running" + + # Approve request + # Approve action is moving project dir to running dir + shutil.copytree(project_dir, running_dir / PROJECT_NAME, dirs_exist_ok=True) + shutil.rmtree(project_dir) + + # Wait for fl_config.json to be copied + await e2e_client.wait_for_path( + running_dir / PROJECT_NAME / "fl_config.json", + timeout=90, + ) + + assert Path(running_dir / PROJECT_NAME).exists() + assert Path(running_dir / PROJECT_NAME / "fl_config.json").exists() + logger.info(f"Data request approved for {client.email}") + + +async def check_for_training_complete(e2e_client: E2EContext, client: Client): + logger.info(f"Checking for training completion for {client.email}") + done_dir = client.api_data_dir("fl_client") / "done" + assert done_dir.exists() + + await e2e_client.wait_for_path(done_dir / PROJECT_NAME, timeout=300, interval=1) + + agg_weights_dir = 
done_dir / PROJECT_NAME / "agg_weights" + round_weights_dir = done_dir / PROJECT_NAME / "round_weights" + + assert agg_weights_dir.exists() + assert round_weights_dir.exists() + + assert len(list(round_weights_dir.glob("*.pt"))) == AGGREGATOR_CONFIG["rounds"] + assert len(list(agg_weights_dir.glob("*.pt"))) == AGGREGATOR_CONFIG["rounds"] + 1 + + +def validate_participant_data(participants: dict, key: str, expected_value: str): + for participant in participants: + assert key in participant + assert str(participant[key]) == str(expected_value) + + +async def validate_project_folder_empty(e2e_context: E2EContext, client: Client, timeout: int = 30): + project_folder_dir = client.api_data_dir("fl_client") / "running" / PROJECT_NAME + start_time = asyncio.get_event_loop().time() + while project_folder_dir.exists(): + await asyncio.sleep(1) + if start_time + timeout < asyncio.get_event_loop().time(): + raise TimeoutError(f"Project folder {project_folder_dir} not deleted in {timeout} seconds") + + assert not project_folder_dir.exists() + + +@pytest.mark.asyncio +@pytest.mark.parametrize("e2e_context", [deployment_config()], indirect=True, ids=["fl_model_training"]) +async def test_e2e_fl_model_aggregator(e2e_context: E2EContext): + logger.info(f"Starting E2E '{e2e_context.e2e_name}'") + e2e_context.reset_test_dir() + agg_client = e2e_context.clients[0] + + await e2e_context.start_all() + await e2e_context.wait_for_api("fl_aggregator", agg_client, timeout=60) + + # copy launch config + logger.info("Copying launch config") + + # sample launch config + sample_dir = agg_client.api_dir / "fl_aggregator" / "samples" + + sample_launch_config = sample_dir / "launch_config" + + # global_model_weight.pt model_arch.py + + launch_dir = agg_client.api_data_dir("fl_aggregator") / "launch" + launch_dir.mkdir(parents=True, exist_ok=True) + + # copy model_arch.py and global_model_weight.pt + shutil.copy(sample_launch_config / AGGREGATOR_CONFIG["model_arch"], launch_dir) + shutil.copy(sample_launch_config / AGGREGATOR_CONFIG["model_weight"], launch_dir) + + # Copy Config + fl_config = launch_dir / "fl_config.json" + fl_config.write_text(json.dumps(AGGREGATOR_CONFIG)) + + # Copy test dataset + logger.info("Copying private test dataset for global model evaluation") + sample_test_dataset = sample_dir / "test_data" / AGGREGATOR_CONFIG["test_dataset"] + assert sample_test_dataset.exists() + + test_data_dir = agg_client.data_dir / "private" / "fl_aggregator" + await e2e_context.wait_for_path(test_data_dir, timeout=240) + assert test_data_dir.exists() + + # Add test dataset for global model evaluation + shutil.copy(src=sample_test_dataset, dst=test_data_dir) + logger.info(f"Test dataset copied to {test_data_dir} from {sample_test_dataset}") + + # Add private tests for fl clients + logger.info("Copying private data to all FL clients") + await asyncio.gather(*[copy_private_data(e2e_context, fl_client) for fl_client in e2e_context.clients[1:]]) + + # Check FL client installed + await asyncio.gather(*[check_fl_client_installed(e2e_context, fl_client) for fl_client in e2e_context.clients[1:]]) + + # Approve data request for all FL Clients + await asyncio.gather(*[approve_data_request(e2e_context, fl_client) for fl_client in e2e_context.clients[1:]]) + + # Check dashboard metric data available + logger.info("Checking for dashboard metric data") + agg_public_dir = agg_client.public_dir / "fl" / PROJECT_NAME + assert agg_public_dir.exists() + + assert (agg_public_dir / "participants.json").exists() + assert (agg_public_dir / 
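# --- Editor's note (illustrative, not part of this patch) ---------------------
# How the artifact counts in this FL test work out with rounds == 3: each
# participant presumably writes one weight file per round, while the aggregator
# writes an initial global weight plus one aggregated weight per round, and the
# accuracy dashboard gets one entry per evaluation:
rounds = 3
round_weights_expected = rounds         # files in done/<project>/round_weights (asserted above)
agg_weights_expected = rounds + 1       # files in done/<project>/agg_weights (asserted above)
accuracy_entries_expected = rounds + 1  # entries in accuracy_metrics.json (asserted below)
# ------------------------------------------------------------------------------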
"accuracy_metrics.json").exists() + + # Check for training complete + logger.info("Checking for training completion") + await asyncio.gather( + *[check_for_training_complete(e2e_context, fl_client) for fl_client in e2e_context.clients[1:]] + ) + logger.info("All participants have completed training") + + # Validate results + logger.info("Validating results") + + participant_file = agg_public_dir / "participants.json" + participants = json.loads(participant_file.read_text()) + assert len(participants) == len(AGGREGATOR_CONFIG["participants"]) + + logger.info("Validating participants metrics") + participant_emails = [participant["Email"] for participant in participants] + assert participant_emails == AGGREGATOR_CONFIG["participants"] + + # Validate participant metrics + validate_participant_data(participants, "Fl Client Installed", True) + validate_participant_data(participants, "Project Approved", True) + validate_participant_data(participants, "Added Private Data", True) + validate_participant_data(participants, "Round (current/total)", "3/3") + + accuracy_file = agg_public_dir / "accuracy_metrics.json" + rounds_accuracy = json.loads(accuracy_file.read_text()) + + logger.info("Validating accuracy metrics") + assert len(rounds_accuracy) == AGGREGATOR_CONFIG["rounds"] + 1 + # Last round accuracy should be greater than 0.1 + assert rounds_accuracy[-1]["accuracy"] >= 0.1 + + # Validate running folder is empty, post training + await asyncio.gather( + *[validate_project_folder_empty(e2e_context, fl_client) for fl_client in e2e_context.clients[1:]] + ) diff --git a/packages/syftbox/tests/e2e/test_launch.py b/packages/syftbox/tests/e2e/test_launch.py new file mode 100644 index 00000000000..dbd6c7447dc --- /dev/null +++ b/packages/syftbox/tests/e2e/test_launch.py @@ -0,0 +1,32 @@ +import asyncio + +import pytest +from loguru import logger + +from tests.e2e.conftest import Client, E2EContext, Server + + +def deployment_config(): + return { + "e2e_name": "launch", + "server": Server(port=5001), + "clients": [ + Client(name="user1", port=8080, server_port=5001), + Client(name="user2", port=8081, server_port=5001), + ], + } + + +@pytest.mark.asyncio +@pytest.mark.parametrize("e2e_context", [deployment_config()], indirect=True, ids=["deployment"]) +async def test_e2e_launch(e2e_context: E2EContext): + logger.info(f"Starting E2E '{e2e_context.e2e_name}'") + e2e_context.reset_test_dir() + await e2e_context.start_all() + await asyncio.sleep(15) + + for client in e2e_context.clients: + assert client.datasite_dir.exists() + assert client.api_dir.exists() + assert client.public_dir.exists() + assert client.api_data_dir("").exists() diff --git a/packages/syftbox/tests/e2e/test_model_aggregator.py b/packages/syftbox/tests/e2e/test_model_aggregator.py new file mode 100644 index 00000000000..7343f7bca04 --- /dev/null +++ b/packages/syftbox/tests/e2e/test_model_aggregator.py @@ -0,0 +1,103 @@ +# Example test using the framework + +import asyncio +import json +import secrets +import shutil +from pathlib import Path + +import pytest +from loguru import logger + +from tests.e2e.conftest import Client, E2EContext, Server + +AGGREGATOR_CONFIG = { + "participants": ["user1@openmined.org", "user2@openmined.org", "user3@openmined.org"], +} +MAX_COPY_MODELS = 2 + + +def deployment_config(): + return { + "e2e_name": "model_aggregator", + "server": Server(port=5001), + "clients": [ + Client( + name="agg", + port=8080, + apps=["https://github.com/OpenMined/pretrained_model_aggregator"], + ), + Client( + name="user1", + 
port=8081, + server_port=5001, + apps=[ + "https://github.com/OpenMined/pretrained_model_local", + ], + ), + Client( + name="user2", + port=8082, + server_port=5001, + apps=[ + "https://github.com/OpenMined/pretrained_model_local", + ], + ), + Client( + name="user3", + port=8083, + server_port=5001, + apps=[ + "https://github.com/OpenMined/pretrained_model_local", + ], + ), + ], + } + + +async def copy_model_to_public(e2e_context: E2EContext, client: Client): + await e2e_context.wait_for_api("pretrained_model_local", client) + pretrained_models_dir = client.api_path("pretrained_model_local") / "pretrained_models" + all_models = list(pretrained_models_dir.glob("*.pt")) + for _ in range(MAX_COPY_MODELS): + # random.random is not concurrency safe, using secrets.randbelow + rand_idx = secrets.randbelow(len(all_models)) + random_model = all_models[rand_idx] + logger.debug(f"Copying {random_model} to {client.email} public dir") + shutil.copy(random_model, client.public_dir) + assert Path(client.public_dir, random_model.name).exists() + + +@pytest.mark.asyncio +@pytest.mark.parametrize("e2e_context", [deployment_config()], indirect=True, ids=["model_aggregator"]) +async def test_e2e_model_aggregator(e2e_context: E2EContext): + logger.info(f"Starting E2E '{e2e_context.e2e_name}'") + e2e_context.reset_test_dir() + agg_client = e2e_context.clients[0] + + await e2e_context.start_all() + await e2e_context.wait_for_api("pretrained_model_aggregator", agg_client) + + # copy launch config + logger.info("Copying launch config") + launch_dir = agg_client.api_data_dir("pretrained_model_aggregator") / "launch" + launch_dir.mkdir(parents=True, exist_ok=True) + participants_file = launch_dir / "participants.json" + participants_file.write_text(json.dumps(AGGREGATOR_CONFIG)) + + # copy model to public dir + logger.info("Copying models for all users in their public directories") + await asyncio.gather(*[copy_model_to_public(e2e_context, client) for client in e2e_context.clients[1:]]) + + # wait for results + logger.info("Waiting for aggregator to generate results") + done_dir = agg_client.api_data_dir("pretrained_model_aggregator") / "done" + results_file = done_dir / "results.json" + await e2e_context.wait_for_path(results_file, timeout=120, interval=1) + + # check results + result = json.loads(results_file.read_text()) + logger.info(f"Validating results\n{result}") + assert result["accuracy"] >= 10.0 + assert set(result["participants"]) == set(AGGREGATOR_CONFIG["participants"]) + assert len(result["missing_peers"]) == 0 diff --git a/packages/syftbox/tests/e2e/test_ring.py b/packages/syftbox/tests/e2e/test_ring.py new file mode 100644 index 00000000000..a822b25fefb --- /dev/null +++ b/packages/syftbox/tests/e2e/test_ring.py @@ -0,0 +1,69 @@ +import asyncio +import json +import random + +import pytest +from loguru import logger + +from tests.e2e.conftest import Client, E2EContext, Server + +INIT_DATA = { + "participants": [ + "user1@openmined.org", + "user2@openmined.org", + "user1@openmined.org", + ], + "data": 0, + "current_index": 0, +} + + +def deployment_config(): + return { + "e2e_name": "ring", + "server": Server(port=5001), + "clients": [ + Client(name="user1", port=8080, server_port=5001, apps=["https://github.com/OpenMined/ring"]), + Client(name="user2", port=8081, server_port=5001, apps=["https://github.com/OpenMined/ring"]), + ], + } + + +@pytest.mark.asyncio +@pytest.mark.parametrize("e2e_context", [deployment_config()], indirect=True, ids=["deployment"]) +async def test_e2e_ring(e2e_context: 
E2EContext): + logger.info(f"Starting E2E '{e2e_context.e2e_name}'") + e2e_context.reset_test_dir() + await e2e_context.start_all() + + # wait for all clients to install ring + logger.info("Waiting for clients to install ring app") + await asyncio.gather(*[e2e_context.wait_for_api("ring", client) for client in e2e_context.clients]) + + values = [] + + # copy secrets to all clients + logger.info("Copying secrets to all clients") + for client in e2e_context.clients: + val = random.randint(0, 100) + api_path = client.api_path("ring") + api_path.joinpath("secret.json").write_text(json.dumps({"data": val})) + values.append(val) + logger.debug(f"{client.name} secret: {val}") + + # kickstart ring + logger.info("Initiating ring by copying data.json") + init_user = e2e_context.clients[0] + running_dir = init_user.api_data_dir("ring") / "running" + running_dir.mkdir(parents=True, exist_ok=True) + running_dir.joinpath("data.json").write_text(json.dumps(INIT_DATA)) + + logger.info("Waiting for ring results to be available") + output = init_user.api_data_dir("ring") / "done" / "data.json" + await e2e_context.wait_for_path(output, timeout=120, interval=1) + + # check the output + result = json.loads(output.read_text()) + logger.info(f"Validating results\n{result}") + assert result["data"] == sum(values) + values[0] + assert result["current_index"] == 2 diff --git a/packages/syftbox/tests/integration/.gitkeep b/packages/syftbox/tests/integration/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/tests/integration/app_plugin/app_plugin_test.py b/packages/syftbox/tests/integration/app_plugin/app_plugin_test.py new file mode 100644 index 00000000000..b2e4a4bc1cd --- /dev/null +++ b/packages/syftbox/tests/integration/app_plugin/app_plugin_test.py @@ -0,0 +1,77 @@ +import threading +import time +from pathlib import Path +from secrets import token_hex + +from syftbox.client.plugins.apps import run_apps +from tests.integration.app_plugin.fixtures.app_mocks import AppMockFactory + + +def verify_app_execution(app_dir: Path, expected_output: str): + """Verify app execution results.""" + app_log_path = app_dir / "app.log" + assert app_log_path.exists() + assert app_log_path.read_text().strip() == expected_output + + +def verify_running_apps(running_apps: dict, expected_app_name: str = None): + """Verify running apps state.""" + if expected_app_name: + assert len(running_apps) == 1 + assert expected_app_name in running_apps + else: + assert len(running_apps) == 0 + + +def test_app_plugin_without_config(tmp_path, monkeypatch): + """Test app plugin execution without configuration.""" + app_dir = tmp_path / token_hex(8) + app_name = "test_app_without_config" + mock_app_dir, expected_output = AppMockFactory.create_app_without_config(apps_dir=app_dir, app_name=app_name) + + assert mock_app_dir.exists() + + # Patch necessary attributes + PATCHED_RUNNING = {} + monkeypatch.setattr("syftbox.client.plugins.apps.DEFAULT_APPS_PATH", "") + monkeypatch.setattr("syftbox.client.plugins.apps.RUNNING_APPS", PATCHED_RUNNING) + + # Run app + no_config = "" # dummy app doesn't need SYFTBOX_CLIENT_CONFIG_PATH + run_apps(app_dir, no_config) + + # Verify results + verify_running_apps(PATCHED_RUNNING) + verify_app_execution(mock_app_dir, expected_output) + + +def test_app_plugin_with_config(tmp_path, monkeypatch): + """Test app plugin execution with configuration.""" + app_dir = tmp_path / token_hex(8) + app_name = "test_app_with_config" + mock_app_dir, expected_output = 
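# --- Editor's note (illustrative, not part of this patch) ---------------------
# Why test_e2e_ring above expects `sum(values) + values[0]`: the participants
# list is [user1, user2, user1], so the data value presumably accumulates
# user1's secret twice as it travels around the ring, and current_index stops at
# 2, the index of the final participants entry. With illustrative secrets:
values = [7, 30]                 # user1's and user2's secret.json contents
expected_data = 7 + 30 + 7       # == sum(values) + values[0] == 44
expected_current_index = 2       # last slot of the 3-entry participants list
# ------------------------------------------------------------------------------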
AppMockFactory.create_app_with_config(apps_dir=app_dir, app_name=app_name) + + assert mock_app_dir.exists() + + # Patch necessary attributes + PATCHED_RUNNING = {} + EVT = threading.Event() + monkeypatch.setattr("syftbox.client.plugins.apps.DEFAULT_APPS_PATH", "") + monkeypatch.setattr("syftbox.client.plugins.apps.RUNNING_APPS", PATCHED_RUNNING) + monkeypatch.setattr("syftbox.client.plugins.apps.EVENT", EVT) + + # Run app + no_config = "" # dummy app doesn't need SYFTBOX_CLIENT_CONFIG_PATH + run_apps(app_dir, no_config) + time.sleep(2) + + # Verify results + verify_running_apps(PATCHED_RUNNING, app_name) + verify_app_execution(mock_app_dir, expected_output) + + # This doesn't kill the process gracefully, + # later need to implement a graceful shutdown mechanism for apps + if app_name in PATCHED_RUNNING: + EVT.set() + app_thread: threading.Thread = PATCHED_RUNNING[app_name] + app_thread.join(timeout=1) diff --git a/packages/syftbox/tests/integration/app_plugin/conftest.py b/packages/syftbox/tests/integration/app_plugin/conftest.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/tests/integration/app_plugin/fixtures/app_mocks.py b/packages/syftbox/tests/integration/app_plugin/fixtures/app_mocks.py new file mode 100644 index 00000000000..84fda44ae59 --- /dev/null +++ b/packages/syftbox/tests/integration/app_plugin/fixtures/app_mocks.py @@ -0,0 +1,41 @@ +import json +from pathlib import Path + + +class AppMockFactory: + @staticmethod + def create_app_without_config(apps_dir: Path, app_name: str) -> tuple[Path, str]: + """Create a mock app without configuration.""" + mock_app_dir = Path(apps_dir) / app_name + script_output = "Test executed" + + mock_app_dir.mkdir(parents=True, exist_ok=True) + run_script = mock_app_dir / "run.sh" + + run_script.write_text(f"#!/bin/bash\necho '{script_output}' | tee app.log") + run_script.chmod(0o755) + + return mock_app_dir, script_output + + @staticmethod + def create_app_with_config(apps_dir: Path, app_name: str) -> tuple[Path, str]: + """Create a mock app with configuration.""" + mock_app_dir = Path(apps_dir) / app_name + mock_app_dir.mkdir(parents=True, exist_ok=True) + + test_value = "test_value" + config_json = { + "app": { + "env": {"TEST_VAR": test_value}, + "run": {"schedule": None, "interval": 1, "command": ["./test.sh"]}, + } + } + + config_file = mock_app_dir / "config.json" + config_file.write_text(json.dumps(config_json)) + + test_script = mock_app_dir / "test.sh" + test_script.write_text("#!/bin/bash\necho $TEST_VAR | tee app.log") + test_script.chmod(0o755) + + return mock_app_dir, test_value diff --git a/packages/syftbox/tests/integration/client/api_request_name_test.py b/packages/syftbox/tests/integration/client/api_request_name_test.py new file mode 100644 index 00000000000..a4c1a7f1904 --- /dev/null +++ b/packages/syftbox/tests/integration/client/api_request_name_test.py @@ -0,0 +1,51 @@ +from pathlib import Path + +import syftbox +from syftbox.client.plugins.apps import run_apps + + +class AppFixture: + @staticmethod + def create_app_with_name_check(apps_dir: Path, app_name: str) -> tuple[Path, str]: + """Create a test app that outputs its api_request_name""" + app_dir = apps_dir / app_name + app_dir.mkdir(parents=True, exist_ok=True) + + # Create a Python script that uses api_request_name + test_script = app_dir / "main.py" + test_script.write_text(""" +from syftbox.lib import Client + +client = Client.load() +print(f"API_NAME:{client.api_request_name}") +""") + + # Create runner script + 
SYFTBOX_SOURCE_PATH = Path(syftbox.__file__).parent.parent + run_script = app_dir / "run.sh" + run_script.write_text(f"""#!/bin/bash +set -e + +uv venv && . .venv/bin/activate +uv pip install --upgrade --editable {SYFTBOX_SOURCE_PATH} +python3 main.py | tee app.log +deactivate +""") + run_script.chmod(0o755) + return app_dir + + +def test_api_request_name_in_app(tmp_path, mock_config): + """Test that api_request_name returns correct name when called from within an app""" + # Setup + app_name = "test_app_that_echoes_name" + app_dir = AppFixture.create_app_with_name_check(tmp_path, app_name) + + # Run the app + run_apps(tmp_path, mock_config.path) + + # Verify the output + app_log = app_dir / "app.log" + assert app_log.exists() + output = app_log.read_text().strip() + assert f"API_NAME:{app_name}" in output diff --git a/packages/syftbox/tests/integration/client/conftest.py b/packages/syftbox/tests/integration/client/conftest.py new file mode 100644 index 00000000000..61b4ca766ed --- /dev/null +++ b/packages/syftbox/tests/integration/client/conftest.py @@ -0,0 +1,30 @@ +from pathlib import Path +from typing import Any, Generator + +import pytest + +from syftbox.lib.client_shim import SyftClientConfig + + +@pytest.fixture +def mock_config(monkeypatch, tmp_path) -> Generator[SyftClientConfig, Any, None]: + config_path = Path(tmp_path, "config.json") + data_dir = Path(tmp_path) + conf = SyftClientConfig( + path=config_path, + data_dir=data_dir, + email="test@openmined.org", + client_url="http://test:8080", + ) + conf.save() + conf.data_dir.mkdir(parents=True, exist_ok=True) + + def mock_load(*args, **kwargs): + nonlocal conf + return conf + + monkeypatch.setattr(SyftClientConfig, "load", mock_load) + + yield conf + + monkeypatch.undo() diff --git a/packages/syftbox/tests/integration/sync/conftest.py b/packages/syftbox/tests/integration/sync/conftest.py new file mode 100644 index 00000000000..41976b1f0fa --- /dev/null +++ b/packages/syftbox/tests/integration/sync/conftest.py @@ -0,0 +1,93 @@ +from collections.abc import Generator +from pathlib import Path + +import pytest +from fastapi import FastAPI +from fastapi.testclient import TestClient + +from syftbox import __version__ +from syftbox.client.base import PluginManagerInterface, SyftBoxContextInterface +from syftbox.client.core import SyftBoxContext +from syftbox.client.server_client import SyftBoxClient +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.datasite import create_datasite +from syftbox.lib.http import HEADER_SYFTBOX_VERSION +from syftbox.lib.workspace import SyftWorkspace +from syftbox.server.migrations import run_migrations +from syftbox.server.server import create_server +from syftbox.server.settings import ServerSettings +from tests.unit.server.conftest import get_access_token + + +def authenticate_testclient(client: TestClient, email: str) -> None: + access_token = get_access_token(client, email) + client.headers["email"] = email + client.headers["Authorization"] = f"Bearer {access_token}" + client.headers[HEADER_SYFTBOX_VERSION] = __version__ + + +class MockPluginManager(PluginManagerInterface): + pass + + +def setup_datasite(tmp_path: Path, server_client: TestClient, email: str) -> SyftBoxContextInterface: + data_dir = tmp_path / email + config = SyftClientConfig( + path=data_dir / "config.json", + data_dir=data_dir, + email=email, + server_url=str(server_client.base_url), + client_url="http://localhost:8080", + ) + config.save() + ws = SyftWorkspace(config.data_dir) + ws.mkdirs() + context = 
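# --- Editor's sketch (illustrative, not part of this patch) -------------------
# The sync integration fixtures below run the FastAPI server in-process with
# auth disabled and hand tests either fully set-up datasite contexts or a bare
# authenticated TestClient. A test that only needs the raw client could look
# like this (assuming /info, which the e2e helpers also poll, responds 200):
from fastapi.testclient import TestClient


def test_server_info(server_client: TestClient) -> None:  # fixture defined below
    resp = server_client.get("/info")
    assert resp.status_code == 200
# ------------------------------------------------------------------------------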
SyftBoxContext( + config, + ws, + SyftBoxClient(conn=server_client), + MockPluginManager(), + ) + create_datasite(context) + authenticate_testclient(server_client, email) + + return context + + +@pytest.fixture(scope="function") +def server_app(tmp_path: Path) -> FastAPI: + """ + NOTE we are spawning a new server thread for each datasite, + this is not ideal but it is the same as using multiple uvicorn workers + """ + path = tmp_path / "server" + path.mkdir() + settings = ServerSettings.from_data_folder(path) + settings.auth_enabled = False + settings.otel_enabled = False + server_app = create_server(settings) + run_migrations(settings) + return server_app + + +@pytest.fixture() +def datasite_1(tmp_path: Path, server_app: FastAPI) -> SyftBoxContextInterface: + email = "user_1@openmined.org" + with TestClient(server_app) as client: + client.headers[HEADER_SYFTBOX_VERSION] = __version__ + return setup_datasite(tmp_path, client, email) + + +@pytest.fixture() +def datasite_2(tmp_path: Path, server_app: FastAPI) -> SyftBoxContextInterface: + email = "user_2@openmined.org" + with TestClient(server_app) as client: + client.headers[HEADER_SYFTBOX_VERSION] = __version__ + return setup_datasite(tmp_path, client, email) + + +@pytest.fixture(scope="function") +def server_client(server_app: FastAPI) -> Generator[TestClient, None, None]: + with TestClient(server_app) as client: + client.headers[HEADER_SYFTBOX_VERSION] = __version__ + yield client diff --git a/packages/syftbox/tests/integration/sync/ignore_test.py b/packages/syftbox/tests/integration/sync/ignore_test.py new file mode 100644 index 00000000000..45fad533d3e --- /dev/null +++ b/packages/syftbox/tests/integration/sync/ignore_test.py @@ -0,0 +1,124 @@ +from pathlib import Path + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.plugins.sync.datasite_state import DatasiteState +from syftbox.client.utils.dir_tree import create_dir_tree +from syftbox.client.utils.display import display_file_tree +from syftbox.lib.ignore import IGNORE_FILENAME, filter_ignored_paths + +ignore_file = """ +# Exlude alice datasite +/alice@example.com + +# exclude all occurrences bob@example.com +bob@example.com + +# Exclude all "large" folders under any datasite +*/large/* + +# Include important_file.pdf under excluded folder +!/john@example.com/large/important_file.pdf + +# General excludes +*.tmp +_.syftignore +*.py[cod] +""" + +paths_with_result = [ + # Should be ignored + ("alice@example.com/file1.txt", True), + ("john@example.com/results/bob@example.com/file1.txt", True), + ("john@example.com/large/file1.txt", True), + ("john@example.com/docs/file1.tmp", True), + ("script.pyc", True), + # Should not be ignored + ("john@example.com/results/alice@example.com/file1.txt", False), + ("john@example.com/large/important_file.pdf", False), + ("john@example.com/docs/file3.pdf", False), + ("script.py", False), +] + + +def test_ignore_file(tmp_path): + # without ignore file + ignore_path = tmp_path / IGNORE_FILENAME + ignore_path.unlink(missing_ok=True) + + paths, results = zip(*paths_with_result) + paths = [Path(p) for p in paths] + filtered_paths = filter_ignored_paths(tmp_path, paths) + assert filtered_paths == paths + + # with ignore file + ignore_path.write_text(ignore_file) + + expected_result = [p for p, r in zip(paths, results) if r is False] + filtered_paths = filter_ignored_paths(tmp_path, paths) + assert filtered_paths == expected_result + + +def test_ignore_datasite(datasite_1: SyftBoxContextInterface, datasite_2: 
SyftBoxContextInterface) -> None: + datasite_2_files = { + datasite_2.email: { + "visible_file.txt": "content", + "ignored_file.pyc": "content", + } + } + num_files = 2 + num_visible_files = 1 + create_dir_tree(Path(datasite_1.workspace.datasites), datasite_2_files) + display_file_tree(Path(datasite_1.workspace.datasites)) + + # ds1 gets their local state of ds2 + datasite_state = DatasiteState(context=datasite_1, email=datasite_2.email) + changes = datasite_state.get_datasite_changes() + + assert len(changes.files) == num_visible_files + assert changes.files[0].path == Path(datasite_2.email) / "visible_file.txt" + + # ds1 ignores ds2 + ignore_path = Path(datasite_1.workspace.datasites) / IGNORE_FILENAME + with ignore_path.open("a") as f: + f.write(f"\n/{datasite_2.email}\n") + + # ds1 gets their local state of ds2 + changes = datasite_state.get_datasite_changes() + assert len(changes.files) == 0 + + # remove ignore file + ignore_path.unlink() + changes = datasite_state.get_datasite_changes() + assert len(changes.files) == num_files + + +def test_ignore_symlinks(datasite_1: SyftBoxContextInterface) -> None: + # create a symlinked folder containing a file + folder_to_symlink = Path(datasite_1.workspace.data_dir) / "folder" + folder_to_symlink.mkdir() + symlinked_file = folder_to_symlink / "file.txt" + symlinked_file.write_text("content") + + symlinked_folder = Path(datasite_1.workspace.datasites) / "symlinked_folder" + symlinked_folder.symlink_to(folder_to_symlink) + + paths = [ + Path("symlinked_folder/file.txt"), + Path("symlinked_folder/non_existent_file.txt"), + Path("symlinked_folder/subfolder/file.txt"), + ] + + filtered_paths = filter_ignored_paths(datasite_1.workspace.datasites, paths, ignore_symlinks=True) + assert filtered_paths == [] + + +def test_ignore_hidden_files(datasite_1: SyftBoxContextInterface) -> None: + paths = [ + Path(".hidden_file.txt"), # Hidden files are filtered + Path("visible_file.txt"), # Visible files are not filtered + Path("subfolder/.hidden_file.txt"), # Hidden files in folders are filtered + Path(".subfolder/visible_file.txt"), # Files in hidden folders are filtered + ] + + filtered_paths = filter_ignored_paths(datasite_1.workspace.datasites, paths, ignore_hidden_files=True) + assert filtered_paths == [Path("visible_file.txt")] diff --git a/packages/syftbox/tests/integration/sync/queue_test.py b/packages/syftbox/tests/integration/sync/queue_test.py new file mode 100644 index 00000000000..e8b9288fae9 --- /dev/null +++ b/packages/syftbox/tests/integration/sync/queue_test.py @@ -0,0 +1,57 @@ +import random +from pathlib import Path +from queue import Empty + +import pytest +from pydantic import BaseModel + +from syftbox.client.plugins.sync.queue import SyncQueue, SyncQueueItem + + +class MockFileChangeInfo(BaseModel): # noqa: F821 + path: Path + + +def test_sync_queue(): + queue = SyncQueue() + + n = 10 + + paths = [Path(f"file_{i}.txt") for i in range(n)] + priorities = [random.uniform(0, 1000) for _ in range(n)] + priorities[0] = int(priorities[0]) # int and float should both work + items = [SyncQueueItem(priority, MockFileChangeInfo(path=path)) for path, priority in zip(paths, priorities)] + items_sorted = sorted(items, key=lambda x: x.priority) + + for item in items: + queue.put(item) + + assert not queue.empty() + assert set(queue.all_items.keys()) == set(paths) + + for item in items_sorted: + assert queue.get() == item + + assert queue.empty() + assert len(queue.all_items) == 0 + with pytest.raises(Empty): + queue.get(block=False) + + +def 
test_sync_queue_dedupe(): + queue = SyncQueue() + + path = Path("file.txt") + + queue.put(SyncQueueItem(1, MockFileChangeInfo(path=path))) + assert set(queue.all_items.keys()) == {path} + assert not queue.empty() + + for _ in range(10): + queue.put(SyncQueueItem(random.random(), MockFileChangeInfo(path=path))) + + assert set(queue.all_items.keys()) == {path} + + queue.get() + assert len(queue.all_items) == 0 + assert queue.empty() diff --git a/packages/syftbox/tests/integration/sync/sync_reject_test.py b/packages/syftbox/tests/integration/sync/sync_reject_test.py new file mode 100644 index 00000000000..e7ab12c57b0 --- /dev/null +++ b/packages/syftbox/tests/integration/sync/sync_reject_test.py @@ -0,0 +1,116 @@ +from pathlib import Path + +from fastapi.testclient import TestClient + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.plugins.sync.manager import SyncManager +from syftbox.client.plugins.sync.sync_action import format_rejected_path +from syftbox.client.utils.dir_tree import create_dir_tree +from syftbox.lib.constants import PERM_FILE +from syftbox.lib.permissions import SyftPermission + + +def test_create_without_permission( + server_client: TestClient, datasite_1: SyftBoxContextInterface, datasite_2: SyftBoxContextInterface +): + # server_settings: ServerSettings = server_client.app_state["server_settings"] + sync_service_1 = SyncManager(datasite_1) + sync_service_2 = SyncManager(datasite_2) + + # Create a folder with only read permission for datasite_2 + tree = { + "folder_1": { + PERM_FILE: SyftPermission.mine_with_public_read(datasite_1, dir=datasite_1.my_datasite / "folder_1"), + }, + } + create_dir_tree(Path(datasite_1.my_datasite), tree) + + sync_service_1.run_single_thread() + sync_service_2.run_single_thread() + + folder_on_ds1 = datasite_1.workspace.datasites / datasite_1.email / "folder_1" + folder_on_ds2 = datasite_2.workspace.datasites / datasite_1.email / "folder_1" + + # check if datasite_1/folder_1/PERM_FILE exists on datasite_2 + assert (folder_on_ds2 / PERM_FILE).exists() + + # create a file in folder_1 and sync + new_file = folder_on_ds2 / "file.txt" + new_file.write_text("Hello, World!") + + # TODO server currently does not return 403, but unhandled exception + sync_service_2.run_single_thread() + sync_service_1.run_single_thread() + + # creating file.txt has been rejected + assert not (folder_on_ds1 / "file.txt").exists() + assert not (folder_on_ds2 / "file.txt").exists() + assert format_rejected_path(folder_on_ds2 / "file.txt").exists() + + # rejected file does not get synced + sync_service_2.run_single_thread() + sync_service_1.run_single_thread() + assert not (folder_on_ds1 / "file.txt.rejected").exists() + + +def test_delete_without_permission( + server_client: TestClient, datasite_1: SyftBoxContextInterface, datasite_2: SyftBoxContextInterface +): + # server_settings: ServerSettings = server_client.app_state["server_settings"] + sync_service_1 = SyncManager(datasite_1) + sync_service_2 = SyncManager(datasite_2) + + # Create a folder with only read permission for datasite_2 + tree = { + "folder_1": { + PERM_FILE: SyftPermission.mine_with_public_read(datasite_1, dir=datasite_1.my_datasite / "folder_1"), + "file.txt": "Hello, World!", + }, + } + create_dir_tree(Path(datasite_1.my_datasite), tree) + + sync_service_1.run_single_thread() + sync_service_2.run_single_thread() + + folder_on_ds1 = datasite_1.workspace.datasites / datasite_1.email / "folder_1" + folder_on_ds2 = datasite_2.workspace.datasites / datasite_1.email / 
"folder_1" + + # Delete file.txt on datasite_2 is rejected + (folder_on_ds2 / "file.txt").unlink(missing_ok=False) + sync_service_2.run_single_thread() + sync_service_1.run_single_thread() + + assert (folder_on_ds1 / "file.txt").exists() + assert (folder_on_ds2 / "file.txt").exists() + + +def test_modify_without_permissions( + server_client: TestClient, datasite_1: SyftBoxContextInterface, datasite_2: SyftBoxContextInterface +): + # server_settings: ServerSettings = server_client.app_state["server_settings"] + sync_service_1 = SyncManager(datasite_1) + sync_service_2 = SyncManager(datasite_2) + + # Create a folder with only read permission for datasite_2 + tree = { + "folder_1": { + PERM_FILE: SyftPermission.mine_with_public_read(datasite_1, dir=datasite_1.my_datasite / "folder_1"), + "file.txt": "Hello, World!", + }, + } + create_dir_tree(Path(datasite_1.my_datasite), tree) + + sync_service_1.run_single_thread() + sync_service_2.run_single_thread() + + folder_on_ds1 = datasite_1.workspace.datasites / datasite_1.email / "folder_1" + folder_on_ds2 = datasite_2.workspace.datasites / datasite_1.email / "folder_1" + + # Modify file.txt on datasite_2 is rejected + (folder_on_ds2 / "file.txt").write_text("Modified") + sync_service_2.run_single_thread() + sync_service_1.run_single_thread() + + assert (folder_on_ds1 / "file.txt").read_text() == "Hello, World!" + assert (folder_on_ds2 / "file.txt").read_text() == "Hello, World!" + assert format_rejected_path(folder_on_ds2 / "file.txt").read_text() == "Modified" diff --git a/packages/syftbox/tests/integration/sync/sync_test.py b/packages/syftbox/tests/integration/sync/sync_test.py new file mode 100644 index 00000000000..815447e46fc --- /dev/null +++ b/packages/syftbox/tests/integration/sync/sync_test.py @@ -0,0 +1,469 @@ +import os +import shutil +import time +from pathlib import Path + +import faker +import pytest +import yaml +from fastapi.testclient import TestClient + +from syftbox.client.base import SyftBoxContextInterface +from syftbox.client.plugins.sync.constants import MAX_FILE_SIZE_MB +from syftbox.client.plugins.sync.datasite_state import DatasiteState +from syftbox.client.plugins.sync.exceptions import FatalSyncError +from syftbox.client.plugins.sync.manager import SyncManager +from syftbox.client.plugins.sync.queue import SyncQueueItem +from syftbox.client.utils.dir_tree import DirTree, create_dir_tree +from syftbox.lib.constants import PERM_FILE +from syftbox.lib.permissions import SyftPermission +from syftbox.server.settings import ServerSettings + +fake = faker.Faker() + + +def assert_files_not_on_datasite(client: SyftBoxContextInterface, files: list[Path]): + for file in files: + assert not (client.workspace.datasites / file).exists(), f"File {file} exists on datasite {client.email}" + + +def assert_files_on_datasite(client: SyftBoxContextInterface, files: list[Path]): + for file in files: + assert (client.workspace.datasites / file).exists(), f"File {file} does not exist on datasite {client.email}" + + +def assert_files_on_server(server_client: TestClient, files: list[Path]): + server_settings: ServerSettings = server_client.app_state["server_settings"] + for file in files: + assert (server_settings.snapshot_folder / file).exists(), f"File {file} does not exist on server" + + +def assert_dirtree_exists(base_path: Path, tree: DirTree) -> None: + for name, content in tree.items(): + local_path = base_path / name + + if isinstance(content, str): + assert local_path.read_text() == content + elif isinstance(content, SyftPermission): + 
assert yaml.safe_load(local_path.read_text()) == content.to_dict() + elif isinstance(content, dict): + assert local_path.is_dir() + assert_dirtree_exists(local_path, content) + + +def test_get_datasites(datasite_1: SyftBoxContextInterface, datasite_2: SyftBoxContextInterface): + emails = {datasite_1.email, datasite_2.email} + sync_service = SyncManager(datasite_1) + sync_service2 = SyncManager(datasite_2) + sync_service.run_single_thread() + sync_service2.run_single_thread() + + datasites = sync_service.producer.get_datasite_states() + assert {datasites[0].email, datasites[1].email} == emails + + +def test_enqueue_changes(datasite_1: SyftBoxContextInterface): + sync_service = SyncManager(datasite_1) + datasites = sync_service.producer.get_datasite_states() + + datasite_changes = datasites[0].get_datasite_changes() + num_files_after_setup = len(datasite_changes.files) + len(datasite_changes.permissions) + + # Create two files in datasite_1 + tree = { + "folder1": { + PERM_FILE: SyftPermission.mine_with_public_read(datasite_1, dir=datasite_1.my_datasite / "folder1"), + "large.txt": fake.text(max_nb_chars=1000), + "small.txt": fake.text(max_nb_chars=10), + }, + } + create_dir_tree(Path(datasite_1.my_datasite), tree) + datasite_changes = datasites[0].get_datasite_changes() + num_out_of_sync_files = len(datasite_changes.files) + len(datasite_changes.permissions) + # 3 new files + assert num_out_of_sync_files - num_files_after_setup == 3 + + # Enqueue the changes + verify order + for change in datasite_changes.permissions + datasite_changes.files: + sync_service.enqueue(change) + + items_from_queue: list[SyncQueueItem] = [] + while not sync_service.queue.empty(): + items_from_queue.append(sync_service.queue.get()) + + should_be_permissions = items_from_queue[: len(datasite_changes.permissions)] + should_be_files = items_from_queue[len(datasite_changes.permissions) :] + + assert all(SyftPermission.is_permission_file(item.data.path) for item in should_be_permissions) + assert all(not SyftPermission.is_permission_file(item.data.path) for item in should_be_files) + + for item in should_be_files: + print(item.priority, item.data) + + +def test_create_file( + server_client: TestClient, datasite_1: SyftBoxContextInterface, datasite_2: SyftBoxContextInterface +): + server_settings: ServerSettings = server_client.app_state["server_settings"] + sync_service = SyncManager(datasite_1) + + # Create a file in datasite_1 + tree = { + "folder1": { + PERM_FILE: SyftPermission.mine_with_public_read(datasite_1, dir=datasite_1.my_datasite / "folder1"), + "file.txt": fake.text(max_nb_chars=1000), + }, + } + create_dir_tree(Path(datasite_1.my_datasite), tree) + + # changes are pushed to server + sync_service.run_single_thread() + + # check if no changes are left + for datasite in sync_service.producer.get_datasite_states(): + datasite_changes = datasite.get_datasite_changes() + assert not datasite_changes.files + assert not datasite_changes.permissions + + # check if file exists on server + print(datasite_2.workspace.datasites) + datasite_snapshot = server_settings.snapshot_folder / datasite_1.email + assert_dirtree_exists(datasite_snapshot, tree) + + # check if file exists on datasite_2 + sync_client_2 = SyncManager(datasite_2) + sync_client_2.run_single_thread() + datasite_states = sync_client_2.producer.get_datasite_states() + ds1_state = datasite_states[0] + assert ds1_state.email == datasite_1.email + + print(f"datasites {[d.email for d in sync_client_2.producer.get_datasite_states()]}") + 
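+    # A second pass on datasite_2 pulls the files that datasite_1 pushed to the
+    # server down into datasite_2's local datasites tree.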
sync_client_2.run_single_thread() + + assert_files_on_datasite(datasite_2, [Path(datasite_1.email) / "folder1" / "file.txt"]) + + +def test_modify(server_client: TestClient, datasite_1: SyftBoxContextInterface): + server_settings: ServerSettings = server_client.app_state["server_settings"] + sync_service_1 = SyncManager(datasite_1) + + # Setup initial state + tree = { + "folder1": { + PERM_FILE: SyftPermission.mine_with_public_rw(datasite_1, dir=datasite_1.my_datasite / "folder1"), + "file.txt": "content", + }, + } + create_dir_tree(Path(datasite_1.my_datasite), tree) + sync_service_1.run_single_thread() + + # modify the file + file_path = datasite_1.my_datasite / "folder1" / "file.txt" + new_content = "modified" + file_path.write_text(new_content) + assert file_path.read_text() == new_content + + sync_service_1.run_single_thread() + + assert file_path.read_text() == new_content + assert (server_settings.snapshot_folder / datasite_1.email / "folder1" / "file.txt").read_text() == new_content + + +def test_modify_and_pull( + server_client: TestClient, datasite_1: SyftBoxContextInterface, datasite_2: SyftBoxContextInterface +): + server_settings: ServerSettings = server_client.app_state["server_settings"] + sync_service_1 = SyncManager(datasite_1) + sync_service_2 = SyncManager(datasite_2) + + # Setup initial state + tree = { + "folder1": { + PERM_FILE: SyftPermission.mine_with_public_rw(datasite_1, dir=datasite_1.my_datasite / "folder1"), + "file.txt": "content1", + }, + } + create_dir_tree(Path(datasite_1.my_datasite), tree) + sync_service_1.run_single_thread() + sync_service_2.run_single_thread() + + # modify the file + file_path = datasite_1.my_datasite / "folder1" / "file.txt" + new_content = fake.text(max_nb_chars=100_000) + file_path.write_text(new_content) + + assert file_path.read_text() == new_content + + sync_service_1.run_single_thread() + + assert file_path.read_text() == new_content + assert (server_settings.snapshot_folder / datasite_1.email / "folder1" / "file.txt").read_text() == new_content + + sync_service_2.run_single_thread() + + assert file_path.read_text() == new_content + assert (Path(datasite_2.workspace.datasites) / datasite_1.email / "folder1" / "file.txt").read_text() == new_content + + +def test_modify_with_conflict( + server_client: TestClient, datasite_1: SyftBoxContextInterface, datasite_2: SyftBoxContextInterface +): + sync_service_1 = SyncManager(datasite_1) + sync_service_2 = SyncManager(datasite_2) + + # Setup initial state + tree = { + "folder1": { + PERM_FILE: SyftPermission.mine_with_public_rw(datasite_1, dir=datasite_1.my_datasite / "folder1"), + "file.txt": "content1", + }, + } + create_dir_tree(Path(datasite_1.my_datasite), tree) + sync_service_1.run_single_thread() + sync_service_2.run_single_thread() + + # modify the file both clients + file_path_1 = datasite_1.my_datasite / "folder1" / "file.txt" + new_content_1 = "modified1" + file_path_1.write_text(new_content_1) + + file_path_2 = Path(datasite_2.workspace.datasites) / datasite_1.email / "folder1" / "file.txt" + new_content_2 = "modified2" + file_path_2.write_text(new_content_2) + + assert new_content_1 != new_content_2 + assert file_path_1.read_text() == new_content_1 + assert file_path_2.read_text() == new_content_2 + + # first to server wins + sync_service_1.run_single_thread() + sync_service_2.run_single_thread() + + assert file_path_1.read_text() == new_content_1 + assert file_path_2.read_text() == new_content_1 + + # modify again, 2 syncs first + new_content_1 = fake.text(max_nb_chars=1000) 
+ new_content_2 = fake.text(max_nb_chars=1000) + file_path_1.write_text(new_content_1) + file_path_2.write_text(new_content_2) + assert new_content_1 != new_content_2 + + assert file_path_1.read_text() == new_content_1 + assert file_path_2.read_text() == new_content_2 + + sync_service_2.run_single_thread() + sync_service_1.run_single_thread() + + assert file_path_1.read_text() == new_content_2 + assert file_path_2.read_text() == new_content_2 + + +def test_delete_file( + server_client: TestClient, datasite_1: SyftBoxContextInterface, datasite_2: SyftBoxContextInterface +): + server_settings: ServerSettings = server_client.app_state["server_settings"] + sync_service_1 = SyncManager(datasite_1) + sync_service_2 = SyncManager(datasite_2) + + # Setup initial state + tree = { + "folder1": { + PERM_FILE: SyftPermission.mine_with_public_rw(datasite_1, dir=datasite_1.my_datasite / "folder1"), + "file.txt": fake.text(max_nb_chars=1000), + }, + } + create_dir_tree(Path(datasite_1.my_datasite), tree) + sync_service_1.run_single_thread() + sync_service_2.run_single_thread() + + # delete the file + file_path = datasite_1.my_datasite / "folder1" / "file.txt" + file_path.unlink() + + sync_service_1.run_single_thread() + + # file is deleted on server + assert (server_settings.snapshot_folder / datasite_1.email / "folder1" / "file.txt").exists() is False + + sync_service_2.run_single_thread() + assert (datasite_2.my_datasite / datasite_1.email / "folder1" / "file.txt").exists() is False + + # Check if the metadata is gone + remote_state_1 = sync_service_1.producer.get_datasite_states()[0].get_remote_state() + remote_paths = {metadata.path for metadata in remote_state_1} + assert Path(datasite_1.email) / "folder1" / "file.txt" not in remote_paths + + +def test_invalid_sync_to_remote(server_client: TestClient, datasite_1: SyftBoxContextInterface): + sync_service_1 = SyncManager(datasite_1) + sync_service_1.run_single_thread() + + # random bytes 1 byte too large + too_large_content = os.urandom((MAX_FILE_SIZE_MB * 1024 * 1024) + 1) + tree = { + "valid": { + PERM_FILE: SyftPermission.mine_with_public_rw(datasite_1, dir=datasite_1.my_datasite / "valid"), + "file.txt": "valid content", + }, + "invalid_on_modify": { + PERM_FILE: SyftPermission.mine_with_public_rw(datasite_1, dir=datasite_1.my_datasite / "invalid_on_modify"), + "file.txt": "valid content", + }, + "invalid_on_create": { + PERM_FILE: "invalid permission", + "file.txt": too_large_content, + }, + } + + create_dir_tree(Path(datasite_1.my_datasite), tree) + sync_service_1.producer.enqueue_datasite_changes( + datasite=DatasiteState( + sync_service_1.context, + email=datasite_1.email, + ), + ) + + queue = sync_service_1.queue + consumer = sync_service_1.consumer + + items_to_sync = [] + while not queue.empty(): + items_to_sync.append(queue.get()) + assert len(items_to_sync) == 6 # 3 files + 3 permissions + + for item in items_to_sync: + sync_action = consumer.determine_action(item) + abs_path = item.data.local_abs_path + + should_be_valid = item.data.path.parent.name in ["valid", "invalid_on_modify"] + print(f"path: {abs_path}, should_be_valid: {should_be_valid}, parent: {item.data.path.parent}") + + is_valid = sync_action.is_valid(context=sync_service_1.context) + assert is_valid == should_be_valid, f"path: {abs_path}, is_valid: {is_valid}" + + sync_service_1.run_single_thread() + + # Modify invalid_on_modify to be invalid + file_path = datasite_1.my_datasite / "invalid_on_modify" / "file.txt" + file_path.write_bytes(too_large_content) + permission_path 
= datasite_1.my_datasite / "invalid_on_modify" / PERM_FILE + permission_path.write_text("invalid permission") + + sync_service_1.producer.enqueue_datasite_changes( + datasite=DatasiteState(sync_service_1.context, email=datasite_1.email), + ) + items_to_sync = [] + while not queue.empty(): + items_to_sync.append(queue.get()) + assert len(items_to_sync) == 4 # 2 invalid files + 2 invalid permissions + + for item in items_to_sync: + sync_action = consumer.determine_action(item) + abs_path = item.data.local_abs_path + + is_valid = sync_action.is_valid(context=sync_service_1.context) + assert not is_valid, f"path: {abs_path}, is_valid: {is_valid}" + + +def test_sync_invalid_local_environment(datasite_1: SyftBoxContextInterface): + sync_service = SyncManager(datasite_1) + sync_service.sync_interval = 0.1 + sync_folder = Path(datasite_1.workspace.datasites) + + # Create a file in datasite_1 + tree = { + "folder1": { + PERM_FILE: SyftPermission.mine_with_public_read(datasite_1, dir=datasite_1.my_datasite / "folder1"), + "file.txt": fake.text(max_nb_chars=1000), + }, + } + create_dir_tree(Path(datasite_1.my_datasite), tree) + + # Start syncing in separate thread + sync_service.start() + time.sleep(sync_service.sync_interval * 2) + assert sync_service.is_alive() + + # Deleting the previous state file stops the sync + shutil.rmtree(sync_folder.as_posix()) + + max_wait_time = 5 + start_time = time.time() + while sync_service.is_alive() and time.time() - start_time < max_wait_time: + time.sleep(0.1) + assert not sync_service.is_alive() + + # Restarting is not possible + sync_service.start() + start_time = time.time() + while sync_service.is_alive() and time.time() - start_time < max_wait_time: + time.sleep(0.1) + assert not sync_service.is_alive() + + +def test_skip_symlink(server_client: TestClient, datasite_1: SyftBoxContextInterface): + sync_service = SyncManager(datasite_1) + sync_service.run_single_thread() + + apps_dir = datasite_1.workspace.apps + datasite_dir = datasite_1.my_datasite + + folder_to_symlink = apps_dir / "folder_to_symlink" + file_to_symlink = apps_dir / "file_to_symlink.txt" + + folder_to_symlink.mkdir() + file_to_symlink.write_text("content") + + # Nothing to sync, no writes to datasites + states = sync_service.producer.get_datasite_states() + assert len(states) == 1 + assert states[0].is_in_sync() + + # Make symlinks in datasite + symlink_folder = datasite_dir / "symlinked_folder" + symlink_file = datasite_dir / "symlinked_file.txt" + + symlink_folder.symlink_to(folder_to_symlink) + symlink_file.symlink_to(file_to_symlink) + + states = sync_service.producer.get_datasite_states() + assert len(states) == 1 + assert states[0].is_in_sync() + + # Check if symlinks are not synced + sync_service.run_single_thread() + snapshot_folder = server_client.app_state["server_settings"].snapshot_folder + assert not (snapshot_folder / datasite_1.email / "symlinked_folder").exists() + assert not (snapshot_folder / datasite_1.email / "symlinked_file.txt").exists() + + +def test_skip_hidden_paths(server_client: TestClient, datasite_1: SyftBoxContextInterface): + sync_service = SyncManager(datasite_1) + sync_service.run_single_thread() + + hidden_folder = datasite_1.my_datasite / ".hidden_folder" + hidden_nested_file = hidden_folder / "subfolder" / "file.txt" + hidden_file = datasite_1.my_datasite / ".hidden_file.txt" + + hidden_folder.mkdir() + hidden_nested_file.parent.mkdir(parents=True) + hidden_file.write_text("content") + + states = sync_service.producer.get_datasite_states() + assert 
len(states) == 1 + assert states[0].is_in_sync() + + sync_service.run_single_thread() + snapshot_folder = server_client.app_state["server_settings"].snapshot_folder + assert not (snapshot_folder / datasite_1.email / ".hidden_folder").exists() + assert not (snapshot_folder / datasite_1.email / ".hidden_file.txt").exists() + + +def test_sync_health_check(datasite_1: SyftBoxContextInterface): + sync_service = SyncManager(datasite_1) + sync_service.check_server_status() + + sync_service.context.client.conn.headers["Authorization"] = "Bearer invalid_token" + with pytest.raises(FatalSyncError): + sync_service.check_server_status() diff --git a/packages/syftbox/tests/stress/__init__.py b/packages/syftbox/tests/stress/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/syftbox/tests/stress/locustfile.py b/packages/syftbox/tests/stress/locustfile.py new file mode 100644 index 00000000000..6e7a8e29540 --- /dev/null +++ b/packages/syftbox/tests/stress/locustfile.py @@ -0,0 +1,73 @@ +import uuid +from pathlib import Path + +from locust import FastHttpUser, between, task + +import syftbox.client.exceptions +from syftbox.client.core import SyftBoxContext +from syftbox.client.plugins.sync.sync_action import ModifyRemoteAction +from syftbox.client.server_client import SyftBoxClient +from syftbox.lib.workspace import SyftWorkspace +from syftbox.server.sync.hash import hash_file +from syftbox.server.sync.models import FileMetadata + +file_name = Path("loadtest.txt") + + +class SyftBoxUser(FastHttpUser): + network_timeout = 5.0 + connection_timeout = 5.0 + wait_time = between(0.5, 1.5) + + def on_start(self): + self.datasites = [] + self.email = "aziz@openmined.org" + self.remote_state: dict[str, list[FileMetadata]] = {} + + self.syft_context = SyftBoxContext( + email=self.email, + client=SyftBoxClient(conn=self.client), + workspace=SyftWorkspace(data_dir=Path(".")), + ) + + self.filepath = self.init_file() + + def init_file(self) -> Path: + # create a file on local and send to server + filepath = self.syft_context.datasite / file_name + filepath.parent.mkdir(parents=True, exist_ok=True) + filepath.touch() + filepath.write_text(uuid.uuid4().hex) + local_syncstate = hash_file(filepath.absolute(), root_dir=filepath.parent.absolute()) + try: + self.syft_context.client.sync.create(local_syncstate.path, filepath.read_bytes()) + except syftbox.client.exceptions.SyftServerError: + pass + return filepath + + @task + def sync_datasites(self): + remote_datasite_states = self.sync_client.get_datasite_states() + # logger.info(f"Syncing {len(remote_datasite_states)} datasites") + all_files: list[FileMetadata] = [] + for remote_state in remote_datasite_states.values(): + all_files.extend(remote_state) + + all_paths = [f.path for f in all_files][:10] + self.syft_context.client.sync.download_files_streaming(all_paths, self.syft_context.workspace.datasites) + + @task + def apply_diff(self): + self.filepath.write_text(uuid.uuid4().hex) + local_syncstate = hash_file(self.filepath, root_dir=self.syft_context.datasite) + remote_syncstate = self.syft_context.client.sync.get_metadata(self.filepath) + + action = ModifyRemoteAction( + local_metadata=local_syncstate, + remote_metadata=remote_syncstate, + ) + action.execute(self.syft_context) + + @task + def download(self): + self.sync_client.download(self.filepath) diff --git a/packages/syftbox/tests/unit/__init__.py b/packages/syftbox/tests/unit/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/packages/syftbox/tests/unit/apps/apps_cli_test.py b/packages/syftbox/tests/unit/apps/apps_cli_test.py new file mode 100644 index 00000000000..8506651b5cf --- /dev/null +++ b/packages/syftbox/tests/unit/apps/apps_cli_test.py @@ -0,0 +1,75 @@ +import pytest +from typer.testing import CliRunner + +from syftbox.app.cli import app as app_cli +from syftbox.app.install import InstallResult + +# Initialize test runner +runner = CliRunner() + + +@pytest.fixture +def mock_apps_dir(mock_config): + apps_dir = mock_config.data_dir / "apis" + apps_dir.mkdir(exist_ok=True) + yield apps_dir + + +@pytest.fixture +def mock_app_list(mock_apps_dir): + apps = [mock_apps_dir / "app1", mock_apps_dir / "app2"] + for app in apps: + app.mkdir(exist_ok=True) + yield (mock_apps_dir, apps) + + +@pytest.fixture +def mocked_app_install(monkeypatch, mock_apps_dir): + def mock_install(config, repository, branch): + return InstallResult( + app_name=repository, + app_path=mock_apps_dir / repository, + error=None, + details=None, + ) + + monkeypatch.setattr("syftbox.app.cli.install_app", mock_install) + yield + monkeypatch.undo() + + +@pytest.fixture +def mocked_app_uninstall(monkeypatch, mock_config): + def mock_uninstall(*args, **kwargs): + return mock_config.data_dir / "apps" + + monkeypatch.setattr("syftbox.app.cli.uninstall_app", mock_uninstall) + yield + monkeypatch.undo() + + +def test_list_app(mock_app_list): + result = runner.invoke(app_cli, ["list"]) + assert result.exit_code == 0 + assert "Apps installed" in result.stdout + assert "app1" in result.stdout + assert "app2" in result.stdout + + +def test_list_app_empty(mock_apps_dir): + result = runner.invoke(app_cli, ["list"]) + assert result.exit_code == 0 + assert "No apps installed" in result.stdout + + +def test_install_app(mocked_app_install): + result = runner.invoke(app_cli, ["install", "OpenMined/tutorials-app"]) + assert result.exit_code == 0 + assert "tutorials-app" in result.stdout + + +def test_uninstall_app(mocked_app_uninstall): + result = runner.invoke(app_cli, ["uninstall", "app1"]) + assert result.exit_code == 0 + assert "Uninstalled app" in result.stdout + assert "app1" in result.stdout diff --git a/packages/syftbox/tests/unit/apps/compat_test.py b/packages/syftbox/tests/unit/apps/compat_test.py new file mode 100644 index 00000000000..a742039f1fb --- /dev/null +++ b/packages/syftbox/tests/unit/apps/compat_test.py @@ -0,0 +1,40 @@ +from types import SimpleNamespace + +import pytest + +from syftbox.app.install import check_os_compatibility + + +def test_os_compatibility_compatible(): + app_config_mock = SimpleNamespace( + **{ + "app": SimpleNamespace( + **{ + "platforms": ["darwin", "linux"], + } + ), + } + ) + + check_os_compatibility(app_config_mock) + + +def test_os_compatibility_incompatible(): + app_config_mock = SimpleNamespace( + **{ + "app": SimpleNamespace( + **{ + "platforms": ["different_os"], + } + ), + } + ) + with pytest.raises(OSError) as e: + check_os_compatibility(app_config_mock) + assert e.value == "Your OS isn't supported by this app." 
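+    # A hedged alternative: pytest.raises also accepts a `match` regex that is
+    # searched against the exception message. This assumes the raised message is
+    # the string asserted above.
+    with pytest.raises(OSError, match="isn't supported"):
+        check_os_compatibility(app_config_mock)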
+ + +def test_os_compatibility_without_config(): + app_config_mock = SimpleNamespace(**{"app": {}}) + + check_os_compatibility(app_config_mock) diff --git a/packages/syftbox/tests/unit/apps/config_test.py b/packages/syftbox/tests/unit/apps/config_test.py new file mode 100644 index 00000000000..030ec52c644 --- /dev/null +++ b/packages/syftbox/tests/unit/apps/config_test.py @@ -0,0 +1,49 @@ +import json + +import pytest + +from syftbox.app.install import load_config + + +def test_load_app_config(tmp_path): + valid_json_config = { + "version": "0.1.0", + "app": { + "version": "0.1.0", + "run": {"command": ["python", "main.py"], "interval": "10"}, + "env": {}, + "platforms": ["linux"], + "pre_install": ["pip", "install", "psutil"], + "post_install": [], + "pre_update": [], + "post_update": [], + }, + } + + app_conf = tmp_path / "app.json" + app_conf.write_text(json.dumps(valid_json_config)) + + app_config = load_config(app_conf) + assert app_config.version == valid_json_config["version"] + assert app_config.app.version == valid_json_config["app"]["version"] + assert app_config.app.run.command == valid_json_config["app"]["run"]["command"] + assert vars(app_config.app.env) == valid_json_config["app"]["env"] + assert app_config.app.platforms == valid_json_config["app"]["platforms"] + assert app_config.app.pre_install == valid_json_config["app"]["pre_install"] + assert app_config.app.pre_update == valid_json_config["app"]["pre_update"] + assert app_config.app.post_update == valid_json_config["app"]["post_update"] + + +def test_load_invalid_app_config(tmp_path): + app_conf = tmp_path / "app.json" + app_conf.write_text("Invalid JSON") + + with pytest.raises(ValueError) as expt: + load_config(app_conf) + assert expt.value == "File isn't in JSON format" + + +def test_load_inexistent_app_config(): + with pytest.raises(ValueError) as expt: + load_config("inexistent_app.json") + assert expt.value == "Couln't find the json config file for this path." diff --git a/packages/syftbox/tests/unit/apps/install_test.py b/packages/syftbox/tests/unit/apps/install_test.py new file mode 100644 index 00000000000..cf1eb018655 --- /dev/null +++ b/packages/syftbox/tests/unit/apps/install_test.py @@ -0,0 +1,96 @@ +from pathlib import Path +from subprocess import CalledProcessError + +import pytest + +from syftbox.app.install import clone_repository, sanitize_git_path + + +def test_valid_git_path(): + path = "Example/Repository" + output_path = sanitize_git_path(path) + assert path == output_path + + +def test_valid_git_url(): + path = "Example/Repository" + http_url = f"http://github.com/{path}" + output_path = sanitize_git_path(http_url) + assert path == output_path + + https_url = f"https://github.com/{path}" + output_path = sanitize_git_path(https_url) + assert path == output_path + + +def test_invalid_git_path(): + path = "..Example/../Repository" + with pytest.raises(ValueError) as excpt: + _ = sanitize_git_path(path) + assert excpt.value == "Invalid Git repository path format." + + +def test_second_invalid_git_path(): + path = "http://example.com" + with pytest.raises(ValueError) as excpt: + _ = sanitize_git_path(path) + assert excpt.value == "Invalid Git repository path format." 
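+
+
+# A sketch consolidating the two invalid-path cases above via parametrization.
+# The inputs and the expected message are taken from those tests, not from
+# sanitize_git_path itself, so the match string is an assumption.
+@pytest.mark.parametrize("bad_path", ["..Example/../Repository", "http://example.com"])
+def test_invalid_git_path_parametrized(bad_path):
+    with pytest.raises(ValueError, match="Invalid Git repository path format"):
+        sanitize_git_path(bad_path)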
+ + +def test_clone_valid_repository(monkeypatch): + count = 0 + + def mock_subproc_run(*args, **kwargs): + nonlocal count + count += 1 + return 0 + + monkeypatch.setattr("subprocess.run", mock_subproc_run) + + path = "OpenMined/logged_in" + temp_path = clone_repository(path, "main") + assert count == 3 + assert isinstance(temp_path, Path) + + +def test_clone_repository_to_an_existent_path(monkeypatch): + count = 0 + + def mock_subproc_run(*args, **kwargs): + nonlocal count + count += 1 + return 0 + + monkeypatch.setattr("subprocess.run", mock_subproc_run) + + # First call will make the repository path exist + path = "OpenMined/logged_in" + temp_path = clone_repository(path, "main") + assert isinstance(temp_path, Path) + assert count == 3 + + # Second call must clone it again without any exception (replaces the old one). + temp_path = clone_repository(path, "main") + assert isinstance(temp_path, Path) + assert count == 6 + + +def test_clone_invalid_repository(monkeypatch): + count = 0 + + def mock_subproc_run(*args, **kwargs): + nonlocal count + count += 1 + cmd = args[0] + + if cmd[0] == "git" and cmd[1] == "ls-remote" and "Invalid" in cmd[2]: + raise CalledProcessError(1, cmd) + + return 0 + + monkeypatch.setattr("subprocess.run", mock_subproc_run) + + path = "InvalidUser/InvalidRepo" + with pytest.raises(ValueError) as excpt: + _ = clone_repository(path, "main") + assert "Cannot access repository" in excpt.value diff --git a/packages/syftbox/tests/unit/client/api_test.py b/packages/syftbox/tests/unit/client/api_test.py new file mode 100644 index 00000000000..1e773f607d8 --- /dev/null +++ b/packages/syftbox/tests/unit/client/api_test.py @@ -0,0 +1,44 @@ +import pytest +from fastapi.testclient import TestClient + +from syftbox.client.api import create_api +from syftbox.client.base import SyftBoxContextInterface +from syftbox.lib.client_config import SyftClientConfig + + +class MockClient(SyftBoxContextInterface): + def __init__(self): + self.config = SyftClientConfig( + path="/tmp/syftbox/config.yaml", + client_url="http://localhost:8080", + server_url="http://localhost:5000", + email="test@user.com", + ) + + @property + def all_datasites(self): + return ["datasite1", "datasite2"] + + +@pytest.fixture +def mock_api(): + yield TestClient(create_api(MockClient())) + + +def test_create_api(mock_api): + response = mock_api.get("/") + assert response.status_code == 200 + + +def test_version(mock_api): + response = mock_api.get("/version") + assert response.status_code == 200 + assert "version" in response.json() + + +def test_datasites(): + app = create_api(MockClient()) + mock_api = TestClient(app) + response = mock_api.get("/datasites") + assert response.status_code == 200 + assert response.json() == {"datasites": ["datasite1", "datasite2"]} diff --git a/packages/syftbox/tests/unit/client/benchmark_network_test.py b/packages/syftbox/tests/unit/client/benchmark_network_test.py new file mode 100644 index 00000000000..3d4c3bb4ba6 --- /dev/null +++ b/packages/syftbox/tests/unit/client/benchmark_network_test.py @@ -0,0 +1,387 @@ +import socket +import threading +import time +from datetime import datetime, timedelta +from typing import Any + +import pytest +from curl_cffi import CurlInfo, CurlOpt + +from syftbox.client.benchmark import Stats +from syftbox.client.benchmark.netstats_http import HTTPPerfStats, HTTPTimings, HTTPTimingStats +from syftbox.client.benchmark.netstats_tcp import ( + ConnectionMetadata, + RateLimiter, + TCPConnection, + TCPPerfStats, + TCPTimingStats, +) + + +def 
get_mock_curl_response(): + return { + CurlInfo.NAMELOOKUP_TIME: 0.1, # DNS lookup + CurlInfo.CONNECT_TIME: 0.2, # Connect + CurlInfo.APPCONNECT_TIME: 0.3, # SSL handshake + CurlInfo.PRETRANSFER_TIME: 0.4, # Pre-transfer + CurlInfo.STARTTRANSFER_TIME: 0.5, # Start transfer + CurlInfo.TOTAL_TIME: 0.6, # Total time + CurlInfo.REDIRECT_TIME: 0.05, # Redirect time + } + + +class MockCurl: + def __init__(self, should_fail: bool = False): + self.options = {} + self.should_fail = should_fail + self.is_closed = False + self._mock_response = get_mock_curl_response() + + def setopt(self, option: CurlOpt, value: Any) -> None: + self.options[str(option)] = value + + def perform(self) -> None: + if self.should_fail: + raise Exception("Simulated curl failure") + if str(CurlOpt.URL) not in self.options: + raise ValueError("URL not set") + # Reset closed state when performing new request + self.is_closed = False + + def getinfo(self, info: CurlInfo) -> float: + return self._mock_response[info] + + def close(self) -> None: + self.is_closed = True + + +def assert_float_equal(a: float, b: float, tolerance: float = 1e-6): + """Assert that two floats are equal within a tolerance""" + assert abs(a - b) < tolerance, f"Expected {b}, got {a}" + + +@pytest.fixture +def mock_successful_curl(monkeypatch): + mock_instance = MockCurl(should_fail=False) + + def mock_curl_constructor(): + return mock_instance + + monkeypatch.setattr("syftbox.client.benchmark.netstats_http.Curl", mock_curl_constructor) + return mock_instance + + +@pytest.fixture +def mock_failing_curl(monkeypatch): + mock_instance = MockCurl(should_fail=True) + + def mock_curl_constructor(): + return mock_instance + + monkeypatch.setattr("syftbox.client.benchmark.netstats_http.Curl", mock_curl_constructor) + return mock_instance + + +def test_curl_cleanup(mock_successful_curl): + """Test that curl resources are properly cleaned up""" + stats = HTTPPerfStats("https://example.com") + stats._HTTPPerfStats__make_request(stats.url) + assert mock_successful_curl.is_closed is True + + +def test_http_perf_stats_initialization(): + """Test basic initialization of HTTPPerfStats""" + url = "https://example.com" + stats = HTTPPerfStats(url) + assert stats.url == url + assert stats.connect_timeout == 30 + assert stats.total_timeout == 60 + assert stats.max_redirects == 5 + + +def test_successful_single_request(mock_successful_curl): + """Test a single successful HTTP request measurement""" + stats = HTTPPerfStats("https://example.com") + timings = stats._HTTPPerfStats__make_request(stats.url) + + assert isinstance(timings, HTTPTimings) + # Use approximate float comparison for timing values + assert_float_equal(timings.dns, 100.0) # 0.1 * 1000 + assert_float_equal(timings.tcp_connect, 100.0) # (0.2 - 0.1) * 1000 + assert_float_equal(timings.ssl_handshake, 100.0) # (0.3 - 0.2) * 1000 + assert_float_equal(timings.send, 100.0) # (0.4 - 0.3) * 1000 + assert_float_equal(timings.server_wait, 100.0) # (0.5 - 0.4) * 1000 + assert_float_equal(timings.content, 100.0) # (0.6 - 0.5) * 1000 + assert_float_equal(timings.total, 600.0) # 0.6 * 1000 + assert_float_equal(timings.redirect, 50.0) # 0.05 * 1000 + + +def test_failing_request(mock_failing_curl): + """Test handling of a failed HTTP request""" + stats = HTTPPerfStats("https://example.com") + with pytest.raises(Exception, match="Simulated curl failure"): + stats._HTTPPerfStats__make_request(stats.url) + + +def test_get_stats_aggregation(mock_successful_curl): + """Test aggregation of multiple HTTP request measurements""" + 
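+    # Every mocked run returns identical curl timings, so each aggregated Stats
+    # field should equal the single-request value with zero standard deviation.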
stats = HTTPPerfStats("https://example.com") + timing_stats = stats.get_stats(n_runs=3) + + assert isinstance(timing_stats, HTTPTimingStats) + assert timing_stats.success_rate == 100.0 + + # Get a fresh timing measurement for comparison + expected_timing = stats._HTTPPerfStats__make_request(stats.url) + + # Check that all Stats objects are properly calculated + for field in ["dns", "tcp_connect", "ssl_handshake", "send", "server_wait", "content", "total", "redirect"]: + stat_obj = getattr(timing_stats, field) + assert isinstance(stat_obj, Stats) + # Since all measurements are identical in our mock: + assert_float_equal(stat_obj.mean, getattr(expected_timing, field)) + assert_float_equal(stat_obj.stddev, 0.0) + + +def test_no_measurements_error(monkeypatch): + """Test error handling when no measurements succeed""" + + def mock_make_request(*args, **kwargs): + return None + + stats = HTTPPerfStats("https://example.com") + monkeypatch.setattr(stats, "_HTTPPerfStats__make_request", mock_make_request) + + with pytest.raises(RuntimeError, match="No successful measurements"): + stats.get_stats(n_runs=3) + + +# Mock classes +class MockSocket: + def __init__(self, would_fail: bool = False): + self.would_fail = would_fail + self.closed = False + + def __enter__(self): + if self.would_fail: + raise socket.error("Mock connection failure") + return self + + def __exit__(self, *args): + self.closed = True + + +# Time mocking helpers +class MockTime: + def __init__(self): + self.current_time = 0.0 + self.sleep_calls = [] + + def time(self): + self.current_time += 0.1 + return self.current_time + + def sleep(self, seconds): + self.sleep_calls.append(seconds) + + +# Fixtures +@pytest.fixture +def mock_time(): + return MockTime() + + +@pytest.fixture +def mock_successful_socket(monkeypatch): + def mock_create_connection(*args, **kwargs): + return MockSocket(would_fail=False) + + monkeypatch.setattr(socket, "create_connection", mock_create_connection) + + +@pytest.fixture +def mock_failing_socket(monkeypatch): + def mock_create_connection(*args, **kwargs): + return MockSocket(would_fail=True) + + monkeypatch.setattr(socket, "create_connection", mock_create_connection) + + +@pytest.fixture +def setup_time_mocks(monkeypatch, mock_time): + monkeypatch.setattr(time, "time", mock_time.time) + monkeypatch.setattr(time, "sleep", mock_time.sleep) + return mock_time + + +# Test TCPConnection +def test_successful_connection(mock_successful_socket, setup_time_mocks): + """Test successful TCP connection measurement""" + conn = TCPConnection("example.com", 80, timeout=1.0) + latency, jitter = conn.connect() + + assert abs(latency - 100.0) < 0.001 + assert jitter == 0 + + +def test_failed_connection(mock_failing_socket, setup_time_mocks): + """Test failed TCP connection handling""" + conn = TCPConnection("example.com", 80, timeout=1.0) + latency, jitter = conn.connect() + + assert latency == -1.0 + assert jitter == -1.0 + + +def test_jitter_calculation(mock_successful_socket, setup_time_mocks): + """Test jitter calculation between consecutive connections""" + conn = TCPConnection("example.com", 80, timeout=1.0, previous_latency=90.0) + latency, jitter = conn.connect() + + assert abs(latency - 100.0) < 0.001 + assert abs(jitter - 10.0) < 0.001 + + +# Test TCPPerfStats +def test_tcp_perf_stats_initialization(): + """Test TCPPerfStats initialization""" + stats = TCPPerfStats("example.com", 80) + + assert stats.host == "example.com" + assert stats.port == 80 + assert stats.previous_latency is None + assert 
isinstance(stats.rate_limiter, RateLimiter) + assert isinstance(stats.connection_lock, type(threading.Lock())) + + +@pytest.mark.parametrize("num_runs", [3, 0]) +def test_get_stats_measurement(mock_successful_socket, setup_time_mocks, num_runs): + """Test statistics gathering with different numbers of runs""" + stats = TCPPerfStats("example.com", 80) + + if num_runs > 0: + result = stats.get_stats(num_runs) + assert isinstance(result, TCPTimingStats) + assert isinstance(result.latency_stats, Stats) + assert isinstance(result.jitter_stats, Stats) + assert result.connection_success_rate == 100.0 + assert result.requests_per_minute == num_runs + assert result.max_requests_per_minute == stats.max_connections_per_minute + assert result.max_concurrent_connections == stats.max_concurrent_connections + assert result.requests_in_last_minute == num_runs + else: + with pytest.raises(RuntimeError, match="No successful TCP measurements"): + stats.get_stats(num_runs) + + +def test_concurrent_connections(mock_successful_socket, setup_time_mocks, monkeypatch): + """Test concurrent connection handling""" + + def mock_thread_pool(*args, **kwargs): + class MockPool: + def __enter__(self): + return self + + def __exit__(self, *args): + pass + + def submit(self, fn, *args): + class MockFuture: + def result(self): + return fn() + + return MockFuture() + + return MockPool() + + monkeypatch.setattr("concurrent.futures.ThreadPoolExecutor", mock_thread_pool) + + stats = TCPPerfStats("example.com", 80) + stats.max_concurrent_connections = 2 + result = stats.get_stats(4) + + assert isinstance(result, TCPTimingStats) + assert result.max_concurrent_connections == 2 + assert result.requests_per_minute == 4 + assert result.requests_in_last_minute == 4 + assert isinstance(result.latency_stats, Stats) + assert isinstance(result.jitter_stats, Stats) + + +def test_connection_history_cleaning(setup_time_mocks): + """Test cleaning of old connection history""" + stats = TCPPerfStats("example.com", 80) + + old_time = datetime.now() - timedelta(minutes=2) + stats.request_history.append(ConnectionMetadata(old_time, "example.com", 80)) + stats.request_history.append(ConnectionMetadata(datetime.now(), "example.com", 80)) + + with stats._connection_context(): + pass + + assert len(stats.request_history) == 2 + assert all(conn.timestamp > datetime.now() - timedelta(minutes=1) for conn in stats.request_history) + + +def test_calculate_stats_empty(): + """Test stats calculation with empty values""" + stats = TCPPerfStats("example.com", 80) + result = stats._calculate_stats([]) + + assert isinstance(result, Stats) + assert result.mean == 0 + # Only check for attributes that exist in the Stats class + assert hasattr(result, "min") + assert hasattr(result, "max") + assert hasattr(result, "mean") + + +def test_calculate_stats_values(): + """Test stats calculation with actual values""" + stats = TCPPerfStats("example.com", 80) + values = [100.0, 110.0, 90.0] + result = stats._calculate_stats(values) + + assert isinstance(result, Stats) + assert result.mean == 100.0 + assert result.min == 90.0 + assert result.max == 110.0 + + +def test_failed_measurements_handling(mock_failing_socket, setup_time_mocks): + """Test handling of completely failed measurements""" + stats = TCPPerfStats("example.com", 80) + with pytest.raises(RuntimeError, match="No successful TCP measurements"): + stats.get_stats(3) + + +def test_get_stats_with_mixed_success(monkeypatch, setup_time_mocks): + """Test statistics gathering with both successful and failed 
connections""" + + # Create a stateful mock for TCPConnection that alternates between success and failure + class MockConnection: + def __init__(self): + self.call_count = 0 + + def connect(self): + self.call_count += 1 + if self.call_count % 2 == 0: + return (-1.0, -1.0) # Simulate failure + return (100.0, 0.0) # Successful measurement + + mock_conn = MockConnection() + + def mock_tcp_connection(*args, **kwargs): + return mock_conn + + monkeypatch.setattr("syftbox.client.benchmark.netstats_tcp.TCPConnection", mock_tcp_connection) + + stats = TCPPerfStats("example.com", 80) + result = stats.get_stats(4) + + assert isinstance(result, TCPTimingStats) + assert result.connection_success_rate == 50.0 # Half of the connections succeeded + assert result.requests_per_minute == 4 # Total attempts + assert result.requests_in_last_minute == 4 + assert isinstance(result.latency_stats, Stats) + assert isinstance(result.jitter_stats, Stats) diff --git a/packages/syftbox/tests/unit/client/benchmark_report_test.py b/packages/syftbox/tests/unit/client/benchmark_report_test.py new file mode 100644 index 00000000000..317a07d1ebe --- /dev/null +++ b/packages/syftbox/tests/unit/client/benchmark_report_test.py @@ -0,0 +1,133 @@ +import json + +import pytest + +from syftbox.client.benchmark.report import ConsoleReport, JSONReport + + +class MockBenchmarkResult: + """Mock class for benchmark results""" + + def dict_report(self): + return {"metric1": 100, "metric2": 200} + + def readable_report(self): + return "Metric1: 100\nMetric2: 200" + + +@pytest.fixture +def mock_metrics(): + """Fixture providing test metrics""" + return {"test1": MockBenchmarkResult(), "test2": MockBenchmarkResult()} + + +@pytest.fixture +def temp_output_dir(tmp_path): + """Fixture providing temporary directory for output""" + return tmp_path + + +def test_json_report_generate(mock_metrics, temp_output_dir): + """Test JSON report generation""" + reporter = JSONReport(temp_output_dir) + reporter.generate(mock_metrics) + + # Verify file was created + output_file = temp_output_dir / "benchmark_report.json" + assert output_file.exists() + + # Verify file contents + with open(output_file) as f: + report_data = json.load(f) + + assert "result" in report_data + assert len(report_data["result"]) == 2 + assert "test1" in report_data["result"] + assert "test2" in report_data["result"] + + # Check content structure + test1_data = report_data["result"]["test1"] + assert test1_data["metric1"] == 100 + assert test1_data["metric2"] == 200 + + +def test_console_report_generate(mock_metrics, capsys): + """Test console report generation""" + reporter = ConsoleReport() + reporter.generate(mock_metrics) + + # Capture printed output + captured = capsys.readouterr() + + # Verify output contains expected content + assert "Metric1: 100" in captured.out + assert "Metric2: 200" in captured.out + assert "\n\n" in captured.out # Check separator between reports + + +def test_json_report_file_error(mock_metrics, temp_output_dir, monkeypatch): + """Test JSON report handling of file write errors""" + + def mock_open(*args, **kwargs): + raise IOError("Mock file write error") + + monkeypatch.setattr("builtins.open", mock_open) + + reporter = JSONReport(temp_output_dir) + with pytest.raises(IOError): + reporter.generate(mock_metrics) + + +def test_json_report_with_empty_metrics(temp_output_dir): + """Test JSON report generation with empty metrics""" + reporter = JSONReport(temp_output_dir) + reporter.generate({}) + + output_file = temp_output_dir / "benchmark_report.json" + 
with open(output_file) as f: + report_data = json.load(f) + + assert report_data["result"] == {} + + +def test_console_report_with_empty_metrics(capsys): + """Test console report generation with empty metrics""" + reporter = ConsoleReport() + reporter.generate({}) + + captured = capsys.readouterr() + assert captured.out.strip() == "" + + +def test_json_report_output_format(mock_metrics, temp_output_dir): + """Test JSON report output formatting""" + reporter = JSONReport(temp_output_dir) + reporter.generate(mock_metrics) + + output_file = temp_output_dir / "benchmark_report.json" + with open(output_file) as f: + content = f.read() + + # Verify JSON is properly indented + assert " " in content # Check for indentation + assert "}" in content # Check for proper JSON structure + + # Verify it's valid JSON by parsing it + assert json.loads(content) is not None + + +def test_console_report_multiple_metrics(capsys): + """Test console report with multiple different metrics""" + + class CustomMockResult: + def readable_report(self): + return "Custom Report" + + metrics = {"test1": MockBenchmarkResult(), "test2": CustomMockResult()} + + reporter = ConsoleReport() + reporter.generate(metrics) + + captured = capsys.readouterr() + assert "Metric1: 100" in captured.out + assert "Custom Report" in captured.out diff --git a/packages/syftbox/tests/unit/client/benchmark_result_test.py b/packages/syftbox/tests/unit/client/benchmark_result_test.py new file mode 100644 index 00000000000..6299076b2e5 --- /dev/null +++ b/packages/syftbox/tests/unit/client/benchmark_result_test.py @@ -0,0 +1,303 @@ +from datetime import datetime, timezone + +import pytest +import requests + +from syftbox.client.benchmark import Stats +from syftbox.client.benchmark.netstats_http import HTTPTimingStats +from syftbox.client.benchmark.netstats_tcp import TCPTimingStats +from syftbox.client.benchmark.network import NetworkBenchmark, NetworkBenchmarkResult +from syftbox.client.benchmark.sync import SyncBenchmark, SyncBenchmarkResult +from syftbox.client.benchmark.syncstats import DataTransferStats + + +# Mock classes and fixtures +@pytest.fixture +def mock_config(): + class MockConfig: + def __init__(self): + self.server_url = "https://test.example.com:8443" + self.access_token = "test-token" + self.email = "test@email.com" + + return MockConfig() + + +@pytest.fixture +def mock_stats(): + return Stats(min=100.0, max=200.0, mean=150.0, stddev=25.0, p50=150.0, p95=190.0, p99=195.0) + + +@pytest.fixture +def mock_http_stats(mock_stats): + return HTTPTimingStats( + dns=mock_stats, + tcp_connect=mock_stats, + ssl_handshake=mock_stats, + send=mock_stats, + server_wait=mock_stats, + content=mock_stats, + total=mock_stats, + redirect=mock_stats, + success_rate=95.0, + ) + + +@pytest.fixture +def mock_tcp_stats(mock_stats): + return TCPTimingStats( + latency_stats=mock_stats, + jitter_stats=mock_stats, + connection_success_rate=95.0, + requests_per_minute=30, + max_requests_per_minute=60, + max_concurrent_connections=5, + requests_in_last_minute=25, + ) + + +# Tests for NetworkBenchmark class +@pytest.mark.parametrize( + "url,expected_port", + [ + ("http://example.com", 80), + ("https://example.com", 443), + ("http://example.com:8080", 8080), + ("https://example.com:8443", 8443), + ], +) +def test_network_benchmark_port_detection(url, expected_port): + """Test port detection for different URL formats""" + + class CustomConfig: + def __init__(self, url): + self.server_url = url + + benchmark = NetworkBenchmark(CustomConfig(url)) + assert 
benchmark.tcp_perf.port == expected_port + + +def test_ping_success(mock_config, monkeypatch): + """Test successful server ping""" + + def mock_get(*args, **kwargs): + class MockResponse: + def raise_for_status(self): + pass + + return MockResponse() + + monkeypatch.setattr(requests, "get", mock_get) + + benchmark = NetworkBenchmark(mock_config) + assert benchmark.ping() is True + + +def test_ping_failure(mock_config, monkeypatch): + """Test failed server ping""" + + def mock_get(*args, **kwargs): + raise requests.RequestException("Server not reachable") + + monkeypatch.setattr(requests, "get", mock_get) + + benchmark = NetworkBenchmark(mock_config) + with pytest.raises(requests.RequestException): + benchmark.ping() + + +def test_network_collect_metrics(mock_config, mock_http_stats, mock_tcp_stats, monkeypatch): + """Test metric collection""" + + # Mock ping + def mock_ping(*args, **kwargs): + return True + + # Mock HTTP stats + def mock_http_get_stats(*args, **kwargs): + return mock_http_stats + + # Mock TCP stats + def mock_tcp_get_stats(*args, **kwargs): + return mock_tcp_stats + + benchmark = NetworkBenchmark(mock_config) + monkeypatch.setattr(benchmark, "ping", mock_ping) + monkeypatch.setattr(benchmark.http_perf, "get_stats", mock_http_get_stats) + monkeypatch.setattr(benchmark.tcp_perf, "get_stats", mock_tcp_get_stats) + + result = benchmark.collect_metrics(num_runs=3) + + assert isinstance(result, NetworkBenchmarkResult) + assert result.url == benchmark.url + assert result.num_runs == 3 + assert result.http_stats == mock_http_stats + assert result.tcp_stats == mock_tcp_stats + assert isinstance(result.timestamp, str) + + +# Tests for NetworkBenchmarkResult class +def test_network_benchmark_result_formatting(mock_http_stats, mock_tcp_stats): + """Test benchmark result formatting""" + result = NetworkBenchmarkResult( + timestamp=datetime.now(timezone.utc).isoformat(), + num_runs=3, + url="https://test.example.com", + http_stats=mock_http_stats, + tcp_stats=mock_tcp_stats, + ) + + report = result.readable_report() + + # Verify report content + assert "Network Benchmark" in report + assert "Server URL" in report + assert "HTTP Timings" in report + assert "TCP Timings" in report + assert str(mock_http_stats.success_rate) in report + assert str(mock_tcp_stats.connection_success_rate) in report + + +def test_network_benchmark_error_handling(mock_config, monkeypatch): + """Test error handling during metric collection""" + + def mock_ping(*args, **kwargs): + raise requests.RequestException("Server not reachable") + + benchmark = NetworkBenchmark(mock_config) + monkeypatch.setattr(benchmark, "ping", mock_ping) + + with pytest.raises(requests.RequestException): + benchmark.collect_metrics(num_runs=3) + + +def test_network_benchmark_with_empty_stats(mock_config, monkeypatch): + """Test handling of empty or null statistics""" + + def mock_get_empty_stats(*args, **kwargs): + raise RuntimeError("No stats available") + + benchmark = NetworkBenchmark(mock_config) + monkeypatch.setattr(benchmark.http_perf, "get_stats", mock_get_empty_stats) + monkeypatch.setattr(benchmark.tcp_perf, "get_stats", mock_get_empty_stats) + monkeypatch.setattr(benchmark, "ping", lambda: True) + + with pytest.raises(RuntimeError): + benchmark.collect_metrics(num_runs=3) + + +@pytest.fixture +def mock_data_transfer_stats(mock_stats): + return DataTransferStats(file_size_mb=1, upload=mock_stats, download=mock_stats, total_runs=3, successful_runs=3) + + +def test_sync_benchmark_file_sizes(): + """Test benchmark file sizes are 
properly defined""" + expected_sizes = [1, 5, 9] # MB + assert SyncBenchmark.BENCHMARK_FILE_SIZES == expected_sizes + + +def test_sync_collect_metrics(mock_config, mock_data_transfer_stats, monkeypatch): + """Test metric collection for different file sizes""" + + def mock_get_stats(self, size_mb, num_runs): + return DataTransferStats( + file_size_mb=size_mb, + upload=mock_data_transfer_stats.upload, + download=mock_data_transfer_stats.download, + total_runs=num_runs, + successful_runs=num_runs, + ) + + monkeypatch.setattr("syftbox.client.benchmark.syncstats.SyncDataTransferStats.get_stats", mock_get_stats) + + benchmark = SyncBenchmark(mock_config) + result = benchmark.collect_metrics(num_runs=3) + + assert isinstance(result, SyncBenchmarkResult) + assert result.url == benchmark.url + assert result.num_runs == 3 + assert len(result.file_size_stats) == len(benchmark.BENCHMARK_FILE_SIZES) + + # Verify stats for each file size + for stats, expected_size in zip(result.file_size_stats, benchmark.BENCHMARK_FILE_SIZES): + assert stats.file_size_mb == expected_size + assert isinstance(stats.upload, Stats) + assert isinstance(stats.download, Stats) + + +def test_sync_benchmark_result_formatting(mock_data_transfer_stats): + """Test benchmark result formatting""" + result = SyncBenchmarkResult(url="https://test.example.com", num_runs=3, file_size_stats=[mock_data_transfer_stats]) + + report = result.readable_report() + + # Verify report content + assert "Sync Benchmark" in report + assert "Server URL" in report + assert "Runs: 3" in report + assert "File Size: 1 MB" in report + assert "Upload Timings" in report + assert "Download Timings" in report + assert "Success Rate" in report + + +def test_collect_metrics_error_handling(mock_config, monkeypatch): + """Test error handling during metric collection""" + + def mock_get_stats_error(self, size_mb, num_runs): + raise Exception("Failed to get stats") + + monkeypatch.setattr("syftbox.client.benchmark.syncstats.SyncDataTransferStats.get_stats", mock_get_stats_error) + + benchmark = SyncBenchmark(mock_config) + with pytest.raises(Exception): + benchmark.collect_metrics(num_runs=3) + + +def test_sync_benchmark_result_with_multiple_file_sizes(mock_stats): + """Test benchmark result with multiple file sizes""" + file_size_stats = [ + DataTransferStats( + file_size_mb=size, + upload=mock_stats, + download=mock_stats, + total_runs=3, + successful_runs=3, + ) + for size in [1, 5, 9] + ] + + result = SyncBenchmarkResult(url="https://test.example.com", num_runs=3, file_size_stats=file_size_stats) + + report = result.readable_report() + + # Verify report contains all file sizes + assert "File Size: 1 MB" in report + assert "File Size: 5 MB" in report + assert "File Size: 9 MB" in report + + +def test_sync_benchmark_with_empty_results(mock_config, monkeypatch): + """Test handling of empty results""" + + def mock_get_empty_stats(self, size_mb, num_runs): + return DataTransferStats( + file_size_mb=size_mb, + upload=Stats(min=0, max=0, mean=0, stddev=0, p50=0, p95=0, p99=0), + download=Stats(min=0, max=0, mean=0, stddev=0, p50=0, p95=0, p99=0), + total_runs=0, + successful_runs=0, + ) + + monkeypatch.setattr("syftbox.client.benchmark.syncstats.SyncDataTransferStats.get_stats", mock_get_empty_stats) + + benchmark = SyncBenchmark(mock_config) + result = benchmark.collect_metrics(num_runs=3) + + # Verify empty stats are handled properly + for stats in result.file_size_stats: + assert stats.upload.mean == 0 + assert stats.download.mean == 0 + assert stats.total_runs == 0 
+ assert stats.successful_runs == 0 diff --git a/packages/syftbox/tests/unit/client/benchmark_sync_test.py b/packages/syftbox/tests/unit/client/benchmark_sync_test.py new file mode 100644 index 00000000000..80a3af108d4 --- /dev/null +++ b/packages/syftbox/tests/unit/client/benchmark_sync_test.py @@ -0,0 +1,212 @@ +import time + +import pytest +import requests + +from syftbox.client.benchmark import Stats +from syftbox.client.benchmark.syncstats import ( + DataTransferStats, + FileTransferDuration, + SyncDataTransferStats, + generate_byte_string, + random_filename, +) + + +# Mock classes +class MockResponse: + def __init__(self, status_code=200): + self.status_code = status_code + + def raise_for_status(self): + if self.status_code != 200: + raise requests.RequestException(f"HTTP Error: {self.status_code}") + + +# Fixtures +@pytest.fixture +def sync_stats(): + return SyncDataTransferStats(url="https://example.com", token="test-token", email="test@example.com") + + +@pytest.fixture +def mock_time(monkeypatch): + class MockTime: + def __init__(self): + self.current_time = 0.0 + self.sleep_calls = [] + + def time(self): + self.current_time += 0.5 + return self.current_time + + def sleep(self, seconds): + self.sleep_calls.append(seconds) + + mock_timer = MockTime() + monkeypatch.setattr(time, "time", mock_timer.time) + monkeypatch.setattr(time, "sleep", mock_timer.sleep) + return mock_timer + + +def test_generate_byte_string(): + """Test byte string generation""" + size_mb = 2 + data = generate_byte_string(size_mb) + expected_size = size_mb * 1024 * 1024 + + assert isinstance(data, bytes) + assert len(data) == expected_size + assert data == b"\0" * expected_size + + +def test_random_filename(): + """Test random filename generation""" + size_mb = 5 + filename = random_filename(size_mb) + + assert filename.startswith("5mb-") + assert filename.endswith(".bytes") + assert len(filename) == len("5mb-") + 8 + len(".bytes") + + +def test_successful_file_operations(sync_stats, mock_time, monkeypatch): + """Test successful file upload, download, and delete operations""" + + def mock_post(*args, **kwargs): + return MockResponse(200) + + monkeypatch.setattr(requests, "post", mock_post) + + # Test upload + upload_time = sync_stats.upload_file("test.txt", b"test data") + assert upload_time == 500.0 # 0.5 seconds * 1000 + + # Test download + download_time = sync_stats.download_file("test.txt") + assert download_time == 500.0 + + # Test delete + delete_time = sync_stats.delete_file("test.txt") + assert delete_time == 500.0 + + +def test_file_operation_failures(sync_stats, mock_time, monkeypatch): + """Test handling of file operation failures""" + + def mock_post(*args, **kwargs): + return MockResponse(500) + + monkeypatch.setattr(requests, "post", mock_post) + + # Test upload failure + with pytest.raises(requests.RequestException): + sync_stats.upload_file("test.txt", b"test data") + + # Test download failure + with pytest.raises(requests.RequestException): + sync_stats.download_file("test.txt") + + # Test delete failure (should not raise exception due to ignore_errors=True) + delete_time = sync_stats.delete_file("test.txt") + assert delete_time == 500.0 + + +def test_measure_file_transfer_success(sync_stats, mock_time, monkeypatch): + """Test successful file transfer measurement""" + + def mock_post(*args, **kwargs): + return MockResponse(200) + + monkeypatch.setattr(requests, "post", mock_post) + + result = sync_stats.measure_file_transfer(1) + assert isinstance(result, FileTransferDuration) + assert 
result.upload == 500.0 + assert result.download == 500.0 + + +def test_measure_file_transfer_failure(sync_stats, mock_time, monkeypatch, capsys): + """Test failed file transfer measurement""" + + def mock_post(*args, **kwargs): + raise requests.RequestException("Simulated failure") + + monkeypatch.setattr(requests, "post", mock_post) + + result = sync_stats.measure_file_transfer(1) + assert result is None + + captured = capsys.readouterr() + assert "Error during file transfer" in captured.out + + +def test_get_stats_success(sync_stats, mock_time, monkeypatch): + """Test successful statistics collection""" + + def mock_post(*args, **kwargs): + return MockResponse(200) + + monkeypatch.setattr(requests, "post", mock_post) + + stats = sync_stats.get_stats(file_size_mb=1, num_runs=3) + assert isinstance(stats, DataTransferStats) + assert stats.file_size_mb == 1 + assert stats.successful_runs == 3 + assert stats.total_runs == 3 + assert isinstance(stats.upload, Stats) + assert isinstance(stats.download, Stats) + + +def test_get_stats_all_failures(sync_stats, mock_time, monkeypatch): + """Test statistics collection with all failures""" + + def mock_post(*args, **kwargs): + raise requests.RequestException("Simulated failure") + + monkeypatch.setattr(requests, "post", mock_post) + monkeypatch.setattr(time, "sleep", lambda x: None) # Skip sleep delay + + with pytest.raises(RuntimeError, match="All .* runs failed"): + sync_stats.get_stats(file_size_mb=1, num_runs=3) + + +def test_get_stats_partial_failure(sync_stats, mock_time, monkeypatch): + """Test statistics collection with some failures""" + call_count = 0 + + def mock_measure_transfer(*args, **kwargs): + nonlocal call_count + call_count += 1 + # Fail every other attempt + if call_count % 2 == 0: + return None + return FileTransferDuration(upload=500.0, download=500.0) + + monkeypatch.setattr(sync_stats, "measure_file_transfer", mock_measure_transfer) + monkeypatch.setattr(time, "sleep", lambda x: None) # Skip sleep delay + + stats = sync_stats.get_stats(file_size_mb=1, num_runs=4) + assert isinstance(stats, DataTransferStats) + assert stats.successful_runs == 2 # Half of the runs should succeed + assert stats.total_runs == 4 + + +def test_get_stats_delay_between_runs(sync_stats, mock_time, monkeypatch): + """Test delay between runs""" + calls = [] + + def mock_measure_transfer(*args, **kwargs): + calls.append(1) + return FileTransferDuration(upload=500.0, download=500.0) + + def mock_sleep(seconds): + mock_time.sleep_calls.append(seconds) + + monkeypatch.setattr(sync_stats, "measure_file_transfer", mock_measure_transfer) + monkeypatch.setattr(time, "sleep", mock_sleep) + + sync_stats.get_stats(file_size_mb=1, num_runs=3) + + assert len(mock_time.sleep_calls) == 2 + assert mock_time.sleep_calls == [5, 5] diff --git a/packages/syftbox/tests/unit/client/benchmark_test.py b/packages/syftbox/tests/unit/client/benchmark_test.py new file mode 100644 index 00000000000..44508e5dd3b --- /dev/null +++ b/packages/syftbox/tests/unit/client/benchmark_test.py @@ -0,0 +1,147 @@ +import pytest + +from syftbox.client.benchmark import BenchmarkResult +from syftbox.client.benchmark.network import NetworkBenchmark +from syftbox.client.benchmark.runner import SyftBenchmarkRunner +from syftbox.client.benchmark.sync import SyncBenchmark + + +# Mock classes +class MockResult(BenchmarkResult): + def __init__(self, num_runs: int): + self.num_runs = num_runs + + def readable_report(self): + return "Mock Report" + + def dict_report(self): + return {"runs": self.num_runs} + 
+ +@pytest.fixture +def mock_config(): + class MockConfig: + def __init__(self): + self.server_url = "https://test.example.com" + self.access_token = "test-token" + self.email = "test@example.com" + + return MockConfig() + + +class MockCollector: + def __init__(self, config): + self.config = config + + def collect_metrics(self, num_runs: int): + return MockResult(num_runs=num_runs) + + +class MockFailingCollector: + def __init__(self, config): + self.config = config + + def collect_metrics(self, num_runs: int): + raise Exception("Mock collection failure") + + +class MockReporter: + def __init__(self): + self.metrics = None + + def generate(self, metrics): + self.metrics = metrics + + +def test_get_collectors(): + """Test getting benchmark collectors""" + runner = SyftBenchmarkRunner(None, None) + collectors = runner.get_collectors() + + assert isinstance(collectors, dict) + assert "network" in collectors + assert "sync" in collectors + assert collectors["network"] == NetworkBenchmark + assert collectors["sync"] == SyncBenchmark + + +def test_successful_benchmark_run(mock_config, monkeypatch): + """Test successful benchmark run with all collectors""" + reporter = MockReporter() + runner = SyftBenchmarkRunner(mock_config, reporter) + + def mock_get_collectors(): + return {"network": MockCollector, "sync": MockCollector} + + monkeypatch.setattr(runner, "get_collectors", mock_get_collectors) + runner.run(num_runs=3) + + # Verify metrics were collected and report was generated + assert reporter.metrics is not None + assert "network" in reporter.metrics + assert "sync" in reporter.metrics + assert isinstance(reporter.metrics["network"], MockResult) + assert isinstance(reporter.metrics["sync"], MockResult) + assert reporter.metrics["network"].num_runs == 3 + assert reporter.metrics["sync"].num_runs == 3 + + +def test_partial_benchmark_failure(mock_config, monkeypatch, capsys): + """Test benchmark run with some failing collectors""" + reporter = MockReporter() + runner = SyftBenchmarkRunner(mock_config, reporter) + + def mock_get_collectors(): + return {"network": MockCollector, "sync": MockFailingCollector} + + monkeypatch.setattr(runner, "get_collectors", mock_get_collectors) + runner.run(num_runs=3) + + # Check error message was printed + captured = capsys.readouterr() + assert "Failed to collect metrics for sync" in captured.out + + # Verify successful metrics were collected + assert reporter.metrics is not None + assert "network" in reporter.metrics + assert isinstance(reporter.metrics["network"], MockResult) + assert reporter.metrics["network"].num_runs == 3 + + +def test_all_benchmarks_failing(mock_config, monkeypatch, capsys): + """Test benchmark run with all collectors failing""" + reporter = MockReporter() + runner = SyftBenchmarkRunner(mock_config, reporter) + + def mock_get_collectors(): + return {"network": MockFailingCollector, "sync": MockFailingCollector} + + monkeypatch.setattr(runner, "get_collectors", mock_get_collectors) + runner.run(num_runs=3) + + # Check error messages were printed + captured = capsys.readouterr() + assert "Failed to collect metrics for network" in captured.out + assert "Failed to collect metrics for sync" in captured.out + + # Verify empty report was generated + assert reporter.metrics is not None + assert isinstance(reporter.metrics, dict) + assert len(reporter.metrics) == 0 + + +def test_empty_collectors(mock_config, monkeypatch): + """Test benchmark run with no collectors""" + reporter = MockReporter() + runner = SyftBenchmarkRunner(mock_config, 
reporter) + + def mock_get_collectors(): + return {} + + monkeypatch.setattr(runner, "get_collectors", mock_get_collectors) + runner.run(num_runs=3) + + # Verify empty report was generated + assert reporter.metrics is not None + assert isinstance(reporter.metrics, dict) + assert len(reporter.metrics) == 0 diff --git a/packages/syftbox/tests/unit/client/client_cli_test.py b/packages/syftbox/tests/unit/client/client_cli_test.py new file mode 100644 index 00000000000..43c10136320 --- /dev/null +++ b/packages/syftbox/tests/unit/client/client_cli_test.py @@ -0,0 +1,76 @@ +from pathlib import Path + +from typer.testing import CliRunner + +from syftbox.client.cli import app as client_cli + +# Initialize test runner +runner = CliRunner() + + +def mock_port_in_use(*args, **kwargs): + return False + + +def test_run_syftbox_success(monkeypatch, mock_config): + def setup_config_interactive(*args, **kwargs): + return mock_config + + def mock_run_syftbox(*args, **kwargs): + return 0 + + def get_migration_decision(*args, **kwargs): + return False + + monkeypatch.setattr("syftbox.client.core.run_syftbox", mock_run_syftbox) + monkeypatch.setattr("syftbox.client.cli_setup.setup_config_interactive", setup_config_interactive) + monkeypatch.setattr("syftbox.client.cli_setup.get_migration_decision", get_migration_decision) + monkeypatch.setattr("syftbox.client.utils.net.is_port_in_use", mock_port_in_use) + + result = runner.invoke(client_cli) + assert result.exit_code == 0 + + +def test_client_error(monkeypatch, mock_config): + def setup_config_interactive(*args, **kwargs): + return mock_config + + def mock_run_syftbox(*args, **kwargs): + return -1 + + def get_migration_decision(*args, **kwargs): + return False + + monkeypatch.setattr("syftbox.client.core.run_syftbox", mock_run_syftbox) + monkeypatch.setattr("syftbox.client.cli_setup.setup_config_interactive", setup_config_interactive) + monkeypatch.setattr("syftbox.client.utils.net.is_port_in_use", mock_port_in_use) + monkeypatch.setattr("syftbox.client.cli_setup.get_migration_decision", get_migration_decision) + + result = runner.invoke(client_cli) + assert result.exit_code == -1 + + +def test_port_error(monkeypatch): + monkeypatch.setattr("syftbox.client.utils.net.is_port_in_use", lambda p: True) + result = runner.invoke(client_cli) + assert result.exit_code == 1 + + +def test_client_report(monkeypatch, tmp_path, mock_config): + monkeypatch.setattr("syftbox.client.logger.zip_logs", lambda p, **kw: Path(str(p) + ".log")) + result = runner.invoke(client_cli, ["report"]) + assert result.exit_code == 0 + + +def test_client_benchmark(monkeypatch, tmp_path, mock_config): + def mock_generate(*args, **kwargs): + return + + def mock_run(*args, **kwargs): + return + + monkeypatch.setattr("syftbox.client.benchmark.runner.BenchmarkReporter.generate", mock_generate) + monkeypatch.setattr("syftbox.client.benchmark.runner.SyftBenchmarkRunner.run", mock_run) + + result = runner.invoke(client_cli, ["benchmark"]) + assert result.exit_code == 0 diff --git a/packages/syftbox/tests/unit/client/debug_report_test.py b/packages/syftbox/tests/unit/client/debug_report_test.py new file mode 100644 index 00000000000..320464b8809 --- /dev/null +++ b/packages/syftbox/tests/unit/client/debug_report_test.py @@ -0,0 +1,33 @@ +from syftbox.lib.debug import debug_report, debug_report_yaml + + +def test_debug_report(mock_config): + result = debug_report() + assert isinstance(result, dict) + assert "system" in result + assert "syftbox" in result + assert "syftbox_env" in result + assert 
"resources" in result["system"] + assert "os" in result["system"] + assert "python" in result["system"] + assert "command" in result["syftbox"] + assert "client_config_path" in result["syftbox"] + assert "client_config" in result["syftbox"] + assert "apps_dir" in result["syftbox"] + assert "apps" in result["syftbox"] + + +def test_debug_report_readable(mock_config): + result = debug_report_yaml() + assert isinstance(result, str) + assert "system" in result + assert "syftbox" in result + assert "syftbox_env" in result + assert "resources" in result + assert "os" in result + assert "python" in result + assert "command" in result + assert "client_config_path" in result + assert "client_config" in result + assert "apps_dir" in result + assert "apps" in result diff --git a/packages/syftbox/tests/unit/client/main_cli_test.py b/packages/syftbox/tests/unit/client/main_cli_test.py new file mode 100644 index 00000000000..5a191016a28 --- /dev/null +++ b/packages/syftbox/tests/unit/client/main_cli_test.py @@ -0,0 +1,45 @@ +from typer.testing import CliRunner + +from syftbox.main import app as main_cli + +# Initialize test runner +runner = CliRunner() + + +def test_version(): + result = runner.invoke(main_cli, ["version"]) + assert result.exit_code == 0 + assert len(result.stdout.split(".")) == 3 + + +def test_debug(monkeypatch): + def mock_debug_report(*args, **kwargs): + return "ok: true" + + monkeypatch.setattr("syftbox.lib.debug.debug_report_yaml", mock_debug_report) + result = runner.invoke(main_cli, ["debug"]) + assert result.exit_code == 0, result.stdout + assert "ok: true" in result.stdout + + +def test_debug_invalid_config(monkeypatch): + def mock_debug_report(*args, **kwargs): + raise Exception("Invalid config") + + monkeypatch.setattr("syftbox.lib.debug.debug_report_yaml", mock_debug_report) + + result = runner.invoke(main_cli, ["debug"]) + assert result.exit_code == 1 + assert "Error" in result.stdout + + +def test_debug_custom_config(monkeypatch, mock_config): + def mock_debug_report(conf_path): + assert str(conf_path) == str(mock_config.path) + return "ok: true" + + monkeypatch.setattr("syftbox.lib.debug.debug_report_yaml", mock_debug_report) + + result = runner.invoke(main_cli, ["debug", "--config", str(mock_config.path)]) + assert result.exit_code == 0 + assert "ok: true" in result.stdout diff --git a/packages/syftbox/tests/unit/client/prompt_test.py b/packages/syftbox/tests/unit/client/prompt_test.py new file mode 100644 index 00000000000..efa6e1d3281 --- /dev/null +++ b/packages/syftbox/tests/unit/client/prompt_test.py @@ -0,0 +1,93 @@ +import os +import tempfile +from pathlib import Path + +import pytest + +from syftbox.client.cli_setup import prompt_data_dir, prompt_email +from syftbox.lib.constants import DEFAULT_DATA_DIR +from syftbox.lib.validators import is_valid_dir, is_valid_email + + +@pytest.mark.parametrize( + "path,expected", + [ + ("/tmp", True), + ("./test", True), + (".", True), + ("..", True), + ("~", True), + ("", False), # Empty path = invalid + ("/x", False), # unwriteable path + ], +) +def test_is_valid_dir(path, expected): + """Test various email formats""" + valid, reason = is_valid_dir(path, check_empty=False, check_writable=True) + assert valid == expected, reason + + +def test_empty_dir(): + # Test with temp directory + with tempfile.TemporaryDirectory() as temp_dir: + # Valid empty directory + valid, reason = is_valid_dir(temp_dir) + assert valid + assert reason == "" + + # Non-empty directory + with open(os.path.join(temp_dir, "test.txt"), "w") as f: + 
f.write("test") + + valid, reason = is_valid_dir(temp_dir) + assert not valid + assert "not empty" in reason.lower() + + +@pytest.mark.parametrize( + "email,expected", + [ + ("test@example.com", True), + ("test.name@example.com", True), + ("test+label@example.com", True), + ("test@sub.example.com", True), + ("a@b.c", True), + ("", False), # Empty email + ("test@", False), # Missing domain + ("@example.com", False), # Missing username + ("test@example", False), # Mising TLD + ("test.example.com", False), # Missing @ + ("test@@example.com", False), # Double @ + ("test@exam ple.com", False), # Space + ("test@example..com", False), # Double dots + ], +) +def test_email_validation(email, expected): + """Test various email formats""" + assert is_valid_email(email) == expected + + +@pytest.mark.parametrize( + "user_input,expected", + [ + ("", Path(DEFAULT_DATA_DIR)), + ("./valid/path", Path("./valid/path")), + ], +) +def test_prompt_data_dir(user_input, expected, monkeypatch): + monkeypatch.setattr("builtins.input", lambda *a, **k: user_input) + monkeypatch.setattr("syftbox.client.cli_setup.is_valid_dir", lambda x: (True, "")) + + dir = prompt_data_dir() + assert dir.absolute() == expected.absolute() + + +@pytest.mark.timeout(1) +def test_prompt_email(monkeypatch): + valid_email = "test@example.com" + + monkeypatch.setattr("builtins.input", lambda *a, **k: valid_email) + monkeypatch.setattr("syftbox.client.cli_setup.is_valid_dir", lambda x: (True, "")) + + email = prompt_email() + assert email == valid_email diff --git a/packages/syftbox/tests/unit/client/setup_config_test.py b/packages/syftbox/tests/unit/client/setup_config_test.py new file mode 100644 index 00000000000..eecbbf63510 --- /dev/null +++ b/packages/syftbox/tests/unit/client/setup_config_test.py @@ -0,0 +1,75 @@ +from secrets import token_hex + +from syftbox.client.cli_setup import setup_config_interactive +from syftbox.lib.client_config import SyftClientConfig + + +def test_setup_new_config(tmp_path): + config_path = tmp_path / f"config_{token_hex}.json" + data_dir = tmp_path / "data" + email = "test@example.com" + server = "http://test.com/" + port = 8080 + + result = setup_config_interactive( + config_path=config_path, + email=email, + data_dir=data_dir, + server=server, + port=port, + skip_auth=True, + skip_verify_install=True, + ) + + assert isinstance(result, SyftClientConfig) + assert result.path == config_path + assert result.data_dir == data_dir + assert str(result.email) == str(email) + assert str(result.server_url) == str(server) + assert str(result.client_url) == "http://127.0.0.1:8080/" + assert result.client_url.port == port + + +def test_setup_new_config_with_prompt(tmp_path, monkeypatch): + config_path = tmp_path / f"config_{token_hex}.json" + data_dir = tmp_path / "data" + email = "test@example.com" + server = "http://test.com/" + port = 8080 + + monkeypatch.setattr("syftbox.client.cli_setup.prompt_email", lambda: email) + monkeypatch.setattr("syftbox.client.cli_setup.prompt_data_dir", lambda: data_dir) + + result = setup_config_interactive( + config_path=config_path, + email=None, + data_dir=None, + server=server, + port=port, + skip_auth=True, + skip_verify_install=True, + ) + + assert isinstance(result, SyftClientConfig) + assert result.path == config_path + assert result.data_dir == data_dir + assert str(result.email) == str(email) + assert str(result.server_url) == str(server) + assert str(result.client_url) == "http://127.0.0.1:8080/" + assert result.client_url.port == port + + +def 
test_setup_existing_config(tmp_path, mock_config): + new_port = 8081 + result = setup_config_interactive( + server=str(mock_config.server_url), + email=str(mock_config.email), + data_dir="", + config_path=mock_config.path, + port=new_port, + skip_auth=True, + skip_verify_install=True, + ) + + assert isinstance(result, SyftClientConfig) + assert result.client_url.port == new_port diff --git a/packages/syftbox/tests/unit/client/syft_client_test.py b/packages/syftbox/tests/unit/client/syft_client_test.py new file mode 100644 index 00000000000..ffcdcad7788 --- /dev/null +++ b/packages/syftbox/tests/unit/client/syft_client_test.py @@ -0,0 +1,97 @@ +import pytest + +from syftbox import __version__ +from syftbox.client.core import SyftBoxRunner, run_migration +from syftbox.client.exceptions import SyftBoxAlreadyRunning +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.exceptions import SyftBoxException +from syftbox.lib.http import HEADER_SYFTBOX_VERSION + + +def test_client_single_instance(tmp_path): + """Test that only one client instance can run""" + config = SyftClientConfig( + path=tmp_path / "config.json", + data_dir=tmp_path, + email="test@example.com", + server_url="http://localhost:5001", + client_url="http://localhost:8080", + ) + client1 = SyftBoxRunner(config) + client2 = SyftBoxRunner(config) + + client1.pid.create() + + # check should raise + with pytest.raises(SyftBoxAlreadyRunning): + client2.check_pidfile() + + # start should raise + with pytest.raises(SyftBoxAlreadyRunning): + client2.start() + + client1.shutdown() + + +def test_client_init_datasite(mock_config): + client = SyftBoxRunner(mock_config) + client.init_datasite() + + assert client.datasite.is_dir() + assert client.public_dir.is_dir() + + +def test_register_user(mock_config, httpx_mock): + httpx_mock.add_response(json={"token": "dummy-token"}, headers={HEADER_SYFTBOX_VERSION: __version__}) + client = SyftBoxRunner(mock_config) + client.register_self() + assert client.config.token == "dummy-token" + + +def test_register_user_error(mock_config, httpx_mock): + httpx_mock.add_response(status_code=503) + client = SyftBoxRunner(mock_config) + with pytest.raises(SyftBoxException): + client.register_self() + + assert client.config.token is None + + +def test_client_paths(tmp_path): + """Test that client paths are correctly set up""" + config = SyftClientConfig( + path=tmp_path / "config.json", + data_dir=tmp_path, + email="test@example.com", + server_url="http://localhost:5001", + client_url="http://localhost:8080", + ) + client = SyftBoxRunner(config) + + # data_dir should be the root of the client workspace + assert client.workspace.data_dir == tmp_path + + +def test_migration(mock_config): + # setup old datasites + datasites = ["test@openmined.org", "test2@openmined.org"] + old_dirs = ["apps", ".syft"] + datasites + for dir in old_dirs: + (mock_config.data_dir / dir).mkdir(parents=True) + (mock_config.data_dir / ".syft" / "local_syncstate.json").touch() + + run_migration(mock_config) + + # check new workspace + assert (mock_config.data_dir / "apis").is_dir() + assert (mock_config.data_dir / "plugins").is_dir() + assert (mock_config.data_dir / "datasites").is_dir() + + # check migrated datasites + for ds in datasites: + assert not (mock_config.data_dir / ds).exists() + assert (mock_config.data_dir / "datasites" / ds).is_dir() + + # check syncstate migration + assert not (mock_config.data_dir / ".syft").exists() + assert (mock_config.data_dir / "plugins" / "local_syncstate.json").is_file() diff --git 
a/packages/syftbox/tests/unit/client/workspace_test.py b/packages/syftbox/tests/unit/client/workspace_test.py new file mode 100644 index 00000000000..c3470a015d7 --- /dev/null +++ b/packages/syftbox/tests/unit/client/workspace_test.py @@ -0,0 +1,42 @@ +from pathlib import Path + +import pytest + +from syftbox.lib.workspace import SyftWorkspace + + +@pytest.fixture +def temp_workspace(tmp_path): + """Fixture to create a temporary workspace directory.""" + return SyftWorkspace(tmp_path / "workspace") + + +def test_workspace_init(): + # Test with string path + workspace = SyftWorkspace("/tmp/test") + assert isinstance(workspace.data_dir, Path) + + # Test with Path object + path_obj = Path("~/test2") + workspace = SyftWorkspace(path_obj) + + assert isinstance(workspace.data_dir, Path) + assert workspace.data_dir.is_absolute() + assert "~" not in str(workspace.data_dir) + + +def test_workspace_directory_structure(tmp_path): + workspace = SyftWorkspace(tmp_path) + + assert not workspace.datasites.exists() + assert not workspace.plugins.exists() + assert not workspace.apps.exists() + + # Create directories + workspace.mkdirs() + + # Verify directory structure + assert workspace.data_dir.is_dir() + assert workspace.datasites.is_dir() + assert workspace.plugins.is_dir() + assert workspace.apps.is_dir() diff --git a/packages/syftbox/tests/unit/conftest.py b/packages/syftbox/tests/unit/conftest.py new file mode 100644 index 00000000000..61b4ca766ed --- /dev/null +++ b/packages/syftbox/tests/unit/conftest.py @@ -0,0 +1,30 @@ +from pathlib import Path +from typing import Any, Generator + +import pytest + +from syftbox.lib.client_shim import SyftClientConfig + + +@pytest.fixture +def mock_config(monkeypatch, tmp_path) -> Generator[SyftClientConfig, Any, None]: + config_path = Path(tmp_path, "config.json") + data_dir = Path(tmp_path) + conf = SyftClientConfig( + path=config_path, + data_dir=data_dir, + email="test@openmined.org", + client_url="http://test:8080", + ) + conf.save() + conf.data_dir.mkdir(parents=True, exist_ok=True) + + def mock_load(*args, **kwargs): + nonlocal conf + return conf + + monkeypatch.setattr(SyftClientConfig, "load", mock_load) + + yield conf + + monkeypatch.undo() diff --git a/packages/syftbox/tests/unit/hash_test.py b/packages/syftbox/tests/unit/hash_test.py new file mode 100644 index 00000000000..5a0bc4cd48a --- /dev/null +++ b/packages/syftbox/tests/unit/hash_test.py @@ -0,0 +1,76 @@ +from pathlib import Path + +from syftbox.client.utils.dir_tree import create_dir_tree +from syftbox.lib.hash import collect_files + + +def test_collect_files(tmp_path: Path): + # Create entire test structure including symlink targets + tree = { + # Symlink targets + "folder_to_symlink": {"file_in_symlink.txt": "symlink content"}, + "file_to_symlink.txt": "symlink file content", + # Test structure + "test_dir": { + "file1.txt": "content1", + "file2.txt": "content2", + ".hidden_file": "hidden", + ".hidden_dir": { + "file_in_hidden.txt": "hidden content", + }, + "nested": { + "nested_file.txt": "nested content", + }, + }, + } + create_dir_tree(tmp_path, tree) + + # Create symlinks in test_dir + test_dir = tmp_path / "test_dir" + (test_dir / "symlink_dir").symlink_to(tmp_path / "folder_to_symlink") + (test_dir / "symlink_file.txt").symlink_to(tmp_path / "file_to_symlink.txt") + + def get_names(files: list[Path]) -> set[str]: + return {f.name for f in files} + + # Collect excluding hidden and symlinks + files = collect_files(test_dir) + assert get_names(files) == {"file1.txt", "file2.txt", 
"nested_file.txt"} + + # With hidden + files = collect_files(test_dir, include_hidden=True) + assert get_names(files) == { + "file1.txt", + "file2.txt", + "nested_file.txt", + ".hidden_file", + "file_in_hidden.txt", + } + + # With symlinks + files = collect_files(test_dir, follow_symlinks=True) + assert get_names(files) == { + "file1.txt", + "file2.txt", + "nested_file.txt", + "file_in_symlink.txt", + "symlink_file.txt", + } + + # With both + files = collect_files(test_dir, include_hidden=True, follow_symlinks=True) + assert get_names(files) == { + "file1.txt", + "file2.txt", + "nested_file.txt", + ".hidden_file", + "file_in_hidden.txt", + "file_in_symlink.txt", + "symlink_file.txt", + } + + # Edge cases + assert collect_files(test_dir / "nonexistent") == [] + regular_file = test_dir / "just_a_file" + regular_file.touch() + assert collect_files(regular_file) == [] diff --git a/packages/syftbox/tests/unit/import_test.py b/packages/syftbox/tests/unit/import_test.py new file mode 100644 index 00000000000..66c94eca373 --- /dev/null +++ b/packages/syftbox/tests/unit/import_test.py @@ -0,0 +1,9 @@ +from syftbox import lib +from syftbox.client import core as client +from syftbox.server import server + + +def test_import(): + dir(lib) + dir(client) + dir(server) diff --git a/packages/syftbox/tests/unit/lib/client_shim_test.py b/packages/syftbox/tests/unit/lib/client_shim_test.py new file mode 100644 index 00000000000..b29310f319c --- /dev/null +++ b/packages/syftbox/tests/unit/lib/client_shim_test.py @@ -0,0 +1,71 @@ +from pathlib import Path + +import pytest + +from syftbox.lib.client_config import SyftClientConfig +from syftbox.lib.client_shim import Client + + +@pytest.fixture +def client(mock_config): + return Client(conf=mock_config) + + +def test_client_properties(client, mock_config): + assert client.email == mock_config.email + assert client.config_path == mock_config.path + assert client.my_datasite == client.workspace.datasites / mock_config.email + assert client.datasites == client.workspace.datasites + assert client.sync_folder == client.workspace.datasites # Test deprecated property + assert client.datasite_path == client.workspace.datasites / mock_config.email # Test deprecated property + + +def test_client_load(mock_config, tmp_path): + _ = mock_config # this fixture creates and saves a mock tmp_path/config.json file + config_path = tmp_path / "config.json" + client = Client.load(config_path) + assert isinstance(client, Client) + assert isinstance(client.config, SyftClientConfig) + + +def test_api_request_name(client, monkeypatch): + test_dir = Path("/fake/api/request/path") + monkeypatch.setattr(Path, "cwd", lambda: test_dir) + assert client.api_request_name == "path" + + +@pytest.mark.parametrize( + "api_name,datasite,expected", + [ + ("test_api", None, "test_api"), + (None, "other@example.com", "current_api"), + ("custom_api", "other@example.com", "custom_api"), + ], +) +def test_api_data(client, monkeypatch, api_name, datasite, expected): + monkeypatch.setattr(Path, "cwd", lambda: Path("/fake/current_api")) + + result = client.api_data(api_name, datasite) + + datasite_email = datasite or client.config.email + expected_path = client.workspace.datasites / datasite_email / "api_data" / (expected) + assert result == expected_path + + +def test_makedirs(client, tmp_path): + test_paths = [tmp_path / "dir1", tmp_path / "dir2" / "subdir", tmp_path / "dir3" / "subdir" / "subsubdir"] + + client.makedirs(*test_paths) + + for path in test_paths: + assert path.exists() + assert path.is_dir() 
+ + +def test_makedirs_existing(client, tmp_path): + test_path = tmp_path / "existing" + test_path.mkdir() + + # Should not raise error when directory exists + client.makedirs(test_path) + assert test_path.exists() diff --git a/packages/syftbox/tests/unit/lib/permission_lib_test.py b/packages/syftbox/tests/unit/lib/permission_lib_test.py new file mode 100644 index 00000000000..009851f7e36 --- /dev/null +++ b/packages/syftbox/tests/unit/lib/permission_lib_test.py @@ -0,0 +1,233 @@ +from pathlib import Path + +import pytest + +from syftbox.lib.constants import PERM_FILE +from syftbox.lib.permissions import ( + ComputedPermission, + PermissionParsingError, + PermissionRule, + PermissionType, + SyftPermission, +) + + +def test_parsing_dicts(): + d = {"permissions": "read", "path": "x.txt", "user": "user@example.org"} + rule = PermissionRule.from_rule_dict(dir_path=Path("."), rule_dict=d, priority=0) + assert rule.permissions == [PermissionType.READ] + assert rule.path == "x.txt" + assert rule.user == "user@example.org" + assert rule.allow is True + + +def test_parsing(): + yaml_string = """ + - permissions: read + path: x.txt + user: user@example.org + + - permissions: [read, write] + path: x.txt + user: "*" + type: disallow + """ + file = SyftPermission.from_string(yaml_string, ".") + assert len(file.rules) == 2 + + assert file.rules[0].permissions == [PermissionType.READ] + assert file.rules[0].path == "x.txt" + assert file.rules[0].user == "user@example.org" + assert file.rules[0].allow is True + + # check the same for the second rule + assert file.rules[1].permissions == [PermissionType.READ, PermissionType.WRITE] + assert file.rules[1].path == "x.txt" + assert file.rules[1].user == "*" + assert file.rules[1].allow is False + + +def test_parsing_fails(): + yaml_string = """ + - permissions: read + path: "../*/x.txt" + user: user@example.org + """ + with pytest.raises(PermissionParsingError): + SyftPermission.from_string(yaml_string, ".") + + +def test_parsing_useremail(): + yaml_string = """ + - permissions: read + path: "{useremail}/*" + user: user@example.org + """ + + file = SyftPermission.from_string(yaml_string, ".") + rule = file.rules[0] + assert rule.has_email_template + assert rule.resolve_path_pattern("user@example.org") == "user@example.org/*" + + +def test_globstar(): + rule = PermissionRule.from_rule_dict( + dir_path=Path("."), + rule_dict={"path": "**", "permissions": ["admin", "read", "write"], "user": "user@example.org"}, + priority=0, + ) + computed_permission = ComputedPermission(user="user@example.org", file_path=Path("a.txt")) + computed_permission.apply(rule) + assert computed_permission.has_permission(PermissionType.READ) + + computed_permission = ComputedPermission(user="user@example.org", file_path=Path("b/a.txt")) + computed_permission.apply(rule) + assert computed_permission.has_permission(PermissionType.READ) + + +def test_computed_permission_root_user(): + computed_permission = ComputedPermission.from_user_rules_and_path( + rules=[], user="user@example.org", path=Path("user@example.org/test/a.txt") + ) + assert computed_permission.has_permission(PermissionType.READ) + assert computed_permission.has_permission(PermissionType.WRITE) + assert computed_permission.has_permission(PermissionType.CREATE) + assert computed_permission.has_permission(PermissionType.ADMIN) + + +def test_computed_permission_admin(): + base_rule = PermissionRule.from_rule_dict( + dir_path=Path("."), + rule_dict={"path": "user@example.org/test/a.txt", "permissions": ["admin"], "user": 
"user@example.org"}, + priority=0, + ) + + computed_permission = ComputedPermission.from_user_rules_and_path( + rules=[base_rule], user="user@example.org", path=Path("user@example.org/test/a.txt") + ) + assert computed_permission.has_permission(PermissionType.READ) + assert computed_permission.has_permission(PermissionType.WRITE) + assert computed_permission.has_permission(PermissionType.CREATE) + assert computed_permission.has_permission(PermissionType.ADMIN) + + +def test_computed_permissions_read(): + read_rule = PermissionRule.from_rule_dict( + dir_path=Path("."), + rule_dict={"path": "user@example.org/test/a.txt", "permissions": ["read"], "user": "user_2@example.org"}, + priority=0, + ) + computed_permission = ComputedPermission.from_user_rules_and_path( + rules=[read_rule], user="user_2@example.org", path=Path("user@example.org/test/a.txt") + ) + assert computed_permission.has_permission(PermissionType.READ) + + +def test_computed_permission_disallow(): + allow_rule = PermissionRule.from_rule_dict( + dir_path=Path("."), + rule_dict={"path": "user@example.org/test/a.txt", "permissions": ["read"], "user": "user_2@example.org"}, + priority=0, + ) + disallow_rule = PermissionRule.from_rule_dict( + dir_path=Path("."), + rule_dict={ + "path": "user@example.org/test/a.txt", + "permissions": ["read"], + "user": "user_2@example.org", + "type": "disallow", + }, + priority=1, + ) + computed_permission = ComputedPermission.from_user_rules_and_path( + rules=[allow_rule, disallow_rule], user="user_2@example.org", path=Path("user@example.org/test/a.txt") + ) + assert not computed_permission.has_permission(PermissionType.READ) + + +def test_computed_permissions_write_create_with_read(): + write_create_rule = PermissionRule.from_rule_dict( + dir_path=Path("."), + rule_dict={ + "path": "user@example.org/test/a.txt", + "permissions": ["read", "write", "create"], + "user": "user_2@example.org", + }, + priority=0, + ) + computed_permission = ComputedPermission.from_user_rules_and_path( + rules=[write_create_rule], user="user_2@example.org", path=Path("user@example.org/test/a.txt") + ) + assert computed_permission.has_permission(PermissionType.READ) + assert computed_permission.has_permission(PermissionType.WRITE) + assert computed_permission.has_permission(PermissionType.CREATE) + + +def test_computed_permissions_write_create_without_read(): + write_create_rule = PermissionRule.from_rule_dict( + dir_path=Path("."), + rule_dict={ + "path": "user@example.org/test/a.txt", + "permissions": ["write", "create"], + "user": "user_2@example.org", + }, + priority=0, + ) + computed_permission = ComputedPermission.from_user_rules_and_path( + rules=[write_create_rule], user="user_2@example.org", path=Path("user@example.org/test/a.txt") + ) + assert not computed_permission.has_permission(PermissionType.READ) + assert not computed_permission.has_permission(PermissionType.WRITE) + assert not computed_permission.has_permission(PermissionType.CREATE) + + +def test_computed_permission_permfile_access(): + rwc_rule = PermissionRule.from_rule_dict( + dir_path=Path("."), + rule_dict={ + "path": f"user@example.org/test/{PERM_FILE}", + "permissions": ["read", "write", "create"], + "user": "user_2@example.org", + }, + priority=0, + ) + + computed_permission = ComputedPermission.from_user_rules_and_path( + rules=[rwc_rule], user="user_2@example.org", path=Path("user@example.org/test/permfile.yaml") + ) + + assert not computed_permission.has_permission(PermissionType.READ) + assert not 
computed_permission.has_permission(PermissionType.WRITE) + assert not computed_permission.has_permission(PermissionType.CREATE) + + +def test_permission_file_add_rule(): + perm_file = SyftPermission.from_rule_dicts( + Path("test") / PERM_FILE, + [ + { + "path": "**", + "user": "user@example.org", + "permissions": ["read"], + } + ], + ) + + # Initially only has read permission + computed_permission = ComputedPermission.from_user_rules_and_path( + rules=perm_file.rules, user="user@example.org", path=Path("test/text.txt") + ) + assert computed_permission.has_permission(PermissionType.READ) + assert not computed_permission.has_permission(PermissionType.WRITE) + assert not computed_permission.has_permission(PermissionType.CREATE) + + # Add write and create permissions + perm_file.add_rule(path="**", user="user@example.org", allow=True, permission=["write", "create"]) + + # Check permissions are updated + computed_permission = ComputedPermission.from_user_rules_and_path( + rules=perm_file.rules, user="user@example.org", path=Path("test/test.txt") + ) + assert computed_permission.has_permission(PermissionType.READ) + assert computed_permission.has_permission(PermissionType.WRITE) + assert computed_permission.has_permission(PermissionType.CREATE) diff --git a/packages/syftbox/tests/unit/server/conftest.py b/packages/syftbox/tests/unit/server/conftest.py new file mode 100644 index 00000000000..8d5aa21e93d --- /dev/null +++ b/packages/syftbox/tests/unit/server/conftest.py @@ -0,0 +1,119 @@ +import json + +import pytest +from fastapi.testclient import TestClient + +from syftbox import __version__ +from syftbox.client.server_client import SyncClient +from syftbox.lib.constants import PERM_FILE +from syftbox.lib.http import HEADER_SYFTBOX_VERSION +from syftbox.server.migrations import run_migrations +from syftbox.server.server import create_server +from syftbox.server.settings import ServerSettings + +TEST_DATASITE_NAME = "test_datasite@openmined.org" +TEST_FILE = "test_file.txt" +PERMFILE_DICT = [ + { + "path": "*", + "user": "*", + "permissions": ["admin", "read", "write"], + }, + { + "path": "**/*", + "user": "*", + "permissions": ["admin", "read", "write"], + }, +] + + +def get_access_token(client: TestClient, email: str) -> str: + response = client.post("/auth/request_email_token", json={"email": email}) + email_token = response.json()["email_token"] + response = client.post( + "/auth/validate_email_token", + headers={"Authorization": f"Bearer {email_token}"}, + params={"email": email}, + ) + + if response.status_code != 200: + raise ValueError(f"Failed to get access token, {response.text}") + return response.json()["access_token"] + + +@pytest.fixture(scope="function") +def client(monkeypatch, tmp_path): + """Every client gets their own snapshot folder at `tmp_path`""" + snapshot_folder = tmp_path / "snapshot" + settings = ServerSettings.from_data_folder(snapshot_folder) + settings.auth_enabled = False + monkeypatch.setenv("SYFTBOX_DATA_FOLDER", str(settings.data_folder)) + monkeypatch.setenv("SYFTBOX_SNAPSHOT_FOLDER", str(settings.snapshot_folder)) + monkeypatch.setenv("SYFTBOX_USER_FILE_PATH", str(settings.user_file_path)) + monkeypatch.setenv("SYFTBOX_OTEL_ENABLED", str(False)) + monkeypatch.setenv("SYFTBOX_AUTH_ENABLED", str(False)) + + datasite_name = TEST_DATASITE_NAME + datasite = settings.snapshot_folder / datasite_name + datasite.mkdir(parents=True) + + datafile = datasite / TEST_FILE + datafile.touch() + datafile.write_bytes(b"Hello, World!") + + datafile = datasite / TEST_DATASITE_NAME 
/ TEST_FILE + datafile.parent.mkdir(parents=True) + + datafile.touch() + datafile.write_bytes(b"Hello, World!") + + permfile = datasite / PERM_FILE + permfile.touch() + permfile.write_text(json.dumps(PERMFILE_DICT)) + + server_app = create_server(settings) + run_migrations(settings) + with TestClient(server_app) as client: + client.headers[HEADER_SYFTBOX_VERSION] = __version__ + access_token = get_access_token(client, TEST_DATASITE_NAME) + client.headers["Authorization"] = f"Bearer {access_token}" + yield client + + +@pytest.fixture(scope="function") +def sync_client(client: TestClient): + return SyncClient(conn=client) + + +@pytest.fixture(scope="function") +def client_without_perms(monkeypatch, tmp_path): + """Every client gets their own snapshot folder at `tmp_path`""" + settings = ServerSettings.from_data_folder(tmp_path) + settings.otel_enabled = False + settings.auth_enabled = False + + monkeypatch.setenv("SYFTBOX_DATA_FOLDER", str(settings.data_folder)) + monkeypatch.setenv("SYFTBOX_SNAPSHOT_FOLDER", str(settings.snapshot_folder)) + monkeypatch.setenv("SYFTBOX_USER_FILE_PATH", str(settings.user_file_path)) + monkeypatch.setenv("SYFTBOX_OTEL_ENABLED", str(False)) + monkeypatch.setenv("SYFTBOX_AUTH_ENABLED", str(False)) + + datasite_name = TEST_DATASITE_NAME + datasite = settings.snapshot_folder / datasite_name + datasite.mkdir(parents=True) + + datafile = datasite / TEST_FILE + datafile.touch() + datafile.write_bytes(b"Hello, World!") + + permfile = datasite / PERM_FILE + permfile.touch() + permfile.write_text("") + + server_app = create_server(settings) + run_migrations(settings) + with TestClient(server_app) as client: + client.headers[HEADER_SYFTBOX_VERSION] = __version__ + access_token = get_access_token(client, TEST_DATASITE_NAME) + client.headers["Authorization"] = f"Bearer {access_token}" + yield client diff --git a/packages/syftbox/tests/unit/server/file_store_test.py b/packages/syftbox/tests/unit/server/file_store_test.py new file mode 100644 index 00000000000..ac0421d7d70 --- /dev/null +++ b/packages/syftbox/tests/unit/server/file_store_test.py @@ -0,0 +1,27 @@ +import uuid +from concurrent.futures import ThreadPoolExecutor +from pathlib import Path + +from syftbox.lib.hash import hash_file +from syftbox.server.db.file_store import FileStore +from syftbox.server.settings import ServerSettings + + +def test_put_atomic(tmpdir): + settings = ServerSettings.from_data_folder(tmpdir) + syft_path = Path("test.txt") + system_path = settings.snapshot_folder / syft_path + user = "example@example.com" + + with ThreadPoolExecutor(max_workers=5) as executor: + # TODO: add permissions + executor.map( + lambda _: FileStore(settings).put( + syft_path, uuid.uuid4().bytes, user, check_permission=None, skip_permission_check=True + ), + range(25), + ) + + assert system_path.exists() + metadata = FileStore(settings).get_metadata(syft_path, user, skip_permission_check=True) + assert metadata.hash_bytes == hash_file(system_path).hash_bytes diff --git a/packages/syftbox/tests/unit/server/permission_servers_test.py b/packages/syftbox/tests/unit/server/permission_servers_test.py new file mode 100644 index 00000000000..7f533df1500 --- /dev/null +++ b/packages/syftbox/tests/unit/server/permission_servers_test.py @@ -0,0 +1,587 @@ +import sqlite3 +from pathlib import Path +from typing import Optional + +import pytest + +from syftbox.lib.constants import PERM_FILE +from syftbox.lib.permissions import PermissionType, SyftPermission +from syftbox.server.db.db import ( + get_read_permissions_for_user, + 
get_rules_for_permfile, + link_existing_rules_to_file, + print_table, + set_rules_for_permfile, +) +from syftbox.server.db.file_store import computed_permission_for_user_and_path +from syftbox.server.db.schema import get_db + + +@pytest.fixture +def connection_with_tables(): + return get_db(":memory:") + + +def insert_file_metadata(cursor: sqlite3.Cursor, fileid: int, path: str): + cursor.execute( + """ + INSERT INTO file_metadata (id, path, datasite, hash, signature, file_size, last_modified) VALUES + (?, ?, ?, 'hash1', 'signature1', 100, '2024-01-01') + """, + (fileid, path, path.split("/")[0]), + ) + + +def insert_rule( + cursor: sqlite3.Cursor, + permfile_path: str, + priority: int, + path: str, + user: str, + can_read: bool, + admin: bool, + disallow: bool, +): + permfile_dir = permfile_path.rsplit("/", 1)[0] + permfile_depth = len(Path(permfile_path).parts) + cursor.execute( + """ + INSERT INTO rules (permfile_path, permfile_dir, permfile_depth, priority, path, user, can_read, can_create, can_write, admin, disallow) VALUES + (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, + ( + permfile_path, + permfile_dir, + permfile_depth, + priority, + path, + user, + can_read, + 0, + 0, + admin, + disallow, + ), + ) + + +def insert_rule_files( + cursor: sqlite3.Cursor, + permfile_path: str, + priority: int, + fileid: int, + match_for_email: Optional[str] = None, +): + cursor.execute( + """ + INSERT INTO rule_files (permfile_path, priority, file_id, match_for_email) VALUES + (?, ?, ?, ?) + """, + (permfile_path, priority, fileid, match_for_email), + ) + + +def insert_file_mock(connection: sqlite3.Connection, path: str): + cursor = connection.cursor() + cursor.execute( + """ + INSERT INTO file_metadata (path, datasite, hash, signature, file_size, last_modified) + VALUES (?, ?, ?, ?, ?, ?) 
+ """, + (path, path.split("/")[0], "hash", "sig", 0, "2021-01-01"), + ) + connection.commit() + + +def get_all_file_mappings(connection: sqlite3.Connection): + cursor = connection.cursor() + cursor.execute( + """ + SELECT permfile_path, priority, file_id, match_for_email + FROM rule_files + """ + ) + return [dict(row) for row in cursor.fetchall()] + + +def test_insert_permissions_from_file(connection_with_tables: sqlite3.Connection): + for f in ["a.txt", "b.txt", "c.txt"]: + insert_file_mock(connection_with_tables, f"user@example.org/test2/{f}") + + yaml_string = """ + - permissions: read + path: a.txt + user: user@example.org + + - permissions: read + path: "*" + user: user@example.org + + - permissions: write + path: b.txt + user: user@example.org + + - permissions: write + path: z.txt + user: "*" + type: disallow + """ + file_path = f"user@example.org/test2/{PERM_FILE}" + file = SyftPermission.from_string(yaml_string, file_path) + + set_rules_for_permfile(connection_with_tables, file) + connection_with_tables.commit() + + assert len(get_all_file_mappings(connection_with_tables)) == 5 + + rules_before = len(get_all_file_mappings(connection_with_tables)) + + path = "user@example.org/test2/d.txt" + insert_file_mock(connection_with_tables, path) + assert len(get_all_file_mappings(connection_with_tables)) == rules_before + + link_existing_rules_to_file(connection_with_tables, Path(path)) + assert len(get_all_file_mappings(connection_with_tables)) == rules_before + 1 + + +def test_overwrite_permissions_from_file(connection_with_tables: sqlite3.Connection): + for f in ["a.txt", "b.txt", "c.txt"]: + insert_file_mock(connection_with_tables, f"user@example.org/test2/{f}") + + yaml_string = """ + - permissions: read + path: a.txt + user: user@example.org + + - permissions: write + path: b.txt + user: user@example.org + + - permissions: write + path: z.txt + user: "*" + type: disallow + """ + file_path = f"user@example.org/test2/{PERM_FILE}" + file = SyftPermission.from_string(yaml_string, file_path) + set_rules_for_permfile(connection_with_tables, file) + connection_with_tables.commit() + written_rules = get_rules_for_permfile(connection_with_tables, file) + # print all the tables + print_table(connection_with_tables, "rules") + print_table(connection_with_tables, "rule_files") + print_table(connection_with_tables, "file_metadata") + + permissions = [x.permissions for x in written_rules] + users = [x.user for x in written_rules] + allows = [x.allow for x in written_rules] + assert len(written_rules) == 3 + assert permissions == [ + [PermissionType.READ], + [PermissionType.WRITE], + [PermissionType.WRITE], + ] + assert users == ["user@example.org", "user@example.org", "*"] + assert allows == [True, True, False] + + assert ( + len( + [ + x + for x in get_all_file_mappings(connection_with_tables) + if x["permfile_path"] == str(file.relative_filepath) + ] + ) + == 2 + ) + + # overwrite + yaml_string = """ + - permissions: read + path: a.txt + user: user@example.org + + - permissions: create + path: x.txt + user: user@example.org + + - permissions: create + path: z.txt + user: "*" + type: disallow + + - permissions: create + path: d.txt + user: "*" + """ + + file_path = f"user@example.org/test2/{PERM_FILE}" + file = SyftPermission.from_string(yaml_string, file_path) + set_rules_for_permfile(connection_with_tables, file) + connection_with_tables.commit() + new_existing_rules = get_rules_for_permfile(connection_with_tables, file) + paths = [x.path for x in new_existing_rules] + permissions = 
[x.permissions for x in new_existing_rules] + users = [x.user for x in new_existing_rules] + allows = [x.allow for x in new_existing_rules] + assert len(new_existing_rules) == 4 + assert paths == ["a.txt", "x.txt", "z.txt", "d.txt"] + assert permissions == [ + [PermissionType.READ], + [PermissionType.CREATE], + [PermissionType.CREATE], + [PermissionType.CREATE], + ] + assert users == ["user@example.org", "user@example.org", "*", "*"] + assert allows == [True, True, False, True] + assert len(get_all_file_mappings(connection_with_tables)) == 1 + + +def test_computed_permissions(connection_with_tables: sqlite3.Connection): + for f in ["a.txt", "b.txt", "c.txt"]: + insert_file_mock(connection_with_tables, f"user@example.org/test2/{f}") + + # overwrite + yaml_string = """ + - permissions: read + path: a.txt + user: user@example.org + + - permissions: create + path: x.txt + user: user@example.org + + - permissions: create + path: z.txt + user: "*" + type: disallow + + - permissions: create + path: d.txt + user: "*" + """ + + file_path = f"user@example.org/test2/{PERM_FILE}" + file = SyftPermission.from_string(yaml_string, file_path) + set_rules_for_permfile(connection_with_tables, file) + connection_with_tables.commit() + + computed_permission = computed_permission_for_user_and_path( + connection_with_tables, "user@example.org", Path("user@example.org/test2/a.txt") + ) + assert computed_permission.has_permission(PermissionType.READ) + + +def test_get_all_read_permissions_for_user_default( + connection_with_tables: sqlite3.Connection, +): + # Clear existing data + cursor = connection_with_tables.cursor() + + # Insert some example file metadata + insert_file_metadata(cursor=cursor, fileid=1, path="user@example.org/test2/a.txt") + + connection_with_tables.commit() + res = [dict(x) for x in get_read_permissions_for_user(connection_with_tables, "user2@example.org")] + + assert len(res) == 1 + assert res[0]["path"] == "user@example.org/test2/a.txt" + assert not res[0]["read_permission"] + + +def test_get_all_read_permissions_for_owner( + connection_with_tables: sqlite3.Connection, +): + # Clear existing data + cursor = connection_with_tables.cursor() + + # Insert some example file metadata + insert_file_metadata(cursor=cursor, fileid=1, path="user@example.org/test2/a.txt") + + connection_with_tables.commit() + res = [dict(x) for x in get_read_permissions_for_user(connection_with_tables, "user@example.org")] + + assert len(res) == 1 + assert res[0]["path"] == "user@example.org/test2/a.txt" + assert res[0]["read_permission"] + + +def test_single_read_permission(connection_with_tables: sqlite3.Connection): + cursor = connection_with_tables.cursor() + path = "user@example.org/test2/a.txt" + insert_file_metadata(cursor=cursor, fileid=1, path=path) + + insert_rule( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=1, + path="*", + user="*", + can_read=True, + admin=False, + disallow=False, + ) + + insert_rule_files( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=1, + fileid=1, + ) + + connection_with_tables.commit() + res = [dict(x) for x in get_read_permissions_for_user(connection_with_tables, "user@example.org")] + + assert len(res) == 1 + assert res[0]["path"] == path + assert res[0]["read_permission"] + + +def test_single_admin_permission(connection_with_tables: sqlite3.Connection): + cursor = connection_with_tables.cursor() + path = "user@example.org/test2/a.txt" + insert_file_metadata(cursor=cursor, fileid=1, path=path) + + 
insert_rule( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=1, + path="*", + user="*", + can_read=False, + admin=True, + disallow=False, + ) + + insert_rule_files( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=1, + fileid=1, + ) + + connection_with_tables.commit() + res = [dict(x) for x in get_read_permissions_for_user(connection_with_tables, "user@example.org")] + + assert len(res) == 1 + assert res[0]["path"] == path + assert res[0]["read_permission"] + + +def test_disallow_permission(connection_with_tables: sqlite3.Connection): + cursor = connection_with_tables.cursor() + + insert_file_metadata(cursor=cursor, fileid=1, path="user@example.org/test2/a.txt") + + insert_rule( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=1, + path="*", + user="*", + can_read=True, + admin=False, + disallow=False, + ) + + insert_rule( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=2, + path="*", + user="*", + can_read=True, + admin=False, + disallow=True, + ) + + insert_rule_files( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=1, + fileid=1, + ) + + insert_rule_files( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=2, + fileid=1, + ) + + connection_with_tables.commit() + res = [dict(x) for x in get_read_permissions_for_user(connection_with_tables, "user2@example.org")] + + # Print all rule mappings + cursor.execute("SELECT * FROM rule_files") + print("\nRule Mappings:") + for row in cursor.fetchall(): + print(dict(row)) + + # Print all rules + cursor.execute("SELECT * FROM rules") + print("\nRules:") + for row in cursor.fetchall(): + print(dict(row)) + + assert len(res) == 1 + assert res[0]["path"] == "user@example.org/test2/a.txt" + assert not res[0]["read_permission"] + + +def test_inheritance(connection_with_tables: sqlite3.Connection): + cursor = connection_with_tables.cursor() + insert_file_metadata(cursor=cursor, fileid=1, path="user@example.org/test2/subdir/a.txt") + insert_rule( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=1, + path="*", + user="*", + can_read=True, + admin=False, + disallow=False, + ) + + insert_rule( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=2, + path="subdir/*", + user="*", + can_read=True, + admin=False, + disallow=False, + ) + + insert_rule( + cursor=cursor, + permfile_path=f"user@example.org/test2/subdir/{PERM_FILE}", + priority=1, + path="*", + user="*", + can_read=True, + admin=False, + disallow=True, + ) + + insert_rule_files( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=1, + fileid=1, + ) + + insert_rule_files( + cursor=cursor, + permfile_path=f"user@example.org/test2/{PERM_FILE}", + priority=2, + fileid=1, + ) + + insert_rule_files( + cursor=cursor, + permfile_path=f"user@example.org/test2/subdir/{PERM_FILE}", + priority=1, + fileid=1, + ) + + connection_with_tables.commit() + res = [dict(x) for x in get_read_permissions_for_user(connection_with_tables, "otheruser@example.org")] + + assert len(res) == 1 + assert res[0]["path"] == "user@example.org/test2/subdir/a.txt" + assert not res[0]["read_permission"] + + +def test_for_email(connection_with_tables: sqlite3.Connection): + cursor = connection_with_tables.cursor() + # Insert file metadata for specific user email + insert_file_metadata(cursor=cursor, fileid=1, 
path="alice@example.org/test/bob@example.org/data.txt") + + # Insert rule with {useremail} placeholder + insert_rule( + cursor=cursor, + permfile_path=f"alice@example.org/test/{PERM_FILE}", + priority=1, + path="{useremail}/data.txt", + user="*", + can_read=True, + admin=False, + disallow=False, + ) + + # Insert rule_file mapping that only applies for specific email + insert_rule_files( + cursor=cursor, + permfile_path=f"alice@example.org/test/{PERM_FILE}", + priority=1, + fileid=1, + match_for_email="bob@example.org", + ) + + connection_with_tables.commit() + + # Check that bob@example.org has read permission + res = [dict(x) for x in get_read_permissions_for_user(connection_with_tables, "bob@example.org")] + assert len(res) == 1 + assert res[0]["path"] == "alice@example.org/test/bob@example.org/data.txt" + assert res[0]["read_permission"] + + +def test_like_clause(connection_with_tables: sqlite3.Connection): + cursor = connection_with_tables.cursor() + # Insert file metadata for specific user email + insert_file_metadata(cursor=cursor, fileid=1, path="alice@example.org/data.txt") + insert_file_metadata(cursor=cursor, fileid=2, path="bob@example.org/data.txt") + + insert_rule( + cursor=cursor, + permfile_path=f"alice@example.org/{PERM_FILE}", + priority=1, + path="*", + user="*", + can_read=True, + admin=False, + disallow=False, + ) + + insert_rule( + cursor=cursor, + permfile_path=f"bob@example.org/{PERM_FILE}", + priority=1, + path="*", + user="*", + can_read=True, + admin=False, + disallow=False, + ) + + # Insert rule_file mapping that only applies for specific email + insert_rule_files( + cursor=cursor, + permfile_path=f"alice@example.org/{PERM_FILE}", + priority=1, + fileid=1, + ) + # Insert rule_file mapping that only applies for specific email + insert_rule_files( + cursor=cursor, + permfile_path=f"bob@example.org/{PERM_FILE}", + priority=1, + fileid=2, + ) + + connection_with_tables.commit() + + # Check like clause + res = [ + dict(x) + for x in get_read_permissions_for_user( + connection_with_tables, "bob@example.org", path_like="alice@example.org/" + ) + ] + + assert len(res) == 1 + assert res[0]["path"] == "alice@example.org/data.txt" + assert res[0]["read_permission"] diff --git a/packages/syftbox/tests/unit/server/settings_test.py b/packages/syftbox/tests/unit/server/settings_test.py new file mode 100644 index 00000000000..97046ae0c9c --- /dev/null +++ b/packages/syftbox/tests/unit/server/settings_test.py @@ -0,0 +1,14 @@ +import os +from pathlib import Path + +from syftbox.server.settings import ServerSettings + + +def test_server_settings_from_env(): + os.environ["SYFTBOX_DATA_FOLDER"] = "data_folder" + + settings = ServerSettings() + # must be absolute! 
+ assert settings.data_folder == Path("data_folder").resolve() + assert settings.snapshot_folder == Path("data_folder/snapshot").resolve() + assert settings.user_file_path == Path("data_folder/users.json").resolve() diff --git a/packages/syftbox/tests/unit/server/sync_endpoint_test.py b/packages/syftbox/tests/unit/server/sync_endpoint_test.py new file mode 100644 index 00000000000..a4d1d6af85c --- /dev/null +++ b/packages/syftbox/tests/unit/server/sync_endpoint_test.py @@ -0,0 +1,266 @@ +import base64 +import hashlib +from pathlib import Path + +import py_fast_rsync +import pytest +import yaml +from fastapi.testclient import TestClient +from py_fast_rsync import signature + +from syftbox.client.exceptions import SyftServerError +from syftbox.client.server_client import SyncClient +from syftbox.lib.constants import PERM_FILE +from syftbox.server.models.sync_models import ApplyDiffResponse, DiffResponse, FileMetadata +from tests.unit.server.conftest import TEST_DATASITE_NAME, TEST_FILE + + +def test_get_diff_2(client: TestClient): + local_data = b"This is my local data" + sig = signature.calculate(local_data) + sig_b85 = base64.b85encode(sig).decode("utf-8") + response = client.post( + "/sync/get_diff", + json={ + "path": f"{TEST_DATASITE_NAME}/{TEST_FILE}", + "signature": sig_b85, + }, + ) + + response.raise_for_status() + diff_response = DiffResponse.model_validate(response.json()) + remote_diff = diff_response.diff_bytes + probably_remote_data = py_fast_rsync.apply(local_data, remote_diff) + + server_settings = client.app_state["server_settings"] + file_server_contents = server_settings.read(f"{TEST_DATASITE_NAME}/{TEST_FILE}") + assert file_server_contents == probably_remote_data + + +def file_digest(file_path, algorithm="sha256"): + # because this doesnt work in python <=3.10, we implement it manually + hash_func = hashlib.new(algorithm) + + with open(file_path, "rb") as file: + # Read the file in chunks to handle large files efficiently + for chunk in iter(lambda: file.read(4096), b""): + hash_func.update(chunk) + + return hash_func.hexdigest() + + +def test_syft_client_push_flow(client: TestClient): + response = client.post( + "/sync/get_metadata", + json={"path": f"{TEST_DATASITE_NAME}/{TEST_FILE}"}, + ) + + response.raise_for_status() + server_signature_b85 = response.json()["signature"] + server_signature = base64.b85decode(server_signature_b85) + assert server_signature + + local_data = b"This is my local data" + delta = py_fast_rsync.diff(server_signature, local_data) + delta_b85 = base64.b85encode(delta).decode("utf-8") + expected_hash = hashlib.sha256(local_data).hexdigest() + + response = client.post( + "/sync/apply_diff", + json={ + "path": f"{TEST_DATASITE_NAME}/{TEST_FILE}", + "diff": delta_b85, + "expected_hash": expected_hash, + }, + ) + + response.raise_for_status() + + result = response.json() + snapshot_folder = client.app_state["server_settings"].snapshot_folder + sha256local = file_digest(f"{snapshot_folder}/{TEST_DATASITE_NAME}/{TEST_FILE}", "sha256") + assert result["current_hash"] == expected_hash == sha256local + + +def test_get_remote_state(sync_client: SyncClient): + metadata = sync_client.get_remote_state(Path(TEST_DATASITE_NAME)) + + assert len(metadata) == 3 + + +def test_get_metadata(sync_client: SyncClient): + metadata = sync_client.get_metadata(Path(TEST_DATASITE_NAME) / TEST_FILE) + assert metadata.path == Path(TEST_DATASITE_NAME) / TEST_FILE + + # check serde works + assert isinstance(metadata.hash_bytes, bytes) + assert 
isinstance(metadata.signature_bytes, bytes) + + +def test_apply_diff(sync_client: SyncClient): + local_data = b"This is my local data" + + remote_metadata = sync_client.get_metadata(Path(TEST_DATASITE_NAME) / TEST_FILE) + + diff = py_fast_rsync.diff(remote_metadata.signature_bytes, local_data) + expected_hash = hashlib.sha256(local_data).hexdigest() + + # Apply local_data to server + response = sync_client.apply_diff(Path(TEST_DATASITE_NAME) / TEST_FILE, diff, expected_hash) + assert response.current_hash == expected_hash + + # check file was written correctly + snapshot_folder = sync_client.conn.app_state["server_settings"].snapshot_folder + snapshot_file_path = snapshot_folder / Path(TEST_DATASITE_NAME) / TEST_FILE + remote_data = snapshot_file_path.read_bytes() + assert local_data == remote_data + + # another diff with incorrect hash + remote_metadata = sync_client.get_metadata(Path(TEST_DATASITE_NAME) / TEST_FILE) + diff = py_fast_rsync.diff(remote_metadata.signature_bytes, local_data) + wrong_hash = "wrong_hash" + + with pytest.raises(SyftServerError): + sync_client.apply_diff(Path(TEST_DATASITE_NAME) / TEST_FILE, diff, wrong_hash) + + +def test_get_diff(sync_client: SyncClient): + local_data = b"This is my local data" + sig = signature.calculate(local_data) + + file_path = Path(TEST_DATASITE_NAME) / TEST_FILE + response = sync_client.get_diff(file_path, sig) + assert response.path == file_path + + # apply and check hash + new_data = py_fast_rsync.apply(local_data, base64.b85decode(response.diff)) + new_hash = hashlib.sha256(new_data).hexdigest() + + assert new_hash == response.hash + + # diff nonexistent file + file_path = Path(TEST_DATASITE_NAME) / "nonexistent_file.txt" + with pytest.raises(SyftServerError): + sync_client.get_diff(file_path, sig) + + +def test_delete_file(sync_client: SyncClient): + sync_client.delete(Path(TEST_DATASITE_NAME) / TEST_FILE) + + snapshot_folder = sync_client.conn.app_state["server_settings"].snapshot_folder + path = Path(f"{snapshot_folder}/{TEST_DATASITE_NAME}/{TEST_FILE}") + assert not path.exists() + + with pytest.raises(SyftServerError): + sync_client.get_metadata(Path(TEST_DATASITE_NAME) / TEST_FILE) + + +def test_create_file(sync_client: SyncClient): + snapshot_folder = sync_client.conn.app_state["server_settings"].snapshot_folder + new_fname = "new.txt" + contents = b"Some content" + path = Path(f"{snapshot_folder}/{TEST_DATASITE_NAME}/{new_fname}") + assert not path.exists() + + with open(path, "wb") as f: + f.write(contents) + + with open(path, "rb") as f: + sync_client.create(relative_path=Path(TEST_DATASITE_NAME) / new_fname, data=f.read()) + assert path.exists() + + +def test_create_permfile(sync_client: SyncClient): + invalid_contents = b"wrong permfile" + folder = "test" + relative_path = Path(TEST_DATASITE_NAME) / folder / PERM_FILE + + # invalid + with pytest.raises(SyftServerError): + sync_client.create(relative_path=relative_path, data=invalid_contents) + + # valid + valid_contents = yaml.safe_dump( + [ + { + "path": "a", + "user": "*", + "permissions": ["write"], + } + ] + ).encode() + sync_client.create(relative_path=relative_path, data=valid_contents) + + +def test_update_permfile_success(sync_client: SyncClient): + local_data = yaml.safe_dump( + [ + { + "path": "a", + "user": "*", + "permissions": ["write"], + } + ] + ).encode() + + remote_metadata = sync_client.get_metadata(Path(TEST_DATASITE_NAME) / PERM_FILE) + + diff = py_fast_rsync.diff(remote_metadata.signature_bytes, local_data) + expected_hash = 
hashlib.sha256(local_data).hexdigest() + + response = sync_client.apply_diff(Path(TEST_DATASITE_NAME) / PERM_FILE, diff, expected_hash) + assert isinstance(response, ApplyDiffResponse) + + +def test_update_permfile_failure(sync_client: SyncClient): + local_data = ( + b'3gwrehtytrterfewdw ["x@x.org"], "read": ["x@x.org"], "write": ["x@x.org"], "filepath": "~/syftperm.yaml",}' + ) + + remote_metadata = sync_client.get_metadata(Path(TEST_DATASITE_NAME) / PERM_FILE) + + diff = py_fast_rsync.diff(remote_metadata.signature_bytes, local_data) + expected_hash = hashlib.sha256(local_data).hexdigest() + + with pytest.raises(SyftServerError): + sync_client.apply_diff(Path(TEST_DATASITE_NAME) / PERM_FILE, diff, expected_hash) + + +def test_list_datasites(client: TestClient): + response = client.post("/sync/datasites") + + response.raise_for_status() + + +def test_get_all_datasite_states(sync_client: SyncClient): + response = sync_client.get_datasite_states() + assert len(response) == 1 + + metadatas = response[TEST_DATASITE_NAME] + assert len(metadatas) == 3 + assert all(isinstance(m, FileMetadata) for m in metadatas) + + +def test_download_snapshot(sync_client: SyncClient, tmpdir: Path): + tmpdir = Path(tmpdir) + metadata = sync_client.get_remote_state(Path(TEST_DATASITE_NAME)) + paths = [m.path for m in metadata] + filelist = sync_client.download_files_streaming(paths, tmpdir) + assert len(filelist) == 3 + + +def test_whoami(client: TestClient): + response = client.post("/auth/whoami") + response.raise_for_status() + assert response.json() == {"email": TEST_DATASITE_NAME} + + +def test_large_file_failure(client: TestClient): + large_data = b"0" * 1024 * 1024 * 11 # 11MB + response = client.post( + "/sync/create", + files={"file": ("large.txt", large_data, "text/plain")}, + ) + + assert response.status_code == 413 + assert response.text == "Request Denied. 
Message size is greater than 10 MB" diff --git a/packages/syftbox/tox.ini b/packages/syftbox/tox.ini new file mode 100644 index 00000000000..4e9fffeeaec --- /dev/null +++ b/packages/syftbox/tox.ini @@ -0,0 +1,67 @@ +[tox] +envlist = + syft.test.unit + syft.jupyter +requires = + # `pip install tox tox-uv` OR `uv tool install tox --with tox-uv` + tox-uv + +[testenv] +runner = uv-venv-lock-runner +with_dev = True +allowlist_externals = + pytest +commands = + python --version +setenv = + UV_HTTP_TIMEOUT = 600 + +[testenv:syft.test.unit] +description = Syft Unit Tests +commands = + uv --version + python --version + pytest --version + python -m compileall -qf ./syftbox ./default_apps ./tests + pytest -n auto --disable-warnings \ + --cov=syftbox --cov-fail-under=50 \ + --cov-report term --cov-report html \ + --timeout=90 --durations=5 \ + tests/unit + +[testenv:syft.test.integration] +description = Syft Integration Tests +commands = + uv --version + python --version + pytest --version + pytest -n auto --disable-warnings \ + tests/integration -vv + +[testenv:syft.test.stress] +description = Syft Stress Tests +changedir = {toxinidir}/tests/stress +commands = + locust + +[testenv:syft.jupyter] +description = Jupyter Notebook with Editable Syft +allowlist_externals = + just +commands = + just run-jupyter {posargs} + +[mypy] +python_version = 3.12 +files = syftbox/ +ignore_missing_imports = True +scripts_are_modules = True +disallow_incomplete_defs = True +no_implicit_optional = True +warn_unused_ignores = True +warn_redundant_casts = True +strict_equality = True +warn_unreachable = True +disallow_untyped_defs = True +disallow_untyped_calls = True +namespace_packages = True diff --git a/packages/syftbox/uv.lock b/packages/syftbox/uv.lock new file mode 100644 index 00000000000..df7bcb293b1 --- /dev/null +++ b/packages/syftbox/uv.lock @@ -0,0 +1,3296 @@ +version = 1 +requires-python = ">=3.9" +resolution-markers = [ + "python_full_version <= '3.11'", + "python_full_version > '3.11' and python_full_version <= '3.12'", + "python_full_version > '3.12' and python_full_version <= '3.13'", + "python_full_version > '3.13'", +] + +[[package]] +name = "aiofiles" +version = "24.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/03/a88171e277e8caa88a4c77808c20ebb04ba74cc4681bf1e9416c862de237/aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c", size = 30247 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/45/30bb92d442636f570cb5651bc661f52b610e2eec3f891a5dc3a4c3667db0/aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5", size = 15896 }, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7f/55/e4373e888fdacb15563ef6fa9fa8c8252476ea071e96fb46defac9f18bf2/aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745", size = 21977 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/74/fbb6559de3607b3300b9be3cc64e97548d55678e44623db17820dbd20002/aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8", size = 14756 }, +] + +[[package]] +name = "aiohttp" +version = "3.11.10" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" 
}, + { name = "aiosignal" }, + { name = "async-timeout", marker = "python_full_version < '3.11'" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/c4/3b5a937b16f6c2a0ada842a9066aad0b7a5708427d4a202a07bf09c67cbb/aiohttp-3.11.10.tar.gz", hash = "sha256:b1fc6b45010a8d0ff9e88f9f2418c6fd408c99c211257334aff41597ebece42e", size = 7668832 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/f2/ba44492f257a296c4bb910bf47acf41672421fd455540911b3f13d10d6cd/aiohttp-3.11.10-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cbad88a61fa743c5d283ad501b01c153820734118b65aee2bd7dbb735475ce0d", size = 708322 }, + { url = "https://files.pythonhosted.org/packages/2b/c7/22b0ed548c8660e978e736671f166907fb272d0a4281b2b6833310bce529/aiohttp-3.11.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80886dac673ceaef499de2f393fc80bb4481a129e6cb29e624a12e3296cc088f", size = 468211 }, + { url = "https://files.pythonhosted.org/packages/c9/0b/d326251888bb86ff7cb00b171e1cf3b0f0ed695622857f84a98bbc5f254b/aiohttp-3.11.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:61b9bae80ed1f338c42f57c16918853dc51775fb5cb61da70d590de14d8b5fb4", size = 455370 }, + { url = "https://files.pythonhosted.org/packages/4e/83/28feef5a0bda728adf76e0d076566c26c6da3d29f0ccd998d07c260cae9d/aiohttp-3.11.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e2e576caec5c6a6b93f41626c9c02fc87cd91538b81a3670b2e04452a63def6", size = 1584399 }, + { url = "https://files.pythonhosted.org/packages/dc/97/6bdd39c4134ef243ffa9fd19a072ac9a0758d64b6d51eaaaaa34e67b8bcb/aiohttp-3.11.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02c13415b5732fb6ee7ff64583a5e6ed1c57aa68f17d2bda79c04888dfdc2769", size = 1632131 }, + { url = "https://files.pythonhosted.org/packages/1b/f1/8c3a1623b9d526986f03d8158c9c856e00531217998275cc6b4a14b2fb85/aiohttp-3.11.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfce37f31f20800a6a6620ce2cdd6737b82e42e06e6e9bd1b36f546feb3c44f", size = 1668081 }, + { url = "https://files.pythonhosted.org/packages/9c/3e/a2f4cee0dca934b1d2c4b6a7821040ce4452b9b2e4347c9be6cb10eaa835/aiohttp-3.11.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3bbbfff4c679c64e6e23cb213f57cc2c9165c9a65d63717108a644eb5a7398df", size = 1589313 }, + { url = "https://files.pythonhosted.org/packages/fd/9c/93e9a8f39c78f0c6d938721101e28c57597046f78057ffced8a3fd571839/aiohttp-3.11.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49c7dbbc1a559ae14fc48387a115b7d4bbc84b4a2c3b9299c31696953c2a5219", size = 1544349 }, + { url = "https://files.pythonhosted.org/packages/68/d2/2054efe02be87a1af92cfcaf6875d7b2c34906c3ee2b90ce82afbc8927a5/aiohttp-3.11.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:68386d78743e6570f054fe7949d6cb37ef2b672b4d3405ce91fafa996f7d9b4d", size = 1529018 }, + { url = "https://files.pythonhosted.org/packages/10/b0/a258bfd5ddd3d9c871a8d24e96531cb6e6f0cd98dc3028f0b98302454b23/aiohttp-3.11.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9ef405356ba989fb57f84cac66f7b0260772836191ccefbb987f414bcd2979d9", size = 1536357 }, + { url = "https://files.pythonhosted.org/packages/76/7f/8b60b93e7dc58d371813a9b8d451b7c9c9c4350f9c505edf6fae80e0812b/aiohttp-3.11.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:5d6958671b296febe7f5f859bea581a21c1d05430d1bbdcf2b393599b1cdce77", size = 1607214 }, + { url = "https://files.pythonhosted.org/packages/2a/10/97a11dba0f6d16878164b92ce75e2e0196a2fd25560cae8283388a24289b/aiohttp-3.11.10-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:99b7920e7165be5a9e9a3a7f1b680f06f68ff0d0328ff4079e5163990d046767", size = 1628573 }, + { url = "https://files.pythonhosted.org/packages/45/66/70419d6cb9495ddcebfa54d3db07e6a9716049ef341ded1edd8982f9b7f9/aiohttp-3.11.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0dc49f42422163efb7e6f1df2636fe3db72713f6cd94688e339dbe33fe06d61d", size = 1564058 }, + { url = "https://files.pythonhosted.org/packages/2d/d6/d94506afaea3aca15ab3f4732d666ad80acd5a035a7478aa6377c9816cf3/aiohttp-3.11.10-cp310-cp310-win32.whl", hash = "sha256:40d1c7a7f750b5648642586ba7206999650208dbe5afbcc5284bcec6579c9b91", size = 416360 }, + { url = "https://files.pythonhosted.org/packages/55/03/731d1116d09ea7a3c6be731ab0eb1faa37b844d3e54fed28e3a6785ba5ab/aiohttp-3.11.10-cp310-cp310-win_amd64.whl", hash = "sha256:68ff6f48b51bd78ea92b31079817aff539f6c8fc80b6b8d6ca347d7c02384e33", size = 441763 }, + { url = "https://files.pythonhosted.org/packages/db/7c/584d5ca19343c9462d054337828f72628e6dc204424f525df59ebfe75d1e/aiohttp-3.11.10-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:77c4aa15a89847b9891abf97f3d4048f3c2d667e00f8a623c89ad2dccee6771b", size = 708395 }, + { url = "https://files.pythonhosted.org/packages/cd/2d/61c33e01baeb23aebd07620ee4d780ff40f4c17c42289bf02a405f2ac312/aiohttp-3.11.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:909af95a72cedbefe5596f0bdf3055740f96c1a4baa0dd11fd74ca4de0b4e3f1", size = 468281 }, + { url = "https://files.pythonhosted.org/packages/ab/70/0ddb3a61b835068eb0badbe8016b4b65b966bad5f8af0f2d63998ff4cfa4/aiohttp-3.11.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:386fbe79863eb564e9f3615b959e28b222259da0c48fd1be5929ac838bc65683", size = 455345 }, + { url = "https://files.pythonhosted.org/packages/44/8c/4e14e9c1767d9a6ab1af1fbad9df9c77e050b39b6afe9e8343ec1ba96508/aiohttp-3.11.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3de34936eb1a647aa919655ff8d38b618e9f6b7f250cc19a57a4bf7fd2062b6d", size = 1685464 }, + { url = "https://files.pythonhosted.org/packages/ef/6e/1bab78ebb4f5a1c54f0fc10f8d52abc06816a9cb1db52b9c908e3d69f9a8/aiohttp-3.11.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c9527819b29cd2b9f52033e7fb9ff08073df49b4799c89cb5754624ecd98299", size = 1743427 }, + { url = "https://files.pythonhosted.org/packages/5d/5e/c1b03bef621a8cc51ff551ef223c6ac606fabe0e35c950f56d01423ec2aa/aiohttp-3.11.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65a96e3e03300b41f261bbfd40dfdbf1c301e87eab7cd61c054b1f2e7c89b9e8", size = 1785188 }, + { url = "https://files.pythonhosted.org/packages/7c/b8/df6d76a149cbd969a58da478baec0be617287c496c842ddf21fe6bce07b3/aiohttp-3.11.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f5635f7b74bcd4f6f72fcd85bea2154b323a9f05226a80bc7398d0c90763b0", size = 1674911 }, + { url = "https://files.pythonhosted.org/packages/ee/8e/e460e7bb820a08cec399971fc3176afc8090dc32fb941f386e0c68bc4ecc/aiohttp-3.11.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:03b6002e20938fc6ee0918c81d9e776bebccc84690e2b03ed132331cca065ee5", size = 1619570 }, + { url = 
"https://files.pythonhosted.org/packages/c2/ae/3b597e09eae4e75b77ee6c65443593d245bfa067ae6a5d895abaf27cce6c/aiohttp-3.11.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6362cc6c23c08d18ddbf0e8c4d5159b5df74fea1a5278ff4f2c79aed3f4e9f46", size = 1653772 }, + { url = "https://files.pythonhosted.org/packages/b8/d1/99852f2925992c4d7004e590344e5398eb163750de2a7c1fbe07f182d3c8/aiohttp-3.11.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3691ed7726fef54e928fe26344d930c0c8575bc968c3e239c2e1a04bd8cf7838", size = 1649787 }, + { url = "https://files.pythonhosted.org/packages/39/c0/ea24627e08d722d5a6a00b3f6c9763fe3ad4650b8485f7a7a56ff932e3af/aiohttp-3.11.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31d5093d3acd02b31c649d3a69bb072d539d4c7659b87caa4f6d2bcf57c2fa2b", size = 1732666 }, + { url = "https://files.pythonhosted.org/packages/f1/27/ab52dee4443ef8bdb26473b53c841caafd2bb637a8d85751694e089913bb/aiohttp-3.11.10-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:8b3cf2dc0f0690a33f2d2b2cb15db87a65f1c609f53c37e226f84edb08d10f52", size = 1754910 }, + { url = "https://files.pythonhosted.org/packages/cd/08/57c919d6b1f3b70bc14433c080a6152bf99454b636eb8a88552de8baaca9/aiohttp-3.11.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:fbbaea811a2bba171197b08eea288b9402faa2bab2ba0858eecdd0a4105753a3", size = 1692502 }, + { url = "https://files.pythonhosted.org/packages/ae/37/015006f669275735049e0549c37cb79c7a4a9350cbee070bbccb5a5b4b8a/aiohttp-3.11.10-cp311-cp311-win32.whl", hash = "sha256:4b2c7ac59c5698a7a8207ba72d9e9c15b0fc484a560be0788b31312c2c5504e4", size = 416178 }, + { url = "https://files.pythonhosted.org/packages/cf/8d/7bb48ae503989b15114baf9f9b19398c86ae93d30959065bc061b31331ee/aiohttp-3.11.10-cp311-cp311-win_amd64.whl", hash = "sha256:974d3a2cce5fcfa32f06b13ccc8f20c6ad9c51802bb7f829eae8a1845c4019ec", size = 442269 }, + { url = "https://files.pythonhosted.org/packages/25/17/1dbe2f619f77795409c1a13ab395b98ed1b215d3e938cacde9b8ffdac53d/aiohttp-3.11.10-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b78f053a7ecfc35f0451d961dacdc671f4bcbc2f58241a7c820e9d82559844cf", size = 704448 }, + { url = "https://files.pythonhosted.org/packages/e3/9b/112247ad47e9d7f6640889c6e42cc0ded8c8345dd0033c66bcede799b051/aiohttp-3.11.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab7485222db0959a87fbe8125e233b5a6f01f4400785b36e8a7878170d8c3138", size = 463829 }, + { url = "https://files.pythonhosted.org/packages/8a/36/a64b583771fc673062a7a1374728a6241d49e2eda5a9041fbf248e18c804/aiohttp-3.11.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cf14627232dfa8730453752e9cdc210966490992234d77ff90bc8dc0dce361d5", size = 455774 }, + { url = "https://files.pythonhosted.org/packages/e5/75/ee1b8f510978b3de5f185c62535b135e4fc3f5a247ca0c2245137a02d800/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:076bc454a7e6fd646bc82ea7f98296be0b1219b5e3ef8a488afbdd8e81fbac50", size = 1682134 }, + { url = "https://files.pythonhosted.org/packages/87/46/65e8259432d5f73ca9ebf5edb645ef90e5303724e4e52477516cb4042240/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:482cafb7dc886bebeb6c9ba7925e03591a62ab34298ee70d3dd47ba966370d2c", size = 1736757 }, + { url = "https://files.pythonhosted.org/packages/03/f6/a6d1e791b7153fb2d101278f7146c0771b0e1569c547f8a8bc3035651984/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:bf3d1a519a324af764a46da4115bdbd566b3c73fb793ffb97f9111dbc684fc4d", size = 1793033 }, + { url = "https://files.pythonhosted.org/packages/a8/e9/1ac90733e36e7848693aece522936a13bf17eeb617da662f94adfafc1c25/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24213ba85a419103e641e55c27dc7ff03536c4873470c2478cce3311ba1eee7b", size = 1691609 }, + { url = "https://files.pythonhosted.org/packages/6d/a6/77b33da5a0bc04566c7ddcca94500f2c2a2334eecab4885387fffd1fc600/aiohttp-3.11.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b99acd4730ad1b196bfb03ee0803e4adac371ae8efa7e1cbc820200fc5ded109", size = 1619082 }, + { url = "https://files.pythonhosted.org/packages/48/94/5bf5f927d9a2fedd2c978adfb70a3680e16f46d178361685b56244eb52ed/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:14cdb5a9570be5a04eec2ace174a48ae85833c2aadc86de68f55541f66ce42ab", size = 1641186 }, + { url = "https://files.pythonhosted.org/packages/99/2d/e85103aa01d1064e51bc50cb51e7b40150a8ff5d34e5a3173a46b241860b/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7e97d622cb083e86f18317282084bc9fbf261801b0192c34fe4b1febd9f7ae69", size = 1646280 }, + { url = "https://files.pythonhosted.org/packages/7b/e0/44651fda8c1d865a51b3a81f1956ea55ce16fc568fe7a3e05db7fc22f139/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:012f176945af138abc10c4a48743327a92b4ca9adc7a0e078077cdb5dbab7be0", size = 1701862 }, + { url = "https://files.pythonhosted.org/packages/4e/1e/0804459ae325a5b95f6f349778fb465f29d2b863e522b6a349db0aaad54c/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44224d815853962f48fe124748227773acd9686eba6dc102578defd6fc99e8d9", size = 1734373 }, + { url = "https://files.pythonhosted.org/packages/07/87/b8f6721668cad74bcc9c7cfe6d0230b304d1250196b221e54294a0d78dbe/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c87bf31b7fdab94ae3adbe4a48e711bfc5f89d21cf4c197e75561def39e223bc", size = 1694343 }, + { url = "https://files.pythonhosted.org/packages/4b/20/42813fc60d9178ba9b1b86c58a5441ddb6cf8ffdfe66387345bff173bcff/aiohttp-3.11.10-cp312-cp312-win32.whl", hash = "sha256:06a8e2ee1cbac16fe61e51e0b0c269400e781b13bcfc33f5425912391a542985", size = 411118 }, + { url = "https://files.pythonhosted.org/packages/3a/51/df9c263c861ce93998b5ad2ba3212caab2112d5b66dbe91ddbe90c41ded4/aiohttp-3.11.10-cp312-cp312-win_amd64.whl", hash = "sha256:be2b516f56ea883a3e14dda17059716593526e10fb6303189aaf5503937db408", size = 437424 }, + { url = "https://files.pythonhosted.org/packages/8c/1d/88bfdbe28a3d1ba5b94a235f188f27726caf8ade9a0e13574848f44fe0fe/aiohttp-3.11.10-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8cc5203b817b748adccb07f36390feb730b1bc5f56683445bfe924fc270b8816", size = 697755 }, + { url = "https://files.pythonhosted.org/packages/86/00/4c4619d6fe5c5be32f74d1422fc719b3e6cd7097af0c9e03877ca9bd4ebc/aiohttp-3.11.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5ef359ebc6949e3a34c65ce20230fae70920714367c63afd80ea0c2702902ccf", size = 460440 }, + { url = "https://files.pythonhosted.org/packages/aa/1c/2f927408f50593a29465d198ec3c57c835c8602330233163e8d89c1093db/aiohttp-3.11.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9bca390cb247dbfaec3c664326e034ef23882c3f3bfa5fbf0b56cad0320aaca5", size = 452726 }, + { url = 
"https://files.pythonhosted.org/packages/06/6a/ff00ed0a2ba45c34b3c366aa5b0004b1a4adcec5a9b5f67dd0648ee1c88a/aiohttp-3.11.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811f23b3351ca532af598405db1093f018edf81368e689d1b508c57dcc6b6a32", size = 1664944 }, + { url = "https://files.pythonhosted.org/packages/02/c2/61923f2a7c2e14d7424b3a526e054f0358f57ccdf5573d4d3d033b01921a/aiohttp-3.11.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddf5f7d877615f6a1e75971bfa5ac88609af3b74796ff3e06879e8422729fd01", size = 1717707 }, + { url = "https://files.pythonhosted.org/packages/8a/08/0d3d074b24d377569ec89d476a95ca918443099c0401bb31b331104e35d1/aiohttp-3.11.10-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6ab29b8a0beb6f8eaf1e5049252cfe74adbaafd39ba91e10f18caeb0e99ffb34", size = 1774890 }, + { url = "https://files.pythonhosted.org/packages/e8/49/052ada2b6e90ed65f0e6a7e548614621b5f8dcd193cb9415d2e6bcecc94a/aiohttp-3.11.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c49a76c1038c2dd116fa443eba26bbb8e6c37e924e2513574856de3b6516be99", size = 1676945 }, + { url = "https://files.pythonhosted.org/packages/7c/9e/0c48e1a48e072a869b8b5e3920c9f6a8092861524a4a6f159cd7e6fda939/aiohttp-3.11.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f3dc0e330575f5b134918976a645e79adf333c0a1439dcf6899a80776c9ab39", size = 1602959 }, + { url = "https://files.pythonhosted.org/packages/ab/98/791f979093ff7f67f80344c182cb0ca4c2c60daed397ecaf454cc8d7a5cd/aiohttp-3.11.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:efb15a17a12497685304b2d976cb4939e55137df7b09fa53f1b6a023f01fcb4e", size = 1618058 }, + { url = "https://files.pythonhosted.org/packages/7b/5d/2d4b05feb3fd68eb7c8335f73c81079b56e582633b91002da695ccb439ef/aiohttp-3.11.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:db1d0b28fcb7f1d35600150c3e4b490775251dea70f894bf15c678fdd84eda6a", size = 1616289 }, + { url = "https://files.pythonhosted.org/packages/50/83/68cc28c00fe681dce6150614f105efe98282da19252cd6e32dfa893bb328/aiohttp-3.11.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:15fccaf62a4889527539ecb86834084ecf6e9ea70588efde86e8bc775e0e7542", size = 1685239 }, + { url = "https://files.pythonhosted.org/packages/16/f9/68fc5c8928f63238ce9314f04f3f59d9190a4db924998bb9be99c7aacce8/aiohttp-3.11.10-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:593c114a2221444f30749cc5e5f4012488f56bd14de2af44fe23e1e9894a9c60", size = 1715078 }, + { url = "https://files.pythonhosted.org/packages/3f/e0/3dd3f0451c532c77e35780bafb2b6469a046bc15a6ec2e039475a1d2f161/aiohttp-3.11.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7852bbcb4d0d2f0c4d583f40c3bc750ee033265d80598d0f9cb6f372baa6b836", size = 1672544 }, + { url = "https://files.pythonhosted.org/packages/a5/b1/3530ab040dd5d7fb016b47115016f9b3a07ea29593b0e07e53dbe06a380c/aiohttp-3.11.10-cp313-cp313-win32.whl", hash = "sha256:65e55ca7debae8faaffee0ebb4b47a51b4075f01e9b641c31e554fd376595c6c", size = 409984 }, + { url = "https://files.pythonhosted.org/packages/49/1f/deed34e9fca639a7f873d01150d46925d3e1312051eaa591c1aa1f2e6ddc/aiohttp-3.11.10-cp313-cp313-win_amd64.whl", hash = "sha256:beb39a6d60a709ae3fb3516a1581777e7e8b76933bb88c8f4420d875bb0267c6", size = 435837 }, + { url = "https://files.pythonhosted.org/packages/1f/4b/60725fcffe8af2ff2e9c0aaef20a89b11cb8fa1d453abd951e64151db4c9/aiohttp-3.11.10-cp39-cp39-macosx_10_9_universal2.whl", 
hash = "sha256:0580f2e12de2138f34debcd5d88894786453a76e98febaf3e8fe5db62d01c9bf", size = 709195 }, + { url = "https://files.pythonhosted.org/packages/6f/5d/81a920e34bb43cd8d6e35b68e62c2ab1597826c6511d6ec5c5c99a4595b5/aiohttp-3.11.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a55d2ad345684e7c3dd2c20d2f9572e9e1d5446d57200ff630e6ede7612e307f", size = 468687 }, + { url = "https://files.pythonhosted.org/packages/79/58/de3da0f281460c3c415b2d1fe0d09137612dfcd7d0070837df14f9f3ef9f/aiohttp-3.11.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04814571cb72d65a6899db6099e377ed00710bf2e3eafd2985166f2918beaf59", size = 455744 }, + { url = "https://files.pythonhosted.org/packages/99/cc/18d24ffb6b33071e295707ee5b0133bea46bc84b5c0c0606586855ed69bc/aiohttp-3.11.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e44a9a3c053b90c6f09b1bb4edd880959f5328cf63052503f892c41ea786d99f", size = 1587161 }, + { url = "https://files.pythonhosted.org/packages/19/66/4430ef0ba5c88559bc18abeda095fce0225e4fae618c7de0ed6d952ffc47/aiohttp-3.11.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:502a1464ccbc800b4b1995b302efaf426e8763fadf185e933c2931df7db9a199", size = 1636007 }, + { url = "https://files.pythonhosted.org/packages/0f/b8/10a83d1d0dc9b90c461a58041d8bb0b00f68c6cf07fedf74f1a171383cfa/aiohttp-3.11.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:613e5169f8ae77b1933e42e418a95931fb4867b2991fc311430b15901ed67079", size = 1672088 }, + { url = "https://files.pythonhosted.org/packages/5b/cc/7a8fadec9610b11af3c65944666e0702c5a8a8f5632c60b2b198c6180a45/aiohttp-3.11.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cca22a61b7fe45da8fc73c3443150c3608750bbe27641fc7558ec5117b27fdf", size = 1589287 }, + { url = "https://files.pythonhosted.org/packages/16/12/62f6058e0a9cf09a14a002594da02134ee1eb6cd404e1e379034f38cf589/aiohttp-3.11.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86a5dfcc39309470bd7b68c591d84056d195428d5d2e0b5ccadfbaf25b026ebc", size = 1543823 }, + { url = "https://files.pythonhosted.org/packages/b9/ef/fc5bfe84911484092026f6399dfa7227f3d1839e416b9b3c121a7fbcabfb/aiohttp-3.11.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:77ae58586930ee6b2b6f696c82cf8e78c8016ec4795c53e36718365f6959dc82", size = 1529984 }, + { url = "https://files.pythonhosted.org/packages/91/b0/c491bd8509501f5fb83795df2363544ac7aaa35be842f4d7fd5e83beed0d/aiohttp-3.11.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:78153314f26d5abef3239b4a9af20c229c6f3ecb97d4c1c01b22c4f87669820c", size = 1535846 }, + { url = "https://files.pythonhosted.org/packages/bb/8c/f9cd0e127b7b0044138f57ab531fbfac6a8786e6bbcfdee0fbf254ddfefd/aiohttp-3.11.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:98283b94cc0e11c73acaf1c9698dea80c830ca476492c0fe2622bd931f34b487", size = 1606682 }, + { url = "https://files.pythonhosted.org/packages/c5/56/ac432399cb7f9ab1babd8b41c24edde58a35cc9736dacafcb9c582a26c0f/aiohttp-3.11.10-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:53bf2097e05c2accc166c142a2090e4c6fd86581bde3fd9b2d3f9e93dda66ac1", size = 1628868 }, + { url = "https://files.pythonhosted.org/packages/73/73/69b6568b0774ef5905fe69d4e53c7602c5454550dbb927f002f21d9a28fb/aiohttp-3.11.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5532f0441fc09c119e1dca18fbc0687e64fbeb45aa4d6a87211ceaee50a74c4", size = 1567107 }, + { url = 
"https://files.pythonhosted.org/packages/f8/8a/34d119e6513179d7d5a7f5bdacf3a775445837c78b3b5f323e6413a88188/aiohttp-3.11.10-cp39-cp39-win32.whl", hash = "sha256:47ad15a65fb41c570cd0ad9a9ff8012489e68176e7207ec7b82a0940dddfd8be", size = 416669 }, + { url = "https://files.pythonhosted.org/packages/a5/fb/c5b72bb6fa02660447fdfd0d8aa77fab3c64cf3690b4d7fe490ced18c57a/aiohttp-3.11.10-cp39-cp39-win_amd64.whl", hash = "sha256:c6b9e6d7e41656d78e37ce754813fa44b455c3d0d0dced2a047def7dc5570b74", size = 441979 }, +] + +[[package]] +name = "aiohttp-jinja2" +version = "1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "jinja2" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e6/39/da5a94dd89b1af7241fb7fc99ae4e73505b5f898b540b6aba6dc7afe600e/aiohttp-jinja2-1.6.tar.gz", hash = "sha256:a3a7ff5264e5bca52e8ae547bbfd0761b72495230d438d05b6c0915be619b0e2", size = 53057 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/90/65238d4246307195411b87a07d03539049819b022c01bcc773826f600138/aiohttp_jinja2-1.6-py3-none-any.whl", hash = "sha256:0df405ee6ad1b58e5a068a105407dc7dcc1704544c559f1938babde954f945c7", size = 11736 }, +] + +[[package]] +name = "aiosignal" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ba/b5/6d55e80f6d8a08ce22b982eafa278d823b541c925f11ee774b0b9c43473d/aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54", size = 19424 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597 }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 }, +] + +[[package]] +name = "anyio" +version = "4.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f6/40/318e58f669b1a9e00f5c4453910682e2d9dd594334539c7b7817dabb765f/anyio-4.7.0.tar.gz", hash = "sha256:2f834749c602966b7d456a7567cafcb309f96482b5081d14ac93ccd457f9dd48", size = 177076 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/7a/4daaf3b6c08ad7ceffea4634ec206faeff697526421c20f07628c7372156/anyio-4.7.0-py3-none-any.whl", hash = "sha256:ea60c3723ab42ba6fff7e8ccb0488c898ec538ff4df1f1d5e642c3601d07e352", size = 93052 }, +] + +[[package]] +name = "appnope" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/35/5d/752690df9ef5b76e169e68d6a129fa6d08a7100ca7f754c89495db3c6019/appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee", size = 4170 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/29/5ecc3a15d5a33e31b26c11426c45c501e439cb865d0bff96315d86443b78/appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c", size = 4321 }, +] + +[[package]] +name = "asgiref" +version = "3.8.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/29/38/b3395cc9ad1b56d2ddac9970bc8f4141312dbaec28bc7c218b0dfafd0f42/asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590", size = 35186 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/e3/893e8757be2612e6c266d9bb58ad2e3651524b5b40cf56761e985a28b13e/asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47", size = 23828 }, +] + +[[package]] +name = "asttokens" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 }, +] + +[[package]] +name = "async-timeout" +version = "5.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a5/ae/136395dfbfe00dfc94da3f3e136d0b13f394cba8f4841120e34226265780/async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3", size = 9274 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/ba/e2081de779ca30d473f21f5b30e0e737c438205440784c7dfc81efc2b029/async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c", size = 6233 }, +] + +[[package]] +name = "attrs" +version = "24.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/48/c8/6260f8ccc11f0917360fc0da435c5c9c7504e3db174d5a12a1494887b045/attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff", size = 805984 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/aa/ab0f7891a01eeb2d2e338ae8fecbe57fcebea1a24dbb64d45801bfab481d/attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308", size = 63397 }, +] + +[[package]] +name = "blinker" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/28/9b3f50ce0e048515135495f198351908d99540d69bfdc8c1d15b73dc55ce/blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf", size = 22460 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc", size = 8458 }, +] + +[[package]] +name = "bracex" +version = "2.5.post1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/6c/57418c4404cd22fe6275b8301ca2b46a8cdaa8157938017a9ae0b3edf363/bracex-2.5.post1.tar.gz", hash = "sha256:12c50952415bfa773d2d9ccb8e79651b8cdb1f31a42f6091b804f6ba2b4a66b6", size = 26641 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/02/8db98cdc1a58e0abd6716d5e63244658e6e63513c65f469f34b6f1053fd0/bracex-2.5.post1-py3-none-any.whl", hash = "sha256:13e5732fec27828d6af308628285ad358047cec36801598368cb28bc631dbaf6", size = 11558 }, +] + +[[package]] +name = "brotli" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/c2/f9e977608bdf958650638c3f1e28f85a1b075f075ebbe77db8555463787b/Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724", size = 7372270 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/3a/dbf4fb970c1019a57b5e492e1e0eae745d32e59ba4d6161ab5422b08eefe/Brotli-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1140c64812cb9b06c922e77f1c26a75ec5e3f0fb2bf92cc8c58720dec276752", size = 873045 }, + { url = "https://files.pythonhosted.org/packages/dd/11/afc14026ea7f44bd6eb9316d800d439d092c8d508752055ce8d03086079a/Brotli-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8fd5270e906eef71d4a8d19b7c6a43760c6abcfcc10c9101d14eb2357418de9", size = 446218 }, + { url = "https://files.pythonhosted.org/packages/36/83/7545a6e7729db43cb36c4287ae388d6885c85a86dd251768a47015dfde32/Brotli-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ae56aca0402a0f9a3431cddda62ad71666ca9d4dc3a10a142b9dce2e3c0cda3", size = 2903872 }, + { url = "https://files.pythonhosted.org/packages/32/23/35331c4d9391fcc0f29fd9bec2c76e4b4eeab769afbc4b11dd2e1098fb13/Brotli-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43ce1b9935bfa1ede40028054d7f48b5469cd02733a365eec8a329ffd342915d", size = 2941254 }, + { url = "https://files.pythonhosted.org/packages/3b/24/1671acb450c902edb64bd765d73603797c6c7280a9ada85a195f6b78c6e5/Brotli-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7c4855522edb2e6ae7fdb58e07c3ba9111e7621a8956f481c68d5d979c93032e", size = 2857293 }, + { url = "https://files.pythonhosted.org/packages/d5/00/40f760cc27007912b327fe15bf6bfd8eaecbe451687f72a8abc587d503b3/Brotli-1.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:38025d9f30cf4634f8309c6874ef871b841eb3c347e90b0851f63d1ded5212da", size = 3002385 }, + { url = "https://files.pythonhosted.org/packages/b8/cb/8aaa83f7a4caa131757668c0fb0c4b6384b09ffa77f2fba9570d87ab587d/Brotli-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e6a904cb26bfefc2f0a6f240bdf5233be78cd2488900a2f846f3c3ac8489ab80", size = 2911104 }, + { url = "https://files.pythonhosted.org/packages/bc/c4/65456561d89d3c49f46b7fbeb8fe6e449f13bdc8ea7791832c5d476b2faf/Brotli-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a37b8f0391212d29b3a91a799c8e4a2855e0576911cdfb2515487e30e322253d", size = 2809981 }, + { url = 
"https://files.pythonhosted.org/packages/05/1b/cf49528437bae28abce5f6e059f0d0be6fecdcc1d3e33e7c54b3ca498425/Brotli-1.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e84799f09591700a4154154cab9787452925578841a94321d5ee8fb9a9a328f0", size = 2935297 }, + { url = "https://files.pythonhosted.org/packages/81/ff/190d4af610680bf0c5a09eb5d1eac6e99c7c8e216440f9c7cfd42b7adab5/Brotli-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f66b5337fa213f1da0d9000bc8dc0cb5b896b726eefd9c6046f699b169c41b9e", size = 2930735 }, + { url = "https://files.pythonhosted.org/packages/80/7d/f1abbc0c98f6e09abd3cad63ec34af17abc4c44f308a7a539010f79aae7a/Brotli-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5dab0844f2cf82be357a0eb11a9087f70c5430b2c241493fc122bb6f2bb0917c", size = 2933107 }, + { url = "https://files.pythonhosted.org/packages/34/ce/5a5020ba48f2b5a4ad1c0522d095ad5847a0be508e7d7569c8630ce25062/Brotli-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e4fe605b917c70283db7dfe5ada75e04561479075761a0b3866c081d035b01c1", size = 2845400 }, + { url = "https://files.pythonhosted.org/packages/44/89/fa2c4355ab1eecf3994e5a0a7f5492c6ff81dfcb5f9ba7859bd534bb5c1a/Brotli-1.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1e9a65b5736232e7a7f91ff3d02277f11d339bf34099a56cdab6a8b3410a02b2", size = 3031985 }, + { url = "https://files.pythonhosted.org/packages/af/a4/79196b4a1674143d19dca400866b1a4d1a089040df7b93b88ebae81f3447/Brotli-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:58d4b711689366d4a03ac7957ab8c28890415e267f9b6589969e74b6e42225ec", size = 2927099 }, + { url = "https://files.pythonhosted.org/packages/e9/54/1c0278556a097f9651e657b873ab08f01b9a9ae4cac128ceb66427d7cd20/Brotli-1.1.0-cp310-cp310-win32.whl", hash = "sha256:be36e3d172dc816333f33520154d708a2657ea63762ec16b62ece02ab5e4daf2", size = 333172 }, + { url = "https://files.pythonhosted.org/packages/f7/65/b785722e941193fd8b571afd9edbec2a9b838ddec4375d8af33a50b8dab9/Brotli-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0c6244521dda65ea562d5a69b9a26120769b7a9fb3db2fe9545935ed6735b128", size = 357255 }, + { url = "https://files.pythonhosted.org/packages/96/12/ad41e7fadd5db55459c4c401842b47f7fee51068f86dd2894dd0dcfc2d2a/Brotli-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a3daabb76a78f829cafc365531c972016e4aa8d5b4bf60660ad8ecee19df7ccc", size = 873068 }, + { url = "https://files.pythonhosted.org/packages/95/4e/5afab7b2b4b61a84e9c75b17814198ce515343a44e2ed4488fac314cd0a9/Brotli-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c8146669223164fc87a7e3de9f81e9423c67a79d6b3447994dfb9c95da16e2d6", size = 446244 }, + { url = "https://files.pythonhosted.org/packages/9d/e6/f305eb61fb9a8580c525478a4a34c5ae1a9bcb12c3aee619114940bc513d/Brotli-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30924eb4c57903d5a7526b08ef4a584acc22ab1ffa085faceb521521d2de32dd", size = 2906500 }, + { url = "https://files.pythonhosted.org/packages/3e/4f/af6846cfbc1550a3024e5d3775ede1e00474c40882c7bf5b37a43ca35e91/Brotli-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ceb64bbc6eac5a140ca649003756940f8d6a7c444a68af170b3187623b43bebf", size = 2943950 }, + { url = "https://files.pythonhosted.org/packages/b3/e7/ca2993c7682d8629b62630ebf0d1f3bb3d579e667ce8e7ca03a0a0576a2d/Brotli-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a469274ad18dc0e4d316eefa616d1d0c2ff9da369af19fa6f3daa4f09671fd61", size = 2918527 }, + { url = 
"https://files.pythonhosted.org/packages/b3/96/da98e7bedc4c51104d29cc61e5f449a502dd3dbc211944546a4cc65500d3/Brotli-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524f35912131cc2cabb00edfd8d573b07f2d9f21fa824bd3fb19725a9cf06327", size = 2845489 }, + { url = "https://files.pythonhosted.org/packages/e8/ef/ccbc16947d6ce943a7f57e1a40596c75859eeb6d279c6994eddd69615265/Brotli-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5b3cc074004d968722f51e550b41a27be656ec48f8afaeeb45ebf65b561481dd", size = 2914080 }, + { url = "https://files.pythonhosted.org/packages/80/d6/0bd38d758d1afa62a5524172f0b18626bb2392d717ff94806f741fcd5ee9/Brotli-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:19c116e796420b0cee3da1ccec3b764ed2952ccfcc298b55a10e5610ad7885f9", size = 2813051 }, + { url = "https://files.pythonhosted.org/packages/14/56/48859dd5d129d7519e001f06dcfbb6e2cf6db92b2702c0c2ce7d97e086c1/Brotli-1.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:510b5b1bfbe20e1a7b3baf5fed9e9451873559a976c1a78eebaa3b86c57b4265", size = 2938172 }, + { url = "https://files.pythonhosted.org/packages/3d/77/a236d5f8cd9e9f4348da5acc75ab032ab1ab2c03cc8f430d24eea2672888/Brotli-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a1fd8a29719ccce974d523580987b7f8229aeace506952fa9ce1d53a033873c8", size = 2933023 }, + { url = "https://files.pythonhosted.org/packages/f1/87/3b283efc0f5cb35f7f84c0c240b1e1a1003a5e47141a4881bf87c86d0ce2/Brotli-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c247dd99d39e0338a604f8c2b3bc7061d5c2e9e2ac7ba9cc1be5a69cb6cd832f", size = 2935871 }, + { url = "https://files.pythonhosted.org/packages/f3/eb/2be4cc3e2141dc1a43ad4ca1875a72088229de38c68e842746b342667b2a/Brotli-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1b2c248cd517c222d89e74669a4adfa5577e06ab68771a529060cf5a156e9757", size = 2847784 }, + { url = "https://files.pythonhosted.org/packages/66/13/b58ddebfd35edde572ccefe6890cf7c493f0c319aad2a5badee134b4d8ec/Brotli-1.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2a24c50840d89ded6c9a8fdc7b6ed3692ed4e86f1c4a4a938e1e92def92933e0", size = 3034905 }, + { url = "https://files.pythonhosted.org/packages/84/9c/bc96b6c7db824998a49ed3b38e441a2cae9234da6fa11f6ed17e8cf4f147/Brotli-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f31859074d57b4639318523d6ffdca586ace54271a73ad23ad021acd807eb14b", size = 2929467 }, + { url = "https://files.pythonhosted.org/packages/e7/71/8f161dee223c7ff7fea9d44893fba953ce97cf2c3c33f78ba260a91bcff5/Brotli-1.1.0-cp311-cp311-win32.whl", hash = "sha256:39da8adedf6942d76dc3e46653e52df937a3c4d6d18fdc94a7c29d263b1f5b50", size = 333169 }, + { url = "https://files.pythonhosted.org/packages/02/8a/fece0ee1057643cb2a5bbf59682de13f1725f8482b2c057d4e799d7ade75/Brotli-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:aac0411d20e345dc0920bdec5548e438e999ff68d77564d5e9463a7ca9d3e7b1", size = 357253 }, + { url = "https://files.pythonhosted.org/packages/5c/d0/5373ae13b93fe00095a58efcbce837fd470ca39f703a235d2a999baadfbc/Brotli-1.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:32d95b80260d79926f5fab3c41701dbb818fde1c9da590e77e571eefd14abe28", size = 815693 }, + { url = "https://files.pythonhosted.org/packages/8e/48/f6e1cdf86751300c288c1459724bfa6917a80e30dbfc326f92cea5d3683a/Brotli-1.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b760c65308ff1e462f65d69c12e4ae085cff3b332d894637f6273a12a482d09f", size = 422489 }, + { url = 
"https://files.pythonhosted.org/packages/06/88/564958cedce636d0f1bed313381dfc4b4e3d3f6015a63dae6146e1b8c65c/Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409", size = 873081 }, + { url = "https://files.pythonhosted.org/packages/58/79/b7026a8bb65da9a6bb7d14329fd2bd48d2b7f86d7329d5cc8ddc6a90526f/Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2", size = 446244 }, + { url = "https://files.pythonhosted.org/packages/e5/18/c18c32ecea41b6c0004e15606e274006366fe19436b6adccc1ae7b2e50c2/Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451", size = 2906505 }, + { url = "https://files.pythonhosted.org/packages/08/c8/69ec0496b1ada7569b62d85893d928e865df29b90736558d6c98c2031208/Brotli-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f4bf76817c14aa98cc6697ac02f3972cb8c3da93e9ef16b9c66573a68014f91", size = 2944152 }, + { url = "https://files.pythonhosted.org/packages/ab/fb/0517cea182219d6768113a38167ef6d4eb157a033178cc938033a552ed6d/Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0c5516f0aed654134a2fc936325cc2e642f8a0e096d075209672eb321cff408", size = 2919252 }, + { url = "https://files.pythonhosted.org/packages/c7/53/73a3431662e33ae61a5c80b1b9d2d18f58dfa910ae8dd696e57d39f1a2f5/Brotli-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c3020404e0b5eefd7c9485ccf8393cfb75ec38ce75586e046573c9dc29967a0", size = 2845955 }, + { url = "https://files.pythonhosted.org/packages/55/ac/bd280708d9c5ebdbf9de01459e625a3e3803cce0784f47d633562cf40e83/Brotli-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ed11165dd45ce798d99a136808a794a748d5dc38511303239d4e2363c0695dc", size = 2914304 }, + { url = "https://files.pythonhosted.org/packages/76/58/5c391b41ecfc4527d2cc3350719b02e87cb424ef8ba2023fb662f9bf743c/Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180", size = 2814452 }, + { url = "https://files.pythonhosted.org/packages/c7/4e/91b8256dfe99c407f174924b65a01f5305e303f486cc7a2e8a5d43c8bec3/Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248", size = 2938751 }, + { url = "https://files.pythonhosted.org/packages/5a/a6/e2a39a5d3b412938362bbbeba5af904092bf3f95b867b4a3eb856104074e/Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966", size = 2933757 }, + { url = "https://files.pythonhosted.org/packages/13/f0/358354786280a509482e0e77c1a5459e439766597d280f28cb097642fc26/Brotli-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:87a3044c3a35055527ac75e419dfa9f4f3667a1e887ee80360589eb8c90aabb9", size = 2936146 }, + { url = "https://files.pythonhosted.org/packages/80/f7/daf538c1060d3a88266b80ecc1d1c98b79553b3f117a485653f17070ea2a/Brotli-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c5529b34c1c9d937168297f2c1fde7ebe9ebdd5e121297ff9c043bdb2ae3d6fb", size = 2848055 }, + { url = "https://files.pythonhosted.org/packages/ad/cf/0eaa0585c4077d3c2d1edf322d8e97aabf317941d3a72d7b3ad8bce004b0/Brotli-1.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:ca63e1890ede90b2e4454f9a65135a4d387a4585ff8282bb72964fab893f2111", size = 3035102 }, + { url = "https://files.pythonhosted.org/packages/d8/63/1c1585b2aa554fe6dbce30f0c18bdbc877fa9a1bf5ff17677d9cca0ac122/Brotli-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e79e6520141d792237c70bcd7a3b122d00f2613769ae0cb61c52e89fd3443839", size = 2930029 }, + { url = "https://files.pythonhosted.org/packages/5f/3b/4e3fd1893eb3bbfef8e5a80d4508bec17a57bb92d586c85c12d28666bb13/Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0", size = 333276 }, + { url = "https://files.pythonhosted.org/packages/3d/d5/942051b45a9e883b5b6e98c041698b1eb2012d25e5948c58d6bf85b1bb43/Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951", size = 357255 }, + { url = "https://files.pythonhosted.org/packages/0a/9f/fb37bb8ffc52a8da37b1c03c459a8cd55df7a57bdccd8831d500e994a0ca/Brotli-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8bf32b98b75c13ec7cf774164172683d6e7891088f6316e54425fde1efc276d5", size = 815681 }, + { url = "https://files.pythonhosted.org/packages/06/b3/dbd332a988586fefb0aa49c779f59f47cae76855c2d00f450364bb574cac/Brotli-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bc37c4d6b87fb1017ea28c9508b36bbcb0c3d18b4260fcdf08b200c74a6aee8", size = 422475 }, + { url = "https://files.pythonhosted.org/packages/bb/80/6aaddc2f63dbcf2d93c2d204e49c11a9ec93a8c7c63261e2b4bd35198283/Brotli-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c0ef38c7a7014ffac184db9e04debe495d317cc9c6fb10071f7fefd93100a4f", size = 2906173 }, + { url = "https://files.pythonhosted.org/packages/ea/1d/e6ca79c96ff5b641df6097d299347507d39a9604bde8915e76bf026d6c77/Brotli-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91d7cc2a76b5567591d12c01f019dd7afce6ba8cba6571187e21e2fc418ae648", size = 2943803 }, + { url = "https://files.pythonhosted.org/packages/ac/a3/d98d2472e0130b7dd3acdbb7f390d478123dbf62b7d32bda5c830a96116d/Brotli-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a93dde851926f4f2678e704fadeb39e16c35d8baebd5252c9fd94ce8ce68c4a0", size = 2918946 }, + { url = "https://files.pythonhosted.org/packages/c4/a5/c69e6d272aee3e1423ed005d8915a7eaa0384c7de503da987f2d224d0721/Brotli-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0db75f47be8b8abc8d9e31bc7aad0547ca26f24a54e6fd10231d623f183d089", size = 2845707 }, + { url = "https://files.pythonhosted.org/packages/58/9f/4149d38b52725afa39067350696c09526de0125ebfbaab5acc5af28b42ea/Brotli-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6967ced6730aed543b8673008b5a391c3b1076d834ca438bbd70635c73775368", size = 2936231 }, + { url = "https://files.pythonhosted.org/packages/5a/5a/145de884285611838a16bebfdb060c231c52b8f84dfbe52b852a15780386/Brotli-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7eedaa5d036d9336c95915035fb57422054014ebdeb6f3b42eac809928e40d0c", size = 2848157 }, + { url = "https://files.pythonhosted.org/packages/50/ae/408b6bfb8525dadebd3b3dd5b19d631da4f7d46420321db44cd99dcf2f2c/Brotli-1.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d487f5432bf35b60ed625d7e1b448e2dc855422e87469e3f450aa5552b0eb284", size = 3035122 }, + { url = 
"https://files.pythonhosted.org/packages/af/85/a94e5cfaa0ca449d8f91c3d6f78313ebf919a0dbd55a100c711c6e9655bc/Brotli-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:832436e59afb93e1836081a20f324cb185836c617659b07b129141a8426973c7", size = 2930206 }, + { url = "https://files.pythonhosted.org/packages/c2/f0/a61d9262cd01351df22e57ad7c34f66794709acab13f34be2675f45bf89d/Brotli-1.1.0-cp313-cp313-win32.whl", hash = "sha256:43395e90523f9c23a3d5bdf004733246fba087f2948f87ab28015f12359ca6a0", size = 333804 }, + { url = "https://files.pythonhosted.org/packages/7e/c1/ec214e9c94000d1c1974ec67ced1c970c148aa6b8d8373066123fc3dbf06/Brotli-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:9011560a466d2eb3f5a6e4929cf4a09be405c64154e12df0dd72713f6500e32b", size = 358517 }, + { url = "https://files.pythonhosted.org/packages/1b/aa/aa6e0c9848ee4375514af0b27abf470904992939b7363ae78fc8aca8a9a8/Brotli-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb2ce4b8045c78ebbc7b8f3c15062e435d47e7393cc57c25115cfd49883747a", size = 873048 }, + { url = "https://files.pythonhosted.org/packages/ae/32/38bba1a8bef9ecb1cda08439fd28d7e9c51aff13b4783a4f1610da90b6c2/Brotli-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7905193081db9bfa73b1219140b3d315831cbff0d8941f22da695832f0dd188f", size = 446207 }, + { url = "https://files.pythonhosted.org/packages/3c/6a/14cc20ddc53efc274601c8195791a27cfb7acc5e5134e0f8c493a8b8821a/Brotli-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a77def80806c421b4b0af06f45d65a136e7ac0bdca3c09d9e2ea4e515367c7e9", size = 2903803 }, + { url = "https://files.pythonhosted.org/packages/9a/26/62b2d894d4e82d7a7f4e0bb9007a42bbc765697a5679b43186acd68d7a79/Brotli-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dadd1314583ec0bf2d1379f7008ad627cd6336625d6679cf2f8e67081b83acf", size = 2941149 }, + { url = "https://files.pythonhosted.org/packages/a9/ca/00d55bbdd8631236c61777742d8a8454cf6a87eb4125cad675912c68bec7/Brotli-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:901032ff242d479a0efa956d853d16875d42157f98951c0230f69e69f9c09bac", size = 2672253 }, + { url = "https://files.pythonhosted.org/packages/e2/e6/4a730f6e5b5d538e92d09bc51bf69119914f29a222f9e1d65ae4abb27a4e/Brotli-1.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:22fc2a8549ffe699bfba2256ab2ed0421a7b8fadff114a3d201794e45a9ff578", size = 2757005 }, + { url = "https://files.pythonhosted.org/packages/cb/6b/8cf297987fe3c1bf1c87f0c0b714af2ce47092b8d307b9f6ecbc65f98968/Brotli-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ae15b066e5ad21366600ebec29a7ccbc86812ed267e4b28e860b8ca16a2bc474", size = 2910658 }, + { url = "https://files.pythonhosted.org/packages/2c/1f/be9443995821c933aad7159803f84ef4923c6f5b72c2affd001192b310fc/Brotli-1.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:949f3b7c29912693cee0afcf09acd6ebc04c57af949d9bf77d6101ebb61e388c", size = 2809728 }, + { url = "https://files.pythonhosted.org/packages/76/2f/213bab6efa902658c80a1247142d42b138a27ccdd6bade49ca9cd74e714a/Brotli-1.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:89f4988c7203739d48c6f806f1e87a1d96e0806d44f0fba61dba81392c9e474d", size = 2935043 }, + { url = "https://files.pythonhosted.org/packages/27/89/bbb14fa98e895d1e601491fba54a5feec167d262f0d3d537a3b0d4cd0029/Brotli-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:de6551e370ef19f8de1807d0a9aa2cdfdce2e85ce88b122fe9f6b2b076837e59", size = 2930639 }, + { url = 
"https://files.pythonhosted.org/packages/14/87/03a6d6e1866eddf9f58cc57e35befbeb5514da87a416befe820150cae63f/Brotli-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0737ddb3068957cf1b054899b0883830bb1fec522ec76b1098f9b6e0f02d9419", size = 2932834 }, + { url = "https://files.pythonhosted.org/packages/a4/d5/e5f85e04f75144d1a89421ba432def6bdffc8f28b04f5b7d540bbd03362c/Brotli-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4f3607b129417e111e30637af1b56f24f7a49e64763253bbc275c75fa887d4b2", size = 2845213 }, + { url = "https://files.pythonhosted.org/packages/99/bf/25ef07add7afbb1aacd4460726a1a40370dfd60c0810b6f242a6d3871d7e/Brotli-1.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:6c6e0c425f22c1c719c42670d561ad682f7bfeeef918edea971a79ac5252437f", size = 3031573 }, + { url = "https://files.pythonhosted.org/packages/55/22/948a97bda5c9dc9968d56b9ed722d9727778db43739cf12ef26ff69be94d/Brotli-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:494994f807ba0b92092a163a0a283961369a65f6cbe01e8891132b7a320e61eb", size = 2926885 }, + { url = "https://files.pythonhosted.org/packages/31/ba/e53d107399b535ef89deb6977dd8eae468e2dde7b1b74c6cbe2c0e31fda2/Brotli-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f0d8a7a6b5983c2496e364b969f0e526647a06b075d034f3297dc66f3b360c64", size = 333171 }, + { url = "https://files.pythonhosted.org/packages/99/b3/f7b3af539f74b82e1c64d28685a5200c631cc14ae751d37d6ed819655627/Brotli-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cdad5b9014d83ca68c25d2e9444e28e967ef16e80f6b436918c700c117a85467", size = 357258 }, +] + +[[package]] +name = "bump2version" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/29/2a/688aca6eeebfe8941235be53f4da780c6edee05dbbea5d7abaa3aab6fad2/bump2version-1.0.1.tar.gz", hash = "sha256:762cb2bfad61f4ec8e2bdf452c7c267416f8c70dd9ecb1653fd0bbb01fa936e6", size = 36236 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/e3/fa60c47d7c344533142eb3af0b73234ef8ea3fb2da742ab976b947e717df/bump2version-1.0.1-py2.py3-none-any.whl", hash = "sha256:37f927ea17cde7ae2d7baf832f8e80ce3777624554a653006c9144f8017fe410", size = 22030 }, +] + +[[package]] +name = "certifi" +version = "2024.8.30" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/ee/9b19140fe824b367c04c5e1b369942dd754c4c5462d5674002f75c4dedc1/certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9", size = 168507 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/90/3c9ff0512038035f59d279fddeb79f5f1eccd8859f06d6163c58798b9487/certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", size = 167321 }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/07/f44ca684db4e4f08a3fdc6eeb9a0d15dc6883efc7b8c90357fdbf74e186c/cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", size = 182191 }, + { url = 
"https://files.pythonhosted.org/packages/08/fd/cc2fedbd887223f9f5d170c96e57cbf655df9831a6546c1727ae13fa977a/cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", size = 178592 }, + { url = "https://files.pythonhosted.org/packages/de/cc/4635c320081c78d6ffc2cab0a76025b691a91204f4aa317d568ff9280a2d/cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", size = 426024 }, + { url = "https://files.pythonhosted.org/packages/b6/7b/3b2b250f3aab91abe5f8a51ada1b717935fdaec53f790ad4100fe2ec64d1/cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", size = 448188 }, + { url = "https://files.pythonhosted.org/packages/d3/48/1b9283ebbf0ec065148d8de05d647a986c5f22586b18120020452fff8f5d/cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", size = 455571 }, + { url = "https://files.pythonhosted.org/packages/40/87/3b8452525437b40f39ca7ff70276679772ee7e8b394934ff60e63b7b090c/cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", size = 436687 }, + { url = "https://files.pythonhosted.org/packages/8d/fb/4da72871d177d63649ac449aec2e8a29efe0274035880c7af59101ca2232/cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", size = 446211 }, + { url = "https://files.pythonhosted.org/packages/ab/a0/62f00bcb411332106c02b663b26f3545a9ef136f80d5df746c05878f8c4b/cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", size = 461325 }, + { url = "https://files.pythonhosted.org/packages/36/83/76127035ed2e7e27b0787604d99da630ac3123bfb02d8e80c633f218a11d/cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", size = 438784 }, + { url = "https://files.pythonhosted.org/packages/21/81/a6cd025db2f08ac88b901b745c163d884641909641f9b826e8cb87645942/cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", size = 461564 }, + { url = "https://files.pythonhosted.org/packages/f8/fe/4d41c2f200c4a457933dbd98d3cf4e911870877bd94d9656cc0fcb390681/cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", size = 171804 }, + { url = "https://files.pythonhosted.org/packages/d1/b6/0b0f5ab93b0df4acc49cae758c81fe4e5ef26c3ae2e10cc69249dfd8b3ab/cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", size = 181299 }, + { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264 }, + { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651 }, + { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259 }, + { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200 }, + { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235 }, + { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721 }, + { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242 }, + { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999 }, + { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242 }, + { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604 }, + { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727 }, + { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400 }, + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178 }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840 }, + { url = 
"https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803 }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850 }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729 }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256 }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424 }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568 }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736 }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, + { url = 
"https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469 }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, + { url = "https://files.pythonhosted.org/packages/b9/ea/8bb50596b8ffbc49ddd7a1ad305035daa770202a6b782fc164647c2673ad/cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", size = 182220 }, + { url = "https://files.pythonhosted.org/packages/ae/11/e77c8cd24f58285a82c23af484cf5b124a376b32644e445960d1a4654c3a/cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", size = 178605 }, + { url = "https://files.pythonhosted.org/packages/ed/65/25a8dc32c53bf5b7b6c2686b42ae2ad58743f7ff644844af7cdb29b49361/cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", size = 424910 }, + { url = "https://files.pythonhosted.org/packages/42/7a/9d086fab7c66bd7c4d0f27c57a1b6b068ced810afc498cc8c49e0088661c/cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", size = 447200 }, + { url = "https://files.pythonhosted.org/packages/da/63/1785ced118ce92a993b0ec9e0d0ac8dc3e5dbfbcaa81135be56c69cabbb6/cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", size = 454565 }, + { url = "https://files.pythonhosted.org/packages/74/06/90b8a44abf3556599cdec107f7290277ae8901a58f75e6fe8f970cd72418/cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", size = 435635 }, + { url = "https://files.pythonhosted.org/packages/bd/62/a1f468e5708a70b1d86ead5bab5520861d9c7eacce4a885ded9faa7729c3/cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", size = 445218 }, + { url = "https://files.pythonhosted.org/packages/5b/95/b34462f3ccb09c2594aa782d90a90b045de4ff1f70148ee79c69d37a0a5a/cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", size = 460486 }, + { url = "https://files.pythonhosted.org/packages/fc/fc/a1e4bebd8d680febd29cf6c8a40067182b64f00c7d105f8f26b5bc54317b/cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", size = 437911 }, + { url = "https://files.pythonhosted.org/packages/e6/c3/21cab7a6154b6a5ea330ae80de386e7665254835b9e98ecc1340b3a7de9a/cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", size = 460632 }, + { url = "https://files.pythonhosted.org/packages/cb/b5/fd9f8b5a84010ca169ee49f4e4ad6f8c05f4e3545b72ee041dbbcb159882/cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", size = 171820 }, + { url = "https://files.pythonhosted.org/packages/8c/52/b08750ce0bce45c143e1b5d7357ee8c55341b52bdef4b0f081af1eb248c2/cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", size = 181290 }, +] + +[[package]] +name = "cfgv" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/4f/e1808dc01273379acc506d18f1504eb2d299bd4131743b9fc54d7be4df1e/charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", size = 106620 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/8b/825cc84cf13a28bfbcba7c416ec22bf85a9584971be15b21dd8300c65b7f/charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6", size = 196363 }, + { url = "https://files.pythonhosted.org/packages/23/81/d7eef6a99e42c77f444fdd7bc894b0ceca6c3a95c51239e74a722039521c/charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b", size = 125639 }, + { url = 
"https://files.pythonhosted.org/packages/21/67/b4564d81f48042f520c948abac7079356e94b30cb8ffb22e747532cf469d/charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99", size = 120451 }, + { url = "https://files.pythonhosted.org/packages/c2/72/12a7f0943dd71fb5b4e7b55c41327ac0a1663046a868ee4d0d8e9c369b85/charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca", size = 140041 }, + { url = "https://files.pythonhosted.org/packages/67/56/fa28c2c3e31217c4c52158537a2cf5d98a6c1e89d31faf476c89391cd16b/charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d", size = 150333 }, + { url = "https://files.pythonhosted.org/packages/f9/d2/466a9be1f32d89eb1554cf84073a5ed9262047acee1ab39cbaefc19635d2/charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7", size = 142921 }, + { url = "https://files.pythonhosted.org/packages/f8/01/344ec40cf5d85c1da3c1f57566c59e0c9b56bcc5566c08804a95a6cc8257/charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3", size = 144785 }, + { url = "https://files.pythonhosted.org/packages/73/8b/2102692cb6d7e9f03b9a33a710e0164cadfce312872e3efc7cfe22ed26b4/charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907", size = 146631 }, + { url = "https://files.pythonhosted.org/packages/d8/96/cc2c1b5d994119ce9f088a9a0c3ebd489d360a2eb058e2c8049f27092847/charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b", size = 140867 }, + { url = "https://files.pythonhosted.org/packages/c9/27/cde291783715b8ec30a61c810d0120411844bc4c23b50189b81188b273db/charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912", size = 149273 }, + { url = "https://files.pythonhosted.org/packages/3a/a4/8633b0fc1a2d1834d5393dafecce4a1cc56727bfd82b4dc18fc92f0d3cc3/charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95", size = 152437 }, + { url = "https://files.pythonhosted.org/packages/64/ea/69af161062166b5975ccbb0961fd2384853190c70786f288684490913bf5/charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e", size = 150087 }, + { url = "https://files.pythonhosted.org/packages/3b/fd/e60a9d9fd967f4ad5a92810138192f825d77b4fa2a557990fd575a47695b/charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe", size = 145142 }, + { url = "https://files.pythonhosted.org/packages/6d/02/8cb0988a1e49ac9ce2eed1e07b77ff118f2923e9ebd0ede41ba85f2dcb04/charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc", size = 94701 }, + { url = 
"https://files.pythonhosted.org/packages/d6/20/f1d4670a8a723c46be695dff449d86d6092916f9e99c53051954ee33a1bc/charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749", size = 102191 }, + { url = "https://files.pythonhosted.org/packages/9c/61/73589dcc7a719582bf56aae309b6103d2762b526bffe189d635a7fcfd998/charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c", size = 193339 }, + { url = "https://files.pythonhosted.org/packages/77/d5/8c982d58144de49f59571f940e329ad6e8615e1e82ef84584c5eeb5e1d72/charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944", size = 124366 }, + { url = "https://files.pythonhosted.org/packages/bf/19/411a64f01ee971bed3231111b69eb56f9331a769072de479eae7de52296d/charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee", size = 118874 }, + { url = "https://files.pythonhosted.org/packages/4c/92/97509850f0d00e9f14a46bc751daabd0ad7765cff29cdfb66c68b6dad57f/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c", size = 138243 }, + { url = "https://files.pythonhosted.org/packages/e2/29/d227805bff72ed6d6cb1ce08eec707f7cfbd9868044893617eb331f16295/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6", size = 148676 }, + { url = "https://files.pythonhosted.org/packages/13/bc/87c2c9f2c144bedfa62f894c3007cd4530ba4b5351acb10dc786428a50f0/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea", size = 141289 }, + { url = "https://files.pythonhosted.org/packages/eb/5b/6f10bad0f6461fa272bfbbdf5d0023b5fb9bc6217c92bf068fa5a99820f5/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc", size = 142585 }, + { url = "https://files.pythonhosted.org/packages/3b/a0/a68980ab8a1f45a36d9745d35049c1af57d27255eff8c907e3add84cf68f/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5", size = 144408 }, + { url = "https://files.pythonhosted.org/packages/d7/a1/493919799446464ed0299c8eef3c3fad0daf1c3cd48bff9263c731b0d9e2/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594", size = 139076 }, + { url = "https://files.pythonhosted.org/packages/fb/9d/9c13753a5a6e0db4a0a6edb1cef7aee39859177b64e1a1e748a6e3ba62c2/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c", size = 146874 }, + { url = "https://files.pythonhosted.org/packages/75/d2/0ab54463d3410709c09266dfb416d032a08f97fd7d60e94b8c6ef54ae14b/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365", size = 150871 }, + { url = 
"https://files.pythonhosted.org/packages/8d/c9/27e41d481557be53d51e60750b85aa40eaf52b841946b3cdeff363105737/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129", size = 148546 }, + { url = "https://files.pythonhosted.org/packages/ee/44/4f62042ca8cdc0cabf87c0fc00ae27cd8b53ab68be3605ba6d071f742ad3/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236", size = 143048 }, + { url = "https://files.pythonhosted.org/packages/01/f8/38842422988b795220eb8038745d27a675ce066e2ada79516c118f291f07/charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99", size = 94389 }, + { url = "https://files.pythonhosted.org/packages/0b/6e/b13bd47fa9023b3699e94abf565b5a2f0b0be6e9ddac9812182596ee62e4/charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27", size = 101752 }, + { url = "https://files.pythonhosted.org/packages/d3/0b/4b7a70987abf9b8196845806198975b6aab4ce016632f817ad758a5aa056/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6", size = 194445 }, + { url = "https://files.pythonhosted.org/packages/50/89/354cc56cf4dd2449715bc9a0f54f3aef3dc700d2d62d1fa5bbea53b13426/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf", size = 125275 }, + { url = "https://files.pythonhosted.org/packages/fa/44/b730e2a2580110ced837ac083d8ad222343c96bb6b66e9e4e706e4d0b6df/charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db", size = 119020 }, + { url = "https://files.pythonhosted.org/packages/9d/e4/9263b8240ed9472a2ae7ddc3e516e71ef46617fe40eaa51221ccd4ad9a27/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1", size = 139128 }, + { url = "https://files.pythonhosted.org/packages/6b/e3/9f73e779315a54334240353eaea75854a9a690f3f580e4bd85d977cb2204/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03", size = 149277 }, + { url = "https://files.pythonhosted.org/packages/1a/cf/f1f50c2f295312edb8a548d3fa56a5c923b146cd3f24114d5adb7e7be558/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284", size = 142174 }, + { url = "https://files.pythonhosted.org/packages/16/92/92a76dc2ff3a12e69ba94e7e05168d37d0345fa08c87e1fe24d0c2a42223/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15", size = 143838 }, + { url = "https://files.pythonhosted.org/packages/a4/01/2117ff2b1dfc61695daf2babe4a874bca328489afa85952440b59819e9d7/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8", size = 146149 }, + { url = 
"https://files.pythonhosted.org/packages/f6/9b/93a332b8d25b347f6839ca0a61b7f0287b0930216994e8bf67a75d050255/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2", size = 140043 }, + { url = "https://files.pythonhosted.org/packages/ab/f6/7ac4a01adcdecbc7a7587767c776d53d369b8b971382b91211489535acf0/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719", size = 148229 }, + { url = "https://files.pythonhosted.org/packages/9d/be/5708ad18161dee7dc6a0f7e6cf3a88ea6279c3e8484844c0590e50e803ef/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631", size = 151556 }, + { url = "https://files.pythonhosted.org/packages/5a/bb/3d8bc22bacb9eb89785e83e6723f9888265f3a0de3b9ce724d66bd49884e/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b", size = 149772 }, + { url = "https://files.pythonhosted.org/packages/f7/fa/d3fc622de05a86f30beea5fc4e9ac46aead4731e73fd9055496732bcc0a4/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", size = 144800 }, + { url = "https://files.pythonhosted.org/packages/9a/65/bdb9bc496d7d190d725e96816e20e2ae3a6fa42a5cac99c3c3d6ff884118/charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", size = 94836 }, + { url = "https://files.pythonhosted.org/packages/3e/67/7b72b69d25b89c0b3cea583ee372c43aa24df15f0e0f8d3982c57804984b/charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", size = 102187 }, + { url = "https://files.pythonhosted.org/packages/f3/89/68a4c86f1a0002810a27f12e9a7b22feb198c59b2f05231349fbce5c06f4/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", size = 194617 }, + { url = "https://files.pythonhosted.org/packages/4f/cd/8947fe425e2ab0aa57aceb7807af13a0e4162cd21eee42ef5b053447edf5/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", size = 125310 }, + { url = "https://files.pythonhosted.org/packages/5b/f0/b5263e8668a4ee9becc2b451ed909e9c27058337fda5b8c49588183c267a/charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", size = 119126 }, + { url = "https://files.pythonhosted.org/packages/ff/6e/e445afe4f7fda27a533f3234b627b3e515a1b9429bc981c9a5e2aa5d97b6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", size = 139342 }, + { url = "https://files.pythonhosted.org/packages/a1/b2/4af9993b532d93270538ad4926c8e37dc29f2111c36f9c629840c57cd9b3/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", size = 149383 }, + { url = 
"https://files.pythonhosted.org/packages/fb/6f/4e78c3b97686b871db9be6f31d64e9264e889f8c9d7ab33c771f847f79b7/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", size = 142214 }, + { url = "https://files.pythonhosted.org/packages/2b/c9/1c8fe3ce05d30c87eff498592c89015b19fade13df42850aafae09e94f35/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", size = 144104 }, + { url = "https://files.pythonhosted.org/packages/ee/68/efad5dcb306bf37db7db338338e7bb8ebd8cf38ee5bbd5ceaaaa46f257e6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", size = 146255 }, + { url = "https://files.pythonhosted.org/packages/0c/75/1ed813c3ffd200b1f3e71121c95da3f79e6d2a96120163443b3ad1057505/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", size = 140251 }, + { url = "https://files.pythonhosted.org/packages/7d/0d/6f32255c1979653b448d3c709583557a4d24ff97ac4f3a5be156b2e6a210/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", size = 148474 }, + { url = "https://files.pythonhosted.org/packages/ac/a0/c1b5298de4670d997101fef95b97ac440e8c8d8b4efa5a4d1ef44af82f0d/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", size = 151849 }, + { url = "https://files.pythonhosted.org/packages/04/4f/b3961ba0c664989ba63e30595a3ed0875d6790ff26671e2aae2fdc28a399/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", size = 149781 }, + { url = "https://files.pythonhosted.org/packages/d8/90/6af4cd042066a4adad58ae25648a12c09c879efa4849c705719ba1b23d8c/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482", size = 144970 }, + { url = "https://files.pythonhosted.org/packages/cc/67/e5e7e0cbfefc4ca79025238b43cdf8a2037854195b37d6417f3d0895c4c2/charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", size = 94973 }, + { url = "https://files.pythonhosted.org/packages/65/97/fc9bbc54ee13d33dc54a7fcf17b26368b18505500fc01e228c27b5222d80/charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", size = 102308 }, + { url = "https://files.pythonhosted.org/packages/54/2f/28659eee7f5d003e0f5a3b572765bf76d6e0fe6601ab1f1b1dd4cba7e4f1/charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa", size = 196326 }, + { url = "https://files.pythonhosted.org/packages/d1/18/92869d5c0057baa973a3ee2af71573be7b084b3c3d428fe6463ce71167f8/charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a", size = 125614 }, + { url = 
"https://files.pythonhosted.org/packages/d6/27/327904c5a54a7796bb9f36810ec4173d2df5d88b401d2b95ef53111d214e/charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0", size = 120450 }, + { url = "https://files.pythonhosted.org/packages/a4/23/65af317914a0308495133b2d654cf67b11bbd6ca16637c4e8a38f80a5a69/charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a", size = 140135 }, + { url = "https://files.pythonhosted.org/packages/f2/41/6190102ad521a8aa888519bb014a74251ac4586cde9b38e790901684f9ab/charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242", size = 150413 }, + { url = "https://files.pythonhosted.org/packages/7b/ab/f47b0159a69eab9bd915591106859f49670c75f9a19082505ff16f50efc0/charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b", size = 142992 }, + { url = "https://files.pythonhosted.org/packages/28/89/60f51ad71f63aaaa7e51a2a2ad37919985a341a1d267070f212cdf6c2d22/charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62", size = 144871 }, + { url = "https://files.pythonhosted.org/packages/0c/48/0050550275fea585a6e24460b42465020b53375017d8596c96be57bfabca/charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0", size = 146756 }, + { url = "https://files.pythonhosted.org/packages/dc/b5/47f8ee91455946f745e6c9ddbb0f8f50314d2416dd922b213e7d5551ad09/charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd", size = 141034 }, + { url = "https://files.pythonhosted.org/packages/84/79/5c731059ebab43e80bf61fa51666b9b18167974b82004f18c76378ed31a3/charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be", size = 149434 }, + { url = "https://files.pythonhosted.org/packages/ca/f3/0719cd09fc4dc42066f239cb3c48ced17fc3316afca3e2a30a4756fe49ab/charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d", size = 152443 }, + { url = "https://files.pythonhosted.org/packages/f7/0e/c6357297f1157c8e8227ff337e93fd0a90e498e3d6ab96b2782204ecae48/charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3", size = 150294 }, + { url = "https://files.pythonhosted.org/packages/54/9a/acfa96dc4ea8c928040b15822b59d0863d6e1757fba8bd7de3dc4f761c13/charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742", size = 145314 }, + { url = "https://files.pythonhosted.org/packages/73/1c/b10a63032eaebb8d7bcb8544f12f063f41f5f463778ac61da15d9985e8b6/charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2", size = 94724 }, + { url = 
"https://files.pythonhosted.org/packages/c5/77/3a78bf28bfaa0863f9cfef278dbeadf55efe064eafff8c7c424ae3c4c1bf/charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca", size = 102159 }, + { url = "https://files.pythonhosted.org/packages/bf/9b/08c0432272d77b04803958a4598a51e2a4b51c06640af8b8f0f908c18bf2/charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", size = 49446 }, +] + +[[package]] +name = "click" +version = "8.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "platform_system == 'Windows'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de", size = 336121 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", size = 97941 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "comm" +version = "0.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/a8/fb783cb0abe2b5fded9f55e5703015cdf1c9c85b3669087c538dd15a6a86/comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e", size = 6210 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/75/49e5bfe642f71f272236b5b2d2691cf915a7283cc0ceda56357b61daa538/comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3", size = 7180 }, +] + +[[package]] +name = "configargparse" +version = "1.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/70/8a/73f1008adfad01cb923255b924b1528727b8270e67cb4ef41eabdc7d783e/ConfigArgParse-1.7.tar.gz", hash = "sha256:e7067471884de5478c58a511e529f0f9bd1c66bfef1dea90935438d6c23306d1", size = 43817 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/b3/b4ac838711fd74a2b4e6f746703cf9dd2cf5462d17dac07e349234e21b97/ConfigArgParse-1.7-py3-none-any.whl", hash = "sha256:d249da6591465c6c26df64a9f73d2536e743be2f244eb3ebe61114af2f94f86b", size = 25489 }, +] + +[[package]] +name = "coverage" +version = "7.6.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5b/d2/c25011f4d036cf7e8acbbee07a8e09e9018390aee25ba085596c4b83d510/coverage-7.6.9.tar.gz", hash = "sha256:4a8d8977b0c6ef5aeadcb644da9e69ae0dcfe66ec7f368c89c72e058bd71164d", size = 801710 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/49/f3/f830fb53bf7e4f1d5542756f61d9b740352a188f43854aab9409c8cdeb18/coverage-7.6.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:85d9636f72e8991a1706b2b55b06c27545448baf9f6dbf51c4004609aacd7dcb", size = 207024 }, + { url = "https://files.pythonhosted.org/packages/4e/e3/ea5632a3a6efd00ab0a791adc0f3e48512097a757ee7dcbee5505f57bafa/coverage-7.6.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:608a7fd78c67bee8936378299a6cb9f5149bb80238c7a566fc3e6717a4e68710", size = 207463 }, + { url = "https://files.pythonhosted.org/packages/e4/ae/18ff8b5580e27e62ebcc888082aa47694c2772782ea7011ddf58e377e98f/coverage-7.6.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96d636c77af18b5cb664ddf12dab9b15a0cfe9c0bde715da38698c8cea748bfa", size = 235902 }, + { url = "https://files.pythonhosted.org/packages/6a/52/57030a8d15ab935624d298360f0a6704885578e39f7b4f68569e59f5902d/coverage-7.6.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d75cded8a3cff93da9edc31446872d2997e327921d8eed86641efafd350e1df1", size = 233806 }, + { url = "https://files.pythonhosted.org/packages/d0/c5/4466602195ecaced298d55af1e29abceb812addabefd5bd9116a204f7bab/coverage-7.6.9-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7b15f589593110ae767ce997775d645b47e5cbbf54fd322f8ebea6277466cec", size = 234966 }, + { url = "https://files.pythonhosted.org/packages/b0/1c/55552c3009b7bf96732e36548596ade771c87f89cf1f5a8e3975b33539b5/coverage-7.6.9-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:44349150f6811b44b25574839b39ae35291f6496eb795b7366fef3bd3cf112d3", size = 234029 }, + { url = "https://files.pythonhosted.org/packages/bb/7d/da3dca6878701182ea42c51df47a47c80eaef2a76f5aa3e891dc2a8cce3f/coverage-7.6.9-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d891c136b5b310d0e702e186d70cd16d1119ea8927347045124cb286b29297e5", size = 232494 }, + { url = "https://files.pythonhosted.org/packages/28/cc/39de85ac1d5652bc34ff2bee39ae251b1fdcaae53fab4b44cab75a432bc0/coverage-7.6.9-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:db1dab894cc139f67822a92910466531de5ea6034ddfd2b11c0d4c6257168073", size = 233611 }, + { url = "https://files.pythonhosted.org/packages/d1/2b/7eb011a9378911088708f121825a71134d0c15fac96972a0ae7a8f5a4049/coverage-7.6.9-cp310-cp310-win32.whl", hash = "sha256:41ff7b0da5af71a51b53f501a3bac65fb0ec311ebed1632e58fc6107f03b9198", size = 209712 }, + { url = "https://files.pythonhosted.org/packages/5b/35/c3f40a2269b416db34ce1dedf682a7132c26f857e33596830fa4deebabf9/coverage-7.6.9-cp310-cp310-win_amd64.whl", hash = "sha256:35371f8438028fdccfaf3570b31d98e8d9eda8bb1d6ab9473f5a390969e98717", size = 210553 }, + { url = "https://files.pythonhosted.org/packages/b1/91/b3dc2f7f38b5cca1236ab6bbb03e84046dd887707b4ec1db2baa47493b3b/coverage-7.6.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:932fc826442132dde42ee52cf66d941f581c685a6313feebed358411238f60f9", size = 207133 }, + { url = "https://files.pythonhosted.org/packages/0d/2b/53fd6cb34d443429a92b3ec737f4953627e38b3bee2a67a3c03425ba8573/coverage-7.6.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:085161be5f3b30fd9b3e7b9a8c301f935c8313dcf928a07b116324abea2c1c2c", size = 207577 }, + { url = "https://files.pythonhosted.org/packages/74/f2/68edb1e6826f980a124f21ea5be0d324180bf11de6fd1defcf9604f76df0/coverage-7.6.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ccc660a77e1c2bf24ddbce969af9447a9474790160cfb23de6be4fa88e3951c7", size = 239524 }, + { url = "https://files.pythonhosted.org/packages/d3/83/8fec0ee68c2c4a5ab5f0f8527277f84ed6f2bd1310ae8a19d0c5532253ab/coverage-7.6.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c69e42c892c018cd3c8d90da61d845f50a8243062b19d228189b0224150018a9", size = 236925 }, + { url = "https://files.pythonhosted.org/packages/8b/20/8f50e7c7ad271144afbc2c1c6ec5541a8c81773f59352f8db544cad1a0ec/coverage-7.6.9-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0824a28ec542a0be22f60c6ac36d679e0e262e5353203bea81d44ee81fe9c6d4", size = 238792 }, + { url = "https://files.pythonhosted.org/packages/6f/62/4ac2e5ad9e7a5c9ec351f38947528e11541f1f00e8a0cdce56f1ba7ae301/coverage-7.6.9-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4401ae5fc52ad8d26d2a5d8a7428b0f0c72431683f8e63e42e70606374c311a1", size = 237682 }, + { url = "https://files.pythonhosted.org/packages/58/2f/9d2203f012f3b0533c73336c74134b608742be1ce475a5c72012573cfbb4/coverage-7.6.9-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:98caba4476a6c8d59ec1eb00c7dd862ba9beca34085642d46ed503cc2d440d4b", size = 236310 }, + { url = "https://files.pythonhosted.org/packages/33/6d/31f6ab0b4f0f781636075f757eb02141ea1b34466d9d1526dbc586ed7078/coverage-7.6.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ee5defd1733fd6ec08b168bd4f5387d5b322f45ca9e0e6c817ea6c4cd36313e3", size = 237096 }, + { url = "https://files.pythonhosted.org/packages/7d/fb/e14c38adebbda9ed8b5f7f8e03340ac05d68d27b24397f8d47478927a333/coverage-7.6.9-cp311-cp311-win32.whl", hash = "sha256:f2d1ec60d6d256bdf298cb86b78dd715980828f50c46701abc3b0a2b3f8a0dc0", size = 209682 }, + { url = "https://files.pythonhosted.org/packages/a4/11/a782af39b019066af83fdc0e8825faaccbe9d7b19a803ddb753114b429cc/coverage-7.6.9-cp311-cp311-win_amd64.whl", hash = "sha256:0d59fd927b1f04de57a2ba0137166d31c1a6dd9e764ad4af552912d70428c92b", size = 210542 }, + { url = "https://files.pythonhosted.org/packages/60/52/b16af8989a2daf0f80a88522bd8e8eed90b5fcbdecf02a6888f3e80f6ba7/coverage-7.6.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:99e266ae0b5d15f1ca8d278a668df6f51cc4b854513daab5cae695ed7b721cf8", size = 207325 }, + { url = "https://files.pythonhosted.org/packages/0f/79/6b7826fca8846c1216a113227b9f114ac3e6eacf168b4adcad0cb974aaca/coverage-7.6.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9901d36492009a0a9b94b20e52ebfc8453bf49bb2b27bca2c9706f8b4f5a554a", size = 207563 }, + { url = "https://files.pythonhosted.org/packages/a7/07/0bc73da0ccaf45d0d64ef86d33b7d7fdeef84b4c44bf6b85fb12c215c5a6/coverage-7.6.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abd3e72dd5b97e3af4246cdada7738ef0e608168de952b837b8dd7e90341f015", size = 240580 }, + { url = "https://files.pythonhosted.org/packages/71/8a/9761f409910961647d892454687cedbaccb99aae828f49486734a82ede6e/coverage-7.6.9-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff74026a461eb0660366fb01c650c1d00f833a086b336bdad7ab00cc952072b3", size = 237613 }, + { url = "https://files.pythonhosted.org/packages/8b/10/ee7d696a17ac94f32f2dbda1e17e730bf798ae9931aec1fc01c1944cd4de/coverage-7.6.9-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65dad5a248823a4996724a88eb51d4b31587aa7aa428562dbe459c684e5787ae", size = 
239684 }, + { url = "https://files.pythonhosted.org/packages/16/60/aa1066040d3c52fff051243c2d6ccda264da72dc6d199d047624d395b2b2/coverage-7.6.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:22be16571504c9ccea919fcedb459d5ab20d41172056206eb2994e2ff06118a4", size = 239112 }, + { url = "https://files.pythonhosted.org/packages/4e/e5/69f35344c6f932ba9028bf168d14a79fedb0dd4849b796d43c81ce75a3c9/coverage-7.6.9-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f957943bc718b87144ecaee70762bc2bc3f1a7a53c7b861103546d3a403f0a6", size = 237428 }, + { url = "https://files.pythonhosted.org/packages/32/20/adc895523c4a28f63441b8ac645abd74f9bdd499d2d175bef5b41fc7f92d/coverage-7.6.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ae1387db4aecb1f485fb70a6c0148c6cdaebb6038f1d40089b1fc84a5db556f", size = 239098 }, + { url = "https://files.pythonhosted.org/packages/a9/a6/e0e74230c9bb3549ec8ffc137cfd16ea5d56e993d6bffed2218bff6187e3/coverage-7.6.9-cp312-cp312-win32.whl", hash = "sha256:1a330812d9cc7ac2182586f6d41b4d0fadf9be9049f350e0efb275c8ee8eb692", size = 209940 }, + { url = "https://files.pythonhosted.org/packages/3e/18/cb5b88349d4aa2f41ec78d65f92ea32572b30b3f55bc2b70e87578b8f434/coverage-7.6.9-cp312-cp312-win_amd64.whl", hash = "sha256:b12c6b18269ca471eedd41c1b6a1065b2f7827508edb9a7ed5555e9a56dcfc97", size = 210726 }, + { url = "https://files.pythonhosted.org/packages/35/26/9abab6539d2191dbda2ce8c97b67d74cbfc966cc5b25abb880ffc7c459bc/coverage-7.6.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:899b8cd4781c400454f2f64f7776a5d87bbd7b3e7f7bda0cb18f857bb1334664", size = 207356 }, + { url = "https://files.pythonhosted.org/packages/44/da/d49f19402240c93453f606e660a6676a2a1fbbaa6870cc23207790aa9697/coverage-7.6.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:61f70dc68bd36810972e55bbbe83674ea073dd1dcc121040a08cdf3416c5349c", size = 207614 }, + { url = "https://files.pythonhosted.org/packages/da/e6/93bb9bf85497816082ec8da6124c25efa2052bd4c887dd3b317b91990c9e/coverage-7.6.9-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a289d23d4c46f1a82d5db4abeb40b9b5be91731ee19a379d15790e53031c014", size = 240129 }, + { url = "https://files.pythonhosted.org/packages/df/65/6a824b9406fe066835c1274a9949e06f084d3e605eb1a602727a27ec2fe3/coverage-7.6.9-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e216d8044a356fc0337c7a2a0536d6de07888d7bcda76febcb8adc50bdbbd00", size = 237276 }, + { url = "https://files.pythonhosted.org/packages/9f/79/6c7a800913a9dd23ac8c8da133ebb556771a5a3d4df36b46767b1baffd35/coverage-7.6.9-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c026eb44f744acaa2bda7493dad903aa5bf5fc4f2554293a798d5606710055d", size = 239267 }, + { url = "https://files.pythonhosted.org/packages/57/e7/834d530293fdc8a63ba8ff70033d5182022e569eceb9aec7fc716b678a39/coverage-7.6.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e77363e8425325384f9d49272c54045bbed2f478e9dd698dbc65dbc37860eb0a", size = 238887 }, + { url = "https://files.pythonhosted.org/packages/15/05/ec9d6080852984f7163c96984444e7cd98b338fd045b191064f943ee1c08/coverage-7.6.9-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:777abfab476cf83b5177b84d7486497e034eb9eaea0d746ce0c1268c71652077", size = 236970 }, + { url = "https://files.pythonhosted.org/packages/0a/d8/775937670b93156aec29f694ce37f56214ed7597e1a75b4083ee4c32121c/coverage-7.6.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:447af20e25fdbe16f26e84eb714ba21d98868705cb138252d28bc400381f6ffb", size = 238831 }, + { url = "https://files.pythonhosted.org/packages/f4/58/88551cb7fdd5ec98cb6044e8814e38583436b14040a5ece15349c44c8f7c/coverage-7.6.9-cp313-cp313-win32.whl", hash = "sha256:d872ec5aeb086cbea771c573600d47944eea2dcba8be5f3ee649bfe3cb8dc9ba", size = 210000 }, + { url = "https://files.pythonhosted.org/packages/b7/12/cfbf49b95120872785ff8d56ab1c7fe3970a65e35010c311d7dd35c5fd00/coverage-7.6.9-cp313-cp313-win_amd64.whl", hash = "sha256:fd1213c86e48dfdc5a0cc676551db467495a95a662d2396ecd58e719191446e1", size = 210753 }, + { url = "https://files.pythonhosted.org/packages/7c/68/c1cb31445599b04bde21cbbaa6d21b47c5823cdfef99eae470dfce49c35a/coverage-7.6.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ba9e7484d286cd5a43744e5f47b0b3fb457865baf07bafc6bee91896364e1419", size = 208091 }, + { url = "https://files.pythonhosted.org/packages/11/73/84b02c6b19c4a11eb2d5b5eabe926fb26c21c080e0852f5e5a4f01165f9e/coverage-7.6.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e5ea1cf0872ee455c03e5674b5bca5e3e68e159379c1af0903e89f5eba9ccc3a", size = 208369 }, + { url = "https://files.pythonhosted.org/packages/de/e0/ae5d878b72ff26df2e994a5c5b1c1f6a7507d976b23beecb1ed4c85411ef/coverage-7.6.9-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d10e07aa2b91835d6abec555ec8b2733347956991901eea6ffac295f83a30e4", size = 251089 }, + { url = "https://files.pythonhosted.org/packages/ab/9c/0aaac011aef95a93ef3cb2fba3fde30bc7e68a6635199ed469b1f5ea355a/coverage-7.6.9-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:13a9e2d3ee855db3dd6ea1ba5203316a1b1fd8eaeffc37c5b54987e61e4194ae", size = 246806 }, + { url = "https://files.pythonhosted.org/packages/f8/19/4d5d3ae66938a7dcb2f58cef3fa5386f838f469575b0bb568c8cc9e3a33d/coverage-7.6.9-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c38bf15a40ccf5619fa2fe8f26106c7e8e080d7760aeccb3722664c8656b030", size = 249164 }, + { url = "https://files.pythonhosted.org/packages/b3/0b/4ee8a7821f682af9ad440ae3c1e379da89a998883271f088102d7ca2473d/coverage-7.6.9-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d5275455b3e4627c8e7154feaf7ee0743c2e7af82f6e3b561967b1cca755a0be", size = 248642 }, + { url = "https://files.pythonhosted.org/packages/8a/12/36ff1d52be18a16b4700f561852e7afd8df56363a5edcfb04cf26a0e19e0/coverage-7.6.9-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8f8770dfc6e2c6a2d4569f411015c8d751c980d17a14b0530da2d7f27ffdd88e", size = 246516 }, + { url = "https://files.pythonhosted.org/packages/43/d0/8e258f6c3a527c1655602f4f576215e055ac704de2d101710a71a2affac2/coverage-7.6.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8d2dfa71665a29b153a9681edb1c8d9c1ea50dfc2375fb4dac99ea7e21a0bcd9", size = 247783 }, + { url = "https://files.pythonhosted.org/packages/a9/0d/1e4a48d289429d38aae3babdfcadbf35ca36bdcf3efc8f09b550a845bdb5/coverage-7.6.9-cp313-cp313t-win32.whl", hash = "sha256:5e6b86b5847a016d0fbd31ffe1001b63355ed309651851295315031ea7eb5a9b", size = 210646 }, + { url = "https://files.pythonhosted.org/packages/26/74/b0729f196f328ac55e42b1e22ec2f16d8bcafe4b8158a26ec9f1cdd1d93e/coverage-7.6.9-cp313-cp313t-win_amd64.whl", hash = "sha256:97ddc94d46088304772d21b060041c97fc16bdda13c6c7f9d8fcd8d5ae0d8611", size = 211815 }, + { url = 
"https://files.pythonhosted.org/packages/93/fe/8873d88999b8e4b0d8150df554d72d6943b3938bba328fcb5422572cfd84/coverage-7.6.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:adb697c0bd35100dc690de83154627fbab1f4f3c0386df266dded865fc50a902", size = 207022 }, + { url = "https://files.pythonhosted.org/packages/23/c1/5dc48dfe3714a6ae9d2cd128a9df39570e46d3831f19a9be84011e767209/coverage-7.6.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:be57b6d56e49c2739cdf776839a92330e933dd5e5d929966fbbd380c77f060be", size = 207458 }, + { url = "https://files.pythonhosted.org/packages/e8/08/5644e101c823f0b18aa5c408037c2438fad05e6eb9f9e6581459aa0bfb92/coverage-7.6.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1592791f8204ae9166de22ba7e6705fa4ebd02936c09436a1bb85aabca3e599", size = 235494 }, + { url = "https://files.pythonhosted.org/packages/b2/02/995c019c0a2d70188d4d8184a0376eb28fcfb759981bb0e9961b463344fd/coverage-7.6.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e12ae8cc979cf83d258acb5e1f1cf2f3f83524d1564a49d20b8bec14b637f08", size = 233416 }, + { url = "https://files.pythonhosted.org/packages/eb/d3/48ce8c9a89c7013f89ec7e01402e7a136a2e849c8f8664ea7f17b225295c/coverage-7.6.9-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb5555cff66c4d3d6213a296b360f9e1a8e323e74e0426b6c10ed7f4d021e464", size = 234546 }, + { url = "https://files.pythonhosted.org/packages/20/d2/11ac147bd76cc5d8a6254c9a9b6beaab51c3532ba0abdfaf669bf48d2c67/coverage-7.6.9-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b9389a429e0e5142e69d5bf4a435dd688c14478a19bb901735cdf75e57b13845", size = 233655 }, + { url = "https://files.pythonhosted.org/packages/18/cb/6e35c5766041737f14c31ad02b5404ae6ec05d4e17ccffd69f6d99431e0a/coverage-7.6.9-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:592ac539812e9b46046620341498caf09ca21023c41c893e1eb9dbda00a70cbf", size = 232145 }, + { url = "https://files.pythonhosted.org/packages/ff/62/5de767f225e09ce959b71d1f3efc9e86e1c3de1fded85886bf705248905d/coverage-7.6.9-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a27801adef24cc30871da98a105f77995e13a25a505a0161911f6aafbd66e678", size = 233131 }, + { url = "https://files.pythonhosted.org/packages/65/72/bacb4b4c9da226e2343aa4bfebcb2bc008eda2f28aa913474aef27bfc397/coverage-7.6.9-cp39-cp39-win32.whl", hash = "sha256:8e3c3e38930cfb729cb8137d7f055e5a473ddaf1217966aa6238c88bd9fd50e6", size = 209735 }, + { url = "https://files.pythonhosted.org/packages/f4/4d/096d19dbd8998c9aaf8798078dd884f65652eb891fe7b0e657b5ac07411d/coverage-7.6.9-cp39-cp39-win_amd64.whl", hash = "sha256:e28bf44afa2b187cc9f41749138a64435bf340adfcacb5b2290c070ce99839d4", size = 210517 }, + { url = "https://files.pythonhosted.org/packages/15/0e/4ac9035ee2ee08d2b703fdad2d84283ec0bad3b46eb4ad6affb150174cb6/coverage-7.6.9-pp39.pp310-none-any.whl", hash = "sha256:f3ca78518bc6bc92828cd11867b121891d75cae4ea9e908d72030609b996db1b", size = 199270 }, +] + +[package.optional-dependencies] +toml = [ + { name = "tomli", marker = "python_full_version <= '3.11'" }, +] + +[[package]] +name = "croniter" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil" }, + { name = "pytz" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ad/2f/44d1ae153a0e27be56be43465e5cb39b9650c781e001e7864389deb25090/croniter-6.0.0.tar.gz", hash = 
"sha256:37c504b313956114a983ece2c2b07790b1f1094fe9d81cc94739214748255577", size = 64481 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/4b/290b4c3efd6417a8b0c284896de19b1d5855e6dbdb97d2a35e68fa42de85/croniter-6.0.0-py2.py3-none-any.whl", hash = "sha256:2f878c3856f17896979b2a4379ba1f09c83e374931ea15cc835c5dd2eee9b368", size = 25468 }, +] + +[[package]] +name = "curl-cffi" +version = "0.7.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "cffi" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d8/b6/81ea20376e1440a2bcb0f0574c158bccb0948621e437f5634b6fc210d2ba/curl_cffi-0.7.4.tar.gz", hash = "sha256:37a2c8ec77b9914b0c14c74f604991751948d9d5def58fcddcbe73e3b62111c1", size = 137276 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/c7/f2133c98a9956baa720dc775ba43b2cf7bf22b0feb0f921aab9bbeb2b58c/curl_cffi-0.7.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:417f5264fa746d2680ebb20fbfbcfe5d77fa11a735548d9db6734e839a238e22", size = 5106509 }, + { url = "https://files.pythonhosted.org/packages/29/e9/141ff25c5e35f4afc998cf60134df94e0a9157427da69d6ee1d2a045c554/curl_cffi-0.7.4-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:fb76b654fcf9f3e0400cf13be949e4fc525aeb0f9e2e90e61ae48d5bd8557d25", size = 2564082 }, + { url = "https://files.pythonhosted.org/packages/66/c4/442094831e7017347e866809bfba29f116864a046478e013848f272ba7b7/curl_cffi-0.7.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb9db59b164f2b6be65be62add5896a6fe125c52572aca3046caffbd7eb38f46", size = 5716431 }, + { url = "https://files.pythonhosted.org/packages/99/95/6ac63d489167f712bdc14a2cfbe5df252a2e2e95c5b376ea37bda5646fa8/curl_cffi-0.7.4-cp38-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4593b120c8101b327e4e2d2c278652c5ef58c42dd39dc4586c2789e42a8bc8b1", size = 5521870 }, + { url = "https://files.pythonhosted.org/packages/06/83/2de6b27ba8b3ac394252cadb8783f5c57219068489456d8bb58a180d4aa6/curl_cffi-0.7.4-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4b5685fab3984aae559e6590a6434a7e34f5d615c562c29c1554a90fffbf0bd", size = 6076887 }, + { url = "https://files.pythonhosted.org/packages/86/1d/29b2cf2b7c82c61aeff0076b02531b49420beb5fa89c5a0529f5c06480fe/curl_cffi-0.7.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:3f8c19b5ca979e806fcf4de24f606eff745c85b43e9e88956d1db3c07516cc4b", size = 6221911 }, + { url = "https://files.pythonhosted.org/packages/1b/7e/a9ba49576373e26169e163878cbb8d4e02cfabf3694c686e22243c12f0dd/curl_cffi-0.7.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9957464013b1f76b0e9259ab846fa60faef7ff08e96e7a1764dd63c83005b836", size = 6004845 }, + { url = "https://files.pythonhosted.org/packages/c8/d3/79175cf310f0b1c7149e5a2f25cba997aec83a2bcedc85c744a6456e33af/curl_cffi-0.7.4-cp38-abi3-win32.whl", hash = "sha256:8e9019cf6996bf508e4a51751d7217f22d5902405878679a3ac4757159251741", size = 4188474 }, + { url = "https://files.pythonhosted.org/packages/1c/86/6054fcc3fd28ec024ad36a667fa49a05b0c9caf26724186918b7c0ef8217/curl_cffi-0.7.4-cp38-abi3-win_amd64.whl", hash = "sha256:31a80d5ab1bc0f9d4bc0f98d91dc1a3ed4aa08566f21b76ecfde23ece08e0fa9", size = 3993713 }, +] + +[[package]] +name = "debugpy" +version = "1.8.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2e/3c/ef563dec9f7ea4c29bd26ed3c5eba5038f8d9d46ac41aacfc99dc77f0885/debugpy-1.8.10.tar.gz", 
hash = "sha256:ee4ed903cbeb14ee1839549f953af519ffa512598ec987b2051f9c868e2249a8", size = 1644028 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/cc/5f34d94234900a0165e0b86bc503ca97c2fdfb4a208e082fe6a1c491d6ea/debugpy-1.8.10-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:97aa00af95983887806e06f37e144909d35215d66db74f8b0e9799b4eef40cfd", size = 2080716 }, +] + +[[package]] +name = "decorator" +version = "5.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", size = 35016 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, +] + +[[package]] +name = "deprecated" +version = "1.2.15" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2e/a3/53e7d78a6850ffdd394d7048a31a6f14e44900adedf190f9a165f6b69439/deprecated-1.2.15.tar.gz", hash = "sha256:683e561a90de76239796e6b6feac66b99030d2dd3fcf61ef996330f14bbb9b0d", size = 2977612 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/8f/c7f227eb42cfeaddce3eb0c96c60cbca37797fa7b34f8e1aeadf6c5c0983/Deprecated-1.2.15-py2.py3-none-any.whl", hash = "sha256:353bc4a8ac4bfc96800ddab349d89c25dec1079f65fd53acdcc1e0b975b21320", size = 9941 }, +] + +[[package]] +name = "distlib" +version = "0.3.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973 }, +] + +[[package]] +name = "distro" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277 }, +] + +[[package]] +name = "dnspython" +version = "2.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632 }, +] + +[[package]] +name = 
"email-validator" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dnspython" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/ce/13508a1ec3f8bb981ae4ca79ea40384becc868bfae97fd1c942bb3a001b1/email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7", size = 48967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/ee/bf0adb559ad3c786f12bcbc9296b3f5675f529199bef03e2df281fa1fadb/email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631", size = 33521 }, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453 }, +] + +[[package]] +name = "execnet" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612 }, +] + +[[package]] +name = "executing" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/e3/7d45f492c2c4a0e8e0fad57d081a7c8a0286cdd86372b070cca1ec0caa1e/executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab", size = 977485 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/fd/afcd0496feca3276f509df3dbd5dae726fcc756f1a08d9e25abe1733f962/executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf", size = 25805 }, +] + +[[package]] +name = "faker" +version = "33.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1e/9f/012fd6049fc86029951cba5112d32c7ba076c4290d7e8873b0413655b808/faker-33.1.0.tar.gz", hash = "sha256:1c925fc0e86a51fc46648b504078c88d0cd48da1da2595c4e712841cab43a1e4", size = 1850515 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/9c/2bba87fbfa42503ddd9653e3546ffc4ed18b14ecab7a07ee86491b886486/Faker-33.1.0-py3-none-any.whl", hash = "sha256:d30c5f0e2796b8970de68978365247657486eb0311c5abe88d0b895b68dff05d", size = 1889127 }, +] + +[[package]] +name = "fastapi" +version = "0.115.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/a2/f5/3f921e59f189e513adb9aef826e2841672d50a399fead4e69afdeb808ff4/fastapi-0.115.7.tar.gz", hash = "sha256:0f106da6c01d88a6786b3248fb4d7a940d071f6f488488898ad5d354b25ed015", size = 293177 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/7f/bbd4dcf0faf61bc68a01939256e2ed02d681e9334c1a3cef24d5f77aba9f/fastapi-0.115.7-py3-none-any.whl", hash = "sha256:eb6a8c8bf7f26009e8147111ff15b5177a0e19bb4a45bc3486ab14804539d21e", size = 94777 }, +] + +[[package]] +name = "filelock" +version = "3.16.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/db/3ef5bb276dae18d6ec2124224403d1d67bccdbefc17af4cc8f553e341ab1/filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435", size = 18037 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/f8/feced7779d755758a52d1f6635d990b8d98dc0a29fa568bbe0625f18fdf3/filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0", size = 16163 }, +] + +[[package]] +name = "flask" +version = "3.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "blinker" }, + { name = "click" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "itsdangerous" }, + { name = "jinja2" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/89/50/dff6380f1c7f84135484e176e0cac8690af72fa90e932ad2a0a60e28c69b/flask-3.1.0.tar.gz", hash = "sha256:5f873c5184c897c8d9d1b05df1e3d01b14910ce69607a117bd3277098a5836ac", size = 680824 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/47/93213ee66ef8fae3b93b3e29206f6b251e65c97bd91d8e1c5596ef15af0a/flask-3.1.0-py3-none-any.whl", hash = "sha256:d667207822eb83f1c4b50949b1623c8fc8d51f2341d65f72e1a1815397551136", size = 102979 }, +] + +[[package]] +name = "flask-cors" +version = "5.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "flask" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4f/d0/d9e52b154e603b0faccc0b7c2ad36a764d8755ef4036acbf1582a67fb86b/flask_cors-5.0.0.tar.gz", hash = "sha256:5aadb4b950c4e93745034594d9f3ea6591f734bb3662e16e255ffbf5e89c88ef", size = 30954 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/07/1afa0514c876282bebc1c9aee83c6bb98fe6415cf57b88d9b06e7e29bf9c/Flask_Cors-5.0.0-py2.py3-none-any.whl", hash = "sha256:b9e307d082a9261c100d8fb0ba909eec6a228ed1b60a8315fd85f783d61910bc", size = 14463 }, +] + +[[package]] +name = "flask-login" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "flask" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/6e/2f4e13e373bb49e68c02c51ceadd22d172715a06716f9299d9df01b6ddb2/Flask-Login-0.6.3.tar.gz", hash = "sha256:5e23d14a607ef12806c699590b89d0f0e0d67baeec599d75947bf9c147330333", size = 48834 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/59/f5/67e9cc5c2036f58115f9fe0f00d203cf6780c3ff8ae0e705e7a9d9e8ff9e/Flask_Login-0.6.3-py3-none-any.whl", hash = "sha256:849b25b82a436bf830a054e74214074af59097171562ab10bfa999e6b78aae5d", size = 17303 }, +] + +[[package]] +name = "frozenlist" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/8f/ed/0f4cec13a93c02c47ec32d81d11c0c1efbadf4a471e3f3ce7cad366cbbd3/frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817", size = 39930 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/79/29d44c4af36b2b240725dce566b20f63f9b36ef267aaaa64ee7466f4f2f8/frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a", size = 94451 }, + { url = "https://files.pythonhosted.org/packages/47/47/0c999aeace6ead8a44441b4f4173e2261b18219e4ad1fe9a479871ca02fc/frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb", size = 54301 }, + { url = "https://files.pythonhosted.org/packages/8d/60/107a38c1e54176d12e06e9d4b5d755b677d71d1219217cee063911b1384f/frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec", size = 52213 }, + { url = "https://files.pythonhosted.org/packages/17/62/594a6829ac5679c25755362a9dc93486a8a45241394564309641425d3ff6/frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5", size = 240946 }, + { url = "https://files.pythonhosted.org/packages/7e/75/6c8419d8f92c80dd0ee3f63bdde2702ce6398b0ac8410ff459f9b6f2f9cb/frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76", size = 264608 }, + { url = "https://files.pythonhosted.org/packages/88/3e/82a6f0b84bc6fb7e0be240e52863c6d4ab6098cd62e4f5b972cd31e002e8/frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17", size = 261361 }, + { url = "https://files.pythonhosted.org/packages/fd/85/14e5f9ccac1b64ff2f10c927b3ffdf88772aea875882406f9ba0cec8ad84/frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba", size = 231649 }, + { url = "https://files.pythonhosted.org/packages/ee/59/928322800306f6529d1852323014ee9008551e9bb027cc38d276cbc0b0e7/frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d", size = 241853 }, + { url = "https://files.pythonhosted.org/packages/7d/bd/e01fa4f146a6f6c18c5d34cab8abdc4013774a26c4ff851128cd1bd3008e/frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2", size = 243652 }, + { url = "https://files.pythonhosted.org/packages/a5/bd/e4771fd18a8ec6757033f0fa903e447aecc3fbba54e3630397b61596acf0/frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f", size = 241734 }, + { url = "https://files.pythonhosted.org/packages/21/13/c83821fa5544af4f60c5d3a65d054af3213c26b14d3f5f48e43e5fb48556/frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c", size = 260959 }, + { url = 
"https://files.pythonhosted.org/packages/71/f3/1f91c9a9bf7ed0e8edcf52698d23f3c211d8d00291a53c9f115ceb977ab1/frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab", size = 262706 }, + { url = "https://files.pythonhosted.org/packages/4c/22/4a256fdf5d9bcb3ae32622c796ee5ff9451b3a13a68cfe3f68e2c95588ce/frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5", size = 250401 }, + { url = "https://files.pythonhosted.org/packages/af/89/c48ebe1f7991bd2be6d5f4ed202d94960c01b3017a03d6954dd5fa9ea1e8/frozenlist-1.5.0-cp310-cp310-win32.whl", hash = "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb", size = 45498 }, + { url = "https://files.pythonhosted.org/packages/28/2f/cc27d5f43e023d21fe5c19538e08894db3d7e081cbf582ad5ed366c24446/frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4", size = 51622 }, + { url = "https://files.pythonhosted.org/packages/79/43/0bed28bf5eb1c9e4301003b74453b8e7aa85fb293b31dde352aac528dafc/frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30", size = 94987 }, + { url = "https://files.pythonhosted.org/packages/bb/bf/b74e38f09a246e8abbe1e90eb65787ed745ccab6eaa58b9c9308e052323d/frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5", size = 54584 }, + { url = "https://files.pythonhosted.org/packages/2c/31/ab01375682f14f7613a1ade30149f684c84f9b8823a4391ed950c8285656/frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778", size = 52499 }, + { url = "https://files.pythonhosted.org/packages/98/a8/d0ac0b9276e1404f58fec3ab6e90a4f76b778a49373ccaf6a563f100dfbc/frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a", size = 276357 }, + { url = "https://files.pythonhosted.org/packages/ad/c9/c7761084fa822f07dac38ac29f841d4587570dd211e2262544aa0b791d21/frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869", size = 287516 }, + { url = "https://files.pythonhosted.org/packages/a1/ff/cd7479e703c39df7bdab431798cef89dc75010d8aa0ca2514c5b9321db27/frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d", size = 283131 }, + { url = "https://files.pythonhosted.org/packages/59/a0/370941beb47d237eca4fbf27e4e91389fd68699e6f4b0ebcc95da463835b/frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45", size = 261320 }, + { url = "https://files.pythonhosted.org/packages/b8/5f/c10123e8d64867bc9b4f2f510a32042a306ff5fcd7e2e09e5ae5100ee333/frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d", size = 274877 }, + { url = 
"https://files.pythonhosted.org/packages/fa/79/38c505601ae29d4348f21706c5d89755ceded02a745016ba2f58bd5f1ea6/frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3", size = 269592 }, + { url = "https://files.pythonhosted.org/packages/19/e2/39f3a53191b8204ba9f0bb574b926b73dd2efba2a2b9d2d730517e8f7622/frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a", size = 265934 }, + { url = "https://files.pythonhosted.org/packages/d5/c9/3075eb7f7f3a91f1a6b00284af4de0a65a9ae47084930916f5528144c9dd/frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9", size = 283859 }, + { url = "https://files.pythonhosted.org/packages/05/f5/549f44d314c29408b962fa2b0e69a1a67c59379fb143b92a0a065ffd1f0f/frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2", size = 287560 }, + { url = "https://files.pythonhosted.org/packages/9d/f8/cb09b3c24a3eac02c4c07a9558e11e9e244fb02bf62c85ac2106d1eb0c0b/frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf", size = 277150 }, + { url = "https://files.pythonhosted.org/packages/37/48/38c2db3f54d1501e692d6fe058f45b6ad1b358d82cd19436efab80cfc965/frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942", size = 45244 }, + { url = "https://files.pythonhosted.org/packages/ca/8c/2ddffeb8b60a4bce3b196c32fcc30d8830d4615e7b492ec2071da801b8ad/frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d", size = 51634 }, + { url = "https://files.pythonhosted.org/packages/79/73/fa6d1a96ab7fd6e6d1c3500700963eab46813847f01ef0ccbaa726181dd5/frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21", size = 94026 }, + { url = "https://files.pythonhosted.org/packages/ab/04/ea8bf62c8868b8eada363f20ff1b647cf2e93377a7b284d36062d21d81d1/frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d", size = 54150 }, + { url = "https://files.pythonhosted.org/packages/d0/9a/8e479b482a6f2070b26bda572c5e6889bb3ba48977e81beea35b5ae13ece/frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e", size = 51927 }, + { url = "https://files.pythonhosted.org/packages/e3/12/2aad87deb08a4e7ccfb33600871bbe8f0e08cb6d8224371387f3303654d7/frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a", size = 282647 }, + { url = "https://files.pythonhosted.org/packages/77/f2/07f06b05d8a427ea0060a9cef6e63405ea9e0d761846b95ef3fb3be57111/frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a", size = 289052 }, + { url = "https://files.pythonhosted.org/packages/bd/9f/8bf45a2f1cd4aa401acd271b077989c9267ae8463e7c8b1eb0d3f561b65e/frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee", size = 291719 }, + { url = "https://files.pythonhosted.org/packages/41/d1/1f20fd05a6c42d3868709b7604c9f15538a29e4f734c694c6bcfc3d3b935/frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6", size = 267433 }, + { url = "https://files.pythonhosted.org/packages/af/f2/64b73a9bb86f5a89fb55450e97cd5c1f84a862d4ff90d9fd1a73ab0f64a5/frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e", size = 283591 }, + { url = "https://files.pythonhosted.org/packages/29/e2/ffbb1fae55a791fd6c2938dd9ea779509c977435ba3940b9f2e8dc9d5316/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9", size = 273249 }, + { url = "https://files.pythonhosted.org/packages/2e/6e/008136a30798bb63618a114b9321b5971172a5abddff44a100c7edc5ad4f/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039", size = 271075 }, + { url = "https://files.pythonhosted.org/packages/ae/f0/4e71e54a026b06724cec9b6c54f0b13a4e9e298cc8db0f82ec70e151f5ce/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784", size = 285398 }, + { url = "https://files.pythonhosted.org/packages/4d/36/70ec246851478b1c0b59f11ef8ade9c482ff447c1363c2bd5fad45098b12/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631", size = 294445 }, + { url = "https://files.pythonhosted.org/packages/37/e0/47f87544055b3349b633a03c4d94b405956cf2437f4ab46d0928b74b7526/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f", size = 280569 }, + { url = "https://files.pythonhosted.org/packages/f9/7c/490133c160fb6b84ed374c266f42800e33b50c3bbab1652764e6e1fc498a/frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8", size = 44721 }, + { url = "https://files.pythonhosted.org/packages/b1/56/4e45136ffc6bdbfa68c29ca56ef53783ef4c2fd395f7cbf99a2624aa9aaa/frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f", size = 51329 }, + { url = "https://files.pythonhosted.org/packages/da/3b/915f0bca8a7ea04483622e84a9bd90033bab54bdf485479556c74fd5eaf5/frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953", size = 91538 }, + { url = "https://files.pythonhosted.org/packages/c7/d1/a7c98aad7e44afe5306a2b068434a5830f1470675f0e715abb86eb15f15b/frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0", size = 52849 }, + { url = "https://files.pythonhosted.org/packages/3a/c8/76f23bf9ab15d5f760eb48701909645f686f9c64fbb8982674c241fbef14/frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2", size = 50583 }, + { url = 
"https://files.pythonhosted.org/packages/1f/22/462a3dd093d11df623179d7754a3b3269de3b42de2808cddef50ee0f4f48/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f", size = 265636 }, + { url = "https://files.pythonhosted.org/packages/80/cf/e075e407fc2ae7328155a1cd7e22f932773c8073c1fc78016607d19cc3e5/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608", size = 270214 }, + { url = "https://files.pythonhosted.org/packages/a1/58/0642d061d5de779f39c50cbb00df49682832923f3d2ebfb0fedf02d05f7f/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b", size = 273905 }, + { url = "https://files.pythonhosted.org/packages/ab/66/3fe0f5f8f2add5b4ab7aa4e199f767fd3b55da26e3ca4ce2cc36698e50c4/frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840", size = 250542 }, + { url = "https://files.pythonhosted.org/packages/f6/b8/260791bde9198c87a465224e0e2bb62c4e716f5d198fc3a1dacc4895dbd1/frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439", size = 267026 }, + { url = "https://files.pythonhosted.org/packages/2e/a4/3d24f88c527f08f8d44ade24eaee83b2627793fa62fa07cbb7ff7a2f7d42/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de", size = 257690 }, + { url = "https://files.pythonhosted.org/packages/de/9a/d311d660420b2beeff3459b6626f2ab4fb236d07afbdac034a4371fe696e/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641", size = 253893 }, + { url = "https://files.pythonhosted.org/packages/c6/23/e491aadc25b56eabd0f18c53bb19f3cdc6de30b2129ee0bc39cd387cd560/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e", size = 267006 }, + { url = "https://files.pythonhosted.org/packages/08/c4/ab918ce636a35fb974d13d666dcbe03969592aeca6c3ab3835acff01f79c/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9", size = 276157 }, + { url = "https://files.pythonhosted.org/packages/c0/29/3b7a0bbbbe5a34833ba26f686aabfe982924adbdcafdc294a7a129c31688/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03", size = 264642 }, + { url = "https://files.pythonhosted.org/packages/ab/42/0595b3dbffc2e82d7fe658c12d5a5bafcd7516c6bf2d1d1feb5387caa9c1/frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c", size = 44914 }, + { url = "https://files.pythonhosted.org/packages/17/c4/b7db1206a3fea44bf3b838ca61deb6f74424a8a5db1dd53ecb21da669be6/frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28", size = 51167 }, + { url = 
"https://files.pythonhosted.org/packages/da/4d/d94ff0fb0f5313902c132817c62d19cdc5bdcd0c195d392006ef4b779fc6/frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972", size = 95319 }, + { url = "https://files.pythonhosted.org/packages/8c/1b/d90e554ca2b483d31cb2296e393f72c25bdc38d64526579e95576bfda587/frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336", size = 54749 }, + { url = "https://files.pythonhosted.org/packages/f8/66/7fdecc9ef49f8db2aa4d9da916e4ecf357d867d87aea292efc11e1b2e932/frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f", size = 52718 }, + { url = "https://files.pythonhosted.org/packages/08/04/e2fddc92135276e07addbc1cf413acffa0c2d848b3e54cacf684e146df49/frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f", size = 241756 }, + { url = "https://files.pythonhosted.org/packages/c6/52/be5ff200815d8a341aee5b16b6b707355e0ca3652953852238eb92b120c2/frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6", size = 267718 }, + { url = "https://files.pythonhosted.org/packages/88/be/4bd93a58be57a3722fc544c36debdf9dcc6758f761092e894d78f18b8f20/frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411", size = 263494 }, + { url = "https://files.pythonhosted.org/packages/32/ba/58348b90193caa096ce9e9befea6ae67f38dabfd3aacb47e46137a6250a8/frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08", size = 232838 }, + { url = "https://files.pythonhosted.org/packages/f6/33/9f152105227630246135188901373c4f322cc026565ca6215b063f4c82f4/frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2", size = 242912 }, + { url = "https://files.pythonhosted.org/packages/a0/10/3db38fb3ccbafadd80a1b0d6800c987b0e3fe3ef2d117c6ced0246eea17a/frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d", size = 244763 }, + { url = "https://files.pythonhosted.org/packages/e2/cd/1df468fdce2f66a4608dffe44c40cdc35eeaa67ef7fd1d813f99a9a37842/frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b", size = 242841 }, + { url = "https://files.pythonhosted.org/packages/ee/5f/16097a5ca0bb6b6779c02cc9379c72fe98d56115d4c54d059fb233168fb6/frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b", size = 263407 }, + { url = "https://files.pythonhosted.org/packages/0f/f7/58cd220ee1c2248ee65a32f5b4b93689e3fe1764d85537eee9fc392543bc/frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0", size = 265083 }, + { url = 
"https://files.pythonhosted.org/packages/62/b8/49768980caabf81ac4a2d156008f7cbd0107e6b36d08a313bb31035d9201/frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c", size = 251564 }, + { url = "https://files.pythonhosted.org/packages/cb/83/619327da3b86ef957ee7a0cbf3c166a09ed1e87a3f7f1ff487d7d0284683/frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3", size = 45691 }, + { url = "https://files.pythonhosted.org/packages/8b/28/407bc34a745151ed2322c690b6e7d83d7101472e81ed76e1ebdac0b70a78/frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0", size = 51767 }, + { url = "https://files.pythonhosted.org/packages/c6/c8/a5be5b7550c10858fcf9b0ea054baccab474da77d37f1e828ce043a3a5d4/frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3", size = 11901 }, +] + +[[package]] +name = "gevent" +version = "24.11.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, + { name = "greenlet", marker = "platform_python_implementation == 'CPython'" }, + { name = "zope-event" }, + { name = "zope-interface" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/75/a53f1cb732420f5e5d79b2563fc3504d22115e7ecfe7966e5cf9b3582ae7/gevent-24.11.1.tar.gz", hash = "sha256:8bd1419114e9e4a3ed33a5bad766afff9a3cf765cb440a582a1b3a9bc80c1aca", size = 5976624 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/7d/27ed3603f4bf96b36fb2746e923e033bc600c6684de8fe164d64eb8c4dcc/gevent-24.11.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:92fe5dfee4e671c74ffaa431fd7ffd0ebb4b339363d24d0d944de532409b935e", size = 2998254 }, + { url = "https://files.pythonhosted.org/packages/a8/03/a8f6c70f50a644a79e75d9f15e6f1813115d34c3c55528e4669a9316534d/gevent-24.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7bfcfe08d038e1fa6de458891bca65c1ada6d145474274285822896a858c870", size = 4817711 }, + { url = "https://files.pythonhosted.org/packages/f0/05/4f9bc565520a18f107464d40ac15a91708431362c797e77fbb5e7ff26e64/gevent-24.11.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7398c629d43b1b6fd785db8ebd46c0a353880a6fab03d1cf9b6788e7240ee32e", size = 4934468 }, + { url = "https://files.pythonhosted.org/packages/4a/7d/f15561eeebecbebc0296dd7bebea10ac4af0065d98249e3d8c4998e68edd/gevent-24.11.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d7886b63ebfb865178ab28784accd32f287d5349b3ed71094c86e4d3ca738af5", size = 5014067 }, + { url = "https://files.pythonhosted.org/packages/67/c1/07eff117a600fc3c9bd4e3a1ff3b726f146ee23ce55981156547ccae0c85/gevent-24.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9ca80711e6553880974898d99357fb649e062f9058418a92120ca06c18c3c59", size = 6625531 }, + { url = "https://files.pythonhosted.org/packages/4b/72/43f76ab6b18e5e56b1003c844829971f3044af08b39b3c9040559be00a2b/gevent-24.11.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e24181d172f50097ac8fc272c8c5b030149b630df02d1c639ee9f878a470ba2b", size = 5249671 }, + { url = 
"https://files.pythonhosted.org/packages/6b/fc/1a847ada0757cc7690f83959227514b1a52ff6de504619501c81805fa1da/gevent-24.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1d4fadc319b13ef0a3c44d2792f7918cf1bca27cacd4d41431c22e6b46668026", size = 6773903 }, + { url = "https://files.pythonhosted.org/packages/3b/9d/254dcf455f6659ab7e36bec0bc11f51b18ea25eac2de69185e858ccf3c30/gevent-24.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:3d882faa24f347f761f934786dde6c73aa6c9187ee710189f12dcc3a63ed4a50", size = 1560443 }, + { url = "https://files.pythonhosted.org/packages/ea/fd/86a170f77ef51a15297573c50dbec4cc67ddc98b677cc2d03cc7f2927f4c/gevent-24.11.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:351d1c0e4ef2b618ace74c91b9b28b3eaa0dd45141878a964e03c7873af09f62", size = 2951424 }, + { url = "https://files.pythonhosted.org/packages/7f/0a/987268c9d446f61883bc627c77c5ed4a97869c0f541f76661a62b2c411f6/gevent-24.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5efe72e99b7243e222ba0c2c2ce9618d7d36644c166d63373af239da1036bab", size = 4878504 }, + { url = "https://files.pythonhosted.org/packages/dc/d4/2f77ddd837c0e21b4a4460bcb79318b6754d95ef138b7a29f3221c7e9993/gevent-24.11.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d3b249e4e1f40c598ab8393fc01ae6a3b4d51fc1adae56d9ba5b315f6b2d758", size = 5007668 }, + { url = "https://files.pythonhosted.org/packages/80/a0/829e0399a1f9b84c344b72d2be9aa60fe2a64e993cac221edcc14f069679/gevent-24.11.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81d918e952954675f93fb39001da02113ec4d5f4921bf5a0cc29719af6824e5d", size = 5067055 }, + { url = "https://files.pythonhosted.org/packages/1e/67/0e693f9ddb7909c2414f8fcfc2409aa4157884c147bc83dab979e9cf717c/gevent-24.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9c935b83d40c748b6421625465b7308d87c7b3717275acd587eef2bd1c39546", size = 6761883 }, + { url = "https://files.pythonhosted.org/packages/fa/b6/b69883fc069d7148dd23c5dda20826044e54e7197f3c8e72b8cc2cd4035a/gevent-24.11.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff96c5739834c9a594db0e12bf59cb3fa0e5102fc7b893972118a3166733d61c", size = 5440802 }, + { url = "https://files.pythonhosted.org/packages/32/4e/b00094d995ff01fd88b3cf6b9d1d794f935c31c645c431e65cd82d808c9c/gevent-24.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d6c0a065e31ef04658f799215dddae8752d636de2bed61365c358f9c91e7af61", size = 6866992 }, + { url = "https://files.pythonhosted.org/packages/37/ed/58dbe9fb09d36f6477ff8db0459ebd3be9a77dc05ae5d96dc91ad657610d/gevent-24.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:97e2f3999a5c0656f42065d02939d64fffaf55861f7d62b0107a08f52c984897", size = 1543736 }, + { url = "https://files.pythonhosted.org/packages/dd/32/301676f67ffa996ff1c4175092fb0c48c83271cc95e5c67650b87156b6cf/gevent-24.11.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:a3d75fa387b69c751a3d7c5c3ce7092a171555126e136c1d21ecd8b50c7a6e46", size = 2956467 }, + { url = "https://files.pythonhosted.org/packages/6b/84/aef1a598123cef2375b6e2bf9d17606b961040f8a10e3dcc3c3dd2a99f05/gevent-24.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:beede1d1cff0c6fafae3ab58a0c470d7526196ef4cd6cc18e7769f207f2ea4eb", size = 5136486 }, + { url = "https://files.pythonhosted.org/packages/92/7b/04f61187ee1df7a913b3fca63b0a1206c29141ab4d2a57e7645237b6feb5/gevent-24.11.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:85329d556aaedced90a993226d7d1186a539c843100d393f2349b28c55131c85", size = 5299718 }, + { url = "https://files.pythonhosted.org/packages/36/2a/ebd12183ac25eece91d084be2111e582b061f4d15ead32239b43ed47e9ba/gevent-24.11.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:816b3883fa6842c1cf9d2786722014a0fd31b6312cca1f749890b9803000bad6", size = 5400118 }, + { url = "https://files.pythonhosted.org/packages/ec/c9/f006c0cd59f0720fbb62ee11da0ad4c4c0fd12799afd957dd491137e80d9/gevent-24.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b24d800328c39456534e3bc3e1684a28747729082684634789c2f5a8febe7671", size = 6775163 }, + { url = "https://files.pythonhosted.org/packages/49/f1/5edf00b674b10d67e3b967c2d46b8a124c2bc8cfd59d4722704392206444/gevent-24.11.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a5f1701ce0f7832f333dd2faf624484cbac99e60656bfbb72504decd42970f0f", size = 5479886 }, + { url = "https://files.pythonhosted.org/packages/22/11/c48e62744a32c0d48984268ae62b99edb81eaf0e03b42de52e2f09855509/gevent-24.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:d740206e69dfdfdcd34510c20adcb9777ce2cc18973b3441ab9767cd8948ca8a", size = 6891452 }, + { url = "https://files.pythonhosted.org/packages/11/b2/5d20664ef6a077bec9f27f7a7ee761edc64946d0b1e293726a3d074a9a18/gevent-24.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:68bee86b6e1c041a187347ef84cf03a792f0b6c7238378bf6ba4118af11feaae", size = 1541631 }, + { url = "https://files.pythonhosted.org/packages/a4/8f/4958e70caeaf469c576ecc5b5f2cb49ddaad74336fa82363d89cddb3c284/gevent-24.11.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:d618e118fdb7af1d6c1a96597a5cd6ac84a9f3732b5be8515c6a66e098d498b6", size = 2949601 }, + { url = "https://files.pythonhosted.org/packages/3b/64/79892d250b7b2aa810688dfebe783aec02568e5cecacb1e100acbb9d95c6/gevent-24.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2142704c2adce9cd92f6600f371afb2860a446bfd0be5bd86cca5b3e12130766", size = 5107052 }, + { url = "https://files.pythonhosted.org/packages/66/44/9ee0ed1909b4f41375e32bf10036d5d8624962afcbd901573afdecd2e36a/gevent-24.11.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92e0d7759de2450a501effd99374256b26359e801b2d8bf3eedd3751973e87f5", size = 5271736 }, + { url = "https://files.pythonhosted.org/packages/e3/48/0184b2622a388a256199c5fadcad6b52b6455019c2a4b19edd6de58e30ba/gevent-24.11.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca845138965c8c56d1550499d6b923eb1a2331acfa9e13b817ad8305dde83d11", size = 5367782 }, + { url = "https://files.pythonhosted.org/packages/9a/b1/1a2704c346234d889d2e0042efb182534f7d294115f0e9f99d8079fa17eb/gevent-24.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:356b73d52a227d3313f8f828025b665deada57a43d02b1cf54e5d39028dbcf8d", size = 6757533 }, + { url = "https://files.pythonhosted.org/packages/ed/6e/b2eed8dec617264f0046d50a13a42d3f0a06c50071b9fc1eae00285a03f1/gevent-24.11.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:58851f23c4bdb70390f10fc020c973ffcf409eb1664086792c8b1e20f25eef43", size = 5449436 }, + { url = "https://files.pythonhosted.org/packages/63/c2/eca6b95fbf9af287fa91c327494e4b74a8d5bfa0156cd87b233f63f118dc/gevent-24.11.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1ea50009ecb7f1327347c37e9eb6561bdbc7de290769ee1404107b9a9cba7cf1", size = 6866470 }, + { url = 
"https://files.pythonhosted.org/packages/b7/e6/51824bd1f2c1ce70aa01495aa6ffe04ab789fa819fa7e6f0ad2388fb03c6/gevent-24.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:ec68e270543ecd532c4c1d70fca020f90aa5486ad49c4f3b8b2e64a66f5c9274", size = 1540088 }, + { url = "https://files.pythonhosted.org/packages/a0/73/263d0f63186d27d205b3dc157efe838afe3aba10a3baca15d85e97b90eae/gevent-24.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9347690f4e53de2c4af74e62d6fabc940b6d4a6cad555b5a379f61e7d3f2a8e", size = 6658480 }, + { url = "https://files.pythonhosted.org/packages/8a/fd/ec7b5c764a3d1340160b82f7394fdc1220d18e11ae089c472cf7bcc2fe6a/gevent-24.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8619d5c888cb7aebf9aec6703e410620ef5ad48cdc2d813dd606f8aa7ace675f", size = 6808247 }, + { url = "https://files.pythonhosted.org/packages/95/82/2ce68dc8dbc2c3ed3f4e73f21e1b7a45d80b5225670225a48e695f248850/gevent-24.11.1-cp39-cp39-win32.whl", hash = "sha256:c6b775381f805ff5faf250e3a07c0819529571d19bb2a9d474bee8c3f90d66af", size = 1483133 }, + { url = "https://files.pythonhosted.org/packages/76/96/aa4cbcf1807187b65a9c9ff15b32b08c2014968be852dda34d212cf8cc58/gevent-24.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:1c3443b0ed23dcb7c36a748d42587168672953d368f2956b17fad36d43b58836", size = 1566354 }, + { url = "https://files.pythonhosted.org/packages/86/63/197aa67250943b508b34995c2aa6b46402e7e6f11785487740c2057bfb20/gevent-24.11.1-pp310-pypy310_pp73-macosx_11_0_universal2.whl", hash = "sha256:f43f47e702d0c8e1b8b997c00f1601486f9f976f84ab704f8f11536e3fa144c9", size = 1271676 }, +] + +[[package]] +name = "geventhttpclient" +version = "2.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "brotli" }, + { name = "certifi" }, + { name = "gevent" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/29/26/018524ea81b2021dc2fe60e1a9c3f5eb347e09a5364cdcb7b92d7e7d3c28/geventhttpclient-2.3.3.tar.gz", hash = "sha256:3e74c1570d01dd09cabdfe2667fbf072520ec9bb3a31a0fd1eae3d0f43847f9b", size = 83625 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/71/6343c63d1f7d868711e6103a53ed1ae6d93b0b2c03d0f87e3a1eb42b9762/geventhttpclient-2.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d61cad95f80d5bd599e28933c187b3c4eeb0b2f6306e06fa0edcac5c9c4bac0a", size = 71588 }, + { url = "https://files.pythonhosted.org/packages/e3/b6/c3a413514e597dea887a8000ff6b0bdb2173f695d17b94ce29fc80a67391/geventhttpclient-2.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7a00e130577c0cf9749d1143e71543c50c7103321b7f37afc42782ad1d3c0ef7", size = 52245 }, + { url = "https://files.pythonhosted.org/packages/0f/57/1188bba121f21b1fb1efcb7787a48777e32a7990ce3a3479eaa7b5ee0342/geventhttpclient-2.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:14664f4a2d0296d6be5b65b6e57627987e0c2ecffd0ae6d7f9160bf119e8d728", size = 51647 }, + { url = "https://files.pythonhosted.org/packages/32/3a/04a5d0efa7901f0a31e9dbcaf4ab4f6d3e0de9cf63bff9708fa65347e3ae/geventhttpclient-2.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8fdfcf45166cecdade78d3dcb9c7615793269fa3d2d7fea328fe007bd87d84c6", size = 118023 }, + { url = "https://files.pythonhosted.org/packages/51/07/2ed84e6863a0b5fb0e0933ac5023399b83000961849eb4cdf88916b5cb58/geventhttpclient-2.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:35a6de7088ad69ba1561deaf854bf34c78a0eee33027b24aa7c44cdbe840b1d8", size = 123458 }, + { url = 
"https://files.pythonhosted.org/packages/9a/65/a7a04b10092713bacb20bcd68353accd8ee1a1064ab5417e663997c583a3/geventhttpclient-2.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:61b34527938e3ab477ecc90ec6bcde9780468722abececf548cbae89e4cd9d0b", size = 114506 }, + { url = "https://files.pythonhosted.org/packages/5b/c7/5841b3d2dd61c82ce6ecee4bc7342f432208da26abdba0ed7809f797a508/geventhttpclient-2.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b366bf38dd5335868a2ea077091af707c1111f70ee4cc8aa60dc14f56928158e", size = 112889 }, + { url = "https://files.pythonhosted.org/packages/a3/4e/aab0bb6c63bb447736dee1444d5367a94532d0e0003e43d3f075ceaccf51/geventhttpclient-2.3.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1fbfeea0242f30b9bfd2e982fc82aa2977eeef17e2526a681f7e8e1e37b2569a", size = 110829 }, + { url = "https://files.pythonhosted.org/packages/4f/c2/fb328a778381117322eda014c357336c315686c4937b8bda198cc7ef7c75/geventhttpclient-2.3.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f584fa36981b8a93799c63226a3deb385d1cc4f19eacd5dd6c696da0ecb4cca6", size = 112527 }, + { url = "https://files.pythonhosted.org/packages/53/05/d877878a855c320dab5d90bb83c5d9cad387361159a8510f273cb3efead0/geventhttpclient-2.3.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:b29e1383725d99e583e8ad125cfa820b8368ae7cfad642167bca869f55c4b000", size = 117627 }, + { url = "https://files.pythonhosted.org/packages/e4/dd/db8060f94d09abe1b653338fda923ed20ffd0bbce11049b6d4e3b82d9693/geventhttpclient-2.3.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c02e50baf4589c7b35db0f96fae7f3bd7e2dfbed2e1a2c1a0aa5696b91dff889", size = 110993 }, + { url = "https://files.pythonhosted.org/packages/4c/5c/68756fc1ba44247b3e3328d540ed0f01cea84ab578b54ecafa06437b447f/geventhttpclient-2.3.3-cp310-cp310-win32.whl", hash = "sha256:5865be94cf03aa219ff4d6fe3a01be798f1205d7d9611e51e75f2606c7c9ae35", size = 48165 }, + { url = "https://files.pythonhosted.org/packages/89/7f/363815faa5f4bc27cef72dac0f7d03c19b8de45ff034975eb117bf182ffb/geventhttpclient-2.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:53033fc1aac51b7513858662d8e17f44aa05207c3772d69fb1a07e2c5a2e45e4", size = 48793 }, + { url = "https://files.pythonhosted.org/packages/de/f0/689ada546c12ebdde04baade49ce2e5d00eec36a2486293fe8ea893f22cc/geventhttpclient-2.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0b1a60f810896a3e59a0e1036aa8fc31478e1ec0dd3faac7a771dd3d956580ce", size = 71589 }, + { url = "https://files.pythonhosted.org/packages/d5/8e/8bd0d39d18583410cb3cf4172e00b865e1ac77e9a08bdb52194e256cb466/geventhttpclient-2.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:452c3c2c15830fc0be7ea76c6d98f49df0a94327fbdd63822a840ad3125796dc", size = 52241 }, + { url = "https://files.pythonhosted.org/packages/b9/61/ecde771d686a64aab12d3ec8829fe41dd856f0c041fb8556b932a2a6731f/geventhttpclient-2.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:947e4f511e45abcc24fc982cee6042d14dc765d1a9ebd3c660cb93714002f950", size = 51650 }, + { url = "https://files.pythonhosted.org/packages/8a/21/73a1f040aaccddae69fa2ca44fd2490647c658efb8d7353ff1adba675077/geventhttpclient-2.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6dea544c829894366cfaa4d36a2014557a99f8769c9dd7b8fbf9b607126e04a", size = 118173 }, + { url = 
"https://files.pythonhosted.org/packages/4b/e5/e7e69c898a6341df846b24cb5ebf14fcb4e9fde8a0a16d9f4ec791d5ae2e/geventhttpclient-2.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b5eba36ea0ad819386e3850a71a42af53e6b9be86d4605d6ded061503573928", size = 123536 }, + { url = "https://files.pythonhosted.org/packages/a6/77/c0d6784c5a99b4ff6f3d885b4a0703e97d4ed1e4d84038ed1f855d1528a0/geventhttpclient-2.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a96e96b63ddcea3d25f62b204aafb523782ff0fcf45b38eb596f8ae4a0f17326", size = 114646 }, + { url = "https://files.pythonhosted.org/packages/04/e0/458a6c2bf281dc8390029fe34d0c8aabcdc9a9df32e122313ca8f2eaa434/geventhttpclient-2.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:386f0c9215958b9c974031fdbaa84002b4291b67bfe6dc5833cfb6e28083bb95", size = 112985 }, + { url = "https://files.pythonhosted.org/packages/b7/c1/f45a9c931230a2e18eec007aab33b349739b3c9303e331ba63e0144e2446/geventhttpclient-2.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2209e77a101ae67d3355d506f65257908f1eb41db74f765b01cb191e4a5160d5", size = 110943 }, + { url = "https://files.pythonhosted.org/packages/fc/e4/d96a551d5e0ad89ef0deeb332cc75e3691d3f4b44d926cbb8a594b258169/geventhttpclient-2.3.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6552c4d91c38007824f43a13fbbf4c615b7c6abe94fc2d482752ea91d976e140", size = 112194 }, + { url = "https://files.pythonhosted.org/packages/11/0d/3cbe9af29b4aecd8983a556249c2ebceeb4d3f41d953c6b380663cfaad8b/geventhttpclient-2.3.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e4b503183be80a1fb027eb5582413ca2be60356a7cf8eb9d49b913703f4ecd93", size = 117768 }, + { url = "https://files.pythonhosted.org/packages/e4/6b/91b834caeeb9e442e4d4016b0b85bf7babbeb83b46698496fb1f093c378e/geventhttpclient-2.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c8831f3ff03c11f64ad3b306883a8b064ef75f16a9f6a85cd105286023fba030", size = 111082 }, + { url = "https://files.pythonhosted.org/packages/c1/cc/fa518eceadbdfc2edea68e6bfaaeefe9eff904c891fbb4996d401d75aba5/geventhttpclient-2.3.3-cp311-cp311-win32.whl", hash = "sha256:aa56b2b0477b4b9c325251c1672d29762d08c5d2ad8d9e5db0b8279872e0030d", size = 48165 }, + { url = "https://files.pythonhosted.org/packages/e5/61/add6ac2956fca1f6b244725b4db4d97b269a4fcd691c197f543e1121d674/geventhttpclient-2.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:566d7fb431d416bfb0cc431ec74062858133ee94b5001e32f9607a9433cc1e4f", size = 48795 }, + { url = "https://files.pythonhosted.org/packages/85/dc/08138345692c38debeb822199be5daa32f2dc8e19615e2c511d423b3263b/geventhttpclient-2.3.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1ad896af16ffa276620f4f555ef057fe11a2aa6af21dc0136600d0b7738e67ae", size = 71649 }, + { url = "https://files.pythonhosted.org/packages/87/ae/f849381e097a409994ea0708bc7e06cbf1804a44bb8bf6542d76b015fce7/geventhttpclient-2.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:caf12944df25318a8c5b4deebc35ac94951562da154f039712ae3cde40ec5d95", size = 52301 }, + { url = "https://files.pythonhosted.org/packages/73/42/3e3c4f49918bae791633f5359f59758cd606aaa6e9bff74bc36424d42337/geventhttpclient-2.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c2586f3c2602cde0c3e5345813c0ab461142d1522667436b04d8a7dd7e7576c8", size = 51655 }, + { url = 
"https://files.pythonhosted.org/packages/b9/35/f5c33df76998b684db2e59205a58ef6480578bc5000a73c8fe795bd56331/geventhttpclient-2.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0248bbc2ff430dc2bec89e44715e4a38c7f2097ad2a133ca190f74fee51e5ef", size = 118690 }, + { url = "https://files.pythonhosted.org/packages/d6/7f/ffc7a26454e249877b7b45ca1312323432c3da9acc444226f2cc06228bba/geventhttpclient-2.3.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:493d5deb230e28fdd8d0d0f8e7addb4e7b9761e6a1115ea72f22b231835e546b", size = 124250 }, + { url = "https://files.pythonhosted.org/packages/e4/6c/25d5a1424dd12b3188fc23611d535b1beead11e14eef24a8aacbd2d1a90c/geventhttpclient-2.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:acccefebf3b1cc81f90d384dd17c1b3b58deee5ea1891025ef409307a22036b6", size = 115258 }, + { url = "https://files.pythonhosted.org/packages/28/71/cac8789a71359b5b90d1c83326633b693cd7e64108de2c24e85101ca683a/geventhttpclient-2.3.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aadaabe9267aacec88912ae5ac84b232e16a0ed12c5256559637f4b74aa510e8", size = 113940 }, + { url = "https://files.pythonhosted.org/packages/57/44/77989104142992e93853880432db4f3c568648bcbfa86f8bdc7376764f21/geventhttpclient-2.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c16830c9cad42c50f87e939f8065dc922010bbcbfb801fa12fd74d091dae7bef", size = 111474 }, + { url = "https://files.pythonhosted.org/packages/0e/ea/fb5bb3de208c2a7622d990f0552dcd3dbe1e40e7f4afbc13ff58c19dc5ad/geventhttpclient-2.3.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d686ce9ad28ddcb36b7748a59e64e2d8acfaa0145f0817becace36b1cfa4e5c6", size = 112895 }, + { url = "https://files.pythonhosted.org/packages/90/98/7f6785810199f502f0f9b34491b47bcea80826501550439124ea420fd741/geventhttpclient-2.3.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:98bfa7cf5b6246b28e05a72505211b60a6ecb63c934dd70b806e662869b009f6", size = 118432 }, + { url = "https://files.pythonhosted.org/packages/06/20/3a1226ef5e97a2cda0b94721fc687314e6fc470ba0612ff98a82728078b8/geventhttpclient-2.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dc77b39246ba5d2484567100377f100e4aa50b6b8849d3e547d68dc0138087dd", size = 111888 }, + { url = "https://files.pythonhosted.org/packages/68/d2/220c9b0c27b2481d2037f2a7446efbd7979741dd606f3ed39ea0f3af6456/geventhttpclient-2.3.3-cp312-cp312-win32.whl", hash = "sha256:032b4c519b5e7022c9563dbc7d1fac21ededb49f9e46ff2a9c44d1095747d2ea", size = 48201 }, + { url = "https://files.pythonhosted.org/packages/b9/07/04f0ff60f94e1e4fc83d617ffb46fac1fd3a6c36ef73f42f8fe3adadb02f/geventhttpclient-2.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:cf1051cc18521cd0819d3d69d930a4de916fb6f62be829b675481ca47e960765", size = 48780 }, + { url = "https://files.pythonhosted.org/packages/15/8a/1229ae5766cadee4517f9fe441abda0aedec06015912c56d312377e03843/geventhttpclient-2.3.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e5a14dd4e3504f05fc9febaedcb7cc91222da7176a6a9a2e703ab0cd85444016", size = 71640 }, + { url = "https://files.pythonhosted.org/packages/41/bd/58f5822779f05cb4410ab294adf9a7ef9b8822e2a8f091a72daebb391ddf/geventhttpclient-2.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4d6ae4ce130bf91cbdbab951b39a5faeb82b50f37a027afaac1cc956b344cc5d", size = 52298 }, + { url = 
"https://files.pythonhosted.org/packages/0d/25/5a1a0d6e5ae5bcc0d6273bcab0d2a15d1c7768ef28ac057c5b721efb54a1/geventhttpclient-2.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f16cf2fd71e6b77e6153a66aae282da00958b43345879e222605a3a7556e3a", size = 51646 }, + { url = "https://files.pythonhosted.org/packages/c1/7c/ed0a81d9a0f5d1a2ef7b4a17b5c56890cc918d9edbffd58c6f5a0c5b92f1/geventhttpclient-2.3.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50c62dbe5f43c9e0ee43f872de44aebf4968695d90804d71fc1bf32b827fae16", size = 118671 }, + { url = "https://files.pythonhosted.org/packages/5e/8b/5e5547d7804fde227a481c6cdfc166221362a04a473b07ae637787af6ff5/geventhttpclient-2.3.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d3a52ee992488ff087a3ec99d0076541ba1b07464c8eac22ad1a7778860bc345", size = 124205 }, + { url = "https://files.pythonhosted.org/packages/ac/9c/feac189cfc81bbd3dabf6cd42bfaf5142158bfce7ea1b1f26f599748f305/geventhttpclient-2.3.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52450392f3b9d32563c685013ba30b028f948612ebb9b1bfd6ba4ae113d985dc", size = 115255 }, + { url = "https://files.pythonhosted.org/packages/eb/8f/8059a0dd967679c11fd65b0d0b4bb3f9a03c0a8aaa8496518ac09584d515/geventhttpclient-2.3.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1642c8b3042b675a5b7ad67bce9611415d7bce0cf0380c0be52b7e5f55bc3e8", size = 113899 }, + { url = "https://files.pythonhosted.org/packages/ef/94/5ac03198fd67de43f2f99ced69a669c80c7ffe789fe2ac6d4b93f90ade04/geventhttpclient-2.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a36145c0b34d3c0e8c0c4a9d2e6d6f2b9f382c12e698fadb6a646a9b320a6c69", size = 111544 }, + { url = "https://files.pythonhosted.org/packages/df/b8/22d5df0ea2e38a63c8ba6df4dd5d98c328301da8960e52955007fe82b4b1/geventhttpclient-2.3.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:49512144af09fb2438a3e14e14863e7556434be3676efdaa0379198ce38bf1e2", size = 112922 }, + { url = "https://files.pythonhosted.org/packages/40/e6/579e43b837fc638a841063fce0d725958054c031338b1540d6bffcf780b7/geventhttpclient-2.3.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8b78a8e5ff3c06dfee63b8457740c1d7d2f0687f85ded76dfca2b25f52200a1c", size = 118438 }, + { url = "https://files.pythonhosted.org/packages/56/88/caf0921b6629996041ef0cad3e3161af88368ea90189f5e809a41cd800b5/geventhttpclient-2.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8bba80efc5c95e94641dc3e9864ab37829111a4e90bdf2ef08b1206c7a89dd94", size = 111900 }, + { url = "https://files.pythonhosted.org/packages/2b/e9/9c03a604c4adec315f680c7bcf33a52f15c7090635ad4e80d0da98c03f86/geventhttpclient-2.3.3-cp313-cp313-win32.whl", hash = "sha256:4a942448e77c01286edc4c29c22575d701d0639d42d0061b37025e118129372a", size = 48195 }, + { url = "https://files.pythonhosted.org/packages/4e/63/7a75399172fbc0aaa7189d9d8c162297acadfe242eb958186bf31fcdfd4e/geventhttpclient-2.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:b1ee31fed440029e20c99c89e49a0f983b771e7529db81fab33d942193036c41", size = 48778 }, + { url = "https://files.pythonhosted.org/packages/10/9c/8f285560739bd262b03e43111e1ee6f855b40c3daaeba8de93b0f9f2a776/geventhttpclient-2.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0e30bb1f0e754720ecbacf353db054ba1c3fa01d6016d00978eeed60b066703b", size = 71583 }, + { url = 
"https://files.pythonhosted.org/packages/81/d9/c49a051a34a2ca28ebecad8332cc23fc0277f71c564c8ca618eae7dd3c18/geventhttpclient-2.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:72011601bcd0952a8f4188893307dc0263f96e967126bc4df2e15d2f74fa4622", size = 52239 }, + { url = "https://files.pythonhosted.org/packages/61/5f/65c5f5625125c39d79d85cc9c9734a02a9e41d7cbe935cd9e48989a66cef/geventhttpclient-2.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12354718a63e2314c6dd1a6cd4d65cb0db7423062fb0aaaf1dee258cfa51e795", size = 51645 }, + { url = "https://files.pythonhosted.org/packages/b0/fe/900f983a3fbee5ac88fe5acf6cdecb9b3be4802723355ddfdd04bca21841/geventhttpclient-2.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bbab6fef671cc7268cd54f9d018a703542ec767998da0164bb61eb789f8d069", size = 117772 }, + { url = "https://files.pythonhosted.org/packages/3b/a4/b8a5fa00c012bd6ab408499f83f452df43af64c3754f932837294fc34c0a/geventhttpclient-2.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34622d675af26d9289d6bd5f03721cedc01db3ed99e1244360b48c73228d113c", size = 123237 }, + { url = "https://files.pythonhosted.org/packages/45/85/f4b1fed7fded36fe1389d67b3df9bda6562f3e450c578e4c56196fa6cd5b/geventhttpclient-2.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:795ad495865fc535ceb19908c5b0b468d6ccf94b44c3c3229cae85616da400ab", size = 114281 }, + { url = "https://files.pythonhosted.org/packages/d1/9a/c709f2c980361c4ba69e72532e6ad4db4dc592837f402f727cf85612b424/geventhttpclient-2.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbaedf4227f3691bc9e1080f42ebdf1b4131fc5aa09b00ed3934626197a9fbe", size = 112663 }, + { url = "https://files.pythonhosted.org/packages/bc/07/229f2b4fa25e183d98e9b30eb4613f77833f101ba1110c46a432bd9a718b/geventhttpclient-2.3.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f062f4120661c25cc87b7cbec1c4b27e83f618604d1403e950191835b999a61a", size = 110607 }, + { url = "https://files.pythonhosted.org/packages/9b/a8/d390f45b63de3f15ef8d797238dc68d89e845ce2bb0d7dba55b68e1efc64/geventhttpclient-2.3.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0d99d09fc20e91902a7b81000d4b819c4da1d5911b2452e948bffd00dbd3722e", size = 112287 }, + { url = "https://files.pythonhosted.org/packages/e3/14/de5e996b4f84e42d4567af68b6829d7463718f7eae5b74c29bf7b1159be0/geventhttpclient-2.3.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:aafdd67e7c4163f0245e1785c1dc42b2f4fdaacae1f28c68758f06010335f93c", size = 117389 }, + { url = "https://files.pythonhosted.org/packages/95/db/0db19ec48fcad94c9f91afdcdb67a1a106ffb83707aa7e3761dac7a4db4f/geventhttpclient-2.3.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:36f9a4c93eb8927376c995cc91857a1e94dd4a68a0c459870adb11799ceea75d", size = 110782 }, + { url = "https://files.pythonhosted.org/packages/73/bc/969364874f4017196742c3e63fa4228b6523183508d1d615578f8ad1e85e/geventhttpclient-2.3.3-cp39-cp39-win32.whl", hash = "sha256:a4d4f777a9b55d6babbf5525623ad74e543e6fbb86bc3305bf24d80fcc0190dc", size = 48170 }, + { url = "https://files.pythonhosted.org/packages/e7/f2/b4880ab65918b03165cedc90bd1da3405b47f7054cbad4d628fba6dcea6f/geventhttpclient-2.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:f5724370d95ce9753846ff90d7805a11f7981d9dc579e3a229fa594cb401da98", size = 48797 }, + { url = 
"https://files.pythonhosted.org/packages/80/4d/5f6ef87025c55cc2db4566fe98aae8e07f8cb535e51ef7cdf4aa0fb6f0ca/geventhttpclient-2.3.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a8519b9aacad25696a220c1217047d5277403df96cb8aa8e9a5ec5271798cb87", size = 50497 }, + { url = "https://files.pythonhosted.org/packages/20/f6/c493e853ec3cecbd8ddb34e3433e39b826568a4e7b8f114ad8570491bb7e/geventhttpclient-2.3.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:cbfcb54ee015aa38c8e9eb3bb4be68f88fbce6cbf90f716fc3ffc5f49892f721", size = 49752 }, + { url = "https://files.pythonhosted.org/packages/5e/7d/a063d668d92893274f1d9e36c7b0fe079e55650cc2934b50441c52274538/geventhttpclient-2.3.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e9c4f27d48ce4da6dde530aea00e8d427965ace0801fe3d7c4739e167c10de", size = 54202 }, + { url = "https://files.pythonhosted.org/packages/92/ce/6cc6e30b66c147128d7662cf4909fe038b81792d2617a748dfe4ee0ae98a/geventhttpclient-2.3.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:447fc2d49a41449684154c12c03ab80176a413e9810d974363a061b71bdbf5a0", size = 58540 }, + { url = "https://files.pythonhosted.org/packages/7e/f4/5214ea44055c82d92adaaaddb19d2791addd3ce60af863aec03384e7c88a/geventhttpclient-2.3.3-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4598c2aa14c866a10a07a2944e2c212f53d0c337ce211336ad68ae8243646216", size = 54445 }, + { url = "https://files.pythonhosted.org/packages/06/1d/f65b4ac42cce6cef707ace606bbdc1142aa0f3863ad16fa615004b9461d7/geventhttpclient-2.3.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:69d2bd7ab7f94a6c73325f4b88fd07b0d5f4865672ed7a519f2d896949353761", size = 48838 }, + { url = "https://files.pythonhosted.org/packages/34/e4/ab4966fb1dcfc855a40d80f7493f597a43ce9c24311ba4f9664da8f7cb1a/geventhttpclient-2.3.3-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a3182f1457599c2901c48a1def37a5bc4762f696077e186e2050fcc60b2fbdf", size = 50495 }, + { url = "https://files.pythonhosted.org/packages/db/00/6798f14669b27dcfdbbabe98180d7ee578aa410d43c1946f2af88cd50bc6/geventhttpclient-2.3.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:86b489238dc2cbfa53cdd5621e888786a53031d327e0a8509529c7568292b0ce", size = 49739 }, + { url = "https://files.pythonhosted.org/packages/da/e2/ff407218bc6222ea448008a92bf6152565a342136dc6db5ab5244827f8b3/geventhttpclient-2.3.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4c8aca6ab5da4211870c1d8410c699a9d543e86304aac47e1558ec94d0da97a", size = 54201 }, + { url = "https://files.pythonhosted.org/packages/50/42/775e66f52059dbe213dbef16657dee456f1e1a602260514de629879f6463/geventhttpclient-2.3.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29fe3b6523efa8cdcb5e9bad379f9055e4f0ebb914e4dcd8a0ca33b003b402f5", size = 58541 }, + { url = "https://files.pythonhosted.org/packages/fb/50/7dbe5b3693936e561768e1e41e5e7ed139031c656c62c13a398f36f9ca74/geventhttpclient-2.3.3-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32313c833dfbe27d3f66feacac667ae937859dbbd58e25d1172329c8b368426", size = 54443 }, + { url = "https://files.pythonhosted.org/packages/44/4a/8df48837b520782fb158ec946329bd24a6286d3be801552ebcafee258968/geventhttpclient-2.3.3-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:4fc1d824602d9590a2b88ac14cfe6d2ecc357e91472ecfe719973c40aab25f4e", size = 48827 }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.66.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ff/a7/8e9cccdb1c49870de6faea2a2764fa23f627dd290633103540209f03524c/googleapis_common_protos-1.66.0.tar.gz", hash = "sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c", size = 114376 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/0f/c0713fb2b3d28af4b2fded3291df1c4d4f79a00d15c2374a9e010870016c/googleapis_common_protos-1.66.0-py2.py3-none-any.whl", hash = "sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed", size = 221682 }, +] + +[[package]] +name = "greenlet" +version = "3.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/ff/df5fede753cc10f6a5be0931204ea30c35fa2f2ea7a35b25bdaf4fe40e46/greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467", size = 186022 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/90/5234a78dc0ef6496a6eb97b67a42a8e96742a56f7dc808cb954a85390448/greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563", size = 271235 }, + { url = "https://files.pythonhosted.org/packages/7c/16/cd631fa0ab7d06ef06387135b7549fdcc77d8d859ed770a0d28e47b20972/greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83", size = 637168 }, + { url = "https://files.pythonhosted.org/packages/2f/b1/aed39043a6fec33c284a2c9abd63ce191f4f1a07319340ffc04d2ed3256f/greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0", size = 648826 }, + { url = "https://files.pythonhosted.org/packages/76/25/40e0112f7f3ebe54e8e8ed91b2b9f970805143efef16d043dfc15e70f44b/greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120", size = 644443 }, + { url = "https://files.pythonhosted.org/packages/fb/2f/3850b867a9af519794784a7eeed1dd5bc68ffbcc5b28cef703711025fd0a/greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc", size = 643295 }, + { url = "https://files.pythonhosted.org/packages/cf/69/79e4d63b9387b48939096e25115b8af7cd8a90397a304f92436bcb21f5b2/greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617", size = 599544 }, + { url = "https://files.pythonhosted.org/packages/46/1d/44dbcb0e6c323bd6f71b8c2f4233766a5faf4b8948873225d34a0b7efa71/greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7", size = 1125456 }, + { url = "https://files.pythonhosted.org/packages/e0/1d/a305dce121838d0278cee39d5bb268c657f10a5363ae4b726848f833f1bb/greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6", size = 1149111 }, + { url = 
"https://files.pythonhosted.org/packages/96/28/d62835fb33fb5652f2e98d34c44ad1a0feacc8b1d3f1aecab035f51f267d/greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80", size = 298392 }, + { url = "https://files.pythonhosted.org/packages/28/62/1c2665558618553c42922ed47a4e6d6527e2fa3516a8256c2f431c5d0441/greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70", size = 272479 }, + { url = "https://files.pythonhosted.org/packages/76/9d/421e2d5f07285b6e4e3a676b016ca781f63cfe4a0cd8eaecf3fd6f7a71ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159", size = 640404 }, + { url = "https://files.pythonhosted.org/packages/e5/de/6e05f5c59262a584e502dd3d261bbdd2c97ab5416cc9c0b91ea38932a901/greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e", size = 652813 }, + { url = "https://files.pythonhosted.org/packages/49/93/d5f93c84241acdea15a8fd329362c2c71c79e1a507c3f142a5d67ea435ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1", size = 648517 }, + { url = "https://files.pythonhosted.org/packages/15/85/72f77fc02d00470c86a5c982b8daafdf65d38aefbbe441cebff3bf7037fc/greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383", size = 647831 }, + { url = "https://files.pythonhosted.org/packages/f7/4b/1c9695aa24f808e156c8f4813f685d975ca73c000c2a5056c514c64980f6/greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a", size = 602413 }, + { url = "https://files.pythonhosted.org/packages/76/70/ad6e5b31ef330f03b12559d19fda2606a522d3849cde46b24f223d6d1619/greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511", size = 1129619 }, + { url = "https://files.pythonhosted.org/packages/f4/fb/201e1b932e584066e0f0658b538e73c459b34d44b4bd4034f682423bc801/greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395", size = 1155198 }, + { url = "https://files.pythonhosted.org/packages/12/da/b9ed5e310bb8b89661b80cbcd4db5a067903bbcd7fc854923f5ebb4144f0/greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39", size = 298930 }, + { url = "https://files.pythonhosted.org/packages/7d/ec/bad1ac26764d26aa1353216fcbfa4670050f66d445448aafa227f8b16e80/greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d", size = 274260 }, + { url = "https://files.pythonhosted.org/packages/66/d4/c8c04958870f482459ab5956c2942c4ec35cac7fe245527f1039837c17a9/greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79", size = 649064 }, + { url = 
"https://files.pythonhosted.org/packages/51/41/467b12a8c7c1303d20abcca145db2be4e6cd50a951fa30af48b6ec607581/greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa", size = 663420 }, + { url = "https://files.pythonhosted.org/packages/27/8f/2a93cd9b1e7107d5c7b3b7816eeadcac2ebcaf6d6513df9abaf0334777f6/greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441", size = 658035 }, + { url = "https://files.pythonhosted.org/packages/57/5c/7c6f50cb12be092e1dccb2599be5a942c3416dbcfb76efcf54b3f8be4d8d/greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36", size = 660105 }, + { url = "https://files.pythonhosted.org/packages/f1/66/033e58a50fd9ec9df00a8671c74f1f3a320564c6415a4ed82a1c651654ba/greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9", size = 613077 }, + { url = "https://files.pythonhosted.org/packages/19/c5/36384a06f748044d06bdd8776e231fadf92fc896bd12cb1c9f5a1bda9578/greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0", size = 1135975 }, + { url = "https://files.pythonhosted.org/packages/38/f9/c0a0eb61bdf808d23266ecf1d63309f0e1471f284300ce6dac0ae1231881/greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942", size = 1163955 }, + { url = "https://files.pythonhosted.org/packages/43/21/a5d9df1d21514883333fc86584c07c2b49ba7c602e670b174bd73cfc9c7f/greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01", size = 299655 }, + { url = "https://files.pythonhosted.org/packages/f3/57/0db4940cd7bb461365ca8d6fd53e68254c9dbbcc2b452e69d0d41f10a85e/greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1", size = 272990 }, + { url = "https://files.pythonhosted.org/packages/1c/ec/423d113c9f74e5e402e175b157203e9102feeb7088cee844d735b28ef963/greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff", size = 649175 }, + { url = "https://files.pythonhosted.org/packages/a9/46/ddbd2db9ff209186b7b7c621d1432e2f21714adc988703dbdd0e65155c77/greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a", size = 663425 }, + { url = "https://files.pythonhosted.org/packages/bc/f9/9c82d6b2b04aa37e38e74f0c429aece5eeb02bab6e3b98e7db89b23d94c6/greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e", size = 657736 }, + { url = "https://files.pythonhosted.org/packages/d9/42/b87bc2a81e3a62c3de2b0d550bf91a86939442b7ff85abb94eec3fc0e6aa/greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4", size = 660347 }, + { url = 
"https://files.pythonhosted.org/packages/37/fa/71599c3fd06336cdc3eac52e6871cfebab4d9d70674a9a9e7a482c318e99/greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e", size = 615583 }, + { url = "https://files.pythonhosted.org/packages/4e/96/e9ef85de031703ee7a4483489b40cf307f93c1824a02e903106f2ea315fe/greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1", size = 1133039 }, + { url = "https://files.pythonhosted.org/packages/87/76/b2b6362accd69f2d1889db61a18c94bc743e961e3cab344c2effaa4b4a25/greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c", size = 1160716 }, + { url = "https://files.pythonhosted.org/packages/1f/1b/54336d876186920e185066d8c3024ad55f21d7cc3683c856127ddb7b13ce/greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761", size = 299490 }, + { url = "https://files.pythonhosted.org/packages/5f/17/bea55bf36990e1638a2af5ba10c1640273ef20f627962cf97107f1e5d637/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011", size = 643731 }, + { url = "https://files.pythonhosted.org/packages/78/d2/aa3d2157f9ab742a08e0fd8f77d4699f37c22adfbfeb0c610a186b5f75e0/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13", size = 649304 }, + { url = "https://files.pythonhosted.org/packages/f1/8e/d0aeffe69e53ccff5a28fa86f07ad1d2d2d6537a9506229431a2a02e2f15/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475", size = 646537 }, + { url = "https://files.pythonhosted.org/packages/05/79/e15408220bbb989469c8871062c97c6c9136770657ba779711b90870d867/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b", size = 642506 }, + { url = "https://files.pythonhosted.org/packages/18/87/470e01a940307796f1d25f8167b551a968540fbe0551c0ebb853cb527dd6/greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822", size = 602753 }, + { url = "https://files.pythonhosted.org/packages/e2/72/576815ba674eddc3c25028238f74d7b8068902b3968cbe456771b166455e/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01", size = 1122731 }, + { url = "https://files.pythonhosted.org/packages/ac/38/08cc303ddddc4b3d7c628c3039a61a3aae36c241ed01393d00c2fd663473/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6", size = 1142112 }, + { url = "https://files.pythonhosted.org/packages/8c/82/8051e82af6d6b5150aacb6789a657a8afd48f0a44d8e91cb72aaaf28553a/greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3", size = 270027 }, + { url = 
"https://files.pythonhosted.org/packages/f9/74/f66de2785880293780eebd18a2958aeea7cbe7814af1ccef634f4701f846/greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42", size = 634822 }, + { url = "https://files.pythonhosted.org/packages/68/23/acd9ca6bc412b02b8aa755e47b16aafbe642dde0ad2f929f836e57a7949c/greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f", size = 646866 }, + { url = "https://files.pythonhosted.org/packages/a9/ab/562beaf8a53dc9f6b2459f200e7bc226bb07e51862a66351d8b7817e3efd/greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437", size = 641985 }, + { url = "https://files.pythonhosted.org/packages/03/d3/1006543621f16689f6dc75f6bcf06e3c23e044c26fe391c16c253623313e/greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145", size = 641268 }, + { url = "https://files.pythonhosted.org/packages/2f/c1/ad71ce1b5f61f900593377b3f77b39408bce5dc96754790311b49869e146/greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c", size = 597376 }, + { url = "https://files.pythonhosted.org/packages/f7/ff/183226685b478544d61d74804445589e069d00deb8ddef042699733950c7/greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e", size = 1123359 }, + { url = "https://files.pythonhosted.org/packages/c0/8b/9b3b85a89c22f55f315908b94cd75ab5fed5973f7393bbef000ca8b2c5c1/greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e", size = 1147458 }, + { url = "https://files.pythonhosted.org/packages/b8/1c/248fadcecd1790b0ba793ff81fa2375c9ad6442f4c748bf2cc2e6563346a/greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c", size = 281131 }, + { url = "https://files.pythonhosted.org/packages/ae/02/e7d0aef2354a38709b764df50b2b83608f0621493e47f47694eb80922822/greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22", size = 298306 }, +] + +[[package]] +name = "grpcio" +version = "1.68.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/ec/b76ff6d86bdfd1737a5ec889394b54c18b1ec3832d91041e25023fbcb67d/grpcio-1.68.1.tar.gz", hash = "sha256:44a8502dd5de653ae6a73e2de50a401d84184f0331d0ac3daeb044e66d5c5054", size = 12694654 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/88/d1ac9676a0809e3efec154d45246474ec12a4941686da71ffb3d34190294/grpcio-1.68.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:d35740e3f45f60f3c37b1e6f2f4702c23867b9ce21c6410254c9c682237da68d", size = 5171054 }, + { url = "https://files.pythonhosted.org/packages/ec/cb/94ca41e100201fee8876a4b44d64e43ac7405929909afe1fa943d65b25ef/grpcio-1.68.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:d99abcd61760ebb34bdff37e5a3ba333c5cc09feda8c1ad42547bea0416ada78", size = 11078566 }, + { url = 
"https://files.pythonhosted.org/packages/d5/b0/ad4c66f2e3181b4eab99885686c960c403ae2300bacfe427526282facc07/grpcio-1.68.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:f8261fa2a5f679abeb2a0a93ad056d765cdca1c47745eda3f2d87f874ff4b8c9", size = 5690039 }, + { url = "https://files.pythonhosted.org/packages/67/1e/f5d3410674d021831c9fef2d1d7ca2357b08d09c840ad4e054ea8ffc302e/grpcio-1.68.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0feb02205a27caca128627bd1df4ee7212db051019a9afa76f4bb6a1a80ca95e", size = 6317470 }, + { url = "https://files.pythonhosted.org/packages/91/93/701d5f33b163a621c8f2d4453f9e22f6c14e996baed54118d0dea93fc8c7/grpcio-1.68.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:919d7f18f63bcad3a0f81146188e90274fde800a94e35d42ffe9eadf6a9a6330", size = 5941884 }, + { url = "https://files.pythonhosted.org/packages/67/44/06917ffaa35ca463b93dde60f324015fe4192312b0f4dd0faec061e7ca7f/grpcio-1.68.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:963cc8d7d79b12c56008aabd8b457f400952dbea8997dd185f155e2f228db079", size = 6646332 }, + { url = "https://files.pythonhosted.org/packages/d4/94/074db039532687ec8ef07ebbcc747c46547c94329016e22b97d97b9e5f3b/grpcio-1.68.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ccf2ebd2de2d6661e2520dae293298a3803a98ebfc099275f113ce1f6c2a80f1", size = 6212515 }, + { url = "https://files.pythonhosted.org/packages/c5/f2/0c939264c36c6038fae1732a2a3e01a7075ba171a2154d86842ee0ac9b0a/grpcio-1.68.1-cp310-cp310-win32.whl", hash = "sha256:2cc1fd04af8399971bcd4f43bd98c22d01029ea2e56e69c34daf2bf8470e47f5", size = 3650459 }, + { url = "https://files.pythonhosted.org/packages/b6/90/b0e9278e88f747879d13b79fb893c9acb381fb90541ad9e416c7816c5eaf/grpcio-1.68.1-cp310-cp310-win_amd64.whl", hash = "sha256:ee2e743e51cb964b4975de572aa8fb95b633f496f9fcb5e257893df3be854746", size = 4399144 }, + { url = "https://files.pythonhosted.org/packages/fe/0d/fde5a5777d65696c39bb3e622fe1239dd0a878589bf6c5066980e7d19154/grpcio-1.68.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:55857c71641064f01ff0541a1776bfe04a59db5558e82897d35a7793e525774c", size = 5180919 }, + { url = "https://files.pythonhosted.org/packages/07/fd/e5fa75b5ddf5d9f16606196973f9c2b4b1adf5a1735117eb7129fc33d2ec/grpcio-1.68.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4b177f5547f1b995826ef529d2eef89cca2f830dd8b2c99ffd5fde4da734ba73", size = 11150922 }, + { url = "https://files.pythonhosted.org/packages/86/1e/aaf5a1dae87fe47f277c5a1be72b31d2c209d095bebb0ce1d2df5cb8779c/grpcio-1.68.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:3522c77d7e6606d6665ec8d50e867f13f946a4e00c7df46768f1c85089eae515", size = 5685685 }, + { url = "https://files.pythonhosted.org/packages/a9/69/c4fdf87d5c5696207e2ed232e4bdde656d8c99ba91f361927f3f06aa41ca/grpcio-1.68.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9d1fae6bbf0816415b81db1e82fb3bf56f7857273c84dcbe68cbe046e58e1ccd", size = 6316535 }, + { url = "https://files.pythonhosted.org/packages/6f/c6/539660516ea7db7bc3d39e07154512ae807961b14ec6b5b0c58d15657ff1/grpcio-1.68.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:298ee7f80e26f9483f0b6f94cc0a046caf54400a11b644713bb5b3d8eb387600", size = 5939920 }, + { url = "https://files.pythonhosted.org/packages/38/f3/97a74dc4dd95bf195168d6da2ca4731ab7d3d0b03078f2833b4ff9c4f48f/grpcio-1.68.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cbb5780e2e740b6b4f2d208e90453591036ff80c02cc605fea1af8e6fc6b1bbe", size = 
6644770 }, + { url = "https://files.pythonhosted.org/packages/cb/36/79a5e04073e58106aff442509a0c459151fa4f43202395db3eb8f77b78e9/grpcio-1.68.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ddda1aa22495d8acd9dfbafff2866438d12faec4d024ebc2e656784d96328ad0", size = 6211743 }, + { url = "https://files.pythonhosted.org/packages/73/0f/2250f4a0de1a0bec0726c47a021cbf71af6105f512ecaf67703e2eb1ad2f/grpcio-1.68.1-cp311-cp311-win32.whl", hash = "sha256:b33bd114fa5a83f03ec6b7b262ef9f5cac549d4126f1dc702078767b10c46ed9", size = 3650734 }, + { url = "https://files.pythonhosted.org/packages/4b/29/061c93a35f498238dc35eb8fb039ce168aa99cac2f0f1ce0c8a0a4bdb274/grpcio-1.68.1-cp311-cp311-win_amd64.whl", hash = "sha256:7f20ebec257af55694d8f993e162ddf0d36bd82d4e57f74b31c67b3c6d63d8b2", size = 4400816 }, + { url = "https://files.pythonhosted.org/packages/f5/15/674a1468fef234fa996989509bbdfc0d695878cbb385b9271f5d690d5cd3/grpcio-1.68.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:8829924fffb25386995a31998ccbbeaa7367223e647e0122043dfc485a87c666", size = 5148351 }, + { url = "https://files.pythonhosted.org/packages/62/f5/edce368682d6d0b3573b883b134df022a44b1c888ea416dd7d78d480ab24/grpcio-1.68.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3aed6544e4d523cd6b3119b0916cef3d15ef2da51e088211e4d1eb91a6c7f4f1", size = 11127559 }, + { url = "https://files.pythonhosted.org/packages/ce/14/a6fde3114eafd9e4e345d1ebd0291c544d83b22f0554b1678a2968ae39e1/grpcio-1.68.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:4efac5481c696d5cb124ff1c119a78bddbfdd13fc499e3bc0ca81e95fc573684", size = 5645221 }, + { url = "https://files.pythonhosted.org/packages/21/21/d1865bd6a22f9a26217e4e1b35f9105f7a0cdfb7a5fffe8be48e1a1afafc/grpcio-1.68.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ab2d912ca39c51f46baf2a0d92aa265aa96b2443266fc50d234fa88bf877d8e", size = 6292270 }, + { url = "https://files.pythonhosted.org/packages/3a/f6/19798be6c3515a7b1fb9570198c91710472e2eb21f1900109a76834829e3/grpcio-1.68.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c87ce2a97434dffe7327a4071839ab8e8bffd0054cc74cbe971fba98aedd60", size = 5905978 }, + { url = "https://files.pythonhosted.org/packages/9b/43/c3670a657445cd55be1246f64dbc3a6a33cab0f0141c5836df2e04f794c8/grpcio-1.68.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e4842e4872ae4ae0f5497bf60a0498fa778c192cc7a9e87877abd2814aca9475", size = 6630444 }, + { url = "https://files.pythonhosted.org/packages/80/69/fbbebccffd266bea4268b685f3e8e03613405caba69e93125dc783036465/grpcio-1.68.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:255b1635b0ed81e9f91da4fcc8d43b7ea5520090b9a9ad9340d147066d1d3613", size = 6200324 }, + { url = "https://files.pythonhosted.org/packages/65/5c/27a26c21916f94f0c1585111974a5d5a41d8420dcb42c2717ee514c97a97/grpcio-1.68.1-cp312-cp312-win32.whl", hash = "sha256:7dfc914cc31c906297b30463dde0b9be48e36939575eaf2a0a22a8096e69afe5", size = 3638381 }, + { url = "https://files.pythonhosted.org/packages/a3/ba/ba6b65ccc93c7df1031c6b41e45b79a5a37e46b81d816bb3ea68ba476d77/grpcio-1.68.1-cp312-cp312-win_amd64.whl", hash = "sha256:a0c8ddabef9c8f41617f213e527254c41e8b96ea9d387c632af878d05db9229c", size = 4389959 }, + { url = "https://files.pythonhosted.org/packages/37/1a/15ccc08da339a5536690e6f877963422a5abf3f6dfeed96b3175f5c816b9/grpcio-1.68.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:a47faedc9ea2e7a3b6569795c040aae5895a19dde0c728a48d3c5d7995fda385", size = 5149822 }, + { url = 
"https://files.pythonhosted.org/packages/bc/fe/91bb4b160cd251d5b5ee722e6342355f76d1ffe176c50a6ef0e8256fbb47/grpcio-1.68.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:390eee4225a661c5cd133c09f5da1ee3c84498dc265fd292a6912b65c421c78c", size = 11085016 }, + { url = "https://files.pythonhosted.org/packages/55/2d/0bb2478410f5896da1090b9f43c2979dd72e7e97d10bc223bfbdddcf8eca/grpcio-1.68.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:66a24f3d45c33550703f0abb8b656515b0ab777970fa275693a2f6dc8e35f1c1", size = 5645634 }, + { url = "https://files.pythonhosted.org/packages/f5/6c/e2d22d963b695f87a09965246beb1c3224b09ffc666fc0b285820926499a/grpcio-1.68.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c08079b4934b0bf0a8847f42c197b1d12cba6495a3d43febd7e99ecd1cdc8d54", size = 6291096 }, + { url = "https://files.pythonhosted.org/packages/6f/f6/21d9204e2c4c0804ad72be8c830c44f0e1355e649c173f87508b7f0e5488/grpcio-1.68.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8720c25cd9ac25dd04ee02b69256d0ce35bf8a0f29e20577427355272230965a", size = 5906528 }, + { url = "https://files.pythonhosted.org/packages/39/2a/bf6ae4fef13755ca236d587d630b82207cfad43cf956870adead97fd1ef1/grpcio-1.68.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:04cfd68bf4f38f5bb959ee2361a7546916bd9a50f78617a346b3aeb2b42e2161", size = 6634215 }, + { url = "https://files.pythonhosted.org/packages/5b/83/9c96a6adfbea5e8a9ed408410c0259942713be64173b8816c7bf6ac2d830/grpcio-1.68.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c28848761a6520c5c6071d2904a18d339a796ebe6b800adc8b3f474c5ce3c3ad", size = 6200750 }, + { url = "https://files.pythonhosted.org/packages/b4/3e/af42f87759c6301c4fed894b3dd801b13162ba1d8e2942412e788ac749eb/grpcio-1.68.1-cp313-cp313-win32.whl", hash = "sha256:77d65165fc35cff6e954e7fd4229e05ec76102d4406d4576528d3a3635fc6172", size = 3637594 }, + { url = "https://files.pythonhosted.org/packages/7e/d1/3bef33a3d5d26d4ea9284e1b464f481d6d21ed8ae1c3da381b05f62c701d/grpcio-1.68.1-cp313-cp313-win_amd64.whl", hash = "sha256:a8040f85dcb9830d8bbb033ae66d272614cec6faceee88d37a88a9bd1a7a704e", size = 4391184 }, + { url = "https://files.pythonhosted.org/packages/c7/44/8ad69230a2ecb248d0cb1e46c7b14a9e5625e61961f5118127e726c6dfa3/grpcio-1.68.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:cb400138e73969eb5e0535d1d06cae6a6f7a15f2cc74add320e2130b8179211a", size = 5171490 }, + { url = "https://files.pythonhosted.org/packages/28/a8/21f4a3d13c4a940442aaa691dd4883768f2d8f5733ed52ac335b05b80a6a/grpcio-1.68.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a1b988b40f2fd9de5c820f3a701a43339d8dcf2cb2f1ca137e2c02671cc83ac1", size = 11144164 }, + { url = "https://files.pythonhosted.org/packages/20/e9/ad4a4ebbee59994717a8cd0d43810d7838e48ff879680cb512054464a731/grpcio-1.68.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:96f473cdacfdd506008a5d7579c9f6a7ff245a9ade92c3c0265eb76cc591914f", size = 5688618 }, + { url = "https://files.pythonhosted.org/packages/63/a6/e9eea6ea8d51e9bcb3a1ceadf696d099ff9f822d92a4b872f4c7f42dc3f8/grpcio-1.68.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:37ea3be171f3cf3e7b7e412a98b77685eba9d4fd67421f4a34686a63a65d99f9", size = 6317135 }, + { url = "https://files.pythonhosted.org/packages/f7/2f/44e2f3199565da84d58df5e26ec68577ba8c1f8a19b1c8413919f75df845/grpcio-1.68.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ceb56c4285754e33bb3c2fa777d055e96e6932351a3082ce3559be47f8024f0", size = 5941760 
}, + { url = "https://files.pythonhosted.org/packages/51/cf/f00e13b50db135dace2351fbdcefef74eeb847cdf1eef85ac0a8c06044f5/grpcio-1.68.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:dffd29a2961f3263a16d73945b57cd44a8fd0b235740cb14056f0612329b345e", size = 6647728 }, + { url = "https://files.pythonhosted.org/packages/ee/a3/35d5b641d80696feee278166c5fea013fad65673dca6abf2245174beb179/grpcio-1.68.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:025f790c056815b3bf53da850dd70ebb849fd755a4b1ac822cb65cd631e37d43", size = 6212372 }, + { url = "https://files.pythonhosted.org/packages/28/38/ec2c6dde7274fca6ecfc39cde8ae8b437871c9a90679f72704d7e4fae33f/grpcio-1.68.1-cp39-cp39-win32.whl", hash = "sha256:1098f03dedc3b9810810568060dea4ac0822b4062f537b0f53aa015269be0a76", size = 3649692 }, + { url = "https://files.pythonhosted.org/packages/58/fb/73d7686fd51955de6fe0d635404eca5a9efbee415f04c1c572b5becd010b/grpcio-1.68.1-cp39-cp39-win_amd64.whl", hash = "sha256:334ab917792904245a028f10e803fcd5b6f36a7b2173a820c0b5b076555825e1", size = 4398968 }, +] + +[[package]] +name = "gunicorn" +version = "23.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/34/72/9614c465dc206155d93eff0ca20d42e1e35afc533971379482de953521a4/gunicorn-23.0.0.tar.gz", hash = "sha256:f014447a0101dc57e294f6c18ca6b40227a4c90e9bdb586042628030cba004ec", size = 375031 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/7d/6dac2a6e1eba33ee43f318edbed4ff29151a49b5d37f080aad1e6469bca4/gunicorn-23.0.0-py3-none-any.whl", hash = "sha256:ec400d38950de4dfd418cff8328b2c8faed0edb0d517d3394e457c317908ca4d", size = 85029 }, +] + +[[package]] +name = "h11" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 }, +] + +[[package]] +name = "httpcore" +version = "1.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 }, +] + +[[package]] +name = "identify" +version = "2.6.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1a/5f/05f0d167be94585d502b4adf8c7af31f1dc0b1c7e14f9938a88fdbbcf4a7/identify-2.6.3.tar.gz", hash = "sha256:62f5dae9b5fef52c84cc188514e9ea4f3f636b1d8799ab5ebc475471f9e47a02", size = 99179 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/f5/09644a3ad803fae9eca8efa17e1f2aef380c7f0b02f7ec4e8d446e51d64a/identify-2.6.3-py2.py3-none-any.whl", hash = "sha256:9edba65473324c2ea9684b1f944fe3191db3345e50b6d04571d10ed164f8d7bd", size = 99049 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "importlib-metadata" +version = "8.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cd/12/33e59336dca5be0c398a7482335911a33aa0e20776128f038019f1a95f1b/importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7", size = 55304 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514 }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + +[[package]] +name = "ipykernel" +version = "6.29.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "appnope", marker = "platform_system == 'Darwin'" }, + { name = "comm" }, + { name = "debugpy" }, + { name = "ipython" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "matplotlib-inline" }, + { name = "nest-asyncio" }, + { name = "packaging" }, + { name = "psutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/5c/67594cb0c7055dc50814b21731c22a601101ea3b1b50a9a1b090e11f5d0f/ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215", size = 163367 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/94/5c/368ae6c01c7628438358e6d337c19b05425727fbb221d2a3c4303c372f42/ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5", size = 117173 }, +] + +[[package]] +name = "ipython" +version = "8.18.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "decorator" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "jedi" }, + { name = "matplotlib-inline" }, + { name = "pexpect", marker = "sys_platform != 'win32'" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "stack-data" }, + { name = "traitlets" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/b9/3ba6c45a6df813c09a48bac313c22ff83efa26cbb55011218d925a46e2ad/ipython-8.18.1.tar.gz", hash = "sha256:ca6f079bb33457c66e233e4580ebfc4128855b4cf6370dddd73842a9563e8a27", size = 5486330 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/6b/d9fdcdef2eb6a23f391251fde8781c38d42acd82abe84d054cb74f7863b0/ipython-8.18.1-py3-none-any.whl", hash = "sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397", size = 808161 }, +] + +[[package]] +name = "itsdangerous" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9c/cb/8ac0172223afbccb63986cc25049b154ecfb5e85932587206f42317be31d/itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173", size = 54410 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/96/92447566d16df59b2a776c0fb82dbc4d9e07cd95062562af01e408583fc4/itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef", size = 16234 }, +] + +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278 }, +] + +[[package]] +name = "jinja2" +version = "3.1.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/af/92/b3130cbbf5591acf9ade8708c365f3238046ac7cb8ccba6e81abccb0ccff/jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb", size = 244674 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/0f/2ba5fbcd631e3e88689309dbe978c5769e883e4b84ebfe7da30b43275c5a/jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb", size = 134596 }, +] + +[[package]] +name = "jupyter-client" +version = "8.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "jupyter-core" }, + { name = "python-dateutil" }, + { 
name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/22/bf9f12fdaeae18019a468b68952a60fe6dbab5d67cd2a103cac7659b41ca/jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419", size = 342019 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/85/b0394e0b6fcccd2c1eeefc230978a6f8cb0c5df1e4cd3e7625735a0d7d1e/jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f", size = 106105 }, +] + +[[package]] +name = "jupyter-core" +version = "5.7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "platformdirs" }, + { name = "pywin32", marker = "platform_python_implementation != 'PyPy' and sys_platform == 'win32'" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/00/11/b56381fa6c3f4cc5d2cf54a7dbf98ad9aa0b339ef7a601d6053538b079a7/jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9", size = 87629 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/fb/108ecd1fe961941959ad0ee4e12ee7b8b1477247f30b1fdfd83ceaf017f0/jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409", size = 28965 }, +] + +[[package]] +name = "linkify-it-py" +version = "2.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "uc-micro-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2a/ae/bb56c6828e4797ba5a4821eec7c43b8bf40f69cda4d4f5f8c8a2810ec96a/linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048", size = 27946 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/1e/b832de447dee8b582cac175871d2f6c3d5077cc56d5575cadba1fd1cccfa/linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79", size = 19820 }, +] + +[[package]] +name = "locust" +version = "2.32.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "configargparse" }, + { name = "flask" }, + { name = "flask-cors" }, + { name = "flask-login" }, + { name = "gevent", marker = "python_full_version <= '3.12' or python_full_version > '3.13'" }, + { name = "geventhttpclient" }, + { name = "msgpack" }, + { name = "psutil" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "pyzmq" }, + { name = "requests" }, + { name = "setuptools" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/25/2e/482d251c300ad9acb2476ab26a2d7956302e4cd58c663f91fa04a37c71f2/locust-2.32.4.tar.gz", hash = "sha256:fd650cbc40842e721668a8d0f7f8224775432b40c63d0a378546b9a9f54b7559", size = 1196827 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/6e/09d99fd6d153d90440f11d3bed3abd07dcfd9eed44b2aae7db355ecdd711/locust-2.32.4-py3-none-any.whl", hash = "sha256:7c5b8767c0d771b5167d5d6b82878622faead74f394eb9cafe8891d89eb36b97", size = 1214885 }, +] + +[[package]] +name = "loguru" +version = "0.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "win32-setctime", marker = "sys_platform == 'win32'" }, +] 
+sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595 }, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 }, +] + +[package.optional-dependencies] +linkify = [ + { name = "linkify-it-py" }, +] +plugins = [ + { name = "mdit-py-plugins" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357 }, + { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393 }, + { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732 }, + { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866 }, + { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964 }, + { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977 }, + { url = 
"https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366 }, + { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091 }, + { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065 }, + { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514 }, + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353 }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392 }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984 }, + { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120 }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032 }, + { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057 }, + { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359 }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306 }, + { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = 
"sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094 }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521 }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348 }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149 }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118 }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993 }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178 }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319 }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, + { url = 
"https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, + { url = "https://files.pythonhosted.org/packages/a7/ea/9b1530c3fdeeca613faeb0fb5cbcf2389d816072fab72a71b45749ef6062/MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", size = 14344 }, + { url = "https://files.pythonhosted.org/packages/4b/c2/fbdbfe48848e7112ab05e627e718e854d20192b674952d9042ebd8c9e5de/MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", size = 12389 }, + { url = "https://files.pythonhosted.org/packages/f0/25/7a7c6e4dbd4f867d95d94ca15449e91e52856f6ed1905d58ef1de5e211d0/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", size = 21607 }, + { url = "https://files.pythonhosted.org/packages/53/8f/f339c98a178f3c1e545622206b40986a4c3307fe39f70ccd3d9df9a9e425/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", size = 20728 }, + { url = "https://files.pythonhosted.org/packages/1a/03/8496a1a78308456dbd50b23a385c69b41f2e9661c67ea1329849a598a8f9/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", size = 20826 }, + { url = "https://files.pythonhosted.org/packages/e6/cf/0a490a4bd363048c3022f2f475c8c05582179bb179defcee4766fb3dcc18/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", size = 21843 }, + { url = 
"https://files.pythonhosted.org/packages/19/a3/34187a78613920dfd3cdf68ef6ce5e99c4f3417f035694074beb8848cd77/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", size = 21219 }, + { url = "https://files.pythonhosted.org/packages/17/d8/5811082f85bb88410ad7e452263af048d685669bbbfb7b595e8689152498/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", size = 20946 }, + { url = "https://files.pythonhosted.org/packages/7c/31/bd635fb5989440d9365c5e3c47556cfea121c7803f5034ac843e8f37c2f2/MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", size = 15063 }, + { url = "https://files.pythonhosted.org/packages/b3/73/085399401383ce949f727afec55ec3abd76648d04b9f22e1c0e99cb4bec3/MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", size = 15506 }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 }, +] + +[[package]] +name = "mdit-py-plugins" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/03/a2ecab526543b152300717cf232bb4bb8605b6edb946c845016fa9c9c9fd/mdit_py_plugins-0.4.2.tar.gz", hash = "sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5", size = 43542 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/f7/7782a043553ee469c1ff49cfa1cdace2d6bf99a1f333cf38676b3ddf30da/mdit_py_plugins-0.4.2-py3-none-any.whl", hash = "sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636", size = 55316 }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 }, +] + +[[package]] +name = "msgpack" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cb/d0/7555686ae7ff5731205df1012ede15dd9d927f6227ea151e901c7406af4f/msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e", size = 167260 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/4b/f9/a892a6038c861fa849b11a2bb0502c07bc698ab6ea53359e5771397d883b/msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd", size = 150428 }, + { url = "https://files.pythonhosted.org/packages/df/7a/d174cc6a3b6bb85556e6a046d3193294a92f9a8e583cdbd46dc8a1d7e7f4/msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d", size = 84131 }, + { url = "https://files.pythonhosted.org/packages/08/52/bf4fbf72f897a23a56b822997a72c16de07d8d56d7bf273242f884055682/msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5", size = 81215 }, + { url = "https://files.pythonhosted.org/packages/02/95/dc0044b439b518236aaf012da4677c1b8183ce388411ad1b1e63c32d8979/msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5", size = 371229 }, + { url = "https://files.pythonhosted.org/packages/ff/75/09081792db60470bef19d9c2be89f024d366b1e1973c197bb59e6aabc647/msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e", size = 378034 }, + { url = "https://files.pythonhosted.org/packages/32/d3/c152e0c55fead87dd948d4b29879b0f14feeeec92ef1fd2ec21b107c3f49/msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b", size = 363070 }, + { url = "https://files.pythonhosted.org/packages/d9/2c/82e73506dd55f9e43ac8aa007c9dd088c6f0de2aa19e8f7330e6a65879fc/msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f", size = 359863 }, + { url = "https://files.pythonhosted.org/packages/cb/a0/3d093b248837094220e1edc9ec4337de3443b1cfeeb6e0896af8ccc4cc7a/msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68", size = 368166 }, + { url = "https://files.pythonhosted.org/packages/e4/13/7646f14f06838b406cf5a6ddbb7e8dc78b4996d891ab3b93c33d1ccc8678/msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b", size = 370105 }, + { url = "https://files.pythonhosted.org/packages/67/fa/dbbd2443e4578e165192dabbc6a22c0812cda2649261b1264ff515f19f15/msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044", size = 68513 }, + { url = "https://files.pythonhosted.org/packages/24/ce/c2c8fbf0ded750cb63cbcbb61bc1f2dfd69e16dca30a8af8ba80ec182dcd/msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f", size = 74687 }, + { url = "https://files.pythonhosted.org/packages/b7/5e/a4c7154ba65d93be91f2f1e55f90e76c5f91ccadc7efc4341e6f04c8647f/msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7", size = 150803 }, + { url = "https://files.pythonhosted.org/packages/60/c2/687684164698f1d51c41778c838d854965dd284a4b9d3a44beba9265c931/msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa", size = 
84343 }, + { url = "https://files.pythonhosted.org/packages/42/ae/d3adea9bb4a1342763556078b5765e666f8fdf242e00f3f6657380920972/msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701", size = 81408 }, + { url = "https://files.pythonhosted.org/packages/dc/17/6313325a6ff40ce9c3207293aee3ba50104aed6c2c1559d20d09e5c1ff54/msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6", size = 396096 }, + { url = "https://files.pythonhosted.org/packages/a8/a1/ad7b84b91ab5a324e707f4c9761633e357820b011a01e34ce658c1dda7cc/msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59", size = 403671 }, + { url = "https://files.pythonhosted.org/packages/bb/0b/fd5b7c0b308bbf1831df0ca04ec76fe2f5bf6319833646b0a4bd5e9dc76d/msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0", size = 387414 }, + { url = "https://files.pythonhosted.org/packages/f0/03/ff8233b7c6e9929a1f5da3c7860eccd847e2523ca2de0d8ef4878d354cfa/msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e", size = 383759 }, + { url = "https://files.pythonhosted.org/packages/1f/1b/eb82e1fed5a16dddd9bc75f0854b6e2fe86c0259c4353666d7fab37d39f4/msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6", size = 394405 }, + { url = "https://files.pythonhosted.org/packages/90/2e/962c6004e373d54ecf33d695fb1402f99b51832631e37c49273cc564ffc5/msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5", size = 396041 }, + { url = "https://files.pythonhosted.org/packages/f8/20/6e03342f629474414860c48aeffcc2f7f50ddaf351d95f20c3f1c67399a8/msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88", size = 68538 }, + { url = "https://files.pythonhosted.org/packages/aa/c4/5a582fc9a87991a3e6f6800e9bb2f3c82972912235eb9539954f3e9997c7/msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788", size = 74871 }, + { url = "https://files.pythonhosted.org/packages/e1/d6/716b7ca1dbde63290d2973d22bbef1b5032ca634c3ff4384a958ec3f093a/msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d", size = 152421 }, + { url = "https://files.pythonhosted.org/packages/70/da/5312b067f6773429cec2f8f08b021c06af416bba340c912c2ec778539ed6/msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2", size = 85277 }, + { url = "https://files.pythonhosted.org/packages/28/51/da7f3ae4462e8bb98af0d5bdf2707f1b8c65a0d4f496e46b6afb06cbc286/msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420", size = 82222 }, + { url = "https://files.pythonhosted.org/packages/33/af/dc95c4b2a49cff17ce47611ca9ba218198806cad7796c0b01d1e332c86bb/msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2", size = 392971 }, + { url = "https://files.pythonhosted.org/packages/f1/54/65af8de681fa8255402c80eda2a501ba467921d5a7a028c9c22a2c2eedb5/msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39", size = 401403 }, + { url = "https://files.pythonhosted.org/packages/97/8c/e333690777bd33919ab7024269dc3c41c76ef5137b211d776fbb404bfead/msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f", size = 385356 }, + { url = "https://files.pythonhosted.org/packages/57/52/406795ba478dc1c890559dd4e89280fa86506608a28ccf3a72fbf45df9f5/msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247", size = 383028 }, + { url = "https://files.pythonhosted.org/packages/e7/69/053b6549bf90a3acadcd8232eae03e2fefc87f066a5b9fbb37e2e608859f/msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c", size = 391100 }, + { url = "https://files.pythonhosted.org/packages/23/f0/d4101d4da054f04274995ddc4086c2715d9b93111eb9ed49686c0f7ccc8a/msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b", size = 394254 }, + { url = "https://files.pythonhosted.org/packages/1c/12/cf07458f35d0d775ff3a2dc5559fa2e1fcd06c46f1ef510e594ebefdca01/msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b", size = 69085 }, + { url = "https://files.pythonhosted.org/packages/73/80/2708a4641f7d553a63bc934a3eb7214806b5b39d200133ca7f7afb0a53e8/msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f", size = 75347 }, + { url = "https://files.pythonhosted.org/packages/c8/b0/380f5f639543a4ac413e969109978feb1f3c66e931068f91ab6ab0f8be00/msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf", size = 151142 }, + { url = "https://files.pythonhosted.org/packages/c8/ee/be57e9702400a6cb2606883d55b05784fada898dfc7fd12608ab1fdb054e/msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330", size = 84523 }, + { url = "https://files.pythonhosted.org/packages/7e/3a/2919f63acca3c119565449681ad08a2f84b2171ddfcff1dba6959db2cceb/msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734", size = 81556 }, + { url = "https://files.pythonhosted.org/packages/7c/43/a11113d9e5c1498c145a8925768ea2d5fce7cbab15c99cda655aa09947ed/msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e", size = 392105 }, + { url = "https://files.pythonhosted.org/packages/2d/7b/2c1d74ca6c94f70a1add74a8393a0138172207dc5de6fc6269483519d048/msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca", size = 399979 }, + { url = 
"https://files.pythonhosted.org/packages/82/8c/cf64ae518c7b8efc763ca1f1348a96f0e37150061e777a8ea5430b413a74/msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915", size = 383816 }, + { url = "https://files.pythonhosted.org/packages/69/86/a847ef7a0f5ef3fa94ae20f52a4cacf596a4e4a010197fbcc27744eb9a83/msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d", size = 380973 }, + { url = "https://files.pythonhosted.org/packages/aa/90/c74cf6e1126faa93185d3b830ee97246ecc4fe12cf9d2d31318ee4246994/msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434", size = 387435 }, + { url = "https://files.pythonhosted.org/packages/7a/40/631c238f1f338eb09f4acb0f34ab5862c4e9d7eda11c1b685471a4c5ea37/msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c", size = 399082 }, + { url = "https://files.pythonhosted.org/packages/e9/1b/fa8a952be252a1555ed39f97c06778e3aeb9123aa4cccc0fd2acd0b4e315/msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc", size = 69037 }, + { url = "https://files.pythonhosted.org/packages/b6/bc/8bd826dd03e022153bfa1766dcdec4976d6c818865ed54223d71f07862b3/msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f", size = 75140 }, + { url = "https://files.pythonhosted.org/packages/f7/3b/544a5c5886042b80e1f4847a4757af3430f60d106d8d43bb7be72c9e9650/msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1", size = 150713 }, + { url = "https://files.pythonhosted.org/packages/93/af/d63f25bcccd3d6f06fd518ba4a321f34a4370c67b579ca5c70b4a37721b4/msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48", size = 84277 }, + { url = "https://files.pythonhosted.org/packages/92/9b/5c0dfb0009b9f96328664fecb9f8e4e9c8a1ae919e6d53986c1b813cb493/msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c", size = 81357 }, + { url = "https://files.pythonhosted.org/packages/d1/7c/3a9ee6ec9fc3e47681ad39b4d344ee04ff20a776b594fba92d88d8b68356/msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468", size = 371256 }, + { url = "https://files.pythonhosted.org/packages/f7/0a/8a213cecea7b731c540f25212ba5f9a818f358237ac51a44d448bd753690/msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74", size = 377868 }, + { url = "https://files.pythonhosted.org/packages/1b/94/a82b0db0981e9586ed5af77d6cfb343da05d7437dceaae3b35d346498110/msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846", size = 363370 }, + { url = "https://files.pythonhosted.org/packages/93/fc/6c7f0dcc1c913e14861e16eaf494c07fc1dde454ec726ff8cebcf348ae53/msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346", size = 358970 }, + { url = "https://files.pythonhosted.org/packages/1f/c6/e4a04c0089deace870dabcdef5c9f12798f958e2e81d5012501edaff342f/msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b", size = 366358 }, + { url = "https://files.pythonhosted.org/packages/b6/54/7d8317dac590cf16b3e08e3fb74d2081e5af44eb396f0effa13f17777f30/msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8", size = 370336 }, + { url = "https://files.pythonhosted.org/packages/dc/6f/a5a1f43b6566831e9630e5bc5d86034a8884386297302be128402555dde1/msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd", size = 68683 }, + { url = "https://files.pythonhosted.org/packages/5f/e8/2162621e18dbc36e2bc8492fd0e97b3975f5d89fe0472ae6d5f7fbdd8cf7/msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325", size = 74787 }, +] + +[[package]] +name = "multidict" +version = "6.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/be/504b89a5e9ca731cd47487e91c469064f8ae5af93b7259758dcfc2b9c848/multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a", size = 64002 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/29/68/259dee7fd14cf56a17c554125e534f6274c2860159692a414d0b402b9a6d/multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60", size = 48628 }, + { url = "https://files.pythonhosted.org/packages/50/79/53ba256069fe5386a4a9e80d4e12857ced9de295baf3e20c68cdda746e04/multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1", size = 29327 }, + { url = "https://files.pythonhosted.org/packages/ff/10/71f1379b05b196dae749b5ac062e87273e3f11634f447ebac12a571d90ae/multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53", size = 29689 }, + { url = "https://files.pythonhosted.org/packages/71/45/70bac4f87438ded36ad4793793c0095de6572d433d98575a5752629ef549/multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5", size = 126639 }, + { url = "https://files.pythonhosted.org/packages/80/cf/17f35b3b9509b4959303c05379c4bfb0d7dd05c3306039fc79cf035bbac0/multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581", size = 134315 }, + { url = "https://files.pythonhosted.org/packages/ef/1f/652d70ab5effb33c031510a3503d4d6efc5ec93153562f1ee0acdc895a57/multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56", size = 129471 }, + { url = "https://files.pythonhosted.org/packages/a6/64/2dd6c4c681688c0165dea3975a6a4eab4944ea30f35000f8b8af1df3148c/multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429", size = 124585 }, + { url = "https://files.pythonhosted.org/packages/87/56/e6ee5459894c7e554b57ba88f7257dc3c3d2d379cb15baaa1e265b8c6165/multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748", size = 116957 }, + { url = "https://files.pythonhosted.org/packages/36/9e/616ce5e8d375c24b84f14fc263c7ef1d8d5e8ef529dbc0f1df8ce71bb5b8/multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db", size = 128609 }, + { url = "https://files.pythonhosted.org/packages/8c/4f/4783e48a38495d000f2124020dc96bacc806a4340345211b1ab6175a6cb4/multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056", size = 123016 }, + { url = "https://files.pythonhosted.org/packages/3e/b3/4950551ab8fc39862ba5e9907dc821f896aa829b4524b4deefd3e12945ab/multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76", size = 133542 }, + { url = "https://files.pythonhosted.org/packages/96/4d/f0ce6ac9914168a2a71df117935bb1f1781916acdecbb43285e225b484b8/multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160", size = 130163 }, + { url = "https://files.pythonhosted.org/packages/be/72/17c9f67e7542a49dd252c5ae50248607dfb780bcc03035907dafefb067e3/multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7", size = 126832 }, + { url = "https://files.pythonhosted.org/packages/71/9f/72d719e248cbd755c8736c6d14780533a1606ffb3fbb0fbd77da9f0372da/multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0", size = 26402 }, + { url = "https://files.pythonhosted.org/packages/04/5a/d88cd5d00a184e1ddffc82aa2e6e915164a6d2641ed3606e766b5d2f275a/multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d", size = 28800 }, + { url = "https://files.pythonhosted.org/packages/93/13/df3505a46d0cd08428e4c8169a196131d1b0c4b515c3649829258843dde6/multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6", size = 48570 }, + { url = "https://files.pythonhosted.org/packages/f0/e1/a215908bfae1343cdb72f805366592bdd60487b4232d039c437fe8f5013d/multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156", size = 29316 }, + { url = "https://files.pythonhosted.org/packages/70/0f/6dc70ddf5d442702ed74f298d69977f904960b82368532c88e854b79f72b/multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb", size = 29640 }, + { url = "https://files.pythonhosted.org/packages/d8/6d/9c87b73a13d1cdea30b321ef4b3824449866bd7f7127eceed066ccb9b9ff/multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b", size = 131067 }, + { url = 
"https://files.pythonhosted.org/packages/cc/1e/1b34154fef373371fd6c65125b3d42ff5f56c7ccc6bfff91b9b3c60ae9e0/multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72", size = 138507 }, + { url = "https://files.pythonhosted.org/packages/fb/e0/0bc6b2bac6e461822b5f575eae85da6aae76d0e2a79b6665d6206b8e2e48/multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304", size = 133905 }, + { url = "https://files.pythonhosted.org/packages/ba/af/73d13b918071ff9b2205fcf773d316e0f8fefb4ec65354bbcf0b10908cc6/multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351", size = 129004 }, + { url = "https://files.pythonhosted.org/packages/74/21/23960627b00ed39643302d81bcda44c9444ebcdc04ee5bedd0757513f259/multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb", size = 121308 }, + { url = "https://files.pythonhosted.org/packages/8b/5c/cf282263ffce4a596ed0bb2aa1a1dddfe1996d6a62d08842a8d4b33dca13/multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3", size = 132608 }, + { url = "https://files.pythonhosted.org/packages/d7/3e/97e778c041c72063f42b290888daff008d3ab1427f5b09b714f5a8eff294/multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399", size = 127029 }, + { url = "https://files.pythonhosted.org/packages/47/ac/3efb7bfe2f3aefcf8d103e9a7162572f01936155ab2f7ebcc7c255a23212/multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423", size = 137594 }, + { url = "https://files.pythonhosted.org/packages/42/9b/6c6e9e8dc4f915fc90a9b7798c44a30773dea2995fdcb619870e705afe2b/multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3", size = 134556 }, + { url = "https://files.pythonhosted.org/packages/1d/10/8e881743b26aaf718379a14ac58572a240e8293a1c9d68e1418fb11c0f90/multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753", size = 130993 }, + { url = "https://files.pythonhosted.org/packages/45/84/3eb91b4b557442802d058a7579e864b329968c8d0ea57d907e7023c677f2/multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80", size = 26405 }, + { url = "https://files.pythonhosted.org/packages/9f/0b/ad879847ecbf6d27e90a6eabb7eff6b62c129eefe617ea45eae7c1f0aead/multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926", size = 28795 }, + { url = "https://files.pythonhosted.org/packages/fd/16/92057c74ba3b96d5e211b553895cd6dc7cc4d1e43d9ab8fafc727681ef71/multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa", size = 48713 }, + { url = "https://files.pythonhosted.org/packages/94/3d/37d1b8893ae79716179540b89fc6a0ee56b4a65fcc0d63535c6f5d96f217/multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436", size = 29516 }, + { url = "https://files.pythonhosted.org/packages/a2/12/adb6b3200c363062f805275b4c1e656be2b3681aada66c80129932ff0bae/multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761", size = 29557 }, + { url = "https://files.pythonhosted.org/packages/47/e9/604bb05e6e5bce1e6a5cf80a474e0f072e80d8ac105f1b994a53e0b28c42/multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e", size = 130170 }, + { url = "https://files.pythonhosted.org/packages/7e/13/9efa50801785eccbf7086b3c83b71a4fb501a4d43549c2f2f80b8787d69f/multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef", size = 134836 }, + { url = "https://files.pythonhosted.org/packages/bf/0f/93808b765192780d117814a6dfcc2e75de6dcc610009ad408b8814dca3ba/multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95", size = 133475 }, + { url = "https://files.pythonhosted.org/packages/d3/c8/529101d7176fe7dfe1d99604e48d69c5dfdcadb4f06561f465c8ef12b4df/multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925", size = 131049 }, + { url = "https://files.pythonhosted.org/packages/ca/0c/fc85b439014d5a58063e19c3a158a889deec399d47b5269a0f3b6a2e28bc/multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966", size = 120370 }, + { url = "https://files.pythonhosted.org/packages/db/46/d4416eb20176492d2258fbd47b4abe729ff3b6e9c829ea4236f93c865089/multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305", size = 125178 }, + { url = "https://files.pythonhosted.org/packages/5b/46/73697ad7ec521df7de5531a32780bbfd908ded0643cbe457f981a701457c/multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2", size = 119567 }, + { url = "https://files.pythonhosted.org/packages/cd/ed/51f060e2cb0e7635329fa6ff930aa5cffa17f4c7f5c6c3ddc3500708e2f2/multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2", size = 129822 }, + { url = "https://files.pythonhosted.org/packages/df/9e/ee7d1954b1331da3eddea0c4e08d9142da5f14b1321c7301f5014f49d492/multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6", size = 128656 }, + { url = "https://files.pythonhosted.org/packages/77/00/8538f11e3356b5d95fa4b024aa566cde7a38aa7a5f08f4912b32a037c5dc/multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3", size = 125360 }, + { url = "https://files.pythonhosted.org/packages/be/05/5d334c1f2462d43fec2363cd00b1c44c93a78c3925d952e9a71caf662e96/multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133", size = 26382 }, + { url = 
"https://files.pythonhosted.org/packages/a3/bf/f332a13486b1ed0496d624bcc7e8357bb8053823e8cd4b9a18edc1d97e73/multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1", size = 28529 }, + { url = "https://files.pythonhosted.org/packages/22/67/1c7c0f39fe069aa4e5d794f323be24bf4d33d62d2a348acdb7991f8f30db/multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008", size = 48771 }, + { url = "https://files.pythonhosted.org/packages/3c/25/c186ee7b212bdf0df2519eacfb1981a017bda34392c67542c274651daf23/multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f", size = 29533 }, + { url = "https://files.pythonhosted.org/packages/67/5e/04575fd837e0958e324ca035b339cea174554f6f641d3fb2b4f2e7ff44a2/multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28", size = 29595 }, + { url = "https://files.pythonhosted.org/packages/d3/b2/e56388f86663810c07cfe4a3c3d87227f3811eeb2d08450b9e5d19d78876/multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b", size = 130094 }, + { url = "https://files.pythonhosted.org/packages/6c/ee/30ae9b4186a644d284543d55d491fbd4239b015d36b23fea43b4c94f7052/multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c", size = 134876 }, + { url = "https://files.pythonhosted.org/packages/84/c7/70461c13ba8ce3c779503c70ec9d0345ae84de04521c1f45a04d5f48943d/multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3", size = 133500 }, + { url = "https://files.pythonhosted.org/packages/4a/9f/002af221253f10f99959561123fae676148dd730e2daa2cd053846a58507/multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44", size = 131099 }, + { url = "https://files.pythonhosted.org/packages/82/42/d1c7a7301d52af79d88548a97e297f9d99c961ad76bbe6f67442bb77f097/multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2", size = 120403 }, + { url = "https://files.pythonhosted.org/packages/68/f3/471985c2c7ac707547553e8f37cff5158030d36bdec4414cb825fbaa5327/multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3", size = 125348 }, + { url = "https://files.pythonhosted.org/packages/67/2c/e6df05c77e0e433c214ec1d21ddd203d9a4770a1f2866a8ca40a545869a0/multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa", size = 119673 }, + { url = "https://files.pythonhosted.org/packages/c5/cd/bc8608fff06239c9fb333f9db7743a1b2eafe98c2666c9a196e867a3a0a4/multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa", size = 129927 }, + { url = "https://files.pythonhosted.org/packages/44/8e/281b69b7bc84fc963a44dc6e0bbcc7150e517b91df368a27834299a526ac/multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", 
hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4", size = 128711 }, + { url = "https://files.pythonhosted.org/packages/12/a4/63e7cd38ed29dd9f1881d5119f272c898ca92536cdb53ffe0843197f6c85/multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6", size = 125519 }, + { url = "https://files.pythonhosted.org/packages/38/e0/4f5855037a72cd8a7a2f60a3952d9aa45feedb37ae7831642102604e8a37/multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81", size = 26426 }, + { url = "https://files.pythonhosted.org/packages/7e/a5/17ee3a4db1e310b7405f5d25834460073a8ccd86198ce044dfaf69eac073/multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774", size = 28531 }, + { url = "https://files.pythonhosted.org/packages/e7/c9/9e153a6572b38ac5ff4434113af38acf8d5e9957897cdb1f513b3d6614ed/multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c", size = 48550 }, + { url = "https://files.pythonhosted.org/packages/76/f5/79565ddb629eba6c7f704f09a09df085c8dc04643b12506f10f718cee37a/multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1", size = 29298 }, + { url = "https://files.pythonhosted.org/packages/60/1b/9851878b704bc98e641a3e0bce49382ae9e05743dac6d97748feb5b7baba/multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c", size = 29641 }, + { url = "https://files.pythonhosted.org/packages/89/87/d451d45aab9e422cb0fb2f7720c31a4c1d3012c740483c37f642eba568fb/multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c", size = 126202 }, + { url = "https://files.pythonhosted.org/packages/fa/b4/27cbe9f3e2e469359887653f2e45470272eef7295139916cc21107c6b48c/multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f", size = 133925 }, + { url = "https://files.pythonhosted.org/packages/4d/a3/afc841899face8adfd004235ce759a37619f6ec99eafd959650c5ce4df57/multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875", size = 129039 }, + { url = "https://files.pythonhosted.org/packages/5e/41/0d0fb18c1ad574f807196f5f3d99164edf9de3e169a58c6dc2d6ed5742b9/multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255", size = 124072 }, + { url = "https://files.pythonhosted.org/packages/00/22/defd7a2e71a44e6e5b9a5428f972e5b572e7fe28e404dfa6519bbf057c93/multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30", size = 116532 }, + { url = "https://files.pythonhosted.org/packages/91/25/f7545102def0b1d456ab6449388eed2dfd822debba1d65af60194904a23a/multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057", size = 128173 }, + { url = 
"https://files.pythonhosted.org/packages/45/79/3dbe8d35fc99f5ea610813a72ab55f426cb9cf482f860fa8496e5409be11/multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657", size = 122654 }, + { url = "https://files.pythonhosted.org/packages/97/cb/209e735eeab96e1b160825b5d0b36c56d3862abff828fc43999bb957dcad/multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28", size = 133197 }, + { url = "https://files.pythonhosted.org/packages/e4/3a/a13808a7ada62808afccea67837a79d00ad6581440015ef00f726d064c2d/multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972", size = 129754 }, + { url = "https://files.pythonhosted.org/packages/77/dd/8540e139eafb240079242da8f8ffdf9d3f4b4ad1aac5a786cd4050923783/multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43", size = 126402 }, + { url = "https://files.pythonhosted.org/packages/86/99/e82e1a275d8b1ea16d3a251474262258dbbe41c05cce0c01bceda1fc8ea5/multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada", size = 26421 }, + { url = "https://files.pythonhosted.org/packages/86/1c/9fa630272355af7e4446a2c7550c259f11ee422ab2d30ff90a0a71cf3d9e/multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a", size = 28791 }, + { url = "https://files.pythonhosted.org/packages/99/b7/b9e70fde2c0f0c9af4cc5277782a89b66d35948ea3369ec9f598358c3ac5/multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506", size = 10051 }, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195 }, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.29.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "importlib-metadata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bc/8e/b886a5e9861afa188d1fe671fb96ff9a1d90a23d57799331e137cc95d573/opentelemetry_api-1.29.0.tar.gz", hash = "sha256:d04a6cf78aad09614f52964ecb38021e248f5714dc32c2e0d8fd99517b4d69cf", size = 62900 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/43/53/5249ea860d417a26a3a6f1bdedfc0748c4f081a3adaec3d398bc0f7c6a71/opentelemetry_api-1.29.0-py3-none-any.whl", hash = "sha256:5fcd94c4141cc49c736271f3e1efb777bebe9cc535759c54c936cca4f1b312b8", size = 64304 }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.29.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/58/f7fd7eaf592b2521999a4271ab3ce1c82fe37fe9b0dc25c348398d95d66a/opentelemetry_exporter_otlp_proto_common-1.29.0.tar.gz", hash = "sha256:e7c39b5dbd1b78fe199e40ddfe477e6983cb61aa74ba836df09c3869a3e3e163", size = 19133 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/75/7609bda3d72bf307839570b226180513e854c01443ebe265ed732a4980fc/opentelemetry_exporter_otlp_proto_common-1.29.0-py3-none-any.whl", hash = "sha256:a9d7376c06b4da9cf350677bcddb9618ed4b8255c3f6476975f5e38274ecd3aa", size = 18459 }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.29.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "googleapis-common-protos" }, + { name = "grpcio" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/aa/b3f2190613141f35fe15145bf438334fdd1eac8aeeee4f7ecbc887999443/opentelemetry_exporter_otlp_proto_grpc-1.29.0.tar.gz", hash = "sha256:3d324d07d64574d72ed178698de3d717f62a059a93b6b7685ee3e303384e73ea", size = 26224 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/de/4b4127a25d1594851d99032f3a9acb09cb512d11edec713410fb906607f4/opentelemetry_exporter_otlp_proto_grpc-1.29.0-py3-none-any.whl", hash = "sha256:5a2a3a741a2543ed162676cf3eefc2b4150e6f4f0a193187afb0d0e65039c69c", size = 18520 }, +] + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.50b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "packaging" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/79/2e/2e59a7cb636dc394bd7cf1758ada5e8ed87590458ca6bb2f9c26e0243847/opentelemetry_instrumentation-0.50b0.tar.gz", hash = "sha256:7d98af72de8dec5323e5202e46122e5f908592b22c6d24733aad619f07d82979", size = 26539 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/b1/55a77152a83ec8998e520a3a575f44af1020cfe4bdc000b7538583293b85/opentelemetry_instrumentation-0.50b0-py3-none-any.whl", hash = "sha256:b8f9fc8812de36e1c6dffa5bfc6224df258841fb387b6dfe5df15099daa10630", size = 30728 }, +] + +[[package]] +name = "opentelemetry-instrumentation-asgi" +version = "0.50b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asgiref" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/49/cc/a7b2fd243c6d2621803092eba62e450071b6752dfe4f64f530bbfd91a328/opentelemetry_instrumentation_asgi-0.50b0.tar.gz", hash = "sha256:3ca4cb5616ae6a3e8ce86e7d5c360a8d8cc8ed722cf3dc8a5e44300774e87d49", size = 24105 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d2/81/0899c6b56b1023835f266d909250d439174afa0c34ed5944c5021d3da263/opentelemetry_instrumentation_asgi-0.50b0-py3-none-any.whl", hash = "sha256:2ba1297f746e55dec5a17fe825689da0613662fb25c004c3965a6c54b1d5be22", size = 16304 }, +] + +[[package]] +name = "opentelemetry-instrumentation-dbapi" +version = "0.50b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/27/94/f6f2c369f75e02c551dfa6ab5818e606f73eca2409930c467fcdb0e5634e/opentelemetry_instrumentation_dbapi-0.50b0.tar.gz", hash = "sha256:2603ca39e216893026c185ca8c44c326c0a9a763d5afff2309bd6195c50b7c49", size = 12613 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/49/40def6cd71a6d248e9e48a731021cb9bfc70e5ec09986826ad29bd44b23c/opentelemetry_instrumentation_dbapi-0.50b0-py3-none-any.whl", hash = "sha256:23a730c3d7372b04b8a9507d2a67c5efbf92ff718eaa002b81ffbaf2b01d270f", size = 11533 }, +] + +[[package]] +name = "opentelemetry-instrumentation-fastapi" +version = "0.50b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-asgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8d/f8/1917b0b3e414e23c7d71c9a33f0ce020f94bc47d22a30f54ace704e07588/opentelemetry_instrumentation_fastapi-0.50b0.tar.gz", hash = "sha256:16b9181682136da210295def2bb304a32fb9bdee9a935cdc9da43567f7c1149e", size = 19214 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/d6/37784bb30b213e2dd6838b9f96c2940907022c1b75ef1ff18a99afe42433/opentelemetry_instrumentation_fastapi-0.50b0-py3-none-any.whl", hash = "sha256:8f03b738495e4705fbae51a2826389c7369629dace89d0f291c06ffefdff5e52", size = 12079 }, +] + +[[package]] +name = "opentelemetry-instrumentation-sqlite3" +version = "0.50b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/64/8a/9a39717588e121f98481561766d0002c41d96935c6ea2b54dd6b5581d1c7/opentelemetry_instrumentation_sqlite3-0.50b0.tar.gz", hash = "sha256:b7c98f7c72f01e3ca6751c2075eebbef8335fc08800ccdf1d97741207cdbe1ba", size = 7718 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/37/d2/0088c11c29e6942ab24910608ce2a416d2b5e76c3d29702578aadec97e52/opentelemetry_instrumentation_sqlite3-0.50b0-py3-none-any.whl", hash = "sha256:37e030bcc87733f769faf87c81c4de9dc932b74b565a1e19e7d13e17ec120901", size = 8938 }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.29.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/80/52/fd3b3d79e1b00ad2dcac92db6885e49bedbf7a6828647954e4952d653132/opentelemetry_proto-1.29.0.tar.gz", hash = "sha256:3c136aa293782e9b44978c738fff72877a4b78b5d21a64e879898db7b2d93e5d", size = 34320 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/66/a500e38ee322d89fce61c74bd7769c8ef3bebc6c2f43fda5f3fc3441286d/opentelemetry_proto-1.29.0-py3-none-any.whl", hash = 
"sha256:495069c6f5495cbf732501cdcd3b7f60fda2b9d3d4255706ca99b7ca8dec53ff", size = 55818 }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.29.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0c/5a/1ed4c3cf6c09f80565fc085f7e8efa0c222712fd2a9412d07424705dcf72/opentelemetry_sdk-1.29.0.tar.gz", hash = "sha256:b0787ce6aade6ab84315302e72bd7a7f2f014b0fb1b7c3295b88afe014ed0643", size = 157229 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/1d/512b86af21795fb463726665e2f61db77d384e8779fdcf4cb0ceec47866d/opentelemetry_sdk-1.29.0-py3-none-any.whl", hash = "sha256:173be3b5d3f8f7d671f20ea37056710217959e774e2749d984355d1f9391a30a", size = 118078 }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.50b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "opentelemetry-api" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e7/4e/d7c7c91ff47cd96fe4095dd7231701aec7347426fd66872ff320d6cd1fcc/opentelemetry_semantic_conventions-0.50b0.tar.gz", hash = "sha256:02dc6dbcb62f082de9b877ff19a3f1ffaa3c306300fa53bfac761c4567c83d38", size = 100459 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/fb/dc15fad105450a015e913cfa4f5c27b6a5f1bea8fb649f8cae11e699c8af/opentelemetry_semantic_conventions-0.50b0-py3-none-any.whl", hash = "sha256:e87efba8fdb67fb38113efea6a349531e75ed7ffc01562f65b802fcecb5e115e", size = 166602 }, +] + +[[package]] +name = "opentelemetry-util-http" +version = "0.50b0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/69/10/ce3f0d1157cedbd819194f0b27a6bbb7c19a8bceb3941e4a4775014076cf/opentelemetry_util_http-0.50b0.tar.gz", hash = "sha256:dc4606027e1bc02aabb9533cc330dd43f874fca492e4175c31d7154f341754af", size = 7859 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8a/9e1b54f50d1fddebbeac9a9b0632f8db6ece7add904fb593ee2e268ee4de/opentelemetry_util_http-0.50b0-py3-none-any.whl", hash = "sha256:21f8aedac861ffa3b850f8c0a6c373026189eb8630ac6e14a2bf8c55695cc090", size = 6942 }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, +] + +[[package]] +name = "parso" +version = "0.8.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 }, +] + 
+[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191 }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, +] + +[[package]] +name = "pid" +version = "3.0.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "psutil", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/45/9e551a0e30d68d18334bc6fd8971b3ab1485423877902eb4f26cc28d7bd5/pid-3.0.4.tar.gz", hash = "sha256:0e33670e83f6a33ebb0822e43a609c3247178d4a375ff50a4689e266d853eb66", size = 16228 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/30/7ebdc66dff1611756d4b38340effdb470aa2693c8789a8fef0bd8dd9332a/pid-3.0.4-py2.py3-none-any.whl", hash = "sha256:af2bf11c5d637bba8a80ce3368279c5eca28f08e201ac828538e1b9ad9e35ef9", size = 11975 }, +] + +[[package]] +name = "platformdirs" +version = "4.3.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439 }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + +[[package]] +name = "pre-commit" +version = "4.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cfgv" }, + { name = "identify" }, + { name = "nodeenv" }, + { name = "pyyaml" }, + { name = "virtualenv" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/2e/c8/e22c292035f1bac8b9f5237a2622305bc0304e776080b246f3df57c4ff9f/pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2", size = 191678 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/16/8f/496e10d51edd6671ebe0432e33ff800aa86775d2d147ce7d43389324a525/pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878", size = 218713 }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.48" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2d/4f/feb5e137aff82f7c7f3248267b97451da3644f6cdc218edfe549fb354127/prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90", size = 424684 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/6a/fd08d94654f7e67c52ca30523a178b3f8ccc4237fce4be90d39c938a831a/prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e", size = 386595 }, +] + +[[package]] +name = "propcache" +version = "0.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/c8/2a13f78d82211490855b2fb303b6721348d0787fdd9a12ac46d99d3acde1/propcache-0.2.1.tar.gz", hash = "sha256:3f77ce728b19cb537714499928fe800c3dda29e8d9428778fc7c186da4c09a64", size = 41735 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/a5/0ea64c9426959ef145a938e38c832fc551843481d356713ececa9a8a64e8/propcache-0.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6b3f39a85d671436ee3d12c017f8fdea38509e4f25b28eb25877293c98c243f6", size = 79296 }, + { url = "https://files.pythonhosted.org/packages/76/5a/916db1aba735f55e5eca4733eea4d1973845cf77dfe67c2381a2ca3ce52d/propcache-0.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d51fbe4285d5db5d92a929e3e21536ea3dd43732c5b177c7ef03f918dff9f2", size = 45622 }, + { url = "https://files.pythonhosted.org/packages/2d/62/685d3cf268b8401ec12b250b925b21d152b9d193b7bffa5fdc4815c392c2/propcache-0.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6445804cf4ec763dc70de65a3b0d9954e868609e83850a47ca4f0cb64bd79fea", size = 45133 }, + { url = "https://files.pythonhosted.org/packages/4d/3d/31c9c29ee7192defc05aa4d01624fd85a41cf98e5922aaed206017329944/propcache-0.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9479aa06a793c5aeba49ce5c5692ffb51fcd9a7016e017d555d5e2b0045d212", size = 204809 }, + { url = "https://files.pythonhosted.org/packages/10/a1/e4050776f4797fc86140ac9a480d5dc069fbfa9d499fe5c5d2fa1ae71f07/propcache-0.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9631c5e8b5b3a0fda99cb0d29c18133bca1e18aea9effe55adb3da1adef80d3", size = 219109 }, + { url = "https://files.pythonhosted.org/packages/c9/c0/e7ae0df76343d5e107d81e59acc085cea5fd36a48aa53ef09add7503e888/propcache-0.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3156628250f46a0895f1f36e1d4fbe062a1af8718ec3ebeb746f1d23f0c5dc4d", size = 217368 }, + { url = "https://files.pythonhosted.org/packages/fc/e1/e0a2ed6394b5772508868a977d3238f4afb2eebaf9976f0b44a8d347ad63/propcache-0.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b6fb63ae352e13748289f04f37868099e69dba4c2b3e271c46061e82c745634", size = 205124 }, + { url = 
"https://files.pythonhosted.org/packages/50/c1/e388c232d15ca10f233c778bbdc1034ba53ede14c207a72008de45b2db2e/propcache-0.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:887d9b0a65404929641a9fabb6452b07fe4572b269d901d622d8a34a4e9043b2", size = 195463 }, + { url = "https://files.pythonhosted.org/packages/0a/fd/71b349b9def426cc73813dbd0f33e266de77305e337c8c12bfb0a2a82bfb/propcache-0.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a96dc1fa45bd8c407a0af03b2d5218392729e1822b0c32e62c5bf7eeb5fb3958", size = 198358 }, + { url = "https://files.pythonhosted.org/packages/02/f2/d7c497cd148ebfc5b0ae32808e6c1af5922215fe38c7a06e4e722fe937c8/propcache-0.2.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a7e65eb5c003a303b94aa2c3852ef130230ec79e349632d030e9571b87c4698c", size = 195560 }, + { url = "https://files.pythonhosted.org/packages/bb/57/f37041bbe5e0dfed80a3f6be2612a3a75b9cfe2652abf2c99bef3455bbad/propcache-0.2.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:999779addc413181912e984b942fbcc951be1f5b3663cd80b2687758f434c583", size = 196895 }, + { url = "https://files.pythonhosted.org/packages/83/36/ae3cc3e4f310bff2f064e3d2ed5558935cc7778d6f827dce74dcfa125304/propcache-0.2.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:19a0f89a7bb9d8048d9c4370c9c543c396e894c76be5525f5e1ad287f1750ddf", size = 207124 }, + { url = "https://files.pythonhosted.org/packages/8c/c4/811b9f311f10ce9d31a32ff14ce58500458443627e4df4ae9c264defba7f/propcache-0.2.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1ac2f5fe02fa75f56e1ad473f1175e11f475606ec9bd0be2e78e4734ad575034", size = 210442 }, + { url = "https://files.pythonhosted.org/packages/18/dd/a1670d483a61ecac0d7fc4305d91caaac7a8fc1b200ea3965a01cf03bced/propcache-0.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:574faa3b79e8ebac7cb1d7930f51184ba1ccf69adfdec53a12f319a06030a68b", size = 203219 }, + { url = "https://files.pythonhosted.org/packages/f9/2d/30ced5afde41b099b2dc0c6573b66b45d16d73090e85655f1a30c5a24e07/propcache-0.2.1-cp310-cp310-win32.whl", hash = "sha256:03ff9d3f665769b2a85e6157ac8b439644f2d7fd17615a82fa55739bc97863f4", size = 40313 }, + { url = "https://files.pythonhosted.org/packages/23/84/bd9b207ac80da237af77aa6e153b08ffa83264b1c7882495984fcbfcf85c/propcache-0.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:2d3af2e79991102678f53e0dbf4c35de99b6b8b58f29a27ca0325816364caaba", size = 44428 }, + { url = "https://files.pythonhosted.org/packages/bc/0f/2913b6791ebefb2b25b4efd4bb2299c985e09786b9f5b19184a88e5778dd/propcache-0.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ffc3cca89bb438fb9c95c13fc874012f7b9466b89328c3c8b1aa93cdcfadd16", size = 79297 }, + { url = "https://files.pythonhosted.org/packages/cf/73/af2053aeccd40b05d6e19058419ac77674daecdd32478088b79375b9ab54/propcache-0.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f174bbd484294ed9fdf09437f889f95807e5f229d5d93588d34e92106fbf6717", size = 45611 }, + { url = "https://files.pythonhosted.org/packages/3c/09/8386115ba7775ea3b9537730e8cf718d83bbf95bffe30757ccf37ec4e5da/propcache-0.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:70693319e0b8fd35dd863e3e29513875eb15c51945bf32519ef52927ca883bc3", size = 45146 }, + { url = "https://files.pythonhosted.org/packages/03/7a/793aa12f0537b2e520bf09f4c6833706b63170a211ad042ca71cbf79d9cb/propcache-0.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b480c6a4e1138e1aa137c0079b9b6305ec6dcc1098a8ca5196283e8a49df95a9", 
size = 232136 }, + { url = "https://files.pythonhosted.org/packages/f1/38/b921b3168d72111769f648314100558c2ea1d52eb3d1ba7ea5c4aa6f9848/propcache-0.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d27b84d5880f6d8aa9ae3edb253c59d9f6642ffbb2c889b78b60361eed449787", size = 239706 }, + { url = "https://files.pythonhosted.org/packages/14/29/4636f500c69b5edea7786db3c34eb6166f3384b905665ce312a6e42c720c/propcache-0.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:857112b22acd417c40fa4595db2fe28ab900c8c5fe4670c7989b1c0230955465", size = 238531 }, + { url = "https://files.pythonhosted.org/packages/85/14/01fe53580a8e1734ebb704a3482b7829a0ef4ea68d356141cf0994d9659b/propcache-0.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf6c4150f8c0e32d241436526f3c3f9cbd34429492abddbada2ffcff506c51af", size = 231063 }, + { url = "https://files.pythonhosted.org/packages/33/5c/1d961299f3c3b8438301ccfbff0143b69afcc30c05fa28673cface692305/propcache-0.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66d4cfda1d8ed687daa4bc0274fcfd5267873db9a5bc0418c2da19273040eeb7", size = 220134 }, + { url = "https://files.pythonhosted.org/packages/00/d0/ed735e76db279ba67a7d3b45ba4c654e7b02bc2f8050671ec365d8665e21/propcache-0.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c2f992c07c0fca81655066705beae35fc95a2fa7366467366db627d9f2ee097f", size = 220009 }, + { url = "https://files.pythonhosted.org/packages/75/90/ee8fab7304ad6533872fee982cfff5a53b63d095d78140827d93de22e2d4/propcache-0.2.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:4a571d97dbe66ef38e472703067021b1467025ec85707d57e78711c085984e54", size = 212199 }, + { url = "https://files.pythonhosted.org/packages/eb/ec/977ffaf1664f82e90737275873461695d4c9407d52abc2f3c3e24716da13/propcache-0.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bb6178c241278d5fe853b3de743087be7f5f4c6f7d6d22a3b524d323eecec505", size = 214827 }, + { url = "https://files.pythonhosted.org/packages/57/48/031fb87ab6081764054821a71b71942161619549396224cbb242922525e8/propcache-0.2.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ad1af54a62ffe39cf34db1aa6ed1a1873bd548f6401db39d8e7cd060b9211f82", size = 228009 }, + { url = "https://files.pythonhosted.org/packages/1a/06/ef1390f2524850838f2390421b23a8b298f6ce3396a7cc6d39dedd4047b0/propcache-0.2.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e7048abd75fe40712005bcfc06bb44b9dfcd8e101dda2ecf2f5aa46115ad07ca", size = 231638 }, + { url = "https://files.pythonhosted.org/packages/38/2a/101e6386d5a93358395da1d41642b79c1ee0f3b12e31727932b069282b1d/propcache-0.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:160291c60081f23ee43d44b08a7e5fb76681221a8e10b3139618c5a9a291b84e", size = 222788 }, + { url = "https://files.pythonhosted.org/packages/db/81/786f687951d0979007e05ad9346cd357e50e3d0b0f1a1d6074df334b1bbb/propcache-0.2.1-cp311-cp311-win32.whl", hash = "sha256:819ce3b883b7576ca28da3861c7e1a88afd08cc8c96908e08a3f4dd64a228034", size = 40170 }, + { url = "https://files.pythonhosted.org/packages/cf/59/7cc7037b295d5772eceb426358bb1b86e6cab4616d971bd74275395d100d/propcache-0.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:edc9fc7051e3350643ad929df55c451899bb9ae6d24998a949d2e4c87fb596d3", size = 44404 }, + { url = "https://files.pythonhosted.org/packages/4c/28/1d205fe49be8b1b4df4c50024e62480a442b1a7b818e734308bb0d17e7fb/propcache-0.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:081a430aa8d5e8876c6909b67bd2d937bfd531b0382d3fdedb82612c618bc41a", size = 79588 }, + { url = "https://files.pythonhosted.org/packages/21/ee/fc4d893f8d81cd4971affef2a6cb542b36617cd1d8ce56b406112cb80bf7/propcache-0.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2ccec9ac47cf4e04897619c0e0c1a48c54a71bdf045117d3a26f80d38ab1fb0", size = 45825 }, + { url = "https://files.pythonhosted.org/packages/4a/de/bbe712f94d088da1d237c35d735f675e494a816fd6f54e9db2f61ef4d03f/propcache-0.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:14d86fe14b7e04fa306e0c43cdbeebe6b2c2156a0c9ce56b815faacc193e320d", size = 45357 }, + { url = "https://files.pythonhosted.org/packages/7f/14/7ae06a6cf2a2f1cb382586d5a99efe66b0b3d0c6f9ac2f759e6f7af9d7cf/propcache-0.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:049324ee97bb67285b49632132db351b41e77833678432be52bdd0289c0e05e4", size = 241869 }, + { url = "https://files.pythonhosted.org/packages/cc/59/227a78be960b54a41124e639e2c39e8807ac0c751c735a900e21315f8c2b/propcache-0.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cd9a1d071158de1cc1c71a26014dcdfa7dd3d5f4f88c298c7f90ad6f27bb46d", size = 247884 }, + { url = "https://files.pythonhosted.org/packages/84/58/f62b4ffaedf88dc1b17f04d57d8536601e4e030feb26617228ef930c3279/propcache-0.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98110aa363f1bb4c073e8dcfaefd3a5cea0f0834c2aab23dda657e4dab2f53b5", size = 248486 }, + { url = "https://files.pythonhosted.org/packages/1c/07/ebe102777a830bca91bbb93e3479cd34c2ca5d0361b83be9dbd93104865e/propcache-0.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:647894f5ae99c4cf6bb82a1bb3a796f6e06af3caa3d32e26d2350d0e3e3faf24", size = 243649 }, + { url = "https://files.pythonhosted.org/packages/ed/bc/4f7aba7f08f520376c4bb6a20b9a981a581b7f2e385fa0ec9f789bb2d362/propcache-0.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfd3223c15bebe26518d58ccf9a39b93948d3dcb3e57a20480dfdd315356baff", size = 229103 }, + { url = "https://files.pythonhosted.org/packages/fe/d5/04ac9cd4e51a57a96f78795e03c5a0ddb8f23ec098b86f92de028d7f2a6b/propcache-0.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d71264a80f3fcf512eb4f18f59423fe82d6e346ee97b90625f283df56aee103f", size = 226607 }, + { url = "https://files.pythonhosted.org/packages/e3/f0/24060d959ea41d7a7cc7fdbf68b31852331aabda914a0c63bdb0e22e96d6/propcache-0.2.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e73091191e4280403bde6c9a52a6999d69cdfde498f1fdf629105247599b57ec", size = 221153 }, + { url = "https://files.pythonhosted.org/packages/77/a7/3ac76045a077b3e4de4859a0753010765e45749bdf53bd02bc4d372da1a0/propcache-0.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3935bfa5fede35fb202c4b569bb9c042f337ca4ff7bd540a0aa5e37131659348", size = 222151 }, + { url = "https://files.pythonhosted.org/packages/e7/af/5e29da6f80cebab3f5a4dcd2a3240e7f56f2c4abf51cbfcc99be34e17f0b/propcache-0.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f508b0491767bb1f2b87fdfacaba5f7eddc2f867740ec69ece6d1946d29029a6", size = 233812 }, + { url = "https://files.pythonhosted.org/packages/8c/89/ebe3ad52642cc5509eaa453e9f4b94b374d81bae3265c59d5c2d98efa1b4/propcache-0.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:1672137af7c46662a1c2be1e8dc78cb6d224319aaa40271c9257d886be4363a6", size = 238829 }, + { url = 
"https://files.pythonhosted.org/packages/e9/2f/6b32f273fa02e978b7577159eae7471b3cfb88b48563b1c2578b2d7ca0bb/propcache-0.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b74c261802d3d2b85c9df2dfb2fa81b6f90deeef63c2db9f0e029a3cac50b518", size = 230704 }, + { url = "https://files.pythonhosted.org/packages/5c/2e/f40ae6ff5624a5f77edd7b8359b208b5455ea113f68309e2b00a2e1426b6/propcache-0.2.1-cp312-cp312-win32.whl", hash = "sha256:d09c333d36c1409d56a9d29b3a1b800a42c76a57a5a8907eacdbce3f18768246", size = 40050 }, + { url = "https://files.pythonhosted.org/packages/3b/77/a92c3ef994e47180862b9d7d11e37624fb1c00a16d61faf55115d970628b/propcache-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:c214999039d4f2a5b2073ac506bba279945233da8c786e490d411dfc30f855c1", size = 44117 }, + { url = "https://files.pythonhosted.org/packages/0f/2a/329e0547cf2def8857157f9477669043e75524cc3e6251cef332b3ff256f/propcache-0.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aca405706e0b0a44cc6bfd41fbe89919a6a56999157f6de7e182a990c36e37bc", size = 77002 }, + { url = "https://files.pythonhosted.org/packages/12/2d/c4df5415e2382f840dc2ecbca0eeb2293024bc28e57a80392f2012b4708c/propcache-0.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:12d1083f001ace206fe34b6bdc2cb94be66d57a850866f0b908972f90996b3e9", size = 44639 }, + { url = "https://files.pythonhosted.org/packages/d0/5a/21aaa4ea2f326edaa4e240959ac8b8386ea31dedfdaa636a3544d9e7a408/propcache-0.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d93f3307ad32a27bda2e88ec81134b823c240aa3abb55821a8da553eed8d9439", size = 44049 }, + { url = "https://files.pythonhosted.org/packages/4e/3e/021b6cd86c0acc90d74784ccbb66808b0bd36067a1bf3e2deb0f3845f618/propcache-0.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba278acf14471d36316159c94a802933d10b6a1e117b8554fe0d0d9b75c9d536", size = 224819 }, + { url = "https://files.pythonhosted.org/packages/3c/57/c2fdeed1b3b8918b1770a133ba5c43ad3d78e18285b0c06364861ef5cc38/propcache-0.2.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e6281aedfca15301c41f74d7005e6e3f4ca143584ba696ac69df4f02f40d629", size = 229625 }, + { url = "https://files.pythonhosted.org/packages/9d/81/70d4ff57bf2877b5780b466471bebf5892f851a7e2ca0ae7ffd728220281/propcache-0.2.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b750a8e5a1262434fb1517ddf64b5de58327f1adc3524a5e44c2ca43305eb0b", size = 232934 }, + { url = "https://files.pythonhosted.org/packages/3c/b9/bb51ea95d73b3fb4100cb95adbd4e1acaf2cbb1fd1083f5468eeb4a099a8/propcache-0.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf72af5e0fb40e9babf594308911436c8efde3cb5e75b6f206c34ad18be5c052", size = 227361 }, + { url = "https://files.pythonhosted.org/packages/f1/20/3c6d696cd6fd70b29445960cc803b1851a1131e7a2e4ee261ee48e002bcd/propcache-0.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2d0a12018b04f4cb820781ec0dffb5f7c7c1d2a5cd22bff7fb055a2cb19ebce", size = 213904 }, + { url = "https://files.pythonhosted.org/packages/a1/cb/1593bfc5ac6d40c010fa823f128056d6bc25b667f5393781e37d62f12005/propcache-0.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e800776a79a5aabdb17dcc2346a7d66d0777e942e4cd251defeb084762ecd17d", size = 212632 }, + { url = "https://files.pythonhosted.org/packages/6d/5c/e95617e222be14a34c709442a0ec179f3207f8a2b900273720501a70ec5e/propcache-0.2.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:4160d9283bd382fa6c0c2b5e017acc95bc183570cd70968b9202ad6d8fc48dce", size = 207897 }, + { url = "https://files.pythonhosted.org/packages/8e/3b/56c5ab3dc00f6375fbcdeefdede5adf9bee94f1fab04adc8db118f0f9e25/propcache-0.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:30b43e74f1359353341a7adb783c8f1b1c676367b011709f466f42fda2045e95", size = 208118 }, + { url = "https://files.pythonhosted.org/packages/86/25/d7ef738323fbc6ebcbce33eb2a19c5e07a89a3df2fded206065bd5e868a9/propcache-0.2.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:58791550b27d5488b1bb52bc96328456095d96206a250d28d874fafe11b3dfaf", size = 217851 }, + { url = "https://files.pythonhosted.org/packages/b3/77/763e6cef1852cf1ba740590364ec50309b89d1c818e3256d3929eb92fabf/propcache-0.2.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0f022d381747f0dfe27e99d928e31bc51a18b65bb9e481ae0af1380a6725dd1f", size = 222630 }, + { url = "https://files.pythonhosted.org/packages/4f/e9/0f86be33602089c701696fbed8d8c4c07b6ee9605c5b7536fd27ed540c5b/propcache-0.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:297878dc9d0a334358f9b608b56d02e72899f3b8499fc6044133f0d319e2ec30", size = 216269 }, + { url = "https://files.pythonhosted.org/packages/cc/02/5ac83217d522394b6a2e81a2e888167e7ca629ef6569a3f09852d6dcb01a/propcache-0.2.1-cp313-cp313-win32.whl", hash = "sha256:ddfab44e4489bd79bda09d84c430677fc7f0a4939a73d2bba3073036f487a0a6", size = 39472 }, + { url = "https://files.pythonhosted.org/packages/f4/33/d6f5420252a36034bc8a3a01171bc55b4bff5df50d1c63d9caa50693662f/propcache-0.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:556fc6c10989f19a179e4321e5d678db8eb2924131e64652a51fe83e4c3db0e1", size = 43363 }, + { url = "https://files.pythonhosted.org/packages/0a/08/6ab7f65240a16fa01023125e65258acf7e4884f483f267cdd6fcc48f37db/propcache-0.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6a9a8c34fb7bb609419a211e59da8887eeca40d300b5ea8e56af98f6fbbb1541", size = 80403 }, + { url = "https://files.pythonhosted.org/packages/34/fe/e7180285e21b4e6dff7d311fdf22490c9146a09a02834b5232d6248c6004/propcache-0.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae1aa1cd222c6d205853b3013c69cd04515f9d6ab6de4b0603e2e1c33221303e", size = 46152 }, + { url = "https://files.pythonhosted.org/packages/9c/36/aa74d884af826030ba9cee2ac109b0664beb7e9449c315c9c44db99efbb3/propcache-0.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:accb6150ce61c9c4b7738d45550806aa2b71c7668c6942f17b0ac182b6142fd4", size = 45674 }, + { url = "https://files.pythonhosted.org/packages/22/59/6fe80a3fe7720f715f2c0f6df250dacbd7cad42832410dbd84c719c52f78/propcache-0.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eee736daafa7af6d0a2dc15cc75e05c64f37fc37bafef2e00d77c14171c2097", size = 207792 }, + { url = "https://files.pythonhosted.org/packages/4a/68/584cd51dd8f4d0f5fff5b128ce0cdb257cde903898eecfb92156bbc2c780/propcache-0.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7a31fc1e1bd362874863fdeed71aed92d348f5336fd84f2197ba40c59f061bd", size = 223280 }, + { url = "https://files.pythonhosted.org/packages/85/cb/4c3528460c41e61b06ec3f970c0f89f87fa21f63acac8642ed81a886c164/propcache-0.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba4cfa1052819d16699e1d55d18c92b6e094d4517c41dd231a8b9f87b6fa681", size = 221293 }, + { url = 
"https://files.pythonhosted.org/packages/69/c0/560e050aa6d31eeece3490d1174da508f05ab27536dfc8474af88b97160a/propcache-0.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f089118d584e859c62b3da0892b88a83d611c2033ac410e929cb6754eec0ed16", size = 208259 }, + { url = "https://files.pythonhosted.org/packages/0c/87/d6c86a77632eb1ba86a328e3313159f246e7564cb5951e05ed77555826a0/propcache-0.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:781e65134efaf88feb447e8c97a51772aa75e48b794352f94cb7ea717dedda0d", size = 198632 }, + { url = "https://files.pythonhosted.org/packages/3a/2b/3690ea7b662dc762ab7af5f3ef0e2d7513c823d193d7b2a1b4cda472c2be/propcache-0.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31f5af773530fd3c658b32b6bdc2d0838543de70eb9a2156c03e410f7b0d3aae", size = 203516 }, + { url = "https://files.pythonhosted.org/packages/4d/b5/afe716c16c23c77657185c257a41918b83e03993b6ccdfa748e5e7d328e9/propcache-0.2.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:a7a078f5d37bee6690959c813977da5291b24286e7b962e62a94cec31aa5188b", size = 199402 }, + { url = "https://files.pythonhosted.org/packages/a4/c0/2d2df3aa7f8660d0d4cc4f1e00490c48d5958da57082e70dea7af366f876/propcache-0.2.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:cea7daf9fc7ae6687cf1e2c049752f19f146fdc37c2cc376e7d0032cf4f25347", size = 200528 }, + { url = "https://files.pythonhosted.org/packages/21/c8/65ac9142f5e40c8497f7176e71d18826b09e06dd4eb401c9a4ee41aa9c74/propcache-0.2.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:8b3489ff1ed1e8315674d0775dc7d2195fb13ca17b3808721b54dbe9fd020faf", size = 211254 }, + { url = "https://files.pythonhosted.org/packages/09/e4/edb70b447a1d8142df51ec7511e84aa64d7f6ce0a0fdf5eb55363cdd0935/propcache-0.2.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9403db39be1393618dd80c746cb22ccda168efce239c73af13c3763ef56ffc04", size = 214589 }, + { url = "https://files.pythonhosted.org/packages/cb/02/817f309ec8d8883287781d6d9390f80b14db6e6de08bc659dfe798a825c2/propcache-0.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5d97151bc92d2b2578ff7ce779cdb9174337390a535953cbb9452fb65164c587", size = 207283 }, + { url = "https://files.pythonhosted.org/packages/d7/fe/2d18612096ed2212cfef821b6fccdba5d52efc1d64511c206c5c16be28fd/propcache-0.2.1-cp39-cp39-win32.whl", hash = "sha256:9caac6b54914bdf41bcc91e7eb9147d331d29235a7c967c150ef5df6464fd1bb", size = 40866 }, + { url = "https://files.pythonhosted.org/packages/24/2e/b5134802e7b57c403c7b73c7a39374e7a6b7f128d1968b4a4b4c0b700250/propcache-0.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:92fc4500fcb33899b05ba73276dfb684a20d31caa567b7cb5252d48f896a91b1", size = 44975 }, + { url = "https://files.pythonhosted.org/packages/41/b6/c5319caea262f4821995dca2107483b94a3345d4607ad797c76cb9c36bcc/propcache-0.2.1-py3-none-any.whl", hash = "sha256:52277518d6aae65536e9cea52d4e7fd2f7a66f4aa2d30ed3f2fcea620ace3c54", size = 11818 }, +] + +[[package]] +name = "protobuf" +version = "5.29.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d2/4f/1639b7b1633d8fd55f216ba01e21bf2c43384ab25ef3ddb35d85a52033e8/protobuf-5.29.1.tar.gz", hash = "sha256:683be02ca21a6ffe80db6dd02c0b5b2892322c59ca57fd6c872d652cb80549cb", size = 424965 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/c7/28669b04691a376cf7d0617d612f126aa0fff763d57df0142f9bf474c5b8/protobuf-5.29.1-cp310-abi3-win32.whl", hash = 
"sha256:22c1f539024241ee545cbcb00ee160ad1877975690b16656ff87dde107b5f110", size = 422706 }, + { url = "https://files.pythonhosted.org/packages/e3/33/dc7a7712f457456b7e0b16420ab8ba1cc8686751d3f28392eb43d0029ab9/protobuf-5.29.1-cp310-abi3-win_amd64.whl", hash = "sha256:1fc55267f086dd4050d18ef839d7bd69300d0d08c2a53ca7df3920cc271a3c34", size = 434505 }, + { url = "https://files.pythonhosted.org/packages/e5/39/44239fb1c6ec557e1731d996a5de89a9eb1ada7a92491fcf9c5d714052ed/protobuf-5.29.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:d473655e29c0c4bbf8b69e9a8fb54645bc289dead6d753b952e7aa660254ae18", size = 417822 }, + { url = "https://files.pythonhosted.org/packages/fb/4a/ec56f101d38d4bef2959a9750209809242d86cf8b897db00f2f98bfa360e/protobuf-5.29.1-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:b5ba1d0e4c8a40ae0496d0e2ecfdbb82e1776928a205106d14ad6985a09ec155", size = 319572 }, + { url = "https://files.pythonhosted.org/packages/04/52/c97c58a33b3d6c89a8138788576d372a90a6556f354799971c6b4d16d871/protobuf-5.29.1-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:8ee1461b3af56145aca2800e6a3e2f928108c749ba8feccc6f5dd0062c410c0d", size = 319671 }, + { url = "https://files.pythonhosted.org/packages/99/19/5a3957e08de18578131810563ccfeebc7d2aad31ee52e367a61f56cc3cab/protobuf-5.29.1-cp39-cp39-win32.whl", hash = "sha256:5a41deccfa5e745cef5c65a560c76ec0ed8e70908a67cc8f4da5fce588b50d57", size = 422671 }, + { url = "https://files.pythonhosted.org/packages/24/67/8bc07bb755c8badf08db4a8bc2eb542a4e733135a6d584d1922b701d7751/protobuf-5.29.1-cp39-cp39-win_amd64.whl", hash = "sha256:012ce28d862ff417fd629285aca5d9772807f15ceb1a0dbd15b88f58c776c98c", size = 434591 }, + { url = "https://files.pythonhosted.org/packages/3b/24/c8c49df8f6587719e1d400109b16c10c6902d0c9adddc8fff82840146f99/protobuf-5.29.1-py3-none-any.whl", hash = "sha256:32600ddb9c2a53dedc25b8581ea0f1fd8ea04956373c0c07577ce58d312522e0", size = 172547 }, +] + +[[package]] +name = "psutil" +version = "6.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1f/5a/07871137bb752428aa4b659f910b399ba6f291156bdea939be3e96cae7cb/psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5", size = 508502 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/99/ca79d302be46f7bdd8321089762dd4476ee725fce16fc2b2e1dbba8cac17/psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8", size = 247511 }, + { url = "https://files.pythonhosted.org/packages/0b/6b/73dbde0dd38f3782905d4587049b9be64d76671042fdcaf60e2430c6796d/psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377", size = 248985 }, + { url = "https://files.pythonhosted.org/packages/17/38/c319d31a1d3f88c5b79c68b3116c129e5133f1822157dd6da34043e32ed6/psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003", size = 284488 }, + { url = "https://files.pythonhosted.org/packages/9c/39/0f88a830a1c8a3aba27fededc642da37613c57cbff143412e3536f89784f/psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160", size = 287477 }, + { url = 
"https://files.pythonhosted.org/packages/47/da/99f4345d4ddf2845cb5b5bd0d93d554e84542d116934fde07a0c50bd4e9f/psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3", size = 289017 }, + { url = "https://files.pythonhosted.org/packages/38/53/bd755c2896f4461fd4f36fa6a6dcb66a88a9e4b9fd4e5b66a77cf9d4a584/psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53", size = 250602 }, + { url = "https://files.pythonhosted.org/packages/7b/d7/7831438e6c3ebbfa6e01a927127a6cb42ad3ab844247f3c5b96bea25d73d/psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649", size = 254444 }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842 }, +] + +[[package]] +name = "py-fast-rsync" +version = "0.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/e3/47f09ab98b428a16f362ce9730e17107ce34ce9b2bebbdec2ef438981a8d/py_fast_rsync-0.1.0.tar.gz", hash = "sha256:0237d1e935adad73697cae3f15ffc79ba639a931089ce2e543a6abb128e60eae", size = 6204 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/75/49752fca6d8b31017b1c03a673a7aa9837d04427d231a5534b4219ebfd1c/py_fast_rsync-0.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d64ec65075c04311cc9711affa40857fbeea80f10b007d3970ef87c04b6cab1b", size = 212179 }, + { url = "https://files.pythonhosted.org/packages/43/97/bc167a5452c5fa96e14cfeca5282ba3e4a4d4891952f1d146ff124c3e1fe/py_fast_rsync-0.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd412dacf2939af3ea67a39d81593918cd76553d504273d15e31f36c9ed6bb16", size = 246699 }, + { url = "https://files.pythonhosted.org/packages/11/8a/938f5e5321383fc108b9c6bb42afc43b674849247074ae3dabbbb6eb1f5e/py_fast_rsync-0.1.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e4ad370842307b10a1aa933b17ffa396521e53c4a6b8afc5bc743d9d6d4eb50a", size = 252294 }, + { url = "https://files.pythonhosted.org/packages/14/18/ad9b6d8ce9c0ce2d01371284e182c9755ef3f343413e7dfb1d9395896e1b/py_fast_rsync-0.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:139168e8aa395d64ace28fa57d357e8cc7a8a46ef0b7c4e6aa011c16bf095efc", size = 282031 }, + { url = "https://files.pythonhosted.org/packages/ee/b3/4334dee7e31da412276f6d3740168b9f6485c331b42f6b1b5cce3016cecc/py_fast_rsync-0.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a3cd96a3383657fffe0e19d8b1fdf74a59d8db4f208a264c91f268f8bf7dacab", size = 281886 }, + { url = "https://files.pythonhosted.org/packages/d7/f2/6fb0fc36ec8a8bc29e0a70b2bd493460dc430e79615db34e8f17bf0f92f9/py_fast_rsync-0.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84d82a84c3c5d5967be3e8c3be7ca16ac48c3ed298029ba18b69730880e2234a", size = 252222 }, + { url = "https://files.pythonhosted.org/packages/b9/40/a40881ee5d6234d58b58d3fd3085d8d0e82dd634ea02943e85840232ab10/py_fast_rsync-0.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:95b5937fd5b7cfaa42edb5ee87c5fc134f023f880ee80b9d722d1c6fd0d72774", size = 268212 }, + { url = "https://files.pythonhosted.org/packages/77/99/358a02106c9a9aff231cf378d30cb400ffb9080c96be4f36cf8ada79d299/py_fast_rsync-0.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:385e927ca8e1b4d50c7c48949278ec53c0cd9f1b6e2aba3378c5a18f6cd3a932", size = 428941 }, + { url = "https://files.pythonhosted.org/packages/f6/fb/cb9810c049908ba54fbd9be7145915a32f9d827379b5332a6ae2c11e02c6/py_fast_rsync-0.1.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:f23cf5c39382dea61d19e0f0a620b68605fc21ebbebd564a094e0cb23c7dde7d", size = 511128 }, + { url = "https://files.pythonhosted.org/packages/e7/4a/4621798c0895e03c14abb726946a5a48d881533ca1320e3a7d723338d012/py_fast_rsync-0.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c4ed4b6f6cbcd89b4eb9c3abe7cd22f95f5908e740f006473067f4fca830c1be", size = 439976 }, + { url = "https://files.pythonhosted.org/packages/8d/94/b09952b0ab16a9e7df868612eec861461f53fa46c5a408df229be72a7b35/py_fast_rsync-0.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e9908bca041b5f6e8359ba7f07fc56afb06e677971d8dd2d3470d7170fc3ea15", size = 416995 }, + { url = "https://files.pythonhosted.org/packages/e6/44/858c62de51827c7030125ee055f485aabe95a0a82de302d4e71b7b979b28/py_fast_rsync-0.1.0-cp310-none-win32.whl", hash = "sha256:584141a6086a4c11700be5daf28bc63585f63900d6bcdc36dd3cfe8c46585799", size = 117052 }, + { url = "https://files.pythonhosted.org/packages/0c/4e/e31b20e49abc8820e750894fa24133a728fb309a233ffb256bc621cd0da1/py_fast_rsync-0.1.0-cp310-none-win_amd64.whl", hash = "sha256:e51ae07b0a2683f19e2054d685d738cbb18fc0d8863320c1c5b0b917e64aba47", size = 122500 }, + { url = "https://files.pythonhosted.org/packages/7e/a5/5ea9815fb8d7117b5ca8bcc6fb96cdd291bde5bbfeefa485c0d327274f2c/py_fast_rsync-0.1.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:99b7e1bdebc9551723dead612feb0b31cf17f28b1977cc8bb904831808a0dd8e", size = 220976 }, + { url = "https://files.pythonhosted.org/packages/71/02/ffe3f9d27521a0dab39a306942b3e0dd576e810827161369a9c3a609600f/py_fast_rsync-0.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a92a677b1f833d47376b0d32374702c09e98b764602af3ff6004975805ddd4b2", size = 211917 }, + { url = "https://files.pythonhosted.org/packages/88/7b/0deabe1ddf50a8cc1274b953dbdf1f4680f60dfd3883308d16f59406285c/py_fast_rsync-0.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6ec30c95c023881457b2eb93234d18fbd86c4fb9b754e2b8bb489d098d9a395", size = 246441 }, + { url = 
"https://files.pythonhosted.org/packages/6f/05/c0f560f08a73866723e26cea2bd1d9df3b48c7682d80f0cc6666d30ee367/py_fast_rsync-0.1.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb47c0da579ee7ee756167a3decf96c2bc0049f00f66cab90d6e754b0b25a320", size = 251991 }, + { url = "https://files.pythonhosted.org/packages/cd/64/9990193ae95f96e3371ed3fe4c57f26e3dae9de8780e2597b889574e8eae/py_fast_rsync-0.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2bfa14befafe2ed5da206c5b0bd7eaa06f26b23336d82f5be92ba5c6e4dc9c2", size = 281890 }, + { url = "https://files.pythonhosted.org/packages/e4/ee/06fc6d304eff585c9c3082229ed88017e9d76c039a03bc5e8b275ae91e64/py_fast_rsync-0.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fb5e564474ee20e12cf32c8d7a6e3b623c4c711c418649675aa344c2834e13f1", size = 281176 }, + { url = "https://files.pythonhosted.org/packages/04/36/c3948003b6cb97c6732e184765dbf5e48d8a816cd0527953025b5ac346d8/py_fast_rsync-0.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22407ee5c75e534d41d76c035e79a295d555d44da9981fa6bc3af545f5e9dfa3", size = 252027 }, + { url = "https://files.pythonhosted.org/packages/dc/7d/e548c82c55ef5e0575205e01875629c8204634c0fa41d7b7b8c91fcae807/py_fast_rsync-0.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:135e5ef01cdda7c8946012f13a030581287383955d521c1af7cc3a3e28d1de3f", size = 268052 }, + { url = "https://files.pythonhosted.org/packages/1e/33/ce5cca03ca8faeb66d7e696a67e551878d455d64c64cc616fa718c1128a1/py_fast_rsync-0.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:90a1fb9677aeb8c93a6f1244cf09bbcef37a2c25cd84b921e3b899c9cd3d453e", size = 428703 }, + { url = "https://files.pythonhosted.org/packages/65/b1/484d8ba7f78bcf9bae829afd3137c796862813a77cf7c094f9be56dcf5c0/py_fast_rsync-0.1.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:c9d7519e4183975060bb935d14ec4c55753b48edf2cfe77fbe3dff2fbacdf2f6", size = 510958 }, + { url = "https://files.pythonhosted.org/packages/98/d1/4c334d0118bc2780a63a740ed59562184fa6741025def8d6519ffb1c6558/py_fast_rsync-0.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:68a7bec93f75da5e7d01d5309aaa59a0f6be9aae3011902c79b7e3d8a2f48528", size = 439822 }, + { url = "https://files.pythonhosted.org/packages/c2/c6/4f723af8595a9ca3f9b04238bf37ed3de8150bfc203bfbd595fc4ca651f1/py_fast_rsync-0.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:500bb5659b5fdfe0743a4f22bc8d798ef1c673f5dbee3f0fec02bac025a37d01", size = 416793 }, + { url = "https://files.pythonhosted.org/packages/5a/56/387033df81babb9eba71be797efcaecc468fed16b4ec34953b6a2e6db17d/py_fast_rsync-0.1.0-cp311-none-win32.whl", hash = "sha256:df91c34428dc5d2033f18ee5332966ccb2215d3a4e7d00e6af89c04f5d6cf18c", size = 117033 }, + { url = "https://files.pythonhosted.org/packages/72/d6/b835f74aa6ab9b9f5c1bbee15d8d2f2831fbc1b7639b769506497793b651/py_fast_rsync-0.1.0-cp311-none-win_amd64.whl", hash = "sha256:9d09f0869c9074acc85f2d9fcc604f594dcb2e410fdc57df09f5c5f07c916add", size = 122462 }, + { url = "https://files.pythonhosted.org/packages/b1/f5/2ad1000d7e467e54dd307f7f3486db66464f922e91a1c0ece6309394eeb6/py_fast_rsync-0.1.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0a37f699c7d27774946b7262d0f851ee8ef0c09a761e957c0134306ceba45f0d", size = 220743 }, + { url = "https://files.pythonhosted.org/packages/77/3b/14cf098750fd1d7c0a87b33588febb45d9641d553bebc57b32e829dda5cd/py_fast_rsync-0.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:f2f4695bb0f4e92916b3ec4bd969f55454eddfe12985a0a3a07be1eb817b4875", size = 211535 }, + { url = "https://files.pythonhosted.org/packages/e2/e0/540b2c7976b9d96322359e8cc8bff0a82a5e956deac8fd248a424a502021/py_fast_rsync-0.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d978c046b8a860c4f70d5a1f6b20f9477f963002ceee3e121823f409134b909d", size = 246068 }, + { url = "https://files.pythonhosted.org/packages/c6/9b/e3d533ed6c5c24f918d312ca70a04a917fcbe9866dad0bf86f791f8da85b/py_fast_rsync-0.1.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1a01022c23612b8939c06ef189661e0593995f991cd20418ab14bcc111c628a", size = 251771 }, + { url = "https://files.pythonhosted.org/packages/a0/b7/13c6c78c816ba3b9ba4331c5bf66229811aa37b6dddbe306e240c74909b8/py_fast_rsync-0.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:446508780a380a7365e09f5afb32da93530d1575f3141f049f557229384fe770", size = 282091 }, + { url = "https://files.pythonhosted.org/packages/43/8d/4ddad5ed9fe2f8e7c21ea1c80edabec6fc20ac57a55a09133c638bc1cfd3/py_fast_rsync-0.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c539d071de0794f6477116765695ad092b7cc87b3f9fc0f8d18ca5a0b7ef89a5", size = 280000 }, + { url = "https://files.pythonhosted.org/packages/e0/4a/cd1c1f52992a6fae16c4561ed484abbdd3c3d58394793cb2c5ee1e64d9c9/py_fast_rsync-0.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3831c4990e92301aa2c5f717fafb4df20134e2fd2efa880bb8d94a6ec7aa9d76", size = 251696 }, + { url = "https://files.pythonhosted.org/packages/e7/a3/22a8074c481c08b7c9ce19e77a51271ed3e8becb537baed081c00a2a91b7/py_fast_rsync-0.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17b5ad409c61db2102b02ee29e830019463e97072e0e06f9c9f53c25194b33da", size = 267421 }, + { url = "https://files.pythonhosted.org/packages/2b/76/05fc09c1403185fd0c507287f7757caefd62e3c08febf316383a641e3acd/py_fast_rsync-0.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fe7465aafee86fc78ba33abbf4143eb7c260d2db6ab933e35c8022a2b17fa6bf", size = 429138 }, + { url = "https://files.pythonhosted.org/packages/56/99/a6eb7c47cad7b59adc58c9601f1a63b5a8cf683b030b10a65c359a15ff53/py_fast_rsync-0.1.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fb8eae2135c7b4aa691c0678adf5e42d99950713153208de8b51196bf7635f0a", size = 511000 }, + { url = "https://files.pythonhosted.org/packages/56/fd/e0ba8895e7daac51e13156e82da554784305b12ee179a045681b76a2b07d/py_fast_rsync-0.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2ff52b375a151f7235462140eaa719e84ce9f5a39bf6f5df6465b7084e87b0a2", size = 439970 }, + { url = "https://files.pythonhosted.org/packages/a5/ed/52979209f2637b3c0b43ed5ff18fd30e56f9ae7c6a2f5d995a9aed22ac07/py_fast_rsync-0.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d89a79394f7ab6c0f9d80b97f8ce58ab5136bfcc0eeeb2f8357629a3ec69c613", size = 416704 }, + { url = "https://files.pythonhosted.org/packages/df/29/79806bde74f5f68a877deaa29686a2c156ff8b333ebcb7281d3fb75633b2/py_fast_rsync-0.1.0-cp312-none-win32.whl", hash = "sha256:01c33e8774fcf1828d93e2d0aaabf32b01ca7312a944a90c6406e2a62759ae44", size = 117044 }, + { url = "https://files.pythonhosted.org/packages/38/a1/4ecbbb15fae383343a8f0b3124c16237e0782972661380c3a7baa5edf60a/py_fast_rsync-0.1.0-cp312-none-win_amd64.whl", hash = "sha256:b83f786c8f1e8d74ff82fc1b995836894eb1294234707db0b638796c45223b0e", size = 122376 }, + { url = 
"https://files.pythonhosted.org/packages/ce/4f/82b2aa430ddde0dcb3c41abe32a75d6c4660a662eba2d269170efb900ef0/py_fast_rsync-0.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e44e29f3f9488f1d2c1e09a3556077d09c340c6f11b82568cc72a7ff09d8160", size = 213156 }, + { url = "https://files.pythonhosted.org/packages/2a/a3/ec3c3e759f71e88843baf9b9acb2aa048255abb8783d14ae1d7c58487812/py_fast_rsync-0.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cbaf02888f6e3eabc2d4b11ba03e0ae5b864854259a9a44ca8e13eff287746e", size = 246506 }, + { url = "https://files.pythonhosted.org/packages/d5/c6/8faa6e8369969843d26e15478b7970b2df2f7782ba27db86f336f41b8724/py_fast_rsync-0.1.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99feadc7104c58e865ee21ccf1f119cfcc70e273d6a5493b851ea5948d4b40b5", size = 252461 }, + { url = "https://files.pythonhosted.org/packages/6b/0e/0d986f7d607a047de6e0b468b49ff6c8de90ce28d5b20071983903afe171/py_fast_rsync-0.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ef3a1aea1ab3ec37a4cbddc7f3fb9d4551f939e5d9c167f85bf2fe76088f0b5b", size = 281693 }, + { url = "https://files.pythonhosted.org/packages/2a/ce/c3f8e0634b32d5bc285a6606d7b2a6e070bd31a2460656b61420b3bd4719/py_fast_rsync-0.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49026321576a63cfd225930679a61bdca603a3edba2d0a38b6a8e16c7cbb295d", size = 282265 }, + { url = "https://files.pythonhosted.org/packages/34/29/fdc6098b613ed045a7bc3c3fddec8592030ca448814aecda6cfd0ed2e529/py_fast_rsync-0.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9d53609a2a538c675b0108dec4ecd7838399a610e86b0f7de5a3f218dee4089", size = 252255 }, + { url = "https://files.pythonhosted.org/packages/e5/f8/5059dab4385bf94518e3f172081f8abaa37a2f8ed0813f52b5a77c6e4fc8/py_fast_rsync-0.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:69014ec6641aa18f40374c1b6603a79e92e648cc742e1b009dab7eac11ceec66", size = 268744 }, + { url = "https://files.pythonhosted.org/packages/35/7b/4d7932cbd198080e0712dd336777e5affd9e30d8d96e05742d75237266e3/py_fast_rsync-0.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8b85ebb8372d1d1feebf5900621c904e9eba7003c721edc72cb23233aa6659a1", size = 429720 }, + { url = "https://files.pythonhosted.org/packages/50/e8/e113637289be6468fa5bae5cebac945784cd754883b67d7baa58a2029f12/py_fast_rsync-0.1.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:e30068af9fc7f9d1f14ccedd1a8809d2bb43a00b88ffecc797371456aa1abe08", size = 511548 }, + { url = "https://files.pythonhosted.org/packages/66/37/eaf816402f626f5defe6906589ddc8b5e5bc4a1acf1500c3f5a861e4c108/py_fast_rsync-0.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec41d9f5938c670428ca8ff5f825f533086151b872c365f234a340f6689b9083", size = 440266 }, + { url = "https://files.pythonhosted.org/packages/d9/a6/b5bfde7492d3da031fe11db434011ec458aef5b4851a9bc6d9287162c0d8/py_fast_rsync-0.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:49e478c18d77ce02c345e099968b56218ca3fa7344a43144f06e58dfd90cc2d8", size = 417500 }, + { url = "https://files.pythonhosted.org/packages/f1/15/a1fce4238e7ba4d5902dcff907e9fad442f0973b8079510648c5e02fe138/py_fast_rsync-0.1.0-cp39-none-win32.whl", hash = "sha256:a01cc4a3d8990ad91925f90c73e3ee83f4d5286716b8598f761e061d6030cdc7", size = 117346 }, + { url = "https://files.pythonhosted.org/packages/d1/12/70f2fd694c9b648ecc6653896e1577827125ce7538c9ea765170950efdc2/py_fast_rsync-0.1.0-cp39-none-win_amd64.whl", hash 
= "sha256:9d3e2ac95223b9c89e95b2079384fbdbb23e8464faea69b39eee5e40877f50ac", size = 122646 }, + { url = "https://files.pythonhosted.org/packages/b8/5e/30ac03bbc3870be944ebea29d486de724092ed7be00b963b618340f5e8df/py_fast_rsync-0.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f47e304158f7ef0907ac89fb3dbbda1f61811a2c1812c5a5a12656d6d6510cc", size = 247631 }, + { url = "https://files.pythonhosted.org/packages/e2/cb/608259249557d1d3202b6630dd8a02f8b0c1957aeec8bf945afc870e998d/py_fast_rsync-0.1.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:29fd0e9a68742a7dcb9c4b178d3d4e79eab46134abf9afb140e9c70e08a6bac9", size = 253109 }, + { url = "https://files.pythonhosted.org/packages/e7/fd/81467ca170175f1254b9009f19a151cfb09d83bf9324a0e7a94b39dc9105/py_fast_rsync-0.1.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c8c34b0c31a6a36ad61f605780b3935be6699019b7e6bc16ea78b628df05120b", size = 282803 }, + { url = "https://files.pythonhosted.org/packages/aa/d4/0f16a065c72e933d7d32386d58e36f3d2d2b927b6957eb19159d07c74c0a/py_fast_rsync-0.1.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7398bfa56e09cd6abf05be32ceea2a4c746264af70ec81f710081709ae8d9c4b", size = 283045 }, + { url = "https://files.pythonhosted.org/packages/a0/5a/fc102b17cf5f4d5811d1bb17b332bfbcf48d59ff57569a26495e0e17ca9c/py_fast_rsync-0.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7eac59240b2a1768b49865032f6432794ebba7a86f20978bedf443724f4472e", size = 253145 }, + { url = "https://files.pythonhosted.org/packages/f2/c8/4e7daa08ba33f70be9e99e490dfd61ef8abfe75db98d837f67aff5eb3e88/py_fast_rsync-0.1.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87faf6da6ae0035658bb9b2057021d6febd0758adbc275ce3223e64a932d6371", size = 269642 }, + { url = "https://files.pythonhosted.org/packages/27/90/383ec7abccdb274c35746ef3b7a7dcc93dff82046438d66eafd712d9a5a3/py_fast_rsync-0.1.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:60a3c9ca090c09ed9c0b5bbbaca9ea18525b4b80a31a92524fcadbf02548fb6a", size = 430664 }, + { url = "https://files.pythonhosted.org/packages/82/20/cd05a409156b7d41daf2d94b00c851a4f125c69469a8063727cf9382efcd/py_fast_rsync-0.1.0-pp310-pypy310_pp73-musllinux_1_2_armv7l.whl", hash = "sha256:f3dc72edd835b460ff851a241851995bf641b0e9954e45b86d192614efd3a070", size = 512529 }, + { url = "https://files.pythonhosted.org/packages/94/8e/1163651c6cc8b371bc6b687345703d6eb983301a9044d8878ef90b418370/py_fast_rsync-0.1.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:f9fe9f8f0023ed97b440825f28c268f32471b6f883c319ff734fddc1ee3c5332", size = 441134 }, + { url = "https://files.pythonhosted.org/packages/1a/87/2c5ec6bffc2860507fd5bc018c6d6f6691d6d6f56e85d1184099c341ad64/py_fast_rsync-0.1.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:edf0d02bfd0e4a61b5ec0f4558c7a723300578dc114911add7b81fc767bdf1a4", size = 418421 }, + { url = "https://files.pythonhosted.org/packages/4b/39/6e8abffa594eb9c92cf85b8727236561c4e85c08e0a8fe394e72a87fb805/py_fast_rsync-0.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72849d1a6cc1611239eac76ea8f3fd2099e7e376cc29cef8b28e46f50e4fa7fc", size = 247955 }, + { url = 
"https://files.pythonhosted.org/packages/c2/b1/156bd6d3fe21fc34eaf060cf1ebd02766faa8ea7a71f560646bf0e983688/py_fast_rsync-0.1.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c407629030e146672fee921092adc7699a6950fd37e7d80e08e615118254283e", size = 253452 }, + { url = "https://files.pythonhosted.org/packages/ff/a9/b3b6cfe967e22c3171b470a6668b57e67dfc5f40dd702b1d71ae439b4fae/py_fast_rsync-0.1.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d94bd60bf38bd0dde082d791f423f6bff7e121d4e35d4e04ae1d080865f5a901", size = 282933 }, + { url = "https://files.pythonhosted.org/packages/de/71/e637dae58bbf94adc638da6de0ef655668ccbdf67872ed6a5ab0b24275b8/py_fast_rsync-0.1.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:823035f9af750b3a1f84b672e4a28ef39065923bb5350ae34afaa8f9cc06b7c3", size = 283438 }, + { url = "https://files.pythonhosted.org/packages/41/6d/c02df94bc94f8416b5c2c4cfdf86cabc3804c001e2da2d91d9db4c544e94/py_fast_rsync-0.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:348412ac6f4023bff94408523d6ecfdfa26792de265a4dd30502c2c28d813461", size = 253615 }, + { url = "https://files.pythonhosted.org/packages/33/27/984415247267a62b00d4259f7bf0e346d301220d7e91ddaa1e5dc08912b7/py_fast_rsync-0.1.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03c82ddf2c74862d6c33aae4c4bf256ed522c212e98246dec8586bfaa7130995", size = 269919 }, + { url = "https://files.pythonhosted.org/packages/9d/04/7d92a404d484635f6cc5c6c8c4c153eaa49ee6c0588e714afb0e1a38a641/py_fast_rsync-0.1.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:efbf8613d5c1a6f30d4e8dccc8df3d8a3d14437d9c1b1f14910ed2980920b875", size = 431225 }, + { url = "https://files.pythonhosted.org/packages/83/63/d2e9bf1dee13a400f312e616ebba371a8e0ef63d8af3e7c42c62b1c65834/py_fast_rsync-0.1.0-pp39-pypy39_pp73-musllinux_1_2_armv7l.whl", hash = "sha256:b1759ddb048687caf653365abb3e743f3f3cd67a7e7cfc687aa007808a8c75e9", size = 512844 }, + { url = "https://files.pythonhosted.org/packages/25/94/d246d5bb4d06fbb10162178c8ed4ec802a1657e256b96a4dc6714453c523/py_fast_rsync-0.1.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:a203ee63c1b3807664c16c46dc2b10cbecec1c44d6aa128e7449ad231edaf6ca", size = 441590 }, + { url = "https://files.pythonhosted.org/packages/7f/9d/a8993fafa274d79b3fabd4f744555acefa19b03a90f6626eccfdff39a54c/py_fast_rsync-0.1.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:0997af518fa9780c35d45bfec2b6d3081c957b53aa610ee376a52cf60ce24e85", size = 418967 }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, +] + +[[package]] +name = "pydantic" +version = "2.10.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696 }, +] + +[package.optional-dependencies] +email = [ + { name = "email-validator" }, +] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/bc/fed5f74b5d802cf9a03e83f60f18864e90e3aed7223adaca5ffb7a8d8d64/pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa", size = 1895938 }, + { url = "https://files.pythonhosted.org/packages/71/2a/185aff24ce844e39abb8dd680f4e959f0006944f4a8a0ea372d9f9ae2e53/pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c", size = 1815684 }, + { url = "https://files.pythonhosted.org/packages/c3/43/fafabd3d94d159d4f1ed62e383e264f146a17dd4d48453319fd782e7979e/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a", size = 1829169 }, + { url = "https://files.pythonhosted.org/packages/a2/d1/f2dfe1a2a637ce6800b799aa086d079998959f6f1215eb4497966efd2274/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5", size = 1867227 }, + { url = "https://files.pythonhosted.org/packages/7d/39/e06fcbcc1c785daa3160ccf6c1c38fea31f5754b756e34b65f74e99780b5/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c", size = 2037695 }, + { url = "https://files.pythonhosted.org/packages/7a/67/61291ee98e07f0650eb756d44998214231f50751ba7e13f4f325d95249ab/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7", size = 2741662 }, + { url = "https://files.pythonhosted.org/packages/32/90/3b15e31b88ca39e9e626630b4c4a1f5a0dfd09076366f4219429e6786076/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a", size = 1993370 }, + { url = "https://files.pythonhosted.org/packages/ff/83/c06d333ee3a67e2e13e07794995c1535565132940715931c1c43bfc85b11/pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236", size = 1996813 }, + { url = "https://files.pythonhosted.org/packages/7c/f7/89be1c8deb6e22618a74f0ca0d933fdcb8baa254753b26b25ad3acff8f74/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962", size = 2005287 }, + { url = "https://files.pythonhosted.org/packages/b7/7d/8eb3e23206c00ef7feee17b83a4ffa0a623eb1a9d382e56e4aa46fd15ff2/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9", size = 2128414 }, + { url = "https://files.pythonhosted.org/packages/4e/99/fe80f3ff8dd71a3ea15763878d464476e6cb0a2db95ff1c5c554133b6b83/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af", size = 2155301 }, + { url = "https://files.pythonhosted.org/packages/2b/a3/e50460b9a5789ca1451b70d4f52546fa9e2b420ba3bfa6100105c0559238/pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4", size = 1816685 }, + { url = "https://files.pythonhosted.org/packages/57/4c/a8838731cb0f2c2a39d3535376466de6049034d7b239c0202a64aaa05533/pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31", size = 1982876 }, + { url = "https://files.pythonhosted.org/packages/c2/89/f3450af9d09d44eea1f2c369f49e8f181d742f28220f88cc4dfaae91ea6e/pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc", size = 1893421 }, + { url = "https://files.pythonhosted.org/packages/9e/e3/71fe85af2021f3f386da42d291412e5baf6ce7716bd7101ea49c810eda90/pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7", size = 1814998 }, + { url = "https://files.pythonhosted.org/packages/a6/3c/724039e0d848fd69dbf5806894e26479577316c6f0f112bacaf67aa889ac/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15", size = 1826167 }, + { url = "https://files.pythonhosted.org/packages/2b/5b/1b29e8c1fb5f3199a9a57c1452004ff39f494bbe9bdbe9a81e18172e40d3/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306", size = 1865071 }, + { url = "https://files.pythonhosted.org/packages/89/6c/3985203863d76bb7d7266e36970d7e3b6385148c18a68cc8915fd8c84d57/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99", size = 2036244 }, + { url = "https://files.pythonhosted.org/packages/0e/41/f15316858a246b5d723f7d7f599f79e37493b2e84bfc789e58d88c209f8a/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459", size = 2737470 }, + { url = "https://files.pythonhosted.org/packages/a8/7c/b860618c25678bbd6d1d99dbdfdf0510ccb50790099b963ff78a124b754f/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048", size = 1992291 }, + { url = "https://files.pythonhosted.org/packages/bf/73/42c3742a391eccbeab39f15213ecda3104ae8682ba3c0c28069fbcb8c10d/pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d", size = 1994613 }, + { url = 
"https://files.pythonhosted.org/packages/94/7a/941e89096d1175d56f59340f3a8ebaf20762fef222c298ea96d36a6328c5/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b", size = 2002355 }, + { url = "https://files.pythonhosted.org/packages/6e/95/2359937a73d49e336a5a19848713555605d4d8d6940c3ec6c6c0ca4dcf25/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474", size = 2126661 }, + { url = "https://files.pythonhosted.org/packages/2b/4c/ca02b7bdb6012a1adef21a50625b14f43ed4d11f1fc237f9d7490aa5078c/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6", size = 2153261 }, + { url = "https://files.pythonhosted.org/packages/72/9d/a241db83f973049a1092a079272ffe2e3e82e98561ef6214ab53fe53b1c7/pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c", size = 1812361 }, + { url = "https://files.pythonhosted.org/packages/e8/ef/013f07248041b74abd48a385e2110aa3a9bbfef0fbd97d4e6d07d2f5b89a/pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc", size = 1982484 }, + { url = "https://files.pythonhosted.org/packages/10/1c/16b3a3e3398fd29dca77cea0a1d998d6bde3902fa2706985191e2313cc76/pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4", size = 1867102 }, + { url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127 }, + { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340 }, + { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900 }, + { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177 }, + { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046 }, + { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386 }, + { url = 
"https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060 }, + { url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870 }, + { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822 }, + { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364 }, + { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303 }, + { url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064 }, + { url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046 }, + { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092 }, + { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709 }, + { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273 }, + { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027 }, + { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888 }, + { url = 
"https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738 }, + { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138 }, + { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025 }, + { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633 }, + { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404 }, + { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130 }, + { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946 }, + { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387 }, + { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453 }, + { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186 }, + { url = "https://files.pythonhosted.org/packages/27/97/3aef1ddb65c5ccd6eda9050036c956ff6ecbfe66cb7eb40f280f121a5bb0/pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993", size = 1896475 }, + { url = "https://files.pythonhosted.org/packages/ad/d3/5668da70e373c9904ed2f372cb52c0b996426f302e0dee2e65634c92007d/pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308", size = 1772279 }, + { url = 
"https://files.pythonhosted.org/packages/8a/9e/e44b8cb0edf04a2f0a1f6425a65ee089c1d6f9c4c2dcab0209127b6fdfc2/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4", size = 1829112 }, + { url = "https://files.pythonhosted.org/packages/1c/90/1160d7ac700102effe11616e8119e268770f2a2aa5afb935f3ee6832987d/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf", size = 1866780 }, + { url = "https://files.pythonhosted.org/packages/ee/33/13983426df09a36d22c15980008f8d9c77674fc319351813b5a2739b70f3/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76", size = 2037943 }, + { url = "https://files.pythonhosted.org/packages/01/d7/ced164e376f6747e9158c89988c293cd524ab8d215ae4e185e9929655d5c/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118", size = 2740492 }, + { url = "https://files.pythonhosted.org/packages/8b/1f/3dc6e769d5b7461040778816aab2b00422427bcaa4b56cc89e9c653b2605/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630", size = 1995714 }, + { url = "https://files.pythonhosted.org/packages/07/d7/a0bd09bc39283530b3f7c27033a814ef254ba3bd0b5cfd040b7abf1fe5da/pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54", size = 1997163 }, + { url = "https://files.pythonhosted.org/packages/2d/bb/2db4ad1762e1c5699d9b857eeb41959191980de6feb054e70f93085e1bcd/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f", size = 2005217 }, + { url = "https://files.pythonhosted.org/packages/53/5f/23a5a3e7b8403f8dd8fc8a6f8b49f6b55c7d715b77dcf1f8ae919eeb5628/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362", size = 2127899 }, + { url = "https://files.pythonhosted.org/packages/c2/ae/aa38bb8dd3d89c2f1d8362dd890ee8f3b967330821d03bbe08fa01ce3766/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96", size = 2155726 }, + { url = "https://files.pythonhosted.org/packages/98/61/4f784608cc9e98f70839187117ce840480f768fed5d386f924074bf6213c/pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e", size = 1817219 }, + { url = "https://files.pythonhosted.org/packages/57/82/bb16a68e4a1a858bb3768c2c8f1ff8d8978014e16598f001ea29a25bf1d1/pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67", size = 1985382 }, + { url = "https://files.pythonhosted.org/packages/46/72/af70981a341500419e67d5cb45abe552a7c74b66326ac8877588488da1ac/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e", size = 1891159 }, + { url = 
"https://files.pythonhosted.org/packages/ad/3d/c5913cccdef93e0a6a95c2d057d2c2cba347815c845cda79ddd3c0f5e17d/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8", size = 1768331 }, + { url = "https://files.pythonhosted.org/packages/f6/f0/a3ae8fbee269e4934f14e2e0e00928f9346c5943174f2811193113e58252/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3", size = 1822467 }, + { url = "https://files.pythonhosted.org/packages/d7/7a/7bbf241a04e9f9ea24cd5874354a83526d639b02674648af3f350554276c/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f", size = 1979797 }, + { url = "https://files.pythonhosted.org/packages/4f/5f/4784c6107731f89e0005a92ecb8a2efeafdb55eb992b8e9d0a2be5199335/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133", size = 1987839 }, + { url = "https://files.pythonhosted.org/packages/6d/a7/61246562b651dff00de86a5f01b6e4befb518df314c54dec187a78d81c84/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc", size = 1998861 }, + { url = "https://files.pythonhosted.org/packages/86/aa/837821ecf0c022bbb74ca132e117c358321e72e7f9702d1b6a03758545e2/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50", size = 2116582 }, + { url = "https://files.pythonhosted.org/packages/81/b0/5e74656e95623cbaa0a6278d16cf15e10a51f6002e3ec126541e95c29ea3/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9", size = 2151985 }, + { url = "https://files.pythonhosted.org/packages/63/37/3e32eeb2a451fddaa3898e2163746b0cffbbdbb4740d38372db0490d67f3/pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151", size = 2004715 }, + { url = "https://files.pythonhosted.org/packages/29/0e/dcaea00c9dbd0348b723cae82b0e0c122e0fa2b43fa933e1622fd237a3ee/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656", size = 1891733 }, + { url = "https://files.pythonhosted.org/packages/86/d3/e797bba8860ce650272bda6383a9d8cad1d1c9a75a640c9d0e848076f85e/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278", size = 1768375 }, + { url = "https://files.pythonhosted.org/packages/41/f7/f847b15fb14978ca2b30262548f5fc4872b2724e90f116393eb69008299d/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb", size = 1822307 }, + { url = "https://files.pythonhosted.org/packages/9c/63/ed80ec8255b587b2f108e514dc03eed1546cd00f0af281e699797f373f38/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd", size = 1979971 }, + { url = 
"https://files.pythonhosted.org/packages/a9/6d/6d18308a45454a0de0e975d70171cadaf454bc7a0bf86b9c7688e313f0bb/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc", size = 1987616 }, + { url = "https://files.pythonhosted.org/packages/82/8a/05f8780f2c1081b800a7ca54c1971e291c2d07d1a50fb23c7e4aef4ed403/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b", size = 1998943 }, + { url = "https://files.pythonhosted.org/packages/5e/3e/fe5b6613d9e4c0038434396b46c5303f5ade871166900b357ada4766c5b7/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b", size = 2116654 }, + { url = "https://files.pythonhosted.org/packages/db/ad/28869f58938fad8cc84739c4e592989730bfb69b7c90a8fff138dff18e1e/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2", size = 2152292 }, + { url = "https://files.pythonhosted.org/packages/a1/0c/c5c5cd3689c32ed1fe8c5d234b079c12c281c051759770c05b8bed6412b5/pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35", size = 2004961 }, +] + +[[package]] +name = "pydantic-settings" +version = "2.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/73/7b/c58a586cd7d9ac66d2ee4ba60ca2d241fa837c02bca9bea80a9a8c3d22a9/pydantic_settings-2.7.1.tar.gz", hash = "sha256:10c9caad35e64bfb3c2fbf70a078c0e25cc92499782e5200747f942a065dec93", size = 79920 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/46/93416fdae86d40879714f72956ac14df9c7b76f7d41a4d68aa9f71a0028b/pydantic_settings-2.7.1-py3-none-any.whl", hash = "sha256:590be9e6e24d06db33a4262829edef682500ef008565a969c73d39d5f8bfb3fd", size = 29718 }, +] + +[[package]] +name = "pygments" +version = "2.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/62/8336eff65bcbc8e4cb5d05b55faf041285951b6e80f33e2bff2024788f31/pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", size = 4891905 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/3f/01c8b82017c199075f8f788d0d906b9ffbbc5a47dc9918a945e13d5a2bda/pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a", size = 1205513 }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997 }, +] + +[[package]] +name = "pytest" +version = "8.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = 
"exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.24.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/52/6d/c6cf50ce320cf8611df7a1254d86233b3df7cc07f9b5f5cbcb82e08aa534/pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276", size = 49855 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/31/6607dab48616902f76885dfcf62c08d929796fc3b2d2318faf9fd54dbed9/pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b", size = 18024 }, +] + +[[package]] +name = "pytest-cov" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage", extra = ["toml"] }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/be/45/9b538de8cef30e17c7b45ef42f538a94889ed6a16f2387a6c89e73220651/pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0", size = 66945 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/3b/48e79f2cd6a61dbbd4807b4ed46cb564b4fd50a76166b1c4ea5c1d9e2371/pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35", size = 22949 }, +] + +[[package]] +name = "pytest-httpx" +version = "0.35.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/89/5b12b7b29e3d0af3a4b9c071ee92fa25a9017453731a38f08ba01c280f4c/pytest_httpx-0.35.0.tar.gz", hash = "sha256:d619ad5d2e67734abfbb224c3d9025d64795d4b8711116b1a13f72a251ae511f", size = 54146 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b0/ed/026d467c1853dd83102411a78126b4842618e86c895f93528b0528c7a620/pytest_httpx-0.35.0-py3-none-any.whl", hash = "sha256:ee11a00ffcea94a5cbff47af2114d34c5b231c326902458deed73f9c459fd744", size = 19442 }, +] + +[[package]] +name = "pytest-timeout" +version = "2.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/93/0d/04719abc7a4bdb3a7a1f968f24b0f5253d698c9cc94975330e9d3145befb/pytest-timeout-2.3.1.tar.gz", hash = "sha256:12397729125c6ecbdaca01035b9e5239d4db97352320af155b3f5de1ba5165d9", size = 17697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/27/14af9ef8321f5edc7527e47def2a21d8118c6f329a9342cc61387a0c0599/pytest_timeout-2.3.1-py3-none-any.whl", hash = "sha256:68188cb703edfc6a18fad98dc25a3c61e9f24d644b0b70f33af545219fc7813e", size = 14148 }, +] + +[[package]] +name = "pytest-xdist" +version = "3.6.1" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "execnet" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/c4/3c310a19bc1f1e9ef50075582652673ef2bfc8cd62afef9585683821902f/pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d", size = 84060 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/82/1d96bf03ee4c0fdc3c0cbe61470070e659ca78dc0086fb88b66c185e2449/pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7", size = 46108 }, +] + +[package.optional-dependencies] +psutil = [ + { name = "psutil" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, +] + +[[package]] +name = "python-dotenv" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/57/e84d88dfe0aec03b7a2d4327012c1627ab5f03652216c63d49846d7a6c58/python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", size = 39115 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863 }, +] + +[[package]] +name = "python-multipart" +version = "0.0.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546 }, +] + +[[package]] +name = "pytz" +version = "2024.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3a/31/3c70bf7603cc2dca0f19bdc53b4537a797747a58875b552c8c413d963a3f/pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a", size = 319692 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/c3/005fcca25ce078d2cc29fd559379817424e94885510568bc1bc53d7d5846/pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725", size = 508002 }, +] + +[[package]] +name = "pywin32" +version = "308" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/72/a6/3e9f2c474895c1bb61b11fa9640be00067b5c5b363c501ee9c3fa53aec01/pywin32-308-cp310-cp310-win32.whl", hash = 
"sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e", size = 5927028 }, + { url = "https://files.pythonhosted.org/packages/d9/b4/84e2463422f869b4b718f79eb7530a4c1693e96b8a4e5e968de38be4d2ba/pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e", size = 6558484 }, + { url = "https://files.pythonhosted.org/packages/9f/8f/fb84ab789713f7c6feacaa08dad3ec8105b88ade8d1c4f0f0dfcaaa017d6/pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c", size = 7971454 }, + { url = "https://files.pythonhosted.org/packages/eb/e2/02652007469263fe1466e98439831d65d4ca80ea1a2df29abecedf7e47b7/pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a", size = 5928156 }, + { url = "https://files.pythonhosted.org/packages/48/ef/f4fb45e2196bc7ffe09cad0542d9aff66b0e33f6c0954b43e49c33cad7bd/pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b", size = 6559559 }, + { url = "https://files.pythonhosted.org/packages/79/ef/68bb6aa865c5c9b11a35771329e95917b5559845bd75b65549407f9fc6b4/pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6", size = 7972495 }, + { url = "https://files.pythonhosted.org/packages/00/7c/d00d6bdd96de4344e06c4afbf218bc86b54436a94c01c71a8701f613aa56/pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897", size = 5939729 }, + { url = "https://files.pythonhosted.org/packages/21/27/0c8811fbc3ca188f93b5354e7c286eb91f80a53afa4e11007ef661afa746/pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47", size = 6543015 }, + { url = "https://files.pythonhosted.org/packages/9d/0f/d40f8373608caed2255781a3ad9a51d03a594a1248cd632d6a298daca693/pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091", size = 7976033 }, + { url = "https://files.pythonhosted.org/packages/a9/a4/aa562d8935e3df5e49c161b427a3a2efad2ed4e9cf81c3de636f1fdddfd0/pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed", size = 5938579 }, + { url = "https://files.pythonhosted.org/packages/c7/50/b0efb8bb66210da67a53ab95fd7a98826a97ee21f1d22949863e6d588b22/pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4", size = 6542056 }, + { url = "https://files.pythonhosted.org/packages/26/df/2b63e3e4f2df0224f8aaf6d131f54fe4e8c96400eb9df563e2aae2e1a1f9/pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd", size = 7974986 }, + { url = "https://files.pythonhosted.org/packages/a8/41/ead05a7657ffdbb1edabb954ab80825c4f87a3de0285d59f8290457f9016/pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341", size = 5991824 }, + { url = "https://files.pythonhosted.org/packages/e4/cd/0838c9a6063bff2e9bac2388ae36524c26c50288b5d7b6aebb6cdf8d375d/pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920", size = 6640327 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199 }, + { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758 }, + { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463 }, + { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280 }, + { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239 }, + { url = "https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802 }, + { url = "https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527 }, + { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052 }, + { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774 }, + { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612 }, + { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040 }, + { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829 }, + { url = 
"https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167 }, + { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952 }, + { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301 }, + { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638 }, + { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850 }, + { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980 }, + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size 
= 756611 }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, + { url = "https://files.pythonhosted.org/packages/65/d8/b7a1db13636d7fb7d4ff431593c510c8b8fca920ade06ca8ef20015493c5/PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", size = 184777 }, + { url = "https://files.pythonhosted.org/packages/0a/02/6ec546cd45143fdf9840b2c6be8d875116a64076218b61d68e12548e5839/PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", size = 172318 }, + { url = 
"https://files.pythonhosted.org/packages/0e/9a/8cc68be846c972bda34f6c2a93abb644fb2476f4dcc924d52175786932c9/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", size = 720891 }, + { url = "https://files.pythonhosted.org/packages/e9/6c/6e1b7f40181bc4805e2e07f4abc10a88ce4648e7e95ff1abe4ae4014a9b2/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", size = 722614 }, + { url = "https://files.pythonhosted.org/packages/3d/32/e7bd8535d22ea2874cef6a81021ba019474ace0d13a4819c2a4bce79bd6a/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", size = 737360 }, + { url = "https://files.pythonhosted.org/packages/d7/12/7322c1e30b9be969670b672573d45479edef72c9a0deac3bb2868f5d7469/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", size = 699006 }, + { url = "https://files.pythonhosted.org/packages/82/72/04fcad41ca56491995076630c3ec1e834be241664c0c09a64c9a2589b507/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", size = 723577 }, + { url = "https://files.pythonhosted.org/packages/ed/5e/46168b1f2757f1fcd442bc3029cd8767d88a98c9c05770d8b420948743bb/PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", size = 144593 }, + { url = "https://files.pythonhosted.org/packages/19/87/5124b1c1f2412bb95c59ec481eaf936cd32f0fe2a7b16b97b81c4c017a6a/PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", size = 162312 }, +] + +[[package]] +name = "pyzmq" +version = "26.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "implementation_name == 'pypy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fd/05/bed626b9f7bb2322cdbbf7b4bd8f54b1b617b0d2ab2d3547d6e39428a48e/pyzmq-26.2.0.tar.gz", hash = "sha256:070672c258581c8e4f640b5159297580a9974b026043bd4ab0470be9ed324f1f", size = 271975 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/a8/9837c39aba390eb7d01924ace49d761c8dbe7bc2d6082346d00c8332e431/pyzmq-26.2.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:ddf33d97d2f52d89f6e6e7ae66ee35a4d9ca6f36eda89c24591b0c40205a3629", size = 1340058 }, + { url = "https://files.pythonhosted.org/packages/a2/1f/a006f2e8e4f7d41d464272012695da17fb95f33b54342612a6890da96ff6/pyzmq-26.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dacd995031a01d16eec825bf30802fceb2c3791ef24bcce48fa98ce40918c27b", size = 1008818 }, + { url = "https://files.pythonhosted.org/packages/b6/09/b51b6683fde5ca04593a57bbe81788b6b43114d8f8ee4e80afc991e14760/pyzmq-26.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89289a5ee32ef6c439086184529ae060c741334b8970a6855ec0b6ad3ff28764", size = 673199 }, + { url = "https://files.pythonhosted.org/packages/c9/78/486f3e2e824f3a645238332bf5a4c4b4477c3063033a27c1e4052358dee2/pyzmq-26.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5506f06d7dc6ecf1efacb4a013b1f05071bb24b76350832c96449f4a2d95091c", size = 911762 }, + { url = 
"https://files.pythonhosted.org/packages/5e/3b/2eb1667c9b866f53e76ee8b0c301b0469745a23bd5a87b7ee3d5dd9eb6e5/pyzmq-26.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ea039387c10202ce304af74def5021e9adc6297067f3441d348d2b633e8166a", size = 868773 }, + { url = "https://files.pythonhosted.org/packages/16/29/ca99b4598a9dc7e468b5417eda91f372b595be1e3eec9b7cbe8e5d3584e8/pyzmq-26.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a2224fa4a4c2ee872886ed00a571f5e967c85e078e8e8c2530a2fb01b3309b88", size = 868834 }, + { url = "https://files.pythonhosted.org/packages/ad/e5/9efaeb1d2f4f8c50da04144f639b042bc52869d3a206d6bf672ab3522163/pyzmq-26.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:28ad5233e9c3b52d76196c696e362508959741e1a005fb8fa03b51aea156088f", size = 1202861 }, + { url = "https://files.pythonhosted.org/packages/c3/62/c721b5608a8ac0a69bb83cbb7d07a56f3ff00b3991a138e44198a16f94c7/pyzmq-26.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1c17211bc037c7d88e85ed8b7d8f7e52db6dc8eca5590d162717c654550f7282", size = 1515304 }, + { url = "https://files.pythonhosted.org/packages/87/84/e8bd321aa99b72f48d4606fc5a0a920154125bd0a4608c67eab742dab087/pyzmq-26.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b8f86dd868d41bea9a5f873ee13bf5551c94cf6bc51baebc6f85075971fe6eea", size = 1414712 }, + { url = "https://files.pythonhosted.org/packages/cd/cd/420e3fd1ac6977b008b72e7ad2dae6350cc84d4c5027fc390b024e61738f/pyzmq-26.2.0-cp310-cp310-win32.whl", hash = "sha256:46a446c212e58456b23af260f3d9fb785054f3e3653dbf7279d8f2b5546b21c2", size = 578113 }, + { url = "https://files.pythonhosted.org/packages/5c/57/73930d56ed45ae0cb4946f383f985c855c9b3d4063f26416998f07523c0e/pyzmq-26.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:49d34ab71db5a9c292a7644ce74190b1dd5a3475612eefb1f8be1d6961441971", size = 641631 }, + { url = "https://files.pythonhosted.org/packages/61/d2/ae6ac5c397f1ccad59031c64beaafce7a0d6182e0452cc48f1c9c87d2dd0/pyzmq-26.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:bfa832bfa540e5b5c27dcf5de5d82ebc431b82c453a43d141afb1e5d2de025fa", size = 543528 }, + { url = "https://files.pythonhosted.org/packages/12/20/de7442172f77f7c96299a0ac70e7d4fb78cd51eca67aa2cf552b66c14196/pyzmq-26.2.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:8f7e66c7113c684c2b3f1c83cdd3376103ee0ce4c49ff80a648643e57fb22218", size = 1340639 }, + { url = "https://files.pythonhosted.org/packages/98/4d/5000468bd64c7910190ed0a6c76a1ca59a68189ec1f007c451dc181a22f4/pyzmq-26.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3a495b30fc91db2db25120df5847d9833af237546fd59170701acd816ccc01c4", size = 1008710 }, + { url = "https://files.pythonhosted.org/packages/e1/bf/c67fd638c2f9fbbab8090a3ee779370b97c82b84cc12d0c498b285d7b2c0/pyzmq-26.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77eb0968da535cba0470a5165468b2cac7772cfb569977cff92e240f57e31bef", size = 673129 }, + { url = "https://files.pythonhosted.org/packages/86/94/99085a3f492aa538161cbf27246e8886ff850e113e0c294a5b8245f13b52/pyzmq-26.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ace4f71f1900a548f48407fc9be59c6ba9d9aaf658c2eea6cf2779e72f9f317", size = 910107 }, + { url = "https://files.pythonhosted.org/packages/31/1d/346809e8a9b999646d03f21096428453465b1bca5cd5c64ecd048d9ecb01/pyzmq-26.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a78853d7280bffb93df0a4a6a2498cba10ee793cc8076ef797ef2f74d107cf", size = 867960 }, + { url = 
"https://files.pythonhosted.org/packages/ab/68/6fb6ae5551846ad5beca295b7bca32bf0a7ce19f135cb30e55fa2314e6b6/pyzmq-26.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:689c5d781014956a4a6de61d74ba97b23547e431e9e7d64f27d4922ba96e9d6e", size = 869204 }, + { url = "https://files.pythonhosted.org/packages/0f/f9/18417771dee223ccf0f48e29adf8b4e25ba6d0e8285e33bcbce078070bc3/pyzmq-26.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0aca98bc423eb7d153214b2df397c6421ba6373d3397b26c057af3c904452e37", size = 1203351 }, + { url = "https://files.pythonhosted.org/packages/e0/46/f13e67fe0d4f8a2315782cbad50493de6203ea0d744610faf4d5f5b16e90/pyzmq-26.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f3496d76b89d9429a656293744ceca4d2ac2a10ae59b84c1da9b5165f429ad3", size = 1514204 }, + { url = "https://files.pythonhosted.org/packages/50/11/ddcf7343b7b7a226e0fc7b68cbf5a5bb56291fac07f5c3023bb4c319ebb4/pyzmq-26.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5c2b3bfd4b9689919db068ac6c9911f3fcb231c39f7dd30e3138be94896d18e6", size = 1414339 }, + { url = "https://files.pythonhosted.org/packages/01/14/1c18d7d5b7be2708f513f37c61bfadfa62161c10624f8733f1c8451b3509/pyzmq-26.2.0-cp311-cp311-win32.whl", hash = "sha256:eac5174677da084abf378739dbf4ad245661635f1600edd1221f150b165343f4", size = 576928 }, + { url = "https://files.pythonhosted.org/packages/3b/1b/0a540edd75a41df14ec416a9a500b9fec66e554aac920d4c58fbd5756776/pyzmq-26.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:5a509df7d0a83a4b178d0f937ef14286659225ef4e8812e05580776c70e155d5", size = 642317 }, + { url = "https://files.pythonhosted.org/packages/98/77/1cbfec0358078a4c5add529d8a70892db1be900980cdb5dd0898b3d6ab9d/pyzmq-26.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0e6091b157d48cbe37bd67233318dbb53e1e6327d6fc3bb284afd585d141003", size = 543834 }, + { url = "https://files.pythonhosted.org/packages/28/2f/78a766c8913ad62b28581777ac4ede50c6d9f249d39c2963e279524a1bbe/pyzmq-26.2.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:ded0fc7d90fe93ae0b18059930086c51e640cdd3baebdc783a695c77f123dcd9", size = 1343105 }, + { url = "https://files.pythonhosted.org/packages/b7/9c/4b1e2d3d4065be715e007fe063ec7885978fad285f87eae1436e6c3201f4/pyzmq-26.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17bf5a931c7f6618023cdacc7081f3f266aecb68ca692adac015c383a134ca52", size = 1008365 }, + { url = "https://files.pythonhosted.org/packages/4f/ef/5a23ec689ff36d7625b38d121ef15abfc3631a9aecb417baf7a4245e4124/pyzmq-26.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55cf66647e49d4621a7e20c8d13511ef1fe1efbbccf670811864452487007e08", size = 665923 }, + { url = "https://files.pythonhosted.org/packages/ae/61/d436461a47437d63c6302c90724cf0981883ec57ceb6073873f32172d676/pyzmq-26.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4661c88db4a9e0f958c8abc2b97472e23061f0bc737f6f6179d7a27024e1faa5", size = 903400 }, + { url = "https://files.pythonhosted.org/packages/47/42/fc6d35ecefe1739a819afaf6f8e686f7f02a4dd241c78972d316f403474c/pyzmq-26.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea7f69de383cb47522c9c208aec6dd17697db7875a4674c4af3f8cfdac0bdeae", size = 860034 }, + { url = "https://files.pythonhosted.org/packages/07/3b/44ea6266a6761e9eefaa37d98fabefa112328808ac41aa87b4bbb668af30/pyzmq-26.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7f98f6dfa8b8ccaf39163ce872bddacca38f6a67289116c8937a02e30bbe9711", size = 860579 }, + { url = 
"https://files.pythonhosted.org/packages/38/6f/4df2014ab553a6052b0e551b37da55166991510f9e1002c89cab7ce3b3f2/pyzmq-26.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e3e0210287329272539eea617830a6a28161fbbd8a3271bf4150ae3e58c5d0e6", size = 1196246 }, + { url = "https://files.pythonhosted.org/packages/38/9d/ee240fc0c9fe9817f0c9127a43238a3e28048795483c403cc10720ddef22/pyzmq-26.2.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6b274e0762c33c7471f1a7471d1a2085b1a35eba5cdc48d2ae319f28b6fc4de3", size = 1507441 }, + { url = "https://files.pythonhosted.org/packages/85/4f/01711edaa58d535eac4a26c294c617c9a01f09857c0ce191fd574d06f359/pyzmq-26.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:29c6a4635eef69d68a00321e12a7d2559fe2dfccfa8efae3ffb8e91cd0b36a8b", size = 1406498 }, + { url = "https://files.pythonhosted.org/packages/07/18/907134c85c7152f679ed744e73e645b365f3ad571f38bdb62e36f347699a/pyzmq-26.2.0-cp312-cp312-win32.whl", hash = "sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7", size = 575533 }, + { url = "https://files.pythonhosted.org/packages/ce/2c/a6f4a20202a4d3c582ad93f95ee78d79bbdc26803495aec2912b17dbbb6c/pyzmq-26.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a", size = 637768 }, + { url = "https://files.pythonhosted.org/packages/5f/0e/eb16ff731632d30554bf5af4dbba3ffcd04518219d82028aea4ae1b02ca5/pyzmq-26.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b", size = 540675 }, + { url = "https://files.pythonhosted.org/packages/04/a7/0f7e2f6c126fe6e62dbae0bc93b1bd3f1099cf7fea47a5468defebe3f39d/pyzmq-26.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726", size = 1006564 }, + { url = "https://files.pythonhosted.org/packages/31/b6/a187165c852c5d49f826a690857684333a6a4a065af0a6015572d2284f6a/pyzmq-26.2.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3", size = 1340447 }, + { url = "https://files.pythonhosted.org/packages/68/ba/f4280c58ff71f321602a6e24fd19879b7e79793fb8ab14027027c0fb58ef/pyzmq-26.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50", size = 665485 }, + { url = "https://files.pythonhosted.org/packages/77/b5/c987a5c53c7d8704216f29fc3d810b32f156bcea488a940e330e1bcbb88d/pyzmq-26.2.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb", size = 903484 }, + { url = "https://files.pythonhosted.org/packages/29/c9/07da157d2db18c72a7eccef8e684cefc155b712a88e3d479d930aa9eceba/pyzmq-26.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187", size = 859981 }, + { url = "https://files.pythonhosted.org/packages/43/09/e12501bd0b8394b7d02c41efd35c537a1988da67fc9c745cae9c6c776d31/pyzmq-26.2.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b", size = 860334 }, + { url = "https://files.pythonhosted.org/packages/eb/ff/f5ec1d455f8f7385cc0a8b2acd8c807d7fade875c14c44b85c1bddabae21/pyzmq-26.2.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18", size = 1196179 }, + { url = 
"https://files.pythonhosted.org/packages/ec/8a/bb2ac43295b1950fe436a81fc5b298be0b96ac76fb029b514d3ed58f7b27/pyzmq-26.2.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115", size = 1507668 }, + { url = "https://files.pythonhosted.org/packages/a9/49/dbc284ebcfd2dca23f6349227ff1616a7ee2c4a35fe0a5d6c3deff2b4fed/pyzmq-26.2.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e", size = 1406539 }, + { url = "https://files.pythonhosted.org/packages/00/68/093cdce3fe31e30a341d8e52a1ad86392e13c57970d722c1f62a1d1a54b6/pyzmq-26.2.0-cp313-cp313-win32.whl", hash = "sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5", size = 575567 }, + { url = "https://files.pythonhosted.org/packages/92/ae/6cc4657148143412b5819b05e362ae7dd09fb9fe76e2a539dcff3d0386bc/pyzmq-26.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad", size = 637551 }, + { url = "https://files.pythonhosted.org/packages/6c/67/fbff102e201688f97c8092e4c3445d1c1068c2f27bbd45a578df97ed5f94/pyzmq-26.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797", size = 540378 }, + { url = "https://files.pythonhosted.org/packages/3f/fe/2d998380b6e0122c6c4bdf9b6caf490831e5f5e2d08a203b5adff060c226/pyzmq-26.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a", size = 1007378 }, + { url = "https://files.pythonhosted.org/packages/4a/f4/30d6e7157f12b3a0390bde94d6a8567cdb88846ed068a6e17238a4ccf600/pyzmq-26.2.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc", size = 1329532 }, + { url = "https://files.pythonhosted.org/packages/82/86/3fe917870e15ee1c3ad48229a2a64458e36036e64b4afa9659045d82bfa8/pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5", size = 653242 }, + { url = "https://files.pythonhosted.org/packages/50/2d/242e7e6ef6c8c19e6cb52d095834508cd581ffb925699fd3c640cdc758f1/pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672", size = 888404 }, + { url = "https://files.pythonhosted.org/packages/ac/11/7270566e1f31e4ea73c81ec821a4b1688fd551009a3d2bab11ec66cb1e8f/pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797", size = 845858 }, + { url = "https://files.pythonhosted.org/packages/91/d5/72b38fbc69867795c8711bdd735312f9fef1e3d9204e2f63ab57085434b9/pyzmq-26.2.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386", size = 847375 }, + { url = "https://files.pythonhosted.org/packages/dd/9a/10ed3c7f72b4c24e719c59359fbadd1a27556a28b36cdf1cd9e4fb7845d5/pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306", size = 1183489 }, + { url = "https://files.pythonhosted.org/packages/72/2d/8660892543fabf1fe41861efa222455811adac9f3c0818d6c3170a1153e3/pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6", size = 1492932 }, + { url = 
"https://files.pythonhosted.org/packages/7b/d6/32fd69744afb53995619bc5effa2a405ae0d343cd3e747d0fbc43fe894ee/pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0", size = 1392485 }, + { url = "https://files.pythonhosted.org/packages/ac/9e/ad5fbbe1bcc7a9d1e8c5f4f7de48f2c1dc481e151ef80cc1ce9a7fe67b55/pyzmq-26.2.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:b1d464cb8d72bfc1a3adc53305a63a8e0cac6bc8c5a07e8ca190ab8d3faa43c2", size = 1341256 }, + { url = "https://files.pythonhosted.org/packages/4c/d9/d7a8022108c214803a82b0b69d4885cee00933d21928f1f09dca371cf4bf/pyzmq-26.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4da04c48873a6abdd71811c5e163bd656ee1b957971db7f35140a2d573f6949c", size = 1009385 }, + { url = "https://files.pythonhosted.org/packages/ed/69/0529b59ac667ea8bfe8796ac71796b688fbb42ff78e06525dabfed3bc7ae/pyzmq-26.2.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d049df610ac811dcffdc147153b414147428567fbbc8be43bb8885f04db39d98", size = 908009 }, + { url = "https://files.pythonhosted.org/packages/6e/bd/3ff3e1172f12f55769793a3a334e956ec2886805ebfb2f64756b6b5c6a1a/pyzmq-26.2.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05590cdbc6b902101d0e65d6a4780af14dc22914cc6ab995d99b85af45362cc9", size = 862078 }, + { url = "https://files.pythonhosted.org/packages/c3/ec/ab13585c3a1f48e2874253844c47b194d56eb25c94718691349c646f336f/pyzmq-26.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c811cfcd6a9bf680236c40c6f617187515269ab2912f3d7e8c0174898e2519db", size = 673756 }, + { url = "https://files.pythonhosted.org/packages/1e/be/febcd4b04dd50ee6d514dfbc33a3d5d9cb38ec9516e02bbfc929baa0f141/pyzmq-26.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6835dd60355593de10350394242b5757fbbd88b25287314316f266e24c61d073", size = 1203684 }, + { url = "https://files.pythonhosted.org/packages/16/28/304150e71afd2df3b82f52f66c0d8ab9ac6fe1f1ffdf92bad4c8cc91d557/pyzmq-26.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc6bee759a6bddea5db78d7dcd609397449cb2d2d6587f48f3ca613b19410cfc", size = 1515864 }, + { url = "https://files.pythonhosted.org/packages/18/89/8d48d8cd505c12a1f5edee597cc32ffcedc65fd8d2603aebaaedc38a7041/pyzmq-26.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c530e1eecd036ecc83c3407f77bb86feb79916d4a33d11394b8234f3bd35b940", size = 1415383 }, + { url = "https://files.pythonhosted.org/packages/d4/7e/43a60c3b179f7da0cbc2b649bd2702fd6a39bff5f72aa38d6e1aeb00256d/pyzmq-26.2.0-cp39-cp39-win32.whl", hash = "sha256:367b4f689786fca726ef7a6c5ba606958b145b9340a5e4808132cc65759abd44", size = 578540 }, + { url = "https://files.pythonhosted.org/packages/3a/55/8841dcd28f783ad06674c8fe8d7d72794b548d0bff8829aaafeb72e8b44d/pyzmq-26.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:e6fa2e3e683f34aea77de8112f6483803c96a44fd726d7358b9888ae5bb394ec", size = 642147 }, + { url = "https://files.pythonhosted.org/packages/b4/78/b3c31ccfcfcdd6ea50b6abc8f46a2a7aadb9c3d40531d1b908d834aaa12e/pyzmq-26.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:7445be39143a8aa4faec43b076e06944b8f9d0701b669df4af200531b21e40bb", size = 543903 }, + { url = "https://files.pythonhosted.org/packages/53/fb/36b2b2548286e9444e52fcd198760af99fd89102b5be50f0660fcfe902df/pyzmq-26.2.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072", size = 906955 }, + { url = 
"https://files.pythonhosted.org/packages/77/8f/6ce54f8979a01656e894946db6299e2273fcee21c8e5fa57c6295ef11f57/pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1", size = 565701 }, + { url = "https://files.pythonhosted.org/packages/ee/1c/bf8cd66730a866b16db8483286078892b7f6536f8c389fb46e4beba0a970/pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d", size = 794312 }, + { url = "https://files.pythonhosted.org/packages/71/43/91fa4ff25bbfdc914ab6bafa0f03241d69370ef31a761d16bb859f346582/pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4a71d5d6e7b28a47a394c0471b7e77a0661e2d651e7ae91e0cab0a587859ca", size = 752775 }, + { url = "https://files.pythonhosted.org/packages/ec/d2/3b2ab40f455a256cb6672186bea95cd97b459ce4594050132d71e76f0d6f/pyzmq-26.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:90412f2db8c02a3864cbfc67db0e3dcdbda336acf1c469526d3e869394fe001c", size = 550762 }, + { url = "https://files.pythonhosted.org/packages/6c/78/3096d72581365dfb0081ac9512a3b53672fa69854aa174d78636510c4db8/pyzmq-26.2.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cdeabcff45d1c219636ee2e54d852262e5c2e085d6cb476d938aee8d921356b3", size = 906945 }, + { url = "https://files.pythonhosted.org/packages/da/f2/8054574d77c269c31d055d4daf3d8407adf61ea384a50c8d14b158551d09/pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35cffef589bcdc587d06f9149f8d5e9e8859920a071df5a2671de2213bef592a", size = 565698 }, + { url = "https://files.pythonhosted.org/packages/77/21/c3ad93236d1d60eea10b67528f55e7db115a9d32e2bf163fcf601f85e9cc/pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18c8dc3b7468d8b4bdf60ce9d7141897da103c7a4690157b32b60acb45e333e6", size = 794307 }, + { url = "https://files.pythonhosted.org/packages/6a/49/e95b491724500fcb760178ce8db39b923429e328e57bcf9162e32c2c187c/pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7133d0a1677aec369d67dd78520d3fa96dd7f3dcec99d66c1762870e5ea1a50a", size = 752769 }, + { url = "https://files.pythonhosted.org/packages/9b/a9/50c9c06762b30792f71aaad8d1886748d39c4bffedc1171fbc6ad2b92d67/pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a96179a24b14fa6428cbfc08641c779a53f8fcec43644030328f44034c7f1f4", size = 751338 }, + { url = "https://files.pythonhosted.org/packages/ca/63/27e6142b4f67a442ee480986ca5b88edb01462dd2319843057683a5148bd/pyzmq-26.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4f78c88905461a9203eac9faac157a2a0dbba84a0fd09fd29315db27be40af9f", size = 550757 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = 
"sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "rich" +version = "13.9.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424 }, +] + +[[package]] +name = "setuptools" +version = "75.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/54/292f26c208734e9a7f067aea4a7e282c080750c4546559b58e2e45413ca0/setuptools-75.6.0.tar.gz", hash = "sha256:8199222558df7c86216af4f84c30e9b34a61d8ba19366cc914424cdbd28252f6", size = 1337429 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/21/47d163f615df1d30c094f6c8bbb353619274edccf0327b185cc2493c2c33/setuptools-75.6.0-py3-none-any.whl", hash = "sha256:ce74b49e8f7110f9bf04883b730f4765b774ef3ef28f722cce7c273d253aaf7d", size = 1224032 }, +] + +[[package]] +name = "shellingham" +version = "1.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755 }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, +] + +[[package]] +name = "starlette" +version = "0.41.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1a/4c/9b5764bd22eec91c4039ef4c55334e9187085da2d8a2df7bd570869aae18/starlette-0.41.3.tar.gz", hash = "sha256:0e4ab3d16522a255be6b28260b938eae2482f98ce5cc934cb08dce8dc3ba5835", size = 2574159 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/00/2b325970b3060c7cecebab6d295afe763365822b1306a12eeab198f74323/starlette-0.41.3-py3-none-any.whl", hash = "sha256:44cedb2b7c77a9de33a8b74b2b90e9f50d11fcf25d8270ea525ad71a25374ff7", size = 73225 }, +] + +[[package]] +name = "syftbox" +version = "0.3.5" +source = { editable = "." } +dependencies = [ + { name = "aiofiles" }, + { name = "croniter" }, + { name = "curl-cffi" }, + { name = "distro" }, + { name = "fastapi" }, + { name = "gunicorn" }, + { name = "httpx" }, + { name = "jinja2" }, + { name = "loguru" }, + { name = "msgpack" }, + { name = "opentelemetry-exporter-otlp-proto-grpc" }, + { name = "opentelemetry-instrumentation-fastapi" }, + { name = "opentelemetry-instrumentation-sqlite3" }, + { name = "pathspec" }, + { name = "pid" }, + { name = "psutil" }, + { name = "py-fast-rsync" }, + { name = "pydantic", extra = ["email"] }, + { name = "pydantic-settings" }, + { name = "pyjwt" }, + { name = "python-multipart" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "rich" }, + { name = "textual" }, + { name = "tqdm" }, + { name = "typer" }, + { name = "typing-extensions" }, + { name = "uvicorn" }, + { name = "wcmatch" }, +] + +[package.dev-dependencies] +dev = [ + { name = "bump2version" }, + { name = "faker" }, + { name = "ipykernel" }, + { name = "locust" }, + { name = "pre-commit" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "pytest-cov" }, + { name = "pytest-httpx" }, + { name = "pytest-timeout" }, + { name = "pytest-xdist", extra = ["psutil"] }, + { name = "textual-dev" }, +] + +[package.metadata] +requires-dist = [ + { name = "aiofiles", specifier = ">=24.1.0" }, + { name = "croniter", specifier = "==6.0.0" }, + { name = "curl-cffi", specifier = ">=0.7.4" }, + { name = "distro", specifier = "==1.9.0" }, + { name = "fastapi", specifier = "==0.115.7" }, + { name = "gunicorn", specifier = "==23.0.0" }, + { name = "httpx", specifier = "==0.28.1" }, + { name = "jinja2", specifier = "==3.1.5" }, + { name = "loguru", specifier = "==0.7.3" }, + { name = "msgpack", specifier = ">=1.1.0" }, + { name = "opentelemetry-exporter-otlp-proto-grpc", specifier = "==1.29.0" }, + { name = "opentelemetry-instrumentation-fastapi", specifier = "==0.50b0" }, + { name = "opentelemetry-instrumentation-sqlite3", specifier = "==0.50b0" }, + { name = "pathspec", specifier = "==0.12.1" }, + { name = "pid", specifier = "==3.0.4" }, + { name = "psutil", specifier = "==6.1.1" }, + { name = "py-fast-rsync", specifier = "==0.1.0" }, + { name = "pydantic", extras = 
["email"], specifier = "==2.10.6" }, + { name = "pydantic-settings", specifier = "==2.7.1" }, + { name = "pyjwt", specifier = "==2.10.1" }, + { name = "python-multipart", specifier = "==0.0.20" }, + { name = "pyyaml", specifier = "==6.0.2" }, + { name = "requests", specifier = ">=2.32.3" }, + { name = "rich", specifier = "==13.9.4" }, + { name = "textual", specifier = ">=1.0.0" }, + { name = "tqdm", specifier = ">=4.67.1" }, + { name = "typer", specifier = "==0.15.1" }, + { name = "typing-extensions", specifier = "==4.12.2" }, + { name = "uvicorn", specifier = "==0.34.0" }, + { name = "wcmatch", specifier = "==10.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "bump2version", specifier = ">=1.0.1" }, + { name = "faker", specifier = ">=30.3.5" }, + { name = "ipykernel", specifier = ">=6.29.5" }, + { name = "locust", specifier = ">=2.32.0" }, + { name = "pre-commit", specifier = ">=4.0.1" }, + { name = "pytest", specifier = ">=8.3.3" }, + { name = "pytest-asyncio", specifier = ">=0.24.0" }, + { name = "pytest-cov", specifier = ">=5.0.0" }, + { name = "pytest-httpx", specifier = ">=0.35.0" }, + { name = "pytest-timeout", specifier = ">=2.3.1" }, + { name = "pytest-xdist", extras = ["psutil"], specifier = ">=3.6.1" }, + { name = "textual-dev", specifier = ">=1.7.0" }, +] + +[[package]] +name = "textual" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py", extra = ["linkify", "plugins"] }, + { name = "platformdirs" }, + { name = "rich" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/b6/59b1de04bb4dca0f21ed7ba0b19309ed7f3f5de4396edf20cc2855e53085/textual-1.0.0.tar.gz", hash = "sha256:bec9fe63547c1c552569d1b75d309038b7d456c03f86dfa3706ddb099b151399", size = 1532733 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/bb/5fb6656c625019cd653d5215237d7cd6e0b12e7eae4195c3d1c91b2136fc/textual-1.0.0-py3-none-any.whl", hash = "sha256:2d4a701781c05104925e463ae370c630567c70c2880e92ab838052e3e23c986f", size = 660456 }, +] + +[[package]] +name = "textual-dev" +version = "1.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "click" }, + { name = "msgpack" }, + { name = "textual" }, + { name = "textual-serve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/d3/ed0b20f6de0af1b7062c402d59d256029c0daa055ad9e04c27471b450cdd/textual_dev-1.7.0.tar.gz", hash = "sha256:bf1a50eaaff4cd6a863535dd53f06dbbd62617c371604f66f56de3908220ccd5", size = 25935 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/4b/3c1eb9cbc39f2f28d27e10ef2fe42bfe0cf3c2f8445a454c124948d6169b/textual_dev-1.7.0-py3-none-any.whl", hash = "sha256:a93a846aeb6a06edb7808504d9c301565f7f4bf2e7046d56583ed755af356c8d", size = 27221 }, +] + +[[package]] +name = "textual-serve" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "aiohttp-jinja2" }, + { name = "jinja2" }, + { name = "rich" }, + { name = "textual" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/18/6c/57248070f525ea8a9a02d9f58dc2747c609b615b0bda1306aaeb80a233bd/textual_serve-1.1.1.tar.gz", hash = "sha256:71c662472c462e5e368defc660ee6e8eae3bfda88ca40c050c55474686eb0c54", size = 445957 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/07/a9/01d35770fde8d889e1fe28b726188cf28801e57afd369c614cd2bc100ee4/textual_serve-1.1.1-py3-none-any.whl", hash = "sha256:568782f1c0e60e3f7039d9121e1cb5c2f4ca1aaf6d6bd7aeb833d5763a534cb2", size = 445034 }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077 }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429 }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067 }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030 }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898 }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894 }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319 }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273 }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310 }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309 }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762 }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453 }, + { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486 }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349 }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159 }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243 }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645 }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584 }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875 }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418 }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, + { url = 
"https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, +] + +[[package]] +name = "tornado" +version = "6.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/59/45/a0daf161f7d6f36c3ea5fc0c2de619746cc3dd4c76402e9db545bd920f63/tornado-6.4.2.tar.gz", hash = "sha256:92bad5b4746e9879fd7bf1eb21dce4e3fc5128d71601f80005afa39237ad620b", size = 501135 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/7e/71f604d8cea1b58f82ba3590290b66da1e72d840aeb37e0d5f7291bd30db/tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1", size = 436299 }, + { url = "https://files.pythonhosted.org/packages/96/44/87543a3b99016d0bf54fdaab30d24bf0af2e848f1d13d34a3a5380aabe16/tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803", size = 434253 }, + { url = "https://files.pythonhosted.org/packages/cb/fb/fdf679b4ce51bcb7210801ef4f11fdac96e9885daa402861751353beea6e/tornado-6.4.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a017d239bd1bb0919f72af256a970624241f070496635784d9bf0db640d3fec", size = 437602 }, + { url = 
"https://files.pythonhosted.org/packages/4f/3b/e31aeffffc22b475a64dbeb273026a21b5b566f74dee48742817626c47dc/tornado-6.4.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36e62ce8f63409301537222faffcef7dfc5284f27eec227389f2ad11b09d946", size = 436972 }, + { url = "https://files.pythonhosted.org/packages/22/55/b78a464de78051a30599ceb6983b01d8f732e6f69bf37b4ed07f642ac0fc/tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca9eb02196e789c9cb5c3c7c0f04fb447dc2adffd95265b2c7223a8a615ccbf", size = 437173 }, + { url = "https://files.pythonhosted.org/packages/79/5e/be4fb0d1684eb822c9a62fb18a3e44a06188f78aa466b2ad991d2ee31104/tornado-6.4.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:304463bd0772442ff4d0f5149c6f1c2135a1fae045adf070821c6cdc76980634", size = 437892 }, + { url = "https://files.pythonhosted.org/packages/f5/33/4f91fdd94ea36e1d796147003b490fe60a0215ac5737b6f9c65e160d4fe0/tornado-6.4.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:c82c46813ba483a385ab2a99caeaedf92585a1f90defb5693351fa7e4ea0bf73", size = 437334 }, + { url = "https://files.pythonhosted.org/packages/2b/ae/c1b22d4524b0e10da2f29a176fb2890386f7bd1f63aacf186444873a88a0/tornado-6.4.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:932d195ca9015956fa502c6b56af9eb06106140d844a335590c1ec7f5277d10c", size = 437261 }, + { url = "https://files.pythonhosted.org/packages/b5/25/36dbd49ab6d179bcfc4c6c093a51795a4f3bed380543a8242ac3517a1751/tornado-6.4.2-cp38-abi3-win32.whl", hash = "sha256:2876cef82e6c5978fde1e0d5b1f919d756968d5b4282418f3146b79b58556482", size = 438463 }, + { url = "https://files.pythonhosted.org/packages/61/cc/58b1adeb1bb46228442081e746fcdbc4540905c87e8add7c277540934edb/tornado-6.4.2-cp38-abi3-win_amd64.whl", hash = "sha256:908b71bf3ff37d81073356a5fadcc660eb10c1476ee6e2725588626ce7e5ca38", size = 438907 }, +] + +[[package]] +name = "tqdm" +version = "4.67.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "platform_system == 'Windows'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540 }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359 }, +] + +[[package]] +name = "typer" +version = "0.15.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "shellingham" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/cb/ce/dca7b219718afd37a0068f4f2530a727c2b74a8b6e8e0c0080a4c0de4fcd/typer-0.15.1.tar.gz", hash = "sha256:a0588c0a7fa68a1978a069818657778f86abe6ff5ea6abf472f940a08bfe4f0a", size = 99789 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/cc/0a838ba5ca64dc832aa43f727bd586309846b0ffb2ce52422543e6075e8a/typer-0.15.1-py3-none-any.whl", hash = "sha256:7994fb7b8155b64d3402518560648446072864beefd44aa2dc36972a5972e847", size = 44908 }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, +] + +[[package]] +name = "uc-micro-py" +version = "1.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/7a/146a99696aee0609e3712f2b44c6274566bc368dfe8375191278045186b8/uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a", size = 6043 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229 }, +] + +[[package]] +name = "urllib3" +version = "2.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/63/22ba4ebfe7430b76388e7cd448d5478814d3032121827c12a2cc287e2260/urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9", size = 300677 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 }, +] + +[[package]] +name = "uvicorn" +version = "0.34.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/4d/938bd85e5bf2edeec766267a5015ad969730bb91e31b44021dfe8b22df6c/uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9", size = 76568 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 }, +] + +[[package]] +name = "virtualenv" +version = "20.28.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/75/53316a5a8050069228a2f6d11f32046cfa94fbb6cc3f08703f59b873de2e/virtualenv-20.28.0.tar.gz", hash = 
"sha256:2c9c3262bb8e7b87ea801d715fae4495e6032450c71d2309be9550e7364049aa", size = 7650368 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/10/f9/0919cf6f1432a8c4baa62511f8f8da8225432d22e83e3476f5be1a1edc6e/virtualenv-20.28.0-py3-none-any.whl", hash = "sha256:23eae1b4516ecd610481eda647f3a7c09aea295055337331bb4e6892ecce47b0", size = 4276702 }, +] + +[[package]] +name = "wcmatch" +version = "10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bracex" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/ab/b3a52228538ccb983653c446c1656eddf1d5303b9cb8b9aef6a91299f862/wcmatch-10.0.tar.gz", hash = "sha256:e72f0de09bba6a04e0de70937b0cf06e55f36f37b3deb422dfaf854b867b840a", size = 115578 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/df/4ee467ab39cc1de4b852c212c1ed3becfec2e486a51ac1ce0091f85f38d7/wcmatch-10.0-py3-none-any.whl", hash = "sha256:0dd927072d03c0a6527a20d2e6ad5ba8d0380e60870c383bc533b71744df7b7a", size = 39347 }, +] + +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, +] + +[[package]] +name = "werkzeug" +version = "3.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/69/83029f1f6300c5fb2471d621ab06f6ec6b3324685a2ce0f9777fd4a8b71e/werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746", size = 806925 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498 }, +] + +[[package]] +name = "win32-setctime" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083 }, +] + +[[package]] +name = "wrapt" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/a1/fc03dca9b0432725c2e8cdbf91a349d2194cf03d8523c124faebe581de09/wrapt-1.17.0.tar.gz", hash = "sha256:16187aa2317c731170a88ef35e8937ae0f533c402872c1ee5e6d079fcf320801", size = 55542 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/f9/85220321e9bb1a5f72ccce6604395ae75fcb463d87dad0014dc1010bd1f1/wrapt-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a0c23b8319848426f305f9cb0c98a6e32ee68a36264f45948ccf8e7d2b941f8", size = 38766 
}, + { url = "https://files.pythonhosted.org/packages/ff/71/ff624ff3bde91ceb65db6952cdf8947bc0111d91bd2359343bc2fa7c57fd/wrapt-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1ca5f060e205f72bec57faae5bd817a1560fcfc4af03f414b08fa29106b7e2d", size = 83262 }, + { url = "https://files.pythonhosted.org/packages/9f/0a/814d4a121a643af99cfe55a43e9e6dd08f4a47cdac8e8f0912c018794715/wrapt-1.17.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e185ec6060e301a7e5f8461c86fb3640a7beb1a0f0208ffde7a65ec4074931df", size = 74990 }, + { url = "https://files.pythonhosted.org/packages/cd/c7/b8c89bf5ca5c4e6a2d0565d149d549cdb4cffb8916d1d1b546b62fb79281/wrapt-1.17.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb90765dd91aed05b53cd7a87bd7f5c188fcd95960914bae0d32c5e7f899719d", size = 82712 }, + { url = "https://files.pythonhosted.org/packages/19/7c/5977aefa8460906c1ff914fd42b11cf6c09ded5388e46e1cc6cea4ab15e9/wrapt-1.17.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:879591c2b5ab0a7184258274c42a126b74a2c3d5a329df16d69f9cee07bba6ea", size = 81705 }, + { url = "https://files.pythonhosted.org/packages/ae/e7/233402d7bd805096bb4a8ec471f5a141421a01de3c8c957cce569772c056/wrapt-1.17.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fce6fee67c318fdfb7f285c29a82d84782ae2579c0e1b385b7f36c6e8074fffb", size = 74636 }, + { url = "https://files.pythonhosted.org/packages/93/81/b6c32d8387d9cfbc0134f01585dee7583315c3b46dfd3ae64d47693cd078/wrapt-1.17.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0698d3a86f68abc894d537887b9bbf84d29bcfbc759e23f4644be27acf6da301", size = 81299 }, + { url = "https://files.pythonhosted.org/packages/d1/c3/1fae15d453468c98f09519076f8d401b476d18d8d94379e839eed14c4c8b/wrapt-1.17.0-cp310-cp310-win32.whl", hash = "sha256:69d093792dc34a9c4c8a70e4973a3361c7a7578e9cd86961b2bbf38ca71e4e22", size = 36425 }, + { url = "https://files.pythonhosted.org/packages/c6/f4/77e0886c95556f2b4caa8908ea8eb85f713fc68296a2113f8c63d50fe0fb/wrapt-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:f28b29dc158ca5d6ac396c8e0a2ef45c4e97bb7e65522bfc04c989e6fe814575", size = 38748 }, + { url = "https://files.pythonhosted.org/packages/0e/40/def56538acddc2f764c157d565b9f989072a1d2f2a8e384324e2e104fc7d/wrapt-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:74bf625b1b4caaa7bad51d9003f8b07a468a704e0644a700e936c357c17dd45a", size = 38766 }, + { url = "https://files.pythonhosted.org/packages/89/e2/8c299f384ae4364193724e2adad99f9504599d02a73ec9199bf3f406549d/wrapt-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f2a28eb35cf99d5f5bd12f5dd44a0f41d206db226535b37b0c60e9da162c3ed", size = 83730 }, + { url = "https://files.pythonhosted.org/packages/29/ef/fcdb776b12df5ea7180d065b28fa6bb27ac785dddcd7202a0b6962bbdb47/wrapt-1.17.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81b1289e99cf4bad07c23393ab447e5e96db0ab50974a280f7954b071d41b489", size = 75470 }, + { url = "https://files.pythonhosted.org/packages/55/b5/698bd0bf9fbb3ddb3a2feefbb7ad0dea1205f5d7d05b9cbab54f5db731aa/wrapt-1.17.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2939cd4a2a52ca32bc0b359015718472d7f6de870760342e7ba295be9ebaf9", size = 83168 }, + { url = 
"https://files.pythonhosted.org/packages/ce/07/701a5cee28cb4d5df030d4b2649319e36f3d9fdd8000ef1d84eb06b9860d/wrapt-1.17.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a9653131bda68a1f029c52157fd81e11f07d485df55410401f745007bd6d339", size = 82307 }, + { url = "https://files.pythonhosted.org/packages/42/92/c48ba92cda6f74cb914dc3c5bba9650dc80b790e121c4b987f3a46b028f5/wrapt-1.17.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4e4b4385363de9052dac1a67bfb535c376f3d19c238b5f36bddc95efae15e12d", size = 75101 }, + { url = "https://files.pythonhosted.org/packages/8a/0a/9276d3269334138b88a2947efaaf6335f61d547698e50dff672ade24f2c6/wrapt-1.17.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bdf62d25234290db1837875d4dceb2151e4ea7f9fff2ed41c0fde23ed542eb5b", size = 81835 }, + { url = "https://files.pythonhosted.org/packages/b9/4c/39595e692753ef656ea94b51382cc9aea662fef59d7910128f5906486f0e/wrapt-1.17.0-cp311-cp311-win32.whl", hash = "sha256:5d8fd17635b262448ab8f99230fe4dac991af1dabdbb92f7a70a6afac8a7e346", size = 36412 }, + { url = "https://files.pythonhosted.org/packages/63/bb/c293a67fb765a2ada48f48cd0f2bb957da8161439da4c03ea123b9894c02/wrapt-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:92a3d214d5e53cb1db8b015f30d544bc9d3f7179a05feb8f16df713cecc2620a", size = 38744 }, + { url = "https://files.pythonhosted.org/packages/85/82/518605474beafff11f1a34759f6410ab429abff9f7881858a447e0d20712/wrapt-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:89fc28495896097622c3fc238915c79365dd0ede02f9a82ce436b13bd0ab7569", size = 38904 }, + { url = "https://files.pythonhosted.org/packages/80/6c/17c3b2fed28edfd96d8417c865ef0b4c955dc52c4e375d86f459f14340f1/wrapt-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:875d240fdbdbe9e11f9831901fb8719da0bd4e6131f83aa9f69b96d18fae7504", size = 88622 }, + { url = "https://files.pythonhosted.org/packages/4a/11/60ecdf3b0fd3dca18978d89acb5d095a05f23299216e925fcd2717c81d93/wrapt-1.17.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5ed16d95fd142e9c72b6c10b06514ad30e846a0d0917ab406186541fe68b451", size = 80920 }, + { url = "https://files.pythonhosted.org/packages/d2/50/dbef1a651578a3520d4534c1e434989e3620380c1ad97e309576b47f0ada/wrapt-1.17.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18b956061b8db634120b58f668592a772e87e2e78bc1f6a906cfcaa0cc7991c1", size = 89170 }, + { url = "https://files.pythonhosted.org/packages/44/a2/78c5956bf39955288c9e0dd62e807b308c3aa15a0f611fbff52aa8d6b5ea/wrapt-1.17.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:daba396199399ccabafbfc509037ac635a6bc18510ad1add8fd16d4739cdd106", size = 86748 }, + { url = "https://files.pythonhosted.org/packages/99/49/2ee413c78fc0bdfebe5bee590bf3becdc1fab0096a7a9c3b5c9666b2415f/wrapt-1.17.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4d63f4d446e10ad19ed01188d6c1e1bb134cde8c18b0aa2acfd973d41fcc5ada", size = 79734 }, + { url = "https://files.pythonhosted.org/packages/c0/8c/4221b7b270e36be90f0930fe15a4755a6ea24093f90b510166e9ed7861ea/wrapt-1.17.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8a5e7cc39a45fc430af1aefc4d77ee6bad72c5bcdb1322cfde852c15192b8bd4", size = 87552 }, + { url = "https://files.pythonhosted.org/packages/4c/6b/1aaccf3efe58eb95e10ce8e77c8909b7a6b0da93449a92c4e6d6d10b3a3d/wrapt-1.17.0-cp312-cp312-win32.whl", hash = "sha256:0a0a1a1ec28b641f2a3a2c35cbe86c00051c04fffcfcc577ffcdd707df3f8635", size 
= 36647 }, + { url = "https://files.pythonhosted.org/packages/b3/4f/243f88ac49df005b9129194c6511b3642818b3e6271ddea47a15e2ee4934/wrapt-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:3c34f6896a01b84bab196f7119770fd8466c8ae3dfa73c59c0bb281e7b588ce7", size = 38830 }, + { url = "https://files.pythonhosted.org/packages/67/9c/38294e1bb92b055222d1b8b6591604ca4468b77b1250f59c15256437644f/wrapt-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:714c12485aa52efbc0fc0ade1e9ab3a70343db82627f90f2ecbc898fdf0bb181", size = 38904 }, + { url = "https://files.pythonhosted.org/packages/78/b6/76597fb362cbf8913a481d41b14b049a8813cd402a5d2f84e57957c813ae/wrapt-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da427d311782324a376cacb47c1a4adc43f99fd9d996ffc1b3e8529c4074d393", size = 88608 }, + { url = "https://files.pythonhosted.org/packages/bc/69/b500884e45b3881926b5f69188dc542fb5880019d15c8a0df1ab1dfda1f7/wrapt-1.17.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba1739fb38441a27a676f4de4123d3e858e494fac05868b7a281c0a383c098f4", size = 80879 }, + { url = "https://files.pythonhosted.org/packages/52/31/f4cc58afe29eab8a50ac5969963010c8b60987e719c478a5024bce39bc42/wrapt-1.17.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e711fc1acc7468463bc084d1b68561e40d1eaa135d8c509a65dd534403d83d7b", size = 89119 }, + { url = "https://files.pythonhosted.org/packages/aa/9c/05ab6bf75dbae7a9d34975fb6ee577e086c1c26cde3b6cf6051726d33c7c/wrapt-1.17.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:140ea00c87fafc42739bd74a94a5a9003f8e72c27c47cd4f61d8e05e6dec8721", size = 86778 }, + { url = "https://files.pythonhosted.org/packages/0e/6c/4b8d42e3db355603d35fe5c9db79c28f2472a6fd1ccf4dc25ae46739672a/wrapt-1.17.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:73a96fd11d2b2e77d623a7f26e004cc31f131a365add1ce1ce9a19e55a1eef90", size = 79793 }, + { url = "https://files.pythonhosted.org/packages/69/23/90e3a2ee210c0843b2c2a49b3b97ffcf9cad1387cb18cbeef9218631ed5a/wrapt-1.17.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0b48554952f0f387984da81ccfa73b62e52817a4386d070c75e4db7d43a28c4a", size = 87606 }, + { url = "https://files.pythonhosted.org/packages/5f/06/3683126491ca787d8d71d8d340e775d40767c5efedb35039d987203393b7/wrapt-1.17.0-cp313-cp313-win32.whl", hash = "sha256:498fec8da10e3e62edd1e7368f4b24aa362ac0ad931e678332d1b209aec93045", size = 36651 }, + { url = "https://files.pythonhosted.org/packages/f1/bc/3bf6d2ca0d2c030d324ef9272bea0a8fdaff68f3d1fa7be7a61da88e51f7/wrapt-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd136bb85f4568fffca995bd3c8d52080b1e5b225dbf1c2b17b66b4c5fa02838", size = 38835 }, + { url = "https://files.pythonhosted.org/packages/ce/b5/251165c232d87197a81cd362eeb5104d661a2dd3aa1f0b33e4bf61dda8b8/wrapt-1.17.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:17fcf043d0b4724858f25b8826c36e08f9fb2e475410bece0ec44a22d533da9b", size = 40146 }, + { url = "https://files.pythonhosted.org/packages/89/33/1e1bdd3e866eeb73d8c4755db1ceb8a80d5bd51ee4648b3f2247adec4e67/wrapt-1.17.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4a557d97f12813dc5e18dad9fa765ae44ddd56a672bb5de4825527c847d6379", size = 113444 }, + { url = 
"https://files.pythonhosted.org/packages/9f/7c/94f53b065a43f5dc1fbdd8b80fd8f41284315b543805c956619c0b8d92f0/wrapt-1.17.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0229b247b0fc7dee0d36176cbb79dbaf2a9eb7ecc50ec3121f40ef443155fb1d", size = 101246 }, + { url = "https://files.pythonhosted.org/packages/62/5d/640360baac6ea6018ed5e34e6e80e33cfbae2aefde24f117587cd5efd4b7/wrapt-1.17.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8425cfce27b8b20c9b89d77fb50e368d8306a90bf2b6eef2cdf5cd5083adf83f", size = 109320 }, + { url = "https://files.pythonhosted.org/packages/e3/cf/6c7a00ae86a2e9482c91170aefe93f4ccda06c1ac86c4de637c69133da59/wrapt-1.17.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9c900108df470060174108012de06d45f514aa4ec21a191e7ab42988ff42a86c", size = 110193 }, + { url = "https://files.pythonhosted.org/packages/cd/cc/aa718df0d20287e8f953ce0e2f70c0af0fba1d3c367db7ee8bdc46ea7003/wrapt-1.17.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:4e547b447073fc0dbfcbff15154c1be8823d10dab4ad401bdb1575e3fdedff1b", size = 100460 }, + { url = "https://files.pythonhosted.org/packages/f7/16/9f3ac99fe1f6caaa789d67b4e3c562898b532c250769f5255fa8b8b93983/wrapt-1.17.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:914f66f3b6fc7b915d46c1cc424bc2441841083de01b90f9e81109c9759e43ab", size = 106347 }, + { url = "https://files.pythonhosted.org/packages/64/85/c77a331b2c06af49a687f8b926fc2d111047a51e6f0b0a4baa01ff3a673a/wrapt-1.17.0-cp313-cp313t-win32.whl", hash = "sha256:a4192b45dff127c7d69b3bdfb4d3e47b64179a0b9900b6351859f3001397dabf", size = 37971 }, + { url = "https://files.pythonhosted.org/packages/05/9b/b2469f8be9efed24283fd7b9eeb8e913e9bc0715cf919ea8645e428ab7af/wrapt-1.17.0-cp313-cp313t-win_amd64.whl", hash = "sha256:4f643df3d4419ea3f856c5c3f40fec1d65ea2e89ec812c83f7767c8730f9827a", size = 40755 }, + { url = "https://files.pythonhosted.org/packages/89/03/518069f0708573c02cbba3a3e452be3642dc7d984d0a03a47e0850e2fb05/wrapt-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d751300b94e35b6016d4b1e7d0e7bbc3b5e1751e2405ef908316c2a9024008a1", size = 38765 }, + { url = "https://files.pythonhosted.org/packages/60/01/12dd81522f8c1c953e98e2cbf356ff44fbb06ef0f7523cd622ac06ad7f03/wrapt-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7264cbb4a18dc4acfd73b63e4bcfec9c9802614572025bdd44d0721983fc1d9c", size = 83012 }, + { url = "https://files.pythonhosted.org/packages/c4/2d/9853fe0009271b2841f839eb0e707c6b4307d169375f26c58812ecf4fd71/wrapt-1.17.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33539c6f5b96cf0b1105a0ff4cf5db9332e773bb521cc804a90e58dc49b10578", size = 74759 }, + { url = "https://files.pythonhosted.org/packages/94/5c/03c911442b01b50e364572581430e12f82c3f5ea74d302907c1449d7ba36/wrapt-1.17.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c30970bdee1cad6a8da2044febd824ef6dc4cc0b19e39af3085c763fdec7de33", size = 82540 }, + { url = "https://files.pythonhosted.org/packages/52/e0/ef637448514295a6b3a01cf1dff417e081e7b8cf1eb712839962459af1f6/wrapt-1.17.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bc7f729a72b16ee21795a943f85c6244971724819819a41ddbaeb691b2dd85ad", size = 81461 }, + { url = 
"https://files.pythonhosted.org/packages/7f/44/8b7d417c3aae3a35ccfe361375ee3e452901c91062e5462e1aeef98255e8/wrapt-1.17.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:6ff02a91c4fc9b6a94e1c9c20f62ea06a7e375f42fe57587f004d1078ac86ca9", size = 74380 }, + { url = "https://files.pythonhosted.org/packages/af/a9/e65406a9c3a99162055efcb6bf5e0261924381228c0a7608066805da03df/wrapt-1.17.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dfb7cff84e72e7bf975b06b4989477873dcf160b2fd89959c629535df53d4e0", size = 81057 }, + { url = "https://files.pythonhosted.org/packages/55/0c/111d42fb658a2f9ed7024cd5e57c08521d61646a256a3946db7d500c1551/wrapt-1.17.0-cp39-cp39-win32.whl", hash = "sha256:2399408ac33ffd5b200480ee858baa58d77dd30e0dd0cab6a8a9547135f30a88", size = 36415 }, + { url = "https://files.pythonhosted.org/packages/00/33/e7b14a7c06cedfaae064f34e95c95350de7cc10187ac173743e30a956b30/wrapt-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:4f763a29ee6a20c529496a20a7bcb16a73de27f5da6a843249c7047daf135977", size = 38742 }, + { url = "https://files.pythonhosted.org/packages/4b/d9/a8ba5e9507a9af1917285d118388c5eb7a81834873f45df213a6fe923774/wrapt-1.17.0-py3-none-any.whl", hash = "sha256:d2c63b93548eda58abf5188e505ffed0229bf675f7c3090f8e36ad55b8cbc371", size = 23592 }, +] + +[[package]] +name = "yarl" +version = "1.18.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b7/9d/4b94a8e6d2b51b599516a5cb88e5bc99b4d8d4583e468057eaa29d5f0918/yarl-1.18.3.tar.gz", hash = "sha256:ac1801c45cbf77b6c99242eeff4fffb5e4e73a800b5c4ad4fc0be5def634d2e1", size = 181062 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/98/e005bc608765a8a5569f58e650961314873c8469c333616eb40bff19ae97/yarl-1.18.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7df647e8edd71f000a5208fe6ff8c382a1de8edfbccdbbfe649d263de07d8c34", size = 141458 }, + { url = "https://files.pythonhosted.org/packages/df/5d/f8106b263b8ae8a866b46d9be869ac01f9b3fb7f2325f3ecb3df8003f796/yarl-1.18.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c69697d3adff5aa4f874b19c0e4ed65180ceed6318ec856ebc423aa5850d84f7", size = 94365 }, + { url = "https://files.pythonhosted.org/packages/56/3e/d8637ddb9ba69bf851f765a3ee288676f7cf64fb3be13760c18cbc9d10bd/yarl-1.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:602d98f2c2d929f8e697ed274fbadc09902c4025c5a9963bf4e9edfc3ab6f7ed", size = 92181 }, + { url = "https://files.pythonhosted.org/packages/76/f9/d616a5c2daae281171de10fba41e1c0e2d8207166fc3547252f7d469b4e1/yarl-1.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c654d5207c78e0bd6d749f6dae1dcbbfde3403ad3a4b11f3c5544d9906969dde", size = 315349 }, + { url = "https://files.pythonhosted.org/packages/bb/b4/3ea5e7b6f08f698b3769a06054783e434f6d59857181b5c4e145de83f59b/yarl-1.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5094d9206c64181d0f6e76ebd8fb2f8fe274950a63890ee9e0ebfd58bf9d787b", size = 330494 }, + { url = "https://files.pythonhosted.org/packages/55/f1/e0fc810554877b1b67420568afff51b967baed5b53bcc983ab164eebf9c9/yarl-1.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35098b24e0327fc4ebdc8ffe336cee0a87a700c24ffed13161af80124b7dc8e5", size = 326927 }, + { url = 
"https://files.pythonhosted.org/packages/a9/42/b1753949b327b36f210899f2dd0a0947c0c74e42a32de3f8eb5c7d93edca/yarl-1.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3236da9272872443f81fedc389bace88408f64f89f75d1bdb2256069a8730ccc", size = 319703 }, + { url = "https://files.pythonhosted.org/packages/f0/6d/e87c62dc9635daefb064b56f5c97df55a2e9cc947a2b3afd4fd2f3b841c7/yarl-1.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2c08cc9b16f4f4bc522771d96734c7901e7ebef70c6c5c35dd0f10845270bcd", size = 310246 }, + { url = "https://files.pythonhosted.org/packages/e3/ef/e2e8d1785cdcbd986f7622d7f0098205f3644546da7919c24b95790ec65a/yarl-1.18.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80316a8bd5109320d38eef8833ccf5f89608c9107d02d2a7f985f98ed6876990", size = 319730 }, + { url = "https://files.pythonhosted.org/packages/fc/15/8723e22345bc160dfde68c4b3ae8b236e868f9963c74015f1bc8a614101c/yarl-1.18.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c1e1cc06da1491e6734f0ea1e6294ce00792193c463350626571c287c9a704db", size = 321681 }, + { url = "https://files.pythonhosted.org/packages/86/09/bf764e974f1516efa0ae2801494a5951e959f1610dd41edbfc07e5e0f978/yarl-1.18.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fea09ca13323376a2fdfb353a5fa2e59f90cd18d7ca4eaa1fd31f0a8b4f91e62", size = 324812 }, + { url = "https://files.pythonhosted.org/packages/f6/4c/20a0187e3b903c97d857cf0272d687c1b08b03438968ae8ffc50fe78b0d6/yarl-1.18.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e3b9fd71836999aad54084906f8663dffcd2a7fb5cdafd6c37713b2e72be1760", size = 337011 }, + { url = "https://files.pythonhosted.org/packages/c9/71/6244599a6e1cc4c9f73254a627234e0dad3883ece40cc33dce6265977461/yarl-1.18.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:757e81cae69244257d125ff31663249b3013b5dc0a8520d73694aed497fb195b", size = 338132 }, + { url = "https://files.pythonhosted.org/packages/af/f5/e0c3efaf74566c4b4a41cb76d27097df424052a064216beccae8d303c90f/yarl-1.18.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b1771de9944d875f1b98a745bc547e684b863abf8f8287da8466cf470ef52690", size = 331849 }, + { url = "https://files.pythonhosted.org/packages/8a/b8/3d16209c2014c2f98a8f658850a57b716efb97930aebf1ca0d9325933731/yarl-1.18.3-cp310-cp310-win32.whl", hash = "sha256:8874027a53e3aea659a6d62751800cf6e63314c160fd607489ba5c2edd753cf6", size = 84309 }, + { url = "https://files.pythonhosted.org/packages/fd/b7/2e9a5b18eb0fe24c3a0e8bae994e812ed9852ab4fd067c0107fadde0d5f0/yarl-1.18.3-cp310-cp310-win_amd64.whl", hash = "sha256:93b2e109287f93db79210f86deb6b9bbb81ac32fc97236b16f7433db7fc437d8", size = 90484 }, + { url = "https://files.pythonhosted.org/packages/40/93/282b5f4898d8e8efaf0790ba6d10e2245d2c9f30e199d1a85cae9356098c/yarl-1.18.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8503ad47387b8ebd39cbbbdf0bf113e17330ffd339ba1144074da24c545f0069", size = 141555 }, + { url = "https://files.pythonhosted.org/packages/6d/9c/0a49af78df099c283ca3444560f10718fadb8a18dc8b3edf8c7bd9fd7d89/yarl-1.18.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02ddb6756f8f4517a2d5e99d8b2f272488e18dd0bfbc802f31c16c6c20f22193", size = 94351 }, + { url = "https://files.pythonhosted.org/packages/5a/a1/205ab51e148fdcedad189ca8dd587794c6f119882437d04c33c01a75dece/yarl-1.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:67a283dd2882ac98cc6318384f565bffc751ab564605959df4752d42483ad889", size = 92286 }, + { url = 
"https://files.pythonhosted.org/packages/ed/fe/88b690b30f3f59275fb674f5f93ddd4a3ae796c2b62e5bb9ece8a4914b83/yarl-1.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d980e0325b6eddc81331d3f4551e2a333999fb176fd153e075c6d1c2530aa8a8", size = 340649 }, + { url = "https://files.pythonhosted.org/packages/07/eb/3b65499b568e01f36e847cebdc8d7ccb51fff716dbda1ae83c3cbb8ca1c9/yarl-1.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b643562c12680b01e17239be267bc306bbc6aac1f34f6444d1bded0c5ce438ca", size = 356623 }, + { url = "https://files.pythonhosted.org/packages/33/46/f559dc184280b745fc76ec6b1954de2c55595f0ec0a7614238b9ebf69618/yarl-1.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c017a3b6df3a1bd45b9fa49a0f54005e53fbcad16633870104b66fa1a30a29d8", size = 354007 }, + { url = "https://files.pythonhosted.org/packages/af/ba/1865d85212351ad160f19fb99808acf23aab9a0f8ff31c8c9f1b4d671fc9/yarl-1.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75674776d96d7b851b6498f17824ba17849d790a44d282929c42dbb77d4f17ae", size = 344145 }, + { url = "https://files.pythonhosted.org/packages/94/cb/5c3e975d77755d7b3d5193e92056b19d83752ea2da7ab394e22260a7b824/yarl-1.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccaa3a4b521b780a7e771cc336a2dba389a0861592bbce09a476190bb0c8b4b3", size = 336133 }, + { url = "https://files.pythonhosted.org/packages/19/89/b77d3fd249ab52a5c40859815765d35c91425b6bb82e7427ab2f78f5ff55/yarl-1.18.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2d06d3005e668744e11ed80812e61efd77d70bb7f03e33c1598c301eea20efbb", size = 347967 }, + { url = "https://files.pythonhosted.org/packages/35/bd/f6b7630ba2cc06c319c3235634c582a6ab014d52311e7d7c22f9518189b5/yarl-1.18.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:9d41beda9dc97ca9ab0b9888cb71f7539124bc05df02c0cff6e5acc5a19dcc6e", size = 346397 }, + { url = "https://files.pythonhosted.org/packages/18/1a/0b4e367d5a72d1f095318344848e93ea70da728118221f84f1bf6c1e39e7/yarl-1.18.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ba23302c0c61a9999784e73809427c9dbedd79f66a13d84ad1b1943802eaaf59", size = 350206 }, + { url = "https://files.pythonhosted.org/packages/b5/cf/320fff4367341fb77809a2d8d7fe75b5d323a8e1b35710aafe41fdbf327b/yarl-1.18.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6748dbf9bfa5ba1afcc7556b71cda0d7ce5f24768043a02a58846e4a443d808d", size = 362089 }, + { url = "https://files.pythonhosted.org/packages/57/cf/aadba261d8b920253204085268bad5e8cdd86b50162fcb1b10c10834885a/yarl-1.18.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0b0cad37311123211dc91eadcb322ef4d4a66008d3e1bdc404808992260e1a0e", size = 366267 }, + { url = "https://files.pythonhosted.org/packages/54/58/fb4cadd81acdee6dafe14abeb258f876e4dd410518099ae9a35c88d8097c/yarl-1.18.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0fb2171a4486bb075316ee754c6d8382ea6eb8b399d4ec62fde2b591f879778a", size = 359141 }, + { url = "https://files.pythonhosted.org/packages/9a/7a/4c571597589da4cd5c14ed2a0b17ac56ec9ee7ee615013f74653169e702d/yarl-1.18.3-cp311-cp311-win32.whl", hash = "sha256:61b1a825a13bef4a5f10b1885245377d3cd0bf87cba068e1d9a88c2ae36880e1", size = 84402 }, + { url = "https://files.pythonhosted.org/packages/ae/7b/8600250b3d89b625f1121d897062f629883c2f45339623b69b1747ec65fa/yarl-1.18.3-cp311-cp311-win_amd64.whl", hash = 
"sha256:b9d60031cf568c627d028239693fd718025719c02c9f55df0a53e587aab951b5", size = 91030 }, + { url = "https://files.pythonhosted.org/packages/33/85/bd2e2729752ff4c77338e0102914897512e92496375e079ce0150a6dc306/yarl-1.18.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1dd4bdd05407ced96fed3d7f25dbbf88d2ffb045a0db60dbc247f5b3c5c25d50", size = 142644 }, + { url = "https://files.pythonhosted.org/packages/ff/74/1178322cc0f10288d7eefa6e4a85d8d2e28187ccab13d5b844e8b5d7c88d/yarl-1.18.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7c33dd1931a95e5d9a772d0ac5e44cac8957eaf58e3c8da8c1414de7dd27c576", size = 94962 }, + { url = "https://files.pythonhosted.org/packages/be/75/79c6acc0261e2c2ae8a1c41cf12265e91628c8c58ae91f5ff59e29c0787f/yarl-1.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b411eddcfd56a2f0cd6a384e9f4f7aa3efee14b188de13048c25b5e91f1640", size = 92795 }, + { url = "https://files.pythonhosted.org/packages/6b/32/927b2d67a412c31199e83fefdce6e645247b4fb164aa1ecb35a0f9eb2058/yarl-1.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:436c4fc0a4d66b2badc6c5fc5ef4e47bb10e4fd9bf0c79524ac719a01f3607c2", size = 332368 }, + { url = "https://files.pythonhosted.org/packages/19/e5/859fca07169d6eceeaa4fde1997c91d8abde4e9a7c018e371640c2da2b71/yarl-1.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e35ef8683211db69ffe129a25d5634319a677570ab6b2eba4afa860f54eeaf75", size = 342314 }, + { url = "https://files.pythonhosted.org/packages/08/75/76b63ccd91c9e03ab213ef27ae6add2e3400e77e5cdddf8ed2dbc36e3f21/yarl-1.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84b2deecba4a3f1a398df819151eb72d29bfeb3b69abb145a00ddc8d30094512", size = 341987 }, + { url = "https://files.pythonhosted.org/packages/1a/e1/a097d5755d3ea8479a42856f51d97eeff7a3a7160593332d98f2709b3580/yarl-1.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e5a1fea0fd4f5bfa7440a47eff01d9822a65b4488f7cff83155a0f31a2ecba", size = 336914 }, + { url = "https://files.pythonhosted.org/packages/0b/42/e1b4d0e396b7987feceebe565286c27bc085bf07d61a59508cdaf2d45e63/yarl-1.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0e883008013c0e4aef84dcfe2a0b172c4d23c2669412cf5b3371003941f72bb", size = 325765 }, + { url = "https://files.pythonhosted.org/packages/7e/18/03a5834ccc9177f97ca1bbb245b93c13e58e8225276f01eedc4cc98ab820/yarl-1.18.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a3f356548e34a70b0172d8890006c37be92995f62d95a07b4a42e90fba54272", size = 344444 }, + { url = "https://files.pythonhosted.org/packages/c8/03/a713633bdde0640b0472aa197b5b86e90fbc4c5bc05b727b714cd8a40e6d/yarl-1.18.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ccd17349166b1bee6e529b4add61727d3f55edb7babbe4069b5764c9587a8cc6", size = 340760 }, + { url = "https://files.pythonhosted.org/packages/eb/99/f6567e3f3bbad8fd101886ea0276c68ecb86a2b58be0f64077396cd4b95e/yarl-1.18.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b958ddd075ddba5b09bb0be8a6d9906d2ce933aee81100db289badbeb966f54e", size = 346484 }, + { url = "https://files.pythonhosted.org/packages/8e/a9/84717c896b2fc6cb15bd4eecd64e34a2f0a9fd6669e69170c73a8b46795a/yarl-1.18.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c7d79f7d9aabd6011004e33b22bc13056a3e3fb54794d138af57f5ee9d9032cb", size = 359864 }, + { url = 
"https://files.pythonhosted.org/packages/1e/2e/d0f5f1bef7ee93ed17e739ec8dbcb47794af891f7d165fa6014517b48169/yarl-1.18.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4891ed92157e5430874dad17b15eb1fda57627710756c27422200c52d8a4e393", size = 364537 }, + { url = "https://files.pythonhosted.org/packages/97/8a/568d07c5d4964da5b02621a517532adb8ec5ba181ad1687191fffeda0ab6/yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285", size = 357861 }, + { url = "https://files.pythonhosted.org/packages/7d/e3/924c3f64b6b3077889df9a1ece1ed8947e7b61b0a933f2ec93041990a677/yarl-1.18.3-cp312-cp312-win32.whl", hash = "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2", size = 84097 }, + { url = "https://files.pythonhosted.org/packages/34/45/0e055320daaabfc169b21ff6174567b2c910c45617b0d79c68d7ab349b02/yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477", size = 90399 }, + { url = "https://files.pythonhosted.org/packages/30/c7/c790513d5328a8390be8f47be5d52e141f78b66c6c48f48d241ca6bd5265/yarl-1.18.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:90adb47ad432332d4f0bc28f83a5963f426ce9a1a8809f5e584e704b82685dcb", size = 140789 }, + { url = "https://files.pythonhosted.org/packages/30/aa/a2f84e93554a578463e2edaaf2300faa61c8701f0898725842c704ba5444/yarl-1.18.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:913829534200eb0f789d45349e55203a091f45c37a2674678744ae52fae23efa", size = 94144 }, + { url = "https://files.pythonhosted.org/packages/c6/fc/d68d8f83714b221a85ce7866832cba36d7c04a68fa6a960b908c2c84f325/yarl-1.18.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ef9f7768395923c3039055c14334ba4d926f3baf7b776c923c93d80195624782", size = 91974 }, + { url = "https://files.pythonhosted.org/packages/56/4e/d2563d8323a7e9a414b5b25341b3942af5902a2263d36d20fb17c40411e2/yarl-1.18.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a19f62ff30117e706ebc9090b8ecc79aeb77d0b1f5ec10d2d27a12bc9f66d0", size = 333587 }, + { url = "https://files.pythonhosted.org/packages/25/c9/cfec0bc0cac8d054be223e9f2c7909d3e8442a856af9dbce7e3442a8ec8d/yarl-1.18.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e17c9361d46a4d5addf777c6dd5eab0715a7684c2f11b88c67ac37edfba6c482", size = 344386 }, + { url = "https://files.pythonhosted.org/packages/ab/5d/4c532190113b25f1364d25f4c319322e86232d69175b91f27e3ebc2caf9a/yarl-1.18.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a74a13a4c857a84a845505fd2d68e54826a2cd01935a96efb1e9d86c728e186", size = 345421 }, + { url = "https://files.pythonhosted.org/packages/23/d1/6cdd1632da013aa6ba18cee4d750d953104a5e7aac44e249d9410a972bf5/yarl-1.18.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41f7ce59d6ee7741af71d82020346af364949314ed3d87553763a2df1829cc58", size = 339384 }, + { url = "https://files.pythonhosted.org/packages/9a/c4/6b3c39bec352e441bd30f432cda6ba51681ab19bb8abe023f0d19777aad1/yarl-1.18.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f52a265001d830bc425f82ca9eabda94a64a4d753b07d623a9f2863fde532b53", size = 326689 }, + { url = "https://files.pythonhosted.org/packages/23/30/07fb088f2eefdc0aa4fc1af4e3ca4eb1a3aadd1ce7d866d74c0f124e6a85/yarl-1.18.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:82123d0c954dc58db301f5021a01854a85bf1f3bb7d12ae0c01afc414a882ca2", size = 345453 }, + { url = "https://files.pythonhosted.org/packages/63/09/d54befb48f9cd8eec43797f624ec37783a0266855f4930a91e3d5c7717f8/yarl-1.18.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2ec9bbba33b2d00999af4631a3397d1fd78290c48e2a3e52d8dd72db3a067ac8", size = 341872 }, + { url = "https://files.pythonhosted.org/packages/91/26/fd0ef9bf29dd906a84b59f0cd1281e65b0c3e08c6aa94b57f7d11f593518/yarl-1.18.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fbd6748e8ab9b41171bb95c6142faf068f5ef1511935a0aa07025438dd9a9bc1", size = 347497 }, + { url = "https://files.pythonhosted.org/packages/d9/b5/14ac7a256d0511b2ac168d50d4b7d744aea1c1aa20c79f620d1059aab8b2/yarl-1.18.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:877d209b6aebeb5b16c42cbb377f5f94d9e556626b1bfff66d7b0d115be88d0a", size = 359981 }, + { url = "https://files.pythonhosted.org/packages/ca/b3/d493221ad5cbd18bc07e642894030437e405e1413c4236dd5db6e46bcec9/yarl-1.18.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b464c4ab4bfcb41e3bfd3f1c26600d038376c2de3297760dfe064d2cb7ea8e10", size = 366229 }, + { url = "https://files.pythonhosted.org/packages/04/56/6a3e2a5d9152c56c346df9b8fb8edd2c8888b1e03f96324d457e5cf06d34/yarl-1.18.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d39d351e7faf01483cc7ff7c0213c412e38e5a340238826be7e0e4da450fdc8", size = 360383 }, + { url = "https://files.pythonhosted.org/packages/fd/b7/4b3c7c7913a278d445cc6284e59b2e62fa25e72758f888b7a7a39eb8423f/yarl-1.18.3-cp313-cp313-win32.whl", hash = "sha256:61ee62ead9b68b9123ec24bc866cbef297dd266175d53296e2db5e7f797f902d", size = 310152 }, + { url = "https://files.pythonhosted.org/packages/f5/d5/688db678e987c3e0fb17867970700b92603cadf36c56e5fb08f23e822a0c/yarl-1.18.3-cp313-cp313-win_amd64.whl", hash = "sha256:578e281c393af575879990861823ef19d66e2b1d0098414855dd367e234f5b3c", size = 315723 }, + { url = "https://files.pythonhosted.org/packages/6a/3b/fec4b08f5e88f68e56ee698a59284a73704df2e0e0b5bdf6536c86e76c76/yarl-1.18.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:61e5e68cb65ac8f547f6b5ef933f510134a6bf31bb178be428994b0cb46c2a04", size = 142780 }, + { url = "https://files.pythonhosted.org/packages/ed/85/796b0d6a22d536ec8e14bdbb86519250bad980cec450b6e299b1c2a9079e/yarl-1.18.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe57328fbc1bfd0bd0514470ac692630f3901c0ee39052ae47acd1d90a436719", size = 94981 }, + { url = "https://files.pythonhosted.org/packages/ee/0e/a830fd2238f7a29050f6dd0de748b3d6f33a7dbb67dbbc081a970b2bbbeb/yarl-1.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a440a2a624683108a1b454705ecd7afc1c3438a08e890a1513d468671d90a04e", size = 92789 }, + { url = "https://files.pythonhosted.org/packages/0f/4f/438c9fd668954779e48f08c0688ee25e0673380a21bb1e8ccc56de5b55d7/yarl-1.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c7907c8548bcd6ab860e5f513e727c53b4a714f459b084f6580b49fa1b9cee", size = 317327 }, + { url = "https://files.pythonhosted.org/packages/bd/79/a78066f06179b4ed4581186c136c12fcfb928c475cbeb23743e71a991935/yarl-1.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4f6450109834af88cb4cc5ecddfc5380ebb9c228695afc11915a0bf82116789", size = 336999 }, + { url = "https://files.pythonhosted.org/packages/55/02/527963cf65f34a06aed1e766ff9a3b3e7d0eaa1c90736b2948a62e528e1d/yarl-1.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a9ca04806f3be0ac6d558fffc2fdf8fcef767e0489d2684a21912cc4ed0cd1b8", size = 331693 }, + { url = "https://files.pythonhosted.org/packages/a2/2a/167447ae39252ba624b98b8c13c0ba35994d40d9110e8a724c83dbbb5822/yarl-1.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77a6e85b90a7641d2e07184df5557132a337f136250caafc9ccaa4a2a998ca2c", size = 321473 }, + { url = "https://files.pythonhosted.org/packages/55/03/07955fabb20082373be311c91fd78abe458bc7ff9069d34385e8bddad20e/yarl-1.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6333c5a377c8e2f5fae35e7b8f145c617b02c939d04110c76f29ee3676b5f9a5", size = 313571 }, + { url = "https://files.pythonhosted.org/packages/95/e2/67c8d3ec58a8cd8ddb1d63bd06eb7e7b91c9f148707a3eeb5a7ed87df0ef/yarl-1.18.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0b3c92fa08759dbf12b3a59579a4096ba9af8dd344d9a813fc7f5070d86bbab1", size = 325004 }, + { url = "https://files.pythonhosted.org/packages/06/43/51ceb3e427368fe6ccd9eccd162be227fd082523e02bad1fd3063daf68da/yarl-1.18.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4ac515b860c36becb81bb84b667466885096b5fc85596948548b667da3bf9f24", size = 322677 }, + { url = "https://files.pythonhosted.org/packages/e4/0e/7ef286bfb23267739a703f7b967a858e2128c10bea898de8fa027e962521/yarl-1.18.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:045b8482ce9483ada4f3f23b3774f4e1bf4f23a2d5c912ed5170f68efb053318", size = 332806 }, + { url = "https://files.pythonhosted.org/packages/c8/94/2d1f060f4bfa47c8bd0bcb652bfe71fba881564bcac06ebb6d8ced9ac3bc/yarl-1.18.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a4bb030cf46a434ec0225bddbebd4b89e6471814ca851abb8696170adb163985", size = 339919 }, + { url = "https://files.pythonhosted.org/packages/8e/8d/73b5f9a6ab69acddf1ca1d5e7bc92f50b69124512e6c26b36844531d7f23/yarl-1.18.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:54d6921f07555713b9300bee9c50fb46e57e2e639027089b1d795ecd9f7fa910", size = 340960 }, + { url = "https://files.pythonhosted.org/packages/41/13/ce6bc32be4476b60f4f8694831f49590884b2c975afcffc8d533bf2be7ec/yarl-1.18.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1d407181cfa6e70077df3377938c08012d18893f9f20e92f7d2f314a437c30b1", size = 336592 }, + { url = "https://files.pythonhosted.org/packages/81/d5/6e0460292d6299ac3919945f912b16b104f4e81ab20bf53e0872a1296daf/yarl-1.18.3-cp39-cp39-win32.whl", hash = "sha256:ac36703a585e0929b032fbaab0707b75dc12703766d0b53486eabd5139ebadd5", size = 84833 }, + { url = "https://files.pythonhosted.org/packages/b2/fc/a8aef69156ad5508165d8ae956736d55c3a68890610834bd985540966008/yarl-1.18.3-cp39-cp39-win_amd64.whl", hash = "sha256:ba87babd629f8af77f557b61e49e7c7cac36f22f871156b91e10a6e9d4f829e9", size = 90968 }, + { url = "https://files.pythonhosted.org/packages/f5/4b/a06e0ec3d155924f77835ed2d167ebd3b211a7b0853da1cf8d8414d784ef/yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b", size = 45109 }, +] + +[[package]] +name = "zipp" +version = "3.21.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/50/bad581df71744867e9468ebd0bcd6505de3b275e06f202c2cb016e3ff56f/zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4", size = 24545 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/1a/7e4798e9339adc931158c9d69ecc34f5e6791489d469f5e50ec15e35f458/zipp-3.21.0-py3-none-any.whl", 
hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931", size = 9630 }, +] + +[[package]] +name = "zope-event" +version = "5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/c2/427f1867bb96555d1d34342f1dd97f8c420966ab564d58d18469a1db8736/zope.event-5.0.tar.gz", hash = "sha256:bac440d8d9891b4068e2b5a2c5e2c9765a9df762944bda6955f96bb9b91e67cd", size = 17350 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/42/f8dbc2b9ad59e927940325a22d6d3931d630c3644dae7e2369ef5d9ba230/zope.event-5.0-py3-none-any.whl", hash = "sha256:2832e95014f4db26c47a13fdaef84cef2f4df37e66b59d8f1f4a8f319a632c26", size = 6824 }, +] + +[[package]] +name = "zope-interface" +version = "7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/30/93/9210e7606be57a2dfc6277ac97dcc864fd8d39f142ca194fdc186d596fda/zope.interface-7.2.tar.gz", hash = "sha256:8b49f1a3d1ee4cdaf5b32d2e738362c7f5e40ac8b46dd7d1a65e82a4872728fe", size = 252960 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/71/e6177f390e8daa7e75378505c5ab974e0bf59c1d3b19155638c7afbf4b2d/zope.interface-7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce290e62229964715f1011c3dbeab7a4a1e4971fd6f31324c4519464473ef9f2", size = 208243 }, + { url = "https://files.pythonhosted.org/packages/52/db/7e5f4226bef540f6d55acfd95cd105782bc6ee044d9b5587ce2c95558a5e/zope.interface-7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:05b910a5afe03256b58ab2ba6288960a2892dfeef01336dc4be6f1b9ed02ab0a", size = 208759 }, + { url = "https://files.pythonhosted.org/packages/28/ea/fdd9813c1eafd333ad92464d57a4e3a82b37ae57c19497bcffa42df673e4/zope.interface-7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:550f1c6588ecc368c9ce13c44a49b8d6b6f3ca7588873c679bd8fd88a1b557b6", size = 254922 }, + { url = "https://files.pythonhosted.org/packages/3b/d3/0000a4d497ef9fbf4f66bb6828b8d0a235e690d57c333be877bec763722f/zope.interface-7.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ef9e2f865721553c6f22a9ff97da0f0216c074bd02b25cf0d3af60ea4d6931d", size = 249367 }, + { url = "https://files.pythonhosted.org/packages/3e/e5/0b359e99084f033d413419eff23ee9c2bd33bca2ca9f4e83d11856f22d10/zope.interface-7.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27f926f0dcb058211a3bb3e0e501c69759613b17a553788b2caeb991bed3b61d", size = 254488 }, + { url = "https://files.pythonhosted.org/packages/7b/90/12d50b95f40e3b2fc0ba7f7782104093b9fd62806b13b98ef4e580f2ca61/zope.interface-7.2-cp310-cp310-win_amd64.whl", hash = "sha256:144964649eba4c5e4410bb0ee290d338e78f179cdbfd15813de1a664e7649b3b", size = 211947 }, + { url = "https://files.pythonhosted.org/packages/98/7d/2e8daf0abea7798d16a58f2f3a2bf7588872eee54ac119f99393fdd47b65/zope.interface-7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1909f52a00c8c3dcab6c4fad5d13de2285a4b3c7be063b239b8dc15ddfb73bd2", size = 208776 }, + { url = "https://files.pythonhosted.org/packages/a0/2a/0c03c7170fe61d0d371e4c7ea5b62b8cb79b095b3d630ca16719bf8b7b18/zope.interface-7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:80ecf2451596f19fd607bb09953f426588fc1e79e93f5968ecf3367550396b22", size = 209296 }, + { url = 
"https://files.pythonhosted.org/packages/49/b4/451f19448772b4a1159519033a5f72672221e623b0a1bd2b896b653943d8/zope.interface-7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:033b3923b63474800b04cba480b70f6e6243a62208071fc148354f3f89cc01b7", size = 260997 }, + { url = "https://files.pythonhosted.org/packages/65/94/5aa4461c10718062c8f8711161faf3249d6d3679c24a0b81dd6fc8ba1dd3/zope.interface-7.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a102424e28c6b47c67923a1f337ede4a4c2bba3965b01cf707978a801fc7442c", size = 255038 }, + { url = "https://files.pythonhosted.org/packages/9f/aa/1a28c02815fe1ca282b54f6705b9ddba20328fabdc37b8cf73fc06b172f0/zope.interface-7.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25e6a61dcb184453bb00eafa733169ab6d903e46f5c2ace4ad275386f9ab327a", size = 259806 }, + { url = "https://files.pythonhosted.org/packages/a7/2c/82028f121d27c7e68632347fe04f4a6e0466e77bb36e104c8b074f3d7d7b/zope.interface-7.2-cp311-cp311-win_amd64.whl", hash = "sha256:3f6771d1647b1fc543d37640b45c06b34832a943c80d1db214a37c31161a93f1", size = 212305 }, + { url = "https://files.pythonhosted.org/packages/68/0b/c7516bc3bad144c2496f355e35bd699443b82e9437aa02d9867653203b4a/zope.interface-7.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:086ee2f51eaef1e4a52bd7d3111a0404081dadae87f84c0ad4ce2649d4f708b7", size = 208959 }, + { url = "https://files.pythonhosted.org/packages/a2/e9/1463036df1f78ff8c45a02642a7bf6931ae4a38a4acd6a8e07c128e387a7/zope.interface-7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:21328fcc9d5b80768bf051faa35ab98fb979080c18e6f84ab3f27ce703bce465", size = 209357 }, + { url = "https://files.pythonhosted.org/packages/07/a8/106ca4c2add440728e382f1b16c7d886563602487bdd90004788d45eb310/zope.interface-7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6dd02ec01f4468da0f234da9d9c8545c5412fef80bc590cc51d8dd084138a89", size = 264235 }, + { url = "https://files.pythonhosted.org/packages/fc/ca/57286866285f4b8a4634c12ca1957c24bdac06eae28fd4a3a578e30cf906/zope.interface-7.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e7da17f53e25d1a3bde5da4601e026adc9e8071f9f6f936d0fe3fe84ace6d54", size = 259253 }, + { url = "https://files.pythonhosted.org/packages/96/08/2103587ebc989b455cf05e858e7fbdfeedfc3373358320e9c513428290b1/zope.interface-7.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cab15ff4832580aa440dc9790b8a6128abd0b88b7ee4dd56abacbc52f212209d", size = 264702 }, + { url = "https://files.pythonhosted.org/packages/5f/c7/3c67562e03b3752ba4ab6b23355f15a58ac2d023a6ef763caaca430f91f2/zope.interface-7.2-cp312-cp312-win_amd64.whl", hash = "sha256:29caad142a2355ce7cfea48725aa8bcf0067e2b5cc63fcf5cd9f97ad12d6afb5", size = 212466 }, + { url = "https://files.pythonhosted.org/packages/c6/3b/e309d731712c1a1866d61b5356a069dd44e5b01e394b6cb49848fa2efbff/zope.interface-7.2-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:3e0350b51e88658d5ad126c6a57502b19d5f559f6cb0a628e3dc90442b53dd98", size = 208961 }, + { url = "https://files.pythonhosted.org/packages/49/65/78e7cebca6be07c8fc4032bfbb123e500d60efdf7b86727bb8a071992108/zope.interface-7.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15398c000c094b8855d7d74f4fdc9e73aa02d4d0d5c775acdef98cdb1119768d", size = 209356 }, + { url = 
"https://files.pythonhosted.org/packages/11/b1/627384b745310d082d29e3695db5f5a9188186676912c14b61a78bbc6afe/zope.interface-7.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:802176a9f99bd8cc276dcd3b8512808716492f6f557c11196d42e26c01a69a4c", size = 264196 }, + { url = "https://files.pythonhosted.org/packages/b8/f6/54548df6dc73e30ac6c8a7ff1da73ac9007ba38f866397091d5a82237bd3/zope.interface-7.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb23f58a446a7f09db85eda09521a498e109f137b85fb278edb2e34841055398", size = 259237 }, + { url = "https://files.pythonhosted.org/packages/b6/66/ac05b741c2129fdf668b85631d2268421c5cd1a9ff99be1674371139d665/zope.interface-7.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a71a5b541078d0ebe373a81a3b7e71432c61d12e660f1d67896ca62d9628045b", size = 264696 }, + { url = "https://files.pythonhosted.org/packages/0a/2f/1bccc6f4cc882662162a1158cda1a7f616add2ffe322b28c99cb031b4ffc/zope.interface-7.2-cp313-cp313-win_amd64.whl", hash = "sha256:4893395d5dd2ba655c38ceb13014fd65667740f09fa5bb01caa1e6284e48c0cd", size = 212472 }, + { url = "https://files.pythonhosted.org/packages/8c/2c/1f49dc8b4843c4f0848d8e43191aed312bad946a1563d1bf9e46cf2816ee/zope.interface-7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7bd449c306ba006c65799ea7912adbbfed071089461a19091a228998b82b1fdb", size = 208349 }, + { url = "https://files.pythonhosted.org/packages/ed/7d/83ddbfc8424c69579a90fc8edc2b797223da2a8083a94d8dfa0e374c5ed4/zope.interface-7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a19a6cc9c6ce4b1e7e3d319a473cf0ee989cbbe2b39201d7c19e214d2dfb80c7", size = 208799 }, + { url = "https://files.pythonhosted.org/packages/36/22/b1abd91854c1be03f5542fe092e6a745096d2eca7704d69432e119100583/zope.interface-7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cd1790b48c16db85d51fbbd12d20949d7339ad84fd971427cf00d990c1f137", size = 254267 }, + { url = "https://files.pythonhosted.org/packages/2a/dd/fcd313ee216ad0739ae00e6126bc22a0af62a74f76a9ca668d16cd276222/zope.interface-7.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52e446f9955195440e787596dccd1411f543743c359eeb26e9b2c02b077b0519", size = 248614 }, + { url = "https://files.pythonhosted.org/packages/88/d4/4ba1569b856870527cec4bf22b91fe704b81a3c1a451b2ccf234e9e0666f/zope.interface-7.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad9913fd858274db8dd867012ebe544ef18d218f6f7d1e3c3e6d98000f14b75", size = 253800 }, + { url = "https://files.pythonhosted.org/packages/69/da/c9cfb384c18bd3a26d9fc6a9b5f32ccea49ae09444f097eaa5ca9814aff9/zope.interface-7.2-cp39-cp39-win_amd64.whl", hash = "sha256:1090c60116b3da3bfdd0c03406e2f14a1ff53e5771aebe33fec1edc0a350175d", size = 211980 }, +] diff --git a/packages/syftcli/manifest.yml b/packages/syftcli/manifest.yml index 3e62eed9305..1636fdf08b0 100644 --- a/packages/syftcli/manifest.yml +++ b/packages/syftcli/manifest.yml @@ -1,33 +1,20 @@ manifestVersion: 1.0 -syftVersion: 0.8.6-beta.1 -dockerTag: 0.8.6-beta.1 +syftVersion: 0.9.6-beta.6 +dockerTag: 0.9.6-beta.6 images: - - docker.io/openmined/grid-frontend:0.8.6-beta.1 - - docker.io/openmined/grid-backend:0.8.6-beta.1 - - docker.io/library/mongo:7.0.4 - - docker.io/traefik:v2.10 + - docker.io/openmined/syft-frontend:0.9.6-beta.6 + - 
docker.io/openmined/syft-backend:0.9.6-beta.6 + - docker.io/library/postgres:16.1 + - docker.io/traefik:v2.11.0 configFiles: docker: - packages/grid/default.env - - packages/grid/docker-compose.build.yml - - packages/grid/docker-compose.dev.yml - - packages/grid/docker-compose.pull.yml - - packages/grid/docker-compose.test.yml - - packages/grid/docker-compose.tls.yml - - packages/grid/docker-compose.yml - packages/grid/traefik/docker/dynamic-tls.yml - packages/grid/traefik/docker/dynamic.yml - packages/grid/traefik/docker/traefik-tls.template.yml - packages/grid/traefik/docker/traefik.yml k8s: - packages/grid/devspace.yaml - - packages/grid/traefik/k8s/dynamic.yml - - packages/grid/traefik/k8s/traefik.yml - podman: - - packages/grid/podman/podman-kube/podman-syft-kube-config.yaml - - packages/grid/podman/podman-kube/podman-syft-kube.yaml - - packages/grid/podman/podman-kube/traefik/conf/dynamic.yml - - packages/grid/podman/podman-kube/traefik/traefik.yml diff --git a/packages/syftcli/setup.py b/packages/syftcli/setup.py index 61a4ec2a424..7275830e471 100644 --- a/packages/syftcli/setup.py +++ b/packages/syftcli/setup.py @@ -5,7 +5,7 @@ __version__ = "0.1.11" packages = [ - "requests==2.31.0", + "requests==2.32.3", "pyyaml==6.0.1", "packaging==21.3", "typer[all]==0.9.0", diff --git a/releases.md b/releases.md new file mode 100644 index 00000000000..6a234f74a0c --- /dev/null +++ b/releases.md @@ -0,0 +1,36 @@ +# Releases + +:exclamation: PySyft and Syft Server must use the same `version`. + +### Latest Stable + +- `0.9.5` (Stable) - Docs +- Install PySyft (Stable): `pip install -U syft` + +### Latest Beta + +- `0.9.6` (Beta) - `dev` branch 👈🏽 +- Install PySyft (Beta): `pip install -U syft --pre` + +### Supported versions + +- `0.9.3` - API +- `0.9.2` - API + +**Deprecated**: + +- `0.9.1` - API +- `0.9.0` - API +- `0.8.8` - API +- `0.8.7` - API +- `0.8.6` - API +- `0.8.5-post.2` - API +- `0.8.4` - API +- `0.8.3` - API +- `0.8.2` - API +- `0.8.1` - API +- `0.8.0` - API +- `0.7.0` - Course 3 Updated +- `0.6.0` - Course 3 +- `0.5.1` - Course 2 + M1 Hotfix +- `0.2.0` - `0.5.0` diff --git a/ruff.toml b/ruff.toml index 6d8e8a2f93a..bdf2c46b9cf 100644 --- a/ruff.toml +++ b/ruff.toml @@ -14,6 +14,7 @@ select = [ "F", # pyflake "B", # flake8-bugbear "C4", # flake8-comprehensions + # "PERF", # perflint "UP", # pyupgrade ] ignore = [ @@ -23,6 +24,7 @@ ignore = [ [lint.per-file-ignores] "*.ipynb" = ["E402"] +"__init__.py" = ["F401"] [lint.pycodestyle] max-line-length = 120 diff --git a/scripts/aa_demo/update_domain.sh b/scripts/aa_demo/update_domain.sh deleted file mode 100644 index 6efa4106b23..00000000000 --- a/scripts/aa_demo/update_domain.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# $1 domain ip -# $2 dataset url -# install syft in dev mode -pip install -U -e packages/syft - -# get domain name -NODE_NAME="$(sudo docker ps --format '{{.Names}}' | grep "celery" |rev | cut -c 16- | rev)" -echo "Domain Name: ${NODE_NAME}" -echo "Nuking Domain .. >:)"; - -# destroy current domain -hagrid land all -echo "Launching Domain .. hahaha >:)"; - -# re-launch domain -hagrid launch ${NODE_NAME} to docker:80 --dev --build-src="model_training_tests" - -# wait for domain to be up -hagrid check --timeout=120 - -echo "Domain lauch succeeded." -echo "Starting to upload dataset" - -# upload dataset -python scripts/aa_demo/upload_dataset.py $1 $2 - -echo "Upload dataset script complete." 
diff --git a/scripts/build_images.sh b/scripts/build_images.sh index da58203e08b..bf7ad7e1823 100644 --- a/scripts/build_images.sh +++ b/scripts/build_images.sh @@ -3,6 +3,6 @@ REGISTRY=${1:-"k3d-registry.localhost:5800"} TAG=${2:-"latest"} -docker image build -f ./packages/grid/backend/backend.dockerfile --target backend -t $REGISTRY/openmined/grid-backend:$TAG ./packages -docker image build -f ./packages/grid/frontend/frontend.dockerfile --target grid-ui-development -t $REGISTRY/openmined/grid-frontend:$TAG ./packages/grid/frontend -docker image build -f ./packages/grid/seaweedfs/seaweedfs.dockerfile --build-arg SEAWEEDFS_VERSION=3.59 -t $REGISTRY/openmined/grid-seaweedfs:$TAG ./packages/grid/seaweedfs +docker image build -f ./packages/grid/backend/backend.dockerfile --target backend -t $REGISTRY/openmined/syft-backend:$TAG ./packages +docker image build -f ./packages/grid/frontend/frontend.dockerfile --target syft-ui-development -t $REGISTRY/openmined/syft-frontend:$TAG ./packages/grid/frontend +docker image build -f ./packages/grid/seaweedfs/seaweedfs.dockerfile -t $REGISTRY/openmined/syft-seaweedfs:$TAG ./packages/grid/seaweedfs diff --git a/scripts/container_log_collector.py b/scripts/container_log_collector.py index f3c3743155e..d16f65d8f54 100644 --- a/scripts/container_log_collector.py +++ b/scripts/container_log_collector.py @@ -9,7 +9,7 @@ log_path.mkdir(exist_ok=True) # Get the github job name and create a directory for it -job_name = os.getenv("GITHUB_JOB") +job_name = os.getenv("GITHUB_JOB", "") job_path: PosixPath = log_path / job_name job_path.mkdir(exist_ok=True) diff --git a/scripts/create_syftcli_config.py b/scripts/create_syftcli_config.py index 547cd469aa6..255c23ec705 100644 --- a/scripts/create_syftcli_config.py +++ b/scripts/create_syftcli_config.py @@ -11,7 +11,6 @@ PREFIX_PATHS = { "k8s": "packages/grid/", "docker": "packages/grid/", - "podman": "packages/grid/podman/podman-kube/", } @@ -36,7 +35,7 @@ def create_tar(key): if __name__ == "__main__": - for config in ("docker", "podman"): + for config in ("docker",): print("Generating config for", config) create_tar(config) print() diff --git a/scripts/dev_tools.sh b/scripts/dev_tools.sh index 763e602cd28..c23b56c05b9 100755 --- a/scripts/dev_tools.sh +++ b/scripts/dev_tools.sh @@ -23,15 +23,13 @@ function docker_list_exposed_ports() { if [[ -z "$1" ]]; then # list db, redis, rabbitmq, and seaweedfs ports - docker_list_exposed_ports "db\|redis\|queue\|seaweedfs\|jaeger\|mongo" + docker_list_exposed_ports "db\|seaweedfs" else PORT=$1 if docker ps | grep ":${PORT}" | grep -q 'redis'; then ${command} redis://127.0.0.1:${PORT} elif docker ps | grep ":${PORT}" | grep -q 'postgres'; then ${command} postgresql://postgres:changethis@127.0.0.1:${PORT}/app - elif docker ps | grep ":${PORT}" | grep -q 'mongo'; then - ${command} mongodb://root:example@127.0.0.1:${PORT} else ${command} http://localhost:${PORT} fi diff --git a/scripts/display_credentials.sh b/scripts/display_credentials.sh new file mode 100755 index 00000000000..b4174655dbe --- /dev/null +++ b/scripts/display_credentials.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Construct variable names based on the cluster name +EMAIL_VAR="SYFT_LOGIN_${CLUSTER_NAME//[^a-zA-Z0-9_]/_}_ROOT_EMAIL" +PWD_VAR="SYFT_LOGIN_${CLUSTER_NAME//[^a-zA-Z0-9_]/_}_PASSWORD" + +# Default CLIENT_NAME is "client" +CLIENT_NAME="client" + +# Determine CLIENT_NAME based on CLUSTER_NAME +if [[ "$CLUSTER_NAME" == *"high"* ]]; then + CLIENT_NAME="high_client" +elif [[ "$CLUSTER_NAME" == *"low"* ]]; then + 
CLIENT_NAME="low_client" +fi + +# Retrieve values from the constructed variable names +CLIENT_EMAIL="${!EMAIL_VAR}" +CLIENT_PWD="${!PWD_VAR}" + +# Check if CLIENT_EMAIL or CLIENT_PWD are empty and provide a warning if needed +if [[ -z "$CLIENT_EMAIL" || -z "$CLIENT_PWD" ]]; then + echo "Warning: CLIENT_EMAIL or CLIENT_PWD is empty. Please check the environment variables." +fi + +# Output the formatted command +echo "\ +To login to the Syft backend, copy and run the following command in Jupyter: + +import syft as sy +$CLIENT_NAME = sy.login( + email=\"${CLIENT_EMAIL}\", + password=\"${CLIENT_PWD}\", + port=\"${CLUSTER_HTTP_PORT}\" +)" diff --git a/scripts/flush_queue.sh b/scripts/flush_queue.sh deleted file mode 100755 index fbb2914bc33..00000000000 --- a/scripts/flush_queue.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -docker ps --format '{{.Names}}' | grep "celeryworker" | xargs -I '{}' docker exec -i {} python -c "from grid.core.celery_app import celery_app; celery_app.control.purge();print('Tasks Cleared')" diff --git a/scripts/generate_canonical_names.py b/scripts/generate_canonical_names.py new file mode 100644 index 00000000000..1744c67db15 --- /dev/null +++ b/scripts/generate_canonical_names.py @@ -0,0 +1,126 @@ +# stdlib +import ast +import inspect +import os + +# If variable is set, search for all serializable classes without canonical names +os.environ["SYFT_SEARCH_MISSING_CANONICAL_NAME"] = "true" + +# syft absolute +# NOTE import has to happen after setting the environment variable + +# relative +from ..serde.recursive import SYFT_CLASSES_MISSING_CANONICAL_NAME # noqa: E402 +from ..types.syft_object_registry import SyftObjectRegistry # noqa: E402 + + +class DecoratorFinder(ast.NodeVisitor): + def __init__(self, class_name: str, decorator_name: str): + self.class_name = class_name + self.decorator_name = decorator_name + self.decorator: ast.Call | None = None + + def visit_ClassDef(self, node: ast.ClassDef) -> None: + if node.name == self.class_name: + for decorator in node.decorator_list: + if ( + isinstance(decorator, ast.Call) + and getattr(decorator.func, "id", None) == self.decorator_name + ): + self.decorator = decorator + self.generic_visit(node) + + +def get_class_file_path(cls: type) -> str: + return inspect.getfile(cls) + + +def get_decorator_with_lines( + file_path: str, class_name: str, decorator_name: str +) -> tuple[ast.Call | None, int | None, int | None]: + with open(file_path) as source: + tree = ast.parse(source.read()) + + finder = DecoratorFinder(class_name, decorator_name) + finder.visit(tree) + + if finder.decorator: + start_line = finder.decorator.lineno - 1 + end_line = ( + finder.decorator.end_lineno + if hasattr(finder.decorator, "end_lineno") + else finder.decorator.lineno + ) + return finder.decorator, start_line, end_line + return None, None, None + + +def add_canonical_name_version(decorator: ast.Call, class_name: str) -> ast.Call: + new_decorator = decorator + + canonical_name_exists = any( + kw.arg == "canonical_name" for kw in new_decorator.keywords + ) + version_exists = any(kw.arg == "version" for kw in new_decorator.keywords) + + if not canonical_name_exists: + new_decorator.keywords.append( + ast.keyword(arg="canonical_name", value=ast.Constant(value=class_name)) + ) + if not version_exists: + new_decorator.keywords.append( + ast.keyword(arg="version", value=ast.Constant(value=1)) + ) + + return ast.copy_location(new_decorator, decorator) + + +def update_decorator_for_cls( + cls: type, existing_canonical_names: list[str] +) -> str | None: + 
file_path = inspect.getfile(cls) + class_name = cls.__name__ + + decorator, start_line, end_line = get_decorator_with_lines( + file_path, class_name, "serializable" + ) + + if decorator is None: + print( + f"{cls.__module__}: Could not find decorator for class {class_name}. Did not update canonical name." + ) + return None + if start_line is None or end_line is None: + print( + f"{cls.__module__}: No start/end lines for decorator in class {class_name}. Did not update canonical name." + ) + return None + + if class_name in existing_canonical_names: + print( + f"{cls.__module__}: {class_name} is already a registered canonical name. Did not update canonical name." + ) + return None + + new_decorator = add_canonical_name_version(decorator, class_name) + new_decorator_code = ast.unparse(new_decorator).split("\n") + new_decorator_code[0] = "@" + new_decorator_code[0] + + with open(file_path) as file: + lines = file.readlines() + + lines[start_line:end_line] = [line + "\n" for line in new_decorator_code] + + with open(file_path, "w") as file: + file.writelines(lines) + + print(f"Updated {cls.__module__}.{cls.__name__}") + return class_name + + +def update_canonical_names(): + existing_cnames = list(SyftObjectRegistry.__object_serialization_registry__.keys()) + for cls in SYFT_CLASSES_MISSING_CANONICAL_NAME: + new_name = update_decorator_for_cls(cls, existing_cnames) + if new_name: + existing_cnames.append(new_name) diff --git a/scripts/get_k8s_secret_ci.sh b/scripts/get_k8s_secret_ci.sh old mode 100644 new mode 100755 index 965e8ff0896..d58a7a13985 --- a/scripts/get_k8s_secret_ci.sh +++ b/scripts/get_k8s_secret_ci.sh @@ -1,6 +1,50 @@ #!/bin/bash -export SYFT_LOGIN_testgateway1_PASSWORD=$(kubectl --context=k3d-testgateway1 get secret backend-secret -n syft \ - -o jsonpath='{.data.defaultRootPassword}' | base64 --decode) -export SYFT_LOGIN_testdomain1_PASSWORD=$(kubectl get --context=k3d-testdomain1 secret backend-secret -n syft \ +# Ensure CLUSTER_NAME is set +if [ -z "$CLUSTER_NAME" ]; then + echo "CLUSTER_NAME is not set. Please set it before running the script." + exit 1 +fi + +# Get the password from the secret and decode it +SYFT_PASSWORD=$(kubectl --context=k3d-$CLUSTER_NAME get secret backend-secret -n syft \ -o jsonpath='{.data.defaultRootPassword}' | base64 --decode) + +# Check if the command was successful +if [ $? -ne 0 ]; then + echo "Failed to retrieve or decode the secret from the cluster." + exit 1 +fi + + +# Get the name of the backend pod (assuming there's only one, or picking the first one) +BACKEND_POD=$(kubectl --context=k3d-$CLUSTER_NAME get pods -n syft -l app.kubernetes.io/component=backend -o jsonpath='{.items[0].metadata.name}') + +# Check if we successfully retrieved the pod name +if [ -z "$BACKEND_POD" ]; then + echo "Failed to find the backend pod." + exit 1 +fi + +# Get the root email from the environment variables of the backend pod +SYFT_ROOT_EMAIL=$(kubectl --context=k3d-$CLUSTER_NAME exec "$BACKEND_POD" -n syft \ + -- printenv DEFAULT_ROOT_EMAIL) + +# Check if the command was successful +if [ $? -ne 0 ] || [ -z "$SYFT_ROOT_EMAIL" ]; then + echo "Failed to retrieve the root email from the backend pod." + exit 1 +fi + + + +# Export the root email as an environment variable +export SYFT_LOGIN_${CLUSTER_NAME//[^a-zA-Z0-9_]/_}_ROOT_EMAIL="$SYFT_ROOT_EMAIL" + +# Export the password as an environment variable +export SYFT_LOGIN_${CLUSTER_NAME//[^a-zA-Z0-9_]/_}_PASSWORD="$SYFT_PASSWORD" + + +echo "Credentials successfully exported as environment variables." 
+echo "SYFT_LOGIN_${CLUSTER_NAME//[^a-zA-Z0-9_]/_}_ROOT_EMAIL=${SYFT_ROOT_EMAIL}" +echo "SYFT_LOGIN_${CLUSTER_NAME//[^a-zA-Z0-9_]/_}_PASSWORD=${SYFT_PASSWORD}" diff --git a/scripts/hagrid_hash b/scripts/hagrid_hash deleted file mode 100644 index 63aec8b1bad..00000000000 --- a/scripts/hagrid_hash +++ /dev/null @@ -1 +0,0 @@ -4b25e83ff10f7d5923ba9b723d949a6d diff --git a/scripts/k8s/delete_stack.sh b/scripts/k8s/delete_stack.sh deleted file mode 100755 index 86d0a1ce176..00000000000 --- a/scripts/k8s/delete_stack.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# Deleting gateway node -bash -c "CLUSTER_NAME=testgateway1 tox -e dev.k8s.destroy || true" - -# Deleting domain node -bash -c "CLUSTER_NAME=testdomain1 tox -e dev.k8s.destroy || true" \ No newline at end of file diff --git a/scripts/k8s/launch_domain.sh b/scripts/k8s/launch_domain.sh deleted file mode 100755 index d39f45744d3..00000000000 --- a/scripts/k8s/launch_domain.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# Domain Node -bash -c '\ - export CLUSTER_NAME=testdomain1 CLUSTER_HTTP_PORT=9082 && \ - tox -e dev.k8s.start && \ - tox -e dev.k8s.hotreload' \ No newline at end of file diff --git a/scripts/k8s/launch_gateway.sh b/scripts/k8s/launch_gateway.sh deleted file mode 100755 index 792a0885ae4..00000000000 --- a/scripts/k8s/launch_gateway.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# Gateway Node -bash -c '\ - export CLUSTER_NAME=testgateway1 CLUSTER_HTTP_PORT=9081 DEVSPACE_PROFILE=gateway && \ - tox -e dev.k8s.start && \ - tox -e dev.k8s.hotreload' \ No newline at end of file diff --git a/scripts/latest_pypi_version.py b/scripts/latest_pypi_version.py new file mode 100644 index 00000000000..3ea3770217d --- /dev/null +++ b/scripts/latest_pypi_version.py @@ -0,0 +1,14 @@ +# third party +import requests + +PROJECT_NAME = "syft" +PYPI_JSON_URL = f"https://pypi.org/pypi/{PROJECT_NAME}/json" + + +def get_latest_pypi_version(): + response = requests.get(PYPI_JSON_URL) + data = response.json() + return data["info"]["version"] + + +print(get_latest_pypi_version()) diff --git a/scripts/patch_hosts.py b/scripts/patch_hosts.py index 56dc0f25185..6dd72c40eb9 100644 --- a/scripts/patch_hosts.py +++ b/scripts/patch_hosts.py @@ -60,29 +60,31 @@ def path(self) -> Path: def read(self) -> str: return self.path.read_text() - def get(self, domain: str) -> list[str]: - return re.findall(f"(.+)\s+{domain}", self.content) + def get(self, datasite: str) -> list[str]: + return re.findall(f"(.+)\s+{datasite}", self.content) - def add(self, ip: str, domain: str) -> None: - if self.get(domain): + def add(self, ip: str, datasite: str) -> None: + if self.get(datasite): return - self.content = self.content.rstrip() + f"\n{ip}\t{domain}" + self.content = self.content.rstrip() + f"\n{ip}\t{datasite}" self.__write() - def remove(self, domain: str) -> None: - if not self.get(domain): + def remove(self, datasite: str) -> None: + if not self.get(datasite): return - self.content = re.sub(f"(.+)\s+{domain}\n", "", self.content) + self.content = re.sub(f"(.+)\s+{datasite}\n", "", self.content) self.__write() - def update(self, ip: str, domain: str) -> None: - if not self.get(domain): - self.add(ip, domain) + def update(self, ip: str, datasite: str) -> None: + if not self.get(datasite): + self.add(ip, datasite) # inplace - self.content = re.sub(f"(.+)\s+{domain}\n", f"{ip}\t{domain}\n", self.content) + self.content = re.sub( + f"(.+)\s+{datasite}\n", f"{ip}\t{datasite}\n", self.content + ) self.__write() def __write(self) -> None: @@ -128,7 +130,7 @@ def 
main(): nargs=2, action="append", default=[], - metavar=("IP", "DOMAIN"), + metavar=("IP", "DATASITE"), help="Add entry to hosts file", ) parser.add_argument( @@ -178,9 +180,9 @@ def main(): print(">> Hosts file:", hosts.path) if len(args.add): - for ip, domain in args.add: - print(f">> Adding {ip} {domain}") - hosts.update(ip, domain) + for ip, datasite in args.add: + print(f">> Adding {ip} {datasite}") + hosts.update(ip, datasite) if args.add_k3d_registry: print(">> Adding k3d registry host entry") diff --git a/scripts/reset_k8s.sh b/scripts/reset_k8s.sh new file mode 100755 index 00000000000..68c317a6232 --- /dev/null +++ b/scripts/reset_k8s.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +KUBECTL_ARGS="$@" +NAMESPACE="syft" +POSTGRES_POD_NAME="postgres-0" + +# if kubectl args doesn't have a namespace, add it +if [[ ! "$KUBECTL_ARGS" =~ (-n|--namespace) ]]; then + KUBECTL_ARGS="$KUBECTL_ARGS --namespace $NAMESPACE" +fi + +# SQL commands to reset all tables +RESET_COMMAND=" +DO \$\$ +DECLARE + r RECORD; +BEGIN + -- Disable all triggers + SET session_replication_role = 'replica'; + + -- Truncate all tables in the current schema + FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = current_schema()) LOOP + EXECUTE 'TRUNCATE TABLE ' || quote_ident(r.tablename) || ' CASCADE'; + END LOOP; + + -- Re-enable all triggers + SET session_replication_role = 'origin'; +END \$\$; + +-- Reset all sequences +DO \$\$ +DECLARE + r RECORD; +BEGIN + FOR r IN (SELECT sequence_name FROM information_schema.sequences WHERE sequence_schema = current_schema()) LOOP + EXECUTE 'ALTER SEQUENCE ' || quote_ident(r.sequence_name) || ' RESTART WITH 1'; + END LOOP; +END \$\$; +" + +# Execute the SQL commands +echo ">>> Resetting database '$POSTGRES_POD_NAME'. psql output:" +kubectl exec $KUBECTL_ARGS -i $POSTGRES_POD_NAME -- psql -U syft_postgres -d syftdb_postgres << EOF +$RESET_COMMAND +EOF + +# Deleting StatefulSets that end with -pool +POOL_STATEFULSETS=$(kubectl get statefulsets $KUBECTL_ARGS -o jsonpath="{.items[*].metadata.name}" | tr ' ' '\n' | grep -E ".*-pool$") +if [ -n "$POOL_STATEFULSETS" ]; then + echo ">>> Deleting '$POOL_STATEFULSETS'" + for STATEFULSET in $POOL_STATEFULSETS; do + kubectl delete statefulsets $KUBECTL_ARGS $STATEFULSET + kubectl delete pods $KUBECTL_ARGS -l "app.kubernetes.io/component=$STATEFULSET" --grace-period=0 --force + done +fi + +# Resetting the backend pod +BACKEND_POD=$(kubectl get pods $KUBECTL_ARGS -o jsonpath="{.items[*].metadata.name}" | tr ' ' '\n' | grep -E ".*backend.*") +if [ -n "$BACKEND_POD" ]; then + echo ">>> Re-creating '$BACKEND_POD'" + kubectl delete pod $KUBECTL_ARGS $BACKEND_POD --grace-period=0 --force + + # wait for backend to come back up + echo ">>> Waiting for '$BACKEND_POD' to be ready..." 
+ export WAIT_TIME=5 + bash packages/grid/scripts/wait_for.sh service backend $KUBECTL_ARGS > /dev/null +fi + +echo ">>> Done" diff --git a/scripts/reset_mongo.sh b/scripts/reset_mongo.sh deleted file mode 100755 index ac1641f68e4..00000000000 --- a/scripts/reset_mongo.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -# WARNING: this will drop the app database in all your mongo dbs -echo $1 - -if [ -z $1 ]; then - MONGO_CONTAINER_NAME=$(docker ps --format '{{.Names}}' | grep -m 1 mongo) -else - MONGO_CONTAINER_NAME=$1 -fi - -DROPCMD="<&1 \ No newline at end of file diff --git a/scripts/reset_network.sh b/scripts/reset_network.sh deleted file mode 100755 index ce5f863ff14..00000000000 --- a/scripts/reset_network.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -MONGO_CONTAINER_NAME=$(docker ps --format '{{.Names}}' | grep -m 1 mongo) -DROPCMD="<&1 - -# flush the worker queue -. ${BASH_SOURCE%/*}/flush_queue.sh - -# reset docker service to clear out weird network issues -sudo service docker restart - -# make sure all containers start -. ${BASH_SOURCE%/*}/../packages/grid/scripts/containers.sh diff --git a/scripts/staging.json b/scripts/staging.json deleted file mode 100644 index f3b648cff6f..00000000000 --- a/scripts/staging.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "domain-staging-dev": { - "name": "domain-staging-dev", - "node_type": "domain", - "ip": "40.78.86.246", - "metadata_endpoint": "/api/v1/new/metadata", - "branch": "dev", - "commit_hash": "6bd3bea303107f8eef9ad0e43c85006161835fc9" - } -} diff --git a/scripts/staging.py b/scripts/staging.py deleted file mode 100644 index e158c72b30d..00000000000 --- a/scripts/staging.py +++ /dev/null @@ -1,201 +0,0 @@ -# stdlib -import json -import os -import subprocess -from typing import Any - -# third party -import git -import requests - -DEV_MODE = False -KEY = None -JSON_DATA = os.path.dirname(__file__) + "/staging.json" - - -def run_hagrid(node: dict) -> int: - name = node["name"] - node_type = node["node_type"] - ip = node["ip"] - branch = node["branch"] - cmd = ( - f"hagrid launch {name} {node_type} to {ip} --username=azureuser --auth-type=key " - f"--key-path={KEY} --repo=OpenMined/PySyft --branch={branch} --verbose" - ) - watch_shell(cmd) - - -def watch_shell(command: str) -> None: - process = subprocess.Popen( - command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT - ) # nosec - while True: - output = process.stdout.readline().decode() - if output == "" and process.poll() is not None: - break - if output: - print(output.strip()) - rc = process.poll() - return rc - - -def shell(command: str) -> str: - try: - output = subprocess.check_output( # nosec - command, shell=True, stderr=subprocess.STDOUT - ) - except Exception: - output = b"" - return output.decode("utf-8").strip() - - -def metadata_url(node: dict) -> str: - ip = node["ip"] - endpoint = node["metadata_endpoint"] - return f"http://{ip}{endpoint}" - - -def check_metadata(node: dict) -> dict | None: - try: - res = requests.get(metadata_url(node)) - if res.status_code != 200: - print(f"Got status_code: {res.status_code}") - metadata = res.json() - name = node["name"] - print(f"{name} syft_version: ", metadata["syft_version"]) - return metadata - except Exception as e: - print(f"Failed to get metadata. 
{e}") - return None - - -def process_node(node: dict[str, Any]) -> tuple[bool, str]: - repo_hash = get_repo_checkout(node) - metadata = check_metadata(node) - hash_string = check_remote_hash(node) - redeploy = False - if metadata is None or hash_string is None: - print(f"redeploy because metadata: {metadata} and remote hash: {hash_string}") - redeploy = True - - if hash_string is not None and repo_hash != hash_string: - print("repo_hash", len(repo_hash), type(repo_hash)) - print("hash_string", len(hash_string), type(hash_string)) - print( - f"redeploy because repo_hash: {repo_hash} != remote hash_string: {hash_string}" - ) - redeploy = True - - if redeploy: - print("🔧 Redeploying with HAGrid") - run_hagrid(node) - - hash_string = check_remote_hash(node) - if hash_string is None: - print(f"Cant get hash: {hash_string}") - - if hash_string is not None and hash_string != repo_hash: - print( - f"Hash doesnt match repo_hash: {repo_hash} != remote hash_string {hash_string}" - ) - - metadata = check_metadata(node) - if metadata is None: - print(f"Cant get metadata: {metadata}") - - if metadata and hash_string == repo_hash: - return True, repo_hash - return False, repo_hash - - -def get_repo_checkout(node: dict) -> str: - try: - branch = node["branch"] - repo_path = f"/tmp/{branch}/PySyft" - if not os.path.exists(repo_path): - os.makedirs(repo_path, exist_ok=True) - repo = git.Repo.clone_from( - "https://github.com/OpenMined/pysyft", - repo_path, - single_branch=True, - b=branch, - ) - else: - repo = git.Repo(repo_path) - if repo.is_dirty(): - repo.git.reset("--hard") - repo.git.checkout(branch) - repo.remotes.origin.pull() - sha = repo.head.commit.hexsha - return sha - except Exception as e: - print(f"Failed to get branch HEAD commit hash. {e}") - raise e - - -def run_remote_shell(node: dict, cmd: str) -> str | None: - try: - ip = node["ip"] - ssh_cmd = ( - f"ssh -o LogLevel=ERROR -o StrictHostKeyChecking=no -i {KEY} azureuser@{ip}" - ) - shell_cmd = f'{ssh_cmd} "{cmd}"' - print("Running:", shell_cmd) - return shell(shell_cmd) - except Exception: - print("Failed to run ssh command: {}") - return None - - -def check_remote_hash(node: dict) -> str | None: - cmd = "sudo runuser -l om -c 'cd /home/om/PySyft && git rev-parse HEAD'" - return run_remote_shell(node, cmd) - - -def check_staging() -> None: - nodes = load_staging_data(JSON_DATA) - for name, node in nodes.items(): - print(f"Processing {name}") - good = False - try: - good, updated_hash = process_node(node=node) - node["commit_hash"] = updated_hash - nodes[name] = node - save_staging_data(JSON_DATA, nodes) - except Exception as e: - print(f"Failed to process node: {name}. 
{e}") - emoji = "✅" if good else "❌" - print(f"{emoji} Node {name}") - - -def load_staging_data(path: str) -> dict[str, dict]: - with open(path) as f: - return json.loads(f.read()) - - -def save_staging_data(path: str, data: dict[str, dict]) -> None: - print("Saving changes to file", path) - with open(path, "w") as f: - f.write(f"{json.dumps(data)}") - - -if __name__ == "__main__": - # stdlib - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("--dev", action="store_true", help="Dev Mode") - parser.add_argument("--private-key", help="Dev Mode") - - args = parser.parse_args() - if args.dev: - DEV_MODE = True - if args.private_key: - path = os.path.expanduser(args.private_key) - if os.path.exists(path): - KEY = path - if KEY is None: - raise Exception("--private-key required") - print("DEV MODE", DEV_MODE) - - check_staging() diff --git a/test_helpers/__init__.py b/test_helpers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/.gitignore b/tests/.gitignore new file mode 100644 index 00000000000..48894e3b168 --- /dev/null +++ b/tests/.gitignore @@ -0,0 +1 @@ +**/*.events \ No newline at end of file diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 1c8a4fc8b27..40f2f5193e9 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,8 +1,16 @@ +# stdlib +from secrets import token_hex + # third party import _pytest from faker import Faker import pytest +# syft absolute +import syft as sy +from syft.abstract_server import ServerSideType +from syft.server.worker import Worker + def pytest_configure(config: _pytest.config.Config) -> None: config.addinivalue_line("markers", "frontend: frontend integration tests") @@ -10,7 +18,7 @@ def pytest_configure(config: _pytest.config.Config) -> None: config.addinivalue_line( "markers", "container_workload: container workload integration tests" ) - config.addinivalue_line("markers", "local_node: local node integration tests") + config.addinivalue_line("markers", "local_server: local server integration tests") @pytest.fixture @@ -19,15 +27,53 @@ def gateway_port() -> int: @pytest.fixture -def domain_1_port() -> int: +def datasite_1_port() -> int: return 9082 @pytest.fixture -def domain_2_port() -> int: +def datasite_2_port() -> int: return 9083 @pytest.fixture def faker(): return Faker() + + +@pytest.fixture(scope="function") +def full_low_worker(n_consumers: int = 3, create_producer: bool = True) -> Worker: + _server = sy.orchestra.launch( + server_side_type=ServerSideType.LOW_SIDE, + name=token_hex(8), + # dev_mode=True, + reset=True, + n_consumers=n_consumers, + create_producer=create_producer, + queue_port=None, + thread_workers=False, + ) + # startup code here + yield _server + # # Cleanup code + _server.python_server.cleanup() + _server.land() + + +@pytest.fixture(scope="function") +def full_high_worker(n_consumers: int = 3, create_producer: bool = True) -> Worker: + _server = sy.orchestra.launch( + server_side_type=ServerSideType.HIGH_SIDE, + name=token_hex(8), + # dev_mode=True, + reset=True, + n_consumers=n_consumers, + create_producer=create_producer, + queue_port=None, + thread_workers=False, + ) + # startup code here + yield _server + # Cleanup code + _server.python_server.cleanup() + _server.land() diff --git a/tests/integration/container_workload/blob_storage_test.py b/tests/integration/container_workload/blob_storage_test.py index 3f072eab77c..1f6ab279258 100644 --- a/tests/integration/container_workload/blob_storage_test.py +++ 
b/tests/integration/container_workload/blob_storage_test.py @@ -14,21 +14,21 @@ reason="AZURE_BLOB_STORAGE_KEY is not set", ) @pytest.mark.container_workload -def test_mount_azure_blob_storage(domain_1_port): - domain_client = sy.login( - email="info@openmined.org", password="changethis", port=domain_1_port +def test_mount_azure_blob_storage(datasite_1_port): + datasite_client = sy.login( + email="info@openmined.org", password="changethis", port=datasite_1_port ) azure_storage_key = os.environ.get("AZURE_BLOB_STORAGE_KEY", None) assert azure_storage_key - domain_client.api.services.blob_storage.mount_azure( + datasite_client.api.services.blob_storage.mount_azure( account_name="citestingstorageaccount", container_name="citestingcontainer", account_key=azure_storage_key, bucket_name="helmazurebucket", ) - blob_files = domain_client.api.services.blob_storage.get_files_from_bucket( + blob_files = datasite_client.api.services.blob_storage.get_files_from_bucket( bucket_name="helmazurebucket" ) assert isinstance(blob_files, list), blob_files diff --git a/tests/integration/container_workload/pool_image_test.py b/tests/integration/container_workload/pool_image_test.py index f8d297afde9..6654f0b93d6 100644 --- a/tests/integration/container_workload/pool_image_test.py +++ b/tests/integration/container_workload/pool_image_test.py @@ -1,60 +1,97 @@ # stdlib -from textwrap import dedent -from time import sleep +import os +from uuid import uuid4 # third party -from faker import Faker import numpy as np import pytest # syft absolute import syft as sy -from syft.client.domain_client import DomainClient +from syft.client.datasite_client import DatasiteClient from syft.custom_worker.config import DockerWorkerConfig +from syft.custom_worker.config import PrebuiltWorkerConfig from syft.service.request.request import Request from syft.service.response import SyftSuccess from syft.service.worker.worker_image import SyftWorkerImage from syft.service.worker.worker_pool import SyftWorker from syft.service.worker.worker_pool import WorkerPool +from syft.types.uid import UID +from syft.util.util import get_latest_tag +registry = os.getenv("SYFT_BASE_IMAGE_REGISTRY", "docker.io") +repo = "openmined/syft-backend" -@pytest.mark.container_workload -def test_image_build(domain_1_port) -> None: - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" +if "k3d" in registry: + tag = get_latest_tag(registry, repo) +else: + tag = sy.__version__ + +external_registry = os.getenv("EXTERNAL_REGISTRY", registry) +external_registry_username = os.getenv("EXTERNAL_REGISTRY_USERNAME", None) +external_registry_password = os.getenv("EXTERNAL_REGISTRY_PASSWORD", None) + + +@pytest.fixture +def external_registry_uid(datasite_1_port: int) -> UID: + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + image_registry_list = datasite_client.api.services.image_registry.get_all() + if len(image_registry_list) > 1: + raise Exception("Only one registry should be present for testing") + + elif len(image_registry_list) == 1: + assert ( + image_registry_list[0].url == external_registry + ), "External registry different from the one set in the environment variable" + return image_registry_list[0].id + else: + registry_add_result = datasite_client.api.services.image_registry.add( + external_registry + ) + + assert isinstance(registry_add_result, sy.SyftSuccess), str(registry_add_result) + + image_registry_list = 
datasite_client.api.services.image_registry.get_all() + return image_registry_list[0].id + + +def make_docker_config_test_case(pkg: str) -> tuple[str, str]: + return ( + DockerWorkerConfig( + dockerfile=(f"FROM {registry}/{repo}:{tag}\nRUN pip install {pkg}\n") + ), + f"openmined/custom-worker-{pkg}:latest", ) - syft_base_tag = {sy.__version__} - # Submit Docker Worker Config - docker_config_rl = f""" - FROM openmined/grid-backend:{syft_base_tag} - RUN pip install recordlinkage - """ - docker_config = DockerWorkerConfig(dockerfile=docker_config_rl) +@pytest.mark.container_workload +def test_image_build(datasite_1_port: int, external_registry_uid: UID) -> None: + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) - # Submit Worker Image - submit_result = domain_client.api.services.worker_image.submit_dockerfile( - docker_config=docker_config + docker_config, docker_tag = make_docker_config_test_case("recordlinkage") + + submit_result = datasite_client.api.services.worker_image.submit( + worker_config=docker_config ) assert isinstance(submit_result, SyftSuccess) - assert len(domain_client.images.get_all()) == 2 + assert len(datasite_client.images.get_all()) == 2 # Validate if we can get the worker image object from its config - workerimage = domain_client.api.services.worker_image.get_by_config(docker_config) - assert not isinstance(workerimage, sy.SyftError) - + workerimage = datasite_client.api.services.worker_image.get_by_config(docker_config) # Build docker image - tag_version = sy.UID().short() - docker_tag = f"openmined/custom-worker-rl:{tag_version}" - docker_build_result = domain_client.api.services.worker_image.build( + docker_build_result = datasite_client.api.services.worker_image.build( image_uid=workerimage.id, tag=docker_tag, + registry_uid=external_registry_uid, ) assert isinstance(docker_build_result, SyftSuccess) # Refresh the worker image object - workerimage = domain_client.images.get_by_uid(workerimage.id) + workerimage = datasite_client.images.get_by_uid(workerimage.id) assert not isinstance(workerimage, sy.SyftSuccess) assert workerimage.is_built @@ -62,154 +99,163 @@ def test_image_build(domain_1_port) -> None: assert workerimage.image_identifier.repo_with_tag == docker_tag assert workerimage.image_hash is not None - # Delete image - delete_result = domain_client.api.services.worker_image.remove(uid=workerimage.id) - assert isinstance(delete_result, sy.SyftSuccess) - - # Validate the image is successfully deleted - assert len(domain_client.images.get_all()) == 1 - workerimage = domain_client.images.get_all()[0] - assert workerimage.config != docker_config - @pytest.mark.container_workload -def test_pool_launch(domain_1_port) -> None: - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" +# @pytest.mark.parametrize("prebuilt", [True, False]) +@pytest.mark.parametrize("prebuilt", [False]) +def test_pool_launch( + datasite_1_port: int, external_registry_uid: UID, prebuilt: bool +) -> None: + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) - assert len(domain_client.worker_pools.get_all()) == 1 - - syft_base_tag = {sy.__version__} - - # Submit Docker Worker Config - docker_config_opendp = f""" - FROM openmined/grid-backend:{syft_base_tag} - RUN pip install opendp - """ - docker_config = DockerWorkerConfig(dockerfile=docker_config_opendp) # Submit Worker Image - 
submit_result = domain_client.api.services.worker_image.submit_dockerfile( - docker_config=docker_config + # nginx is intended to cause the startupProbe and livenessProbe to fail + worker_config, docker_tag = ( + (PrebuiltWorkerConfig(tag="docker.io/library/nginx:latest"), None) + if prebuilt + else make_docker_config_test_case("opendp") + ) + submit_result = datasite_client.api.services.worker_image.submit( + worker_config=worker_config ) assert isinstance(submit_result, SyftSuccess) - worker_image = domain_client.api.services.worker_image.get_by_config(docker_config) - assert not isinstance(worker_image, sy.SyftError) + worker_image = datasite_client.api.services.worker_image.get_by_config( + worker_config + ) assert worker_image is not None - assert not worker_image.is_built - # Build docker image - tag_version = sy.UID().short() - docker_tag = f"openmined/custom-worker-opendp:{tag_version}" - docker_build_result = domain_client.api.services.worker_image.build( - image_uid=worker_image.id, - tag=docker_tag, - ) - assert isinstance(docker_build_result, SyftSuccess) + if not worker_image.is_prebuilt: + assert not worker_image.is_built + + # Build docker image + docker_build_result = datasite_client.api.services.worker_image.build( + image_uid=worker_image.id, + tag=docker_tag, + registry_uid=external_registry_uid, + ) + assert isinstance(docker_build_result, SyftSuccess) + + # Push Image to External registry + push_result = datasite_client.api.services.worker_image.push( + worker_image.id, + username=external_registry_username, + password=external_registry_password, + ) + assert isinstance(push_result, sy.SyftSuccess), str(push_result) # Launch a worker pool - pool_version = sy.UID().short() - worker_pool_name = f"custom_worker_pool_ver{pool_version}" - worker_pool_res = domain_client.api.services.worker_pool.launch( - name=worker_pool_name, + worker_pool_name = f"custom-worker-pool-opendp{'-prebuilt' if prebuilt else ''}" + worker_pool_res = datasite_client.api.services.worker_pool.launch( + pool_name=worker_pool_name, image_uid=worker_image.id, - num_workers=3, + num_workers=2, ) - assert len(worker_pool_res) == 3 - - assert all(worker.error is None for worker in worker_pool_res) - assert len(domain_client.worker_pools.get_all()) == 2 - worker_pool = domain_client.worker_pools[worker_pool_name] - assert len(worker_pool.worker_list) == 3 + # TODO: we need to refactor this because the test is broken + if prebuilt: + # if the container has no liveness probe like nginx then _create_stateful_set + # will timeout with CREATE_POOL_TIMEOUT_SEC + # however this is currently longer than the blocking api call so we just see + # assert "timeout" in str(worker_pool_res).lower() + # if we lower the timout we get an exception here + # assert "Failed to start workers" in str(worker_pool_res) + pass + else: + assert all(worker.error is None for worker in worker_pool_res) - workers = worker_pool.workers - assert len(workers) == 3 + worker_pool = datasite_client.worker_pools[worker_pool_name] + assert len(worker_pool.worker_list) == 2 - for worker in workers: - assert worker.worker_pool_name == worker_pool_name - assert worker.image.id == worker_image.id + workers = worker_pool.workers + assert len(workers) == 2 - assert len(worker_pool.healthy_workers) == 3 + for worker in workers: + assert worker.worker_pool_name == worker_pool_name + assert worker.image.id == worker_image.id - # Grab the first worker - first_worker = workers[0] + assert len(worker_pool.healthy_workers) == 2 - # Check worker Logs - logs 
= domain_client.api.services.worker.logs(uid=first_worker.id) - assert not isinstance(logs, sy.SyftError) + # Grab the first worker + first_worker = workers[0] - # Check for worker status - status_res = domain_client.api.services.worker.status(uid=first_worker.id) - assert not isinstance(status_res, sy.SyftError) - assert isinstance(status_res, tuple) + # Check worker Logs + _ = datasite_client.api.services.worker.logs(uid=first_worker.id) - # Delete the pool's workers - for worker in worker_pool.workers: - res = domain_client.api.services.worker.delete(uid=worker.id, force=True) - assert isinstance(res, sy.SyftSuccess) + # Check for worker status + status_res = datasite_client.api.services.worker.status(uid=first_worker.id) + assert isinstance(status_res, tuple) - # TODO: delete the launched pool + # Delete the pool's workers + for worker in worker_pool.workers: + res = datasite_client.api.services.worker.delete(uid=worker.id, force=True) + assert isinstance(res, sy.SyftSuccess) - # Clean the build images - sleep(10) - delete_result = domain_client.api.services.worker_image.remove(uid=worker_image.id) - assert isinstance(delete_result, sy.SyftSuccess) + # delete the launched pool + res = datasite_client.api.services.worker_pool.delete(pool_name=worker_pool_name) + assert isinstance(res, SyftSuccess), res.message @pytest.mark.container_workload -def test_pool_image_creation_job_requests(domain_1_port) -> None: +@pytest.mark.parametrize("prebuilt", [True, False]) +def test_pool_image_creation_job_requests( + datasite_1_port: int, external_registry_uid: UID, prebuilt: bool +) -> None: """ Test register ds client, ds requests to create an image and pool creation, do approves, then ds creates a function attached to the worker pool, then creates another request. 
DO approves and runs the function """ - # construct a root client and data scientist client for the test domain - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + # construct a root client and data scientist client for the test datasite + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) - fake = Faker() - ds_username = fake.user_name() + ds_username = uuid4().hex[:8] ds_email = ds_username + "@example.com" - res = domain_client.register( + res = datasite_client.register( name=ds_username, email=ds_email, password="secret_pw", password_verify="secret_pw", ) assert isinstance(res, SyftSuccess) - ds_client = sy.login(email=ds_email, password="secret_pw", port=domain_1_port) - syft_base_tag = {sy.__version__} + # Grant user permission to request code execution + ds = next(u for u in datasite_client.users if u.email == ds_email) + ds.allow_mock_execution() - # the DS makes a request to create an image and a pool based on the image + ds_client = sy.login(email=ds_email, password="secret_pw", port=datasite_1_port) - docker_config_np = f""" - FROM openmined/grid-backend:{syft_base_tag} - RUN pip install numpy - """ - docker_config = DockerWorkerConfig(dockerfile=docker_config_np) - tag_version = sy.UID().short() - docker_tag = f"openmined/custom-worker-np:{tag_version}" - pool_version = sy.UID().short() - worker_pool_name = f"custom_worker_pool_ver{pool_version}" - request = ds_client.api.services.worker_pool.create_image_and_pool_request( - pool_name=worker_pool_name, - num_workers=1, - tag=docker_tag, - config=docker_config, - reason="I want to do some more cool data science with PySyft and Recordlinkage", + # the DS makes a request to create an image and a pool based on the image + worker_config, docker_tag = ( + (PrebuiltWorkerConfig(tag=f"{registry}/{repo}:{tag}"), None) + if prebuilt + else make_docker_config_test_case("numpy") ) + + worker_pool_name = f"custom-worker-pool-numpy{'-prebuilt' if prebuilt else ''}" + + kwargs = { + "pool_name": worker_pool_name, + "num_workers": 1, + "config": worker_config, + "reason": "I want to do some more cool data science with PySyft", + } + if not prebuilt: + kwargs.update({"tag": docker_tag, "registry_uid": external_registry_uid}) + + request = ds_client.api.services.worker_pool.create_image_and_pool_request(**kwargs) assert isinstance(request, Request) assert len(request.changes) == 2 - assert request.changes[0].config == docker_config + assert request.changes[0].config == worker_config assert request.changes[1].num_workers == 1 assert request.changes[1].pool_name == worker_pool_name - # the domain client approve the request, so the image should be built + # the datasite client approve the request, so the image should be built # and the worker pool should be launched - for r in domain_client.requests: + for r in datasite_client.requests: if r.id == request.id: req_result = r.approve() break @@ -222,13 +268,13 @@ def test_pool_image_creation_job_requests(domain_1_port) -> None: worker: SyftWorker = launched_pool.workers[0] assert launched_pool.name in worker.name - assert worker.status.value == "Pending" + assert worker.status.value == "Running" assert worker.healthcheck.value == "✅" # assert worker.consumer_state.value == "Idle" assert isinstance(worker.logs, str) assert worker.job_id is None - built_image = ds_client.api.services.worker_image.get_by_config(docker_config) + built_image = 
ds_client.api.services.worker_image.get_by_config(worker_config) assert isinstance(built_image, SyftWorkerImage) assert built_image.id == launched_pool.image.id assert worker.image.id == built_image.id @@ -236,7 +282,7 @@ def test_pool_image_creation_job_requests(domain_1_port) -> None: # Dataset data = np.array([1, 2, 3]) data_action_obj = sy.ActionObject.from_obj(data) - data_pointer = domain_client.api.services.action.set(data_action_obj) + data_pointer = data_action_obj.send(ds_client) # Function @sy.syft_function( @@ -247,13 +293,12 @@ def test_pool_image_creation_job_requests(domain_1_port) -> None: def custom_worker_func(x): return {"y": x + 1} - custom_worker_func.code = dedent(custom_worker_func.code) assert custom_worker_func.worker_pool_name == launched_pool.name # Request code execution code_request = ds_client.code.request_code_execution(custom_worker_func) assert isinstance(code_request, Request) assert code_request.status.value == 0 # pending - for r in domain_client.requests: + for r in datasite_client.requests: if r.id == code_request.id: code_req_result = r.approve(approve_nested=True) break @@ -264,7 +309,7 @@ def custom_worker_func(x): job.wait() assert job.status.value == "completed" - job = domain_client.jobs[-1] + job = datasite_client.jobs.get_by_user_code_id(job.user_code_id)[-1] assert job.job_worker_id == worker.id # Validate the result received from the syft function @@ -274,12 +319,9 @@ def custom_worker_func(x): # Delete the workers of the launched pools for worker in launched_pool.workers: - res = domain_client.api.services.worker.delete(uid=worker.id, force=True) + res = datasite_client.api.services.worker.delete(uid=worker.id, force=True) assert isinstance(res, sy.SyftSuccess) - # TODO: delete the launched pool - - # Clean the build images - sleep(10) - delete_result = domain_client.api.services.worker_image.remove(uid=built_image.id) - assert isinstance(delete_result, sy.SyftSuccess) + # delete the launched pool + res = datasite_client.api.services.worker_pool.delete(pool_id=launched_pool.id) + assert isinstance(res, SyftSuccess), res.message diff --git a/tests/integration/external/oblv/manual_code_submission_test.py b/tests/integration/external/oblv/manual_code_submission_test.py deleted file mode 100644 index fc1827df9cf..00000000000 --- a/tests/integration/external/oblv/manual_code_submission_test.py +++ /dev/null @@ -1,113 +0,0 @@ -# stdlib -import os -from textwrap import dedent - -# third party -import numpy as np - -# syft absolute -import syft as sy -from syft.service.action.numpy import NumpyArrayObject -from syft.service.code.user_code import SubmitUserCode - -LOCAL_ENCLAVE_PORT = os.environ.get("LOCAL_ENCLAVE_PORT", 8010) -# TODO: Should move to Docker Container tests - - -def load_dataset(domain_client) -> None: - dataset_name = f"{domain_client.name}'s... 
Private Data" - asset_name = "Secret data" - dataset = sy.Dataset(name=dataset_name) - asset = sy.Asset(name=asset_name) - - # Real Data - x = np.array([1, 2, 3]) - asset.set_obj(x) - - # Mock Data - y = np.array([1, 1, 1]) - asset.set_mock(y, mock_is_real=False) - - dataset.add_asset(asset) - - domain_client.upload_dataset(dataset) - - datasets = domain_client.datasets.get_all() - - assert len(datasets) == 1 - domain_dataset = datasets[0] - assert domain_dataset.name == dataset_name - assert len(domain_dataset.assets) == 1 - assert domain_dataset.assets[0].name == asset_name - - -def test_manual_code_submission_enclave() -> None: - # Step1: Login Phase - canada_root = sy.Worker.named(name="canada", local_db=True, reset=True).root_client - - italy_root = sy.Worker.named(name="italy", local_db=True, reset=True).root_client - - # Step 2: Uploading to Domain Nodes - load_dataset(canada_root) - load_dataset(italy_root) - - assert sy.enable_external_lib("oblv") - - # Step 3: Connection to Enclave - # TODO 🟣 Modify to use Data scientist account credentials - # after Permission are integrated - depl = sy.external.oblv.deployment_client.DeploymentClient( - deployment_id="d-2dfedbb1-7904-493b-8793-1a9554badae7", - oblv_client=None, - domain_clients=[canada_root, italy_root], - key_name="first", - ) # connection_port key can be added to set the port on which oblv_proxy will run - - depl.initiate_connection(LOCAL_ENCLAVE_PORT) - - depl.register( - name="Jane Doe", - email="jane@caltech.edu", - password="abc123", - institution="Caltech", - website="https://www.caltech.edu/", - ) - depl.login(email="jane@caltech.edu", password="abc123") - - # Step 4: Manual code preparation Phase - canada_data = canada_root.datasets[-1] - italy_data = italy_root.datasets[-1] - - @sy.syft_function( - input_policy=sy.ExactMatch( - canada_data=canada_data.assets[0], italy_data=italy_data.assets[0] - ), - output_policy=sy.SingleExecutionExactOutput(), - ) - def simple_function(canada_data, italy_data): - return canada_data + italy_data - - simple_function.code = dedent(simple_function.code) - assert isinstance(simple_function, SubmitUserCode) - - # Step 5 :Code Submission Phase - print(depl.request_code_execution(code=simple_function)) - - # Step 6: Code review phase - canada_requests = canada_root.api.services.request.get_all() - assert len(canada_requests) == 1 - assert canada_requests[0].approve() - - italy_requests = italy_root.api.services.request.get_all() - assert len(italy_requests) == 1 - assert italy_requests[0].approve() - - assert hasattr(depl.api.services.code, "simple_function") - res = depl.api.services.code.simple_function( - canada_data=canada_data.assets[0], italy_data=italy_data.assets[0] - ) - print(res, type(res)) - assert isinstance(res, NumpyArrayObject) - - canada_root.cleanup() - italy_root.cleanup() diff --git a/tests/integration/frontend/frontend_start_test.py b/tests/integration/frontend/frontend_start_test.py index fd531b3b87a..54490db450f 100644 --- a/tests/integration/frontend/frontend_start_test.py +++ b/tests/integration/frontend/frontend_start_test.py @@ -8,14 +8,14 @@ here = os.path.dirname(__file__) NETWORK_PORT = 9081 -DOMAIN_PORT = 9082 +DATASITE_PORT = 9082 HOST_IP = os.environ.get("HOST_IP", "localhost") @pytest.mark.frontend -def test_serves_domain_frontend() -> None: - title_str = "PyGrid" - url = f"http://{HOST_IP}:{DOMAIN_PORT}" +def test_serves_datasite_frontend() -> None: + title_str = "Syft UI" + url = f"http://{HOST_IP}:{DATASITE_PORT}" result = requests.get(url) assert 
result.status_code == 200 assert title_str in result.text @@ -23,7 +23,7 @@ def test_serves_domain_frontend() -> None: @pytest.mark.frontend def test_serves_network_frontend() -> None: - title_str = "PyGrid" + title_str = "Syft UI" url = f"http://localhost:{NETWORK_PORT}" result = requests.get(url) assert result.status_code == 200 diff --git a/tests/integration/local/__init__.py b/tests/integration/local/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/local/conftest.py b/tests/integration/local/conftest.py new file mode 100644 index 00000000000..f181da4ea49 --- /dev/null +++ b/tests/integration/local/conftest.py @@ -0,0 +1,85 @@ +# stdlib +from collections.abc import Generator +from collections.abc import Iterable +from dataclasses import dataclass +from itertools import product +from secrets import token_hex +from typing import Any + +# third party +import pytest +import pytest_asyncio + +# syft absolute +import syft as sy +from syft.orchestra import ClientAlias +from syft.orchestra import ServerHandle +from syft.util.test_helpers.email_helpers import EmailServer +from syft.util.test_helpers.email_helpers import SMTPTestServer +from syft.util.test_helpers.email_helpers import get_email_server + + +@pytest.fixture() +def server_args() -> dict[str, Any]: + return {} + + +@pytest.fixture +def server(server_args: dict[str, Any]) -> Generator[ServerHandle, None, None]: + _server = sy.orchestra.launch( + **{ + "name": token_hex(8), + "dev_mode": False, + "reset": True, + "queue_port": None, + **server_args, + } + ) + # startup code here + yield _server + # Cleanup code + if (python_server := _server.python_server) is not None: + python_server.cleanup() + _server.land() + + +@pytest.fixture +def client(server: ServerHandle) -> ClientAlias: + return server.login(email="info@openmined.org", password="changethis") + + +@pytest_asyncio.fixture(scope="function") +async def email_wrapper() -> Any: + email_server, smtp_server = get_email_server() + + @dataclass + class EmailServerWrapper: + email_server: EmailServer + smtp_server: SMTPTestServer + + email_wrapper = EmailServerWrapper( + email_server=email_server, smtp_server=smtp_server + ) + yield email_wrapper + + # Finish SMTP process. 
+ smtp_server.stop() + + +def matrix( + *, + excludes_: Iterable[dict[str, Any]] | None = None, + **kwargs: Iterable, +) -> list[dict[str, Any]]: + args = ([(k, v) for v in vs] for k, vs in kwargs.items()) + args = product(*args) + + excludes_ = [] if excludes_ is None else [kv.items() for kv in excludes_] + + args = ( + arg + for arg in args + if not any(all(kv in arg for kv in kvs) for kvs in excludes_) + ) + + return [dict(kvs) for kvs in args] diff --git a/tests/integration/local/custom_api_usercode_gen_test.py b/tests/integration/local/custom_api_usercode_gen_test.py new file mode 100644 index 00000000000..b1164d7585b --- /dev/null +++ b/tests/integration/local/custom_api_usercode_gen_test.py @@ -0,0 +1,366 @@ +# stdlib +from contextlib import nullcontext as does_not_raise + +# third party +import pytest + +# syft absolute +import syft as sy +from syft.service.code.user_code import UserCode +from syft.service.request.request import Request +from syft.service.request.request import RequestStatus +from syft.service.response import SyftError +from syft.types.errors import SyftException + + +@sy.api_endpoint_method(settings={}) +def private_query_function( + context, + sql_query: str, +) -> str: + return f"PRIVATE QUERY: {sql_query}" + + +def is_within_rate_limit(context): + """Rate limiter for custom API calls made by users.""" + # stdlib + import datetime + + state = context.state + settings = context.settings + email = context.user.email + + current_time = datetime.datetime.now() + calls_last_min = [ + 1 if (current_time - call_time).seconds < 60 else 0 + for call_time in state[email] + ] + + return sum(calls_last_min) < settings["CALLS_PER_MIN"] + + +@sy.api_endpoint_method( + settings={ + "CALLS_PER_MIN": 10, + }, + helper_functions=[is_within_rate_limit], +) +def mock_query_function( + context, + sql_query: str, +) -> str: + # stdlib + import datetime + + # syft absolute + from syft.service.response import SyftError + + # Store a dict with the calltimes for each user, via the email. + if context.user.email not in context.state.keys(): + context.state[context.user.email] = [] + + if not context.code.is_within_rate_limit(context): + return SyftError(message="Rate limit of calls per minute has been reached.") + + try: + context.state[context.user.email].append(datetime.datetime.now()) + + return f"MOCK QUERY: {sql_query}" + + except Exception: + return SyftError( + message="An error occured executing the API call, please contact the domain owner." + ) + + +@sy.api_endpoint( + path="bigquery.submit_query", + description="API endpoint that allows you to submit SQL queries to run on the private data.", +) +def submit_query( + context, + func_name: str, + query: str, +) -> str: + # stdlib + import hashlib + + # syft absolute + import syft as sy + + hash_object = hashlib.new("sha256") + + hash_object.update(context.user.email.encode("utf-8")) + func_name = func_name + "_" + hash_object.hexdigest()[:6] + + @sy.syft_function( + name=func_name, + input_policy=sy.MixedInputPolicy( + endpoint=sy.Constant( + val=context.admin_client.api.services.bigquery.test_query + ), + query=sy.Constant(val=query), + client=context.admin_client, + ), + ) + def execute_query(query: str, endpoint): + res = endpoint(sql_query=query) + return res + + request = context.user_client.code.request_code_execution(execute_query) + if isinstance(request, sy.SyftError): + return request + context.admin_client.requests.set_tags(request, ["autosync"]) + + return ( + f"Query submitted {request}. 
Use `client.code.{func_name}()` to run your query" + ) + + +@pytest.fixture +def get_clients(full_high_worker): + admin_client = full_high_worker.login( + email="info@openmined.org", password="changethis" + ) + admin_client.register( + email="ds_user@test.abc", + name="ds_user", + password="1234", + password_verify="1234", + ) + ds_client = full_high_worker.login(email="ds_user@test.abc", password="1234") + yield admin_client, ds_client + + +@pytest.fixture +def setup_query_endpoint(get_clients): + admin_client, ds_client = get_clients + new_endpoint = sy.TwinAPIEndpoint( + path="bigquery.test_query", + description="This endpoint allows to query Bigquery storage via SQL queries.", + private_function=private_query_function, + mock_function=mock_query_function, + ) + + admin_client.custom_api.add(endpoint=new_endpoint) + + yield admin_client, ds_client + + +@pytest.fixture +def update_query_endpoint(setup_query_endpoint): + admin_client, ds_client = setup_query_endpoint + admin_client.api.services.api.update( + endpoint_path="bigquery.test_query", hide_mock_definition=True + ) + admin_client.api.services.api.update( + endpoint_path="bigquery.test_query", endpoint_timeout=10 + ) + yield admin_client, ds_client + + +@pytest.fixture +def create_submit_query_endpoint(update_query_endpoint): + admin_client, ds_client = update_query_endpoint + + admin_client.custom_api.add(endpoint=submit_query) + admin_client.api.services.api.update( + endpoint_path="bigquery.submit_query", hide_mock_definition=True + ) + yield admin_client, ds_client + + +@pytest.fixture +def submit_ds_request(create_submit_query_endpoint): + admin_client, ds_client = create_submit_query_endpoint + FUNC_NAME = "my_test_function" + QUERY = "SELECT * FROM `bigquery-public-data.ml_datasets.penguins` LIMIT 10" + + submit_res = ds_client.api.services.bigquery.submit_query( + func_name=FUNC_NAME, query=QUERY + ) + + fn_name = extract_code_path(submit_res) + yield admin_client, ds_client, fn_name + + +@pytest.fixture +def approve_original_request(submit_ds_request): + admin_client, ds_client, fn_name = submit_ds_request + + request = admin_client.requests[0] + + request.approve() + + yield admin_client, ds_client, fn_name + + +@pytest.fixture +def accept_request_by_deposit(submit_ds_request): + admin_client, ds_client, fn_name = submit_ds_request + + request = admin_client.requests[0] + + job = execute_request(admin_client, request) + job.wait() + job_info = job.info(result=True) + request.deposit_result(job_info, approve=True) + yield admin_client, ds_client, fn_name + + +@pytest.fixture +def reject_request(submit_ds_request): + admin_client, ds_client, fn_name = submit_ds_request + + request = admin_client.requests[0] + + request.deny(reason="Bad vibes :(") + + yield admin_client, ds_client, fn_name + + +def extract_code_path(response): + # stdlib + import re + + pattern = r"client\.code\.(\w+)\(\)" + match = re.search(pattern, str(response)) + if match: + extracted_code = match.group(1) + return extracted_code + return None + + +def execute_request(client_high, request) -> dict: + if not isinstance(request, Request): + return "This is not a request" + + code = request.code + if not isinstance(code, UserCode): + return "No usercode found" + + func_name = request.code.service_func_name + api_func = getattr(client_high.code, func_name, None) + if api_func is None: + return "Code name was not found on the client." 
+ + job = api_func(blocking=False) + return job + + +# set up public submit query endpoint +@pytest.mark.local_server +def test_query_endpoint_added(update_query_endpoint) -> None: + admin_client, _ = update_query_endpoint + assert len(admin_client.custom_api.api_endpoints()) == 1 + + +@pytest.mark.local_server +def test_query_endpoint_mock_endpoint(update_query_endpoint) -> None: + query = "SELECT * FROM dataset_1.table_1 LIMIT 10" + admin_client, _ = update_query_endpoint + + mock_result = admin_client.api.services.bigquery.test_query.mock(sql_query=query) + assert not isinstance(mock_result, SyftError) + + retrieved_obj = mock_result.get() + + assert isinstance(retrieved_obj, str) + assert "MOCK QUERY" in retrieved_obj + assert query in retrieved_obj + + +@pytest.mark.local_server +def test_query_endpoint_private_endpoint(update_query_endpoint) -> None: + query = "SELECT * FROM dataset_1.table_1 LIMIT 100" + admin_client, _ = update_query_endpoint + + result = admin_client.api.services.bigquery.test_query.private(sql_query=query) + assert not isinstance(result, SyftError) + + retrieved_obj = result.get() + + assert isinstance(retrieved_obj, str) + assert "PRIVATE QUERY" in retrieved_obj + assert query in retrieved_obj + + +@pytest.mark.local_server +def test_submit_query_endpoint_added(create_submit_query_endpoint): + admin_client, _ = create_submit_query_endpoint + + assert len(admin_client.custom_api.api_endpoints()) == 2 + + +@pytest.mark.local_server +def test_submit_query_endpoint(create_submit_query_endpoint) -> None: + admin_client, ds_client = create_submit_query_endpoint + sql_query = "SELECT * FROM dataset_1.table_1 LIMIT 10" + # Inspect the context state on an endpoint + result = admin_client.api.services.bigquery.submit_query( + func_name="my_func", + query=sql_query, + ) + + assert not isinstance(result, SyftError) + + retrieved_obj = result.get() + + assert isinstance(retrieved_obj, str) + assert "client.code.my_func" in retrieved_obj + # stdlib + import re + + fn_name_pattern = re.compile(r"client\.code\.(.*)\(") + fn_to_call = fn_name_pattern.findall(retrieved_obj)[0] + assert "my_func" in fn_to_call + + result = getattr(admin_client.code, fn_to_call)() + + assert not isinstance(result, SyftError) + + retrieved_obj = result.get() + + assert isinstance(retrieved_obj, str) + + assert "PRIVATE QUERY" in retrieved_obj + assert sql_query in retrieved_obj + + +@pytest.mark.local_server +@pytest.mark.parametrize( + "fixture_name, expected_request_status, raises_expectation, error_message", + [ + ( + "submit_ds_request", + RequestStatus.PENDING, + pytest.raises(SyftException), + "UserCodeStatus.PENDING", + ), + ("approve_original_request", RequestStatus.APPROVED, does_not_raise(), ""), + ( + "reject_request", + RequestStatus.REJECTED, + pytest.raises(SyftException), + "Bad vibes :(", + ), + ("accept_request_by_deposit", RequestStatus.APPROVED, does_not_raise(), ""), + ], +) +def test_ds_result_retrieval( + fixture_name, expected_request_status, raises_expectation, error_message, request +): + flow_fixture = request.getfixturevalue(fixture_name) + admin_client, ds_client, fn_name = flow_fixture + + req = admin_client.requests[0] + assert isinstance(req, Request) + assert req.status == expected_request_status + + assert "my_test_function" in fn_name + + api_method = getattr(ds_client.code, fn_name) + with raises_expectation as exc: + api_method() + if error_message: + assert error_message in str(exc.value) diff --git a/tests/integration/local/enclave_local_test.py 
b/tests/integration/local/enclave_local_test.py index c91bdf887a6..d3377a570dc 100644 --- a/tests/integration/local/enclave_local_test.py +++ b/tests/integration/local/enclave_local_test.py @@ -6,19 +6,18 @@ # syft absolute import syft as sy -from syft.service.response import SyftError +from syft.types.errors import SyftException -@pytest.mark.local_node +@pytest.mark.local_server def test_enclave_root_client_exception(): - enclave_node = sy.orchestra.launch( + enclave_server = sy.orchestra.launch( name=token_hex(8), - node_type=sy.NodeType.ENCLAVE, + server_type=sy.ServerType.ENCLAVE, dev_mode=True, reset=True, - local_db=True, ) - res = enclave_node.login(email="info@openmined.org", password="changethis") - assert isinstance(res, SyftError) - enclave_node.python_node.cleanup() - enclave_node.land() + with pytest.raises(SyftException): + enclave_server.login(email="info@openmined.org", password="changethis") + enclave_server.python_server.cleanup() + enclave_server.land() diff --git a/tests/integration/local/gateway_local_test.py b/tests/integration/local/gateway_local_test.py index faf59b0d500..48bd790fb79 100644 --- a/tests/integration/local/gateway_local_test.py +++ b/tests/integration/local/gateway_local_test.py @@ -1,5 +1,7 @@ # stdlib +import os from secrets import token_hex +import time # third party from faker import Faker @@ -7,175 +9,292 @@ # syft absolute import syft as sy -from syft.abstract_node import NodeType -from syft.client.domain_client import DomainClient +from syft.abstract_server import ServerType +from syft.client.datasite_client import DatasiteClient from syft.client.enclave_client import EnclaveClient from syft.client.gateway_client import GatewayClient -from syft.service.network.node_peer import NodePeer +from syft.service.network.network_service import ServerPeerAssociationStatus +from syft.service.network.server_peer import ServerPeer +from syft.service.network.server_peer import ServerPeerConnectionStatus +from syft.service.network.utils import PeerHealthCheckTask +from syft.service.request.request import Request from syft.service.response import SyftSuccess from syft.service.user.user_roles import ServiceRole -def launch(node_type): +def _launch( + server_type: ServerType, + association_request_auto_approval: bool = True, + port: int | str | None = None, +): return sy.orchestra.launch( name=token_hex(8), - node_type=node_type, + server_type=server_type, dev_mode=True, reset=True, - local_db=True, + association_request_auto_approval=association_request_auto_approval, + port=port, + background_tasks=True, ) @pytest.fixture def gateway(): - node = launch(NodeType.GATEWAY) - yield node - node.python_node.cleanup() - node.land() + server = _launch(ServerType.GATEWAY) + yield server + server.python_server.cleanup() + server.land() + + +@pytest.fixture(params=[True, False]) +def gateway_association_request_auto_approval(request: pytest.FixtureRequest): + server = _launch( + ServerType.GATEWAY, association_request_auto_approval=request.param + ) + yield (request.param, server) + server.python_server.cleanup() + server.land() @pytest.fixture -def domain(): - node = launch(NodeType.DOMAIN) - yield node - node.python_node.cleanup() - node.land() +def datasite(): + server = _launch(ServerType.DATASITE) + yield server + server.python_server.cleanup() + server.land() @pytest.fixture -def domain_2(): - node = launch(NodeType.DOMAIN) - yield node - node.python_node.cleanup() - node.land() +def datasite_2(): + server = _launch(ServerType.DATASITE) + yield server + 
server.python_server.cleanup() + server.land() @pytest.fixture def enclave(): - node = launch(NodeType.ENCLAVE) - yield node - node.python_node.cleanup() - node.land() + server = _launch(ServerType.ENCLAVE) + yield server + server.python_server.cleanup() + server.land() + + +@pytest.fixture +def gateway_webserver(): + server = _launch(server_type=ServerType.GATEWAY, port="auto") + yield server + server.land() -@pytest.mark.local_node -def test_create_gateway_client(gateway): - client = gateway.client +@pytest.fixture +def datasite_webserver(): + server = _launch(ServerType.DATASITE, port="auto") + yield server + server.land() + + +@pytest.fixture +def datasite_2_webserver(): + server = _launch(ServerType.DATASITE, port="auto") + yield server + server.land() + + +@pytest.fixture(scope="function") +def set_network_json_env_var(gateway_webserver): + """Set the environment variable for the network registry JSON string.""" + json_string = f""" + {{ + "2.0.0": {{ + "gateways": [ + {{ + "name": "{gateway_webserver.name}", + "host_or_ip": "localhost", + "protocol": "http", + "port": "{gateway_webserver.port}", + "admin_email": "support@openmined.org", + "website": "https://www.openmined.org/", + "slack": "https://slack.openmined.org/", + "slack_channel": "#support" + }} + ] + }} + }} + """ + os.environ["NETWORK_REGISTRY_JSON"] = json_string + yield + # Clean up the environment variable after all tests in the module have run + del os.environ["NETWORK_REGISTRY_JSON"] + + +@pytest.mark.local_server +def test_create_gateway( + set_network_json_env_var, + gateway_webserver, + datasite_webserver, + datasite_2_webserver, +): + assert isinstance(sy.gateways, sy.NetworkRegistry) + assert len(sy.gateways) == 1 + assert len(sy.gateways.all_networks) == 1 + assert sy.gateways.all_networks[0]["name"] == gateway_webserver.name + assert len(sy.gateways.online_networks) == 1 + assert sy.gateways.online_networks[0]["name"] == gateway_webserver.name + + gateway_client: GatewayClient = gateway_webserver.login( + email="info@openmined.org", + password="changethis", + ) + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) + + datasite_client: DatasiteClient = datasite_webserver.login( + email="info@openmined.org", + password="changethis", + ) + datasite_client_2: DatasiteClient = datasite_2_webserver.login( + email="info@openmined.org", + password="changethis", + ) + result = datasite_client.connect_to_gateway(handle=gateway_webserver) + assert isinstance(result, SyftSuccess) + result = datasite_client_2.connect_to_gateway(handle=gateway_webserver) + assert isinstance(result, SyftSuccess) + + time.sleep(PeerHealthCheckTask.repeat_time * 2 + 1) + connected_peers = gateway_client.api.services.network.get_all_peers() + assert len(connected_peers) == 2 + for peer in connected_peers: + assert peer.ping_status == ServerPeerConnectionStatus.ACTIVE + + # check the gateway client + client = gateway_webserver.client assert isinstance(client, GatewayClient) - assert client.metadata.node_type == NodeType.GATEWAY.value + assert client.metadata.server_type == ServerType.GATEWAY.value -@pytest.mark.local_node -def test_domain_connect_to_gateway(gateway, domain): +@pytest.mark.local_server +def test_datasite_connect_to_gateway( + gateway_association_request_auto_approval, datasite +): + association_request_auto_approval, gateway = ( + gateway_association_request_auto_approval + ) gateway_client: GatewayClient = gateway.login( email="info@openmined.org", 
password="changethis", ) - domain_client: DomainClient = domain.login( + datasite_client: DatasiteClient = datasite.login( email="info@openmined.org", password="changethis", ) - result = domain_client.connect_to_gateway(handle=gateway) - assert isinstance(result, SyftSuccess) + result = datasite_client.connect_to_gateway(handle=gateway) + + if association_request_auto_approval: + assert isinstance(result, SyftSuccess) + else: + assert isinstance(result, Request) + r = gateway_client.api.services.request.get_all()[-1].approve() + assert isinstance(r, SyftSuccess) # check priority all_peers = gateway_client.api.services.network.get_all_peers() - assert all_peers[0].node_routes[0].priority == 1 + assert all_peers[0].server_routes[0].priority == 1 - # Try via client approach - result_2 = domain_client.connect_to_gateway(via_client=gateway_client) + # Try again (via client approach) + result_2 = datasite_client.connect_to_gateway(via_client=gateway_client) assert isinstance(result_2, SyftSuccess) - assert len(domain_client.peers) == 1 + assert len(datasite_client.peers) == 1 assert len(gateway_client.peers) == 1 - proxy_domain_client = gateway_client.peers[0] - domain_peer = domain_client.peers[0] + proxy_datasite_client = gateway_client.peers[0] + datasite_peer = datasite_client.peers[0] - assert isinstance(proxy_domain_client, DomainClient) - assert isinstance(domain_peer, NodePeer) + assert isinstance(proxy_datasite_client, DatasiteClient) + assert isinstance(datasite_peer, ServerPeer) - # Domain's peer is a gateway and vice-versa - assert domain_peer.node_type == NodeType.GATEWAY + # Datasite's peer is a gateway and vice-versa + assert datasite_peer.server_type == ServerType.GATEWAY - assert gateway_client.name == domain_peer.name - assert domain_client.name == proxy_domain_client.name + assert gateway_client.name == datasite_peer.name + assert datasite_client.name == proxy_datasite_client.name - assert len(gateway_client.domains) == 1 + assert len(gateway_client.datasites) == 1 assert len(gateway_client.enclaves) == 0 - assert proxy_domain_client.metadata == domain_client.metadata - assert proxy_domain_client.user_role == ServiceRole.NONE + assert proxy_datasite_client.metadata == datasite_client.metadata + assert proxy_datasite_client.user_role == ServiceRole.NONE - domain_client = domain_client.login( + datasite_client = datasite_client.login( email="info@openmined.org", password="changethis" ) - proxy_domain_client = proxy_domain_client.login( + proxy_datasite_client = proxy_datasite_client.login( email="info@openmined.org", password="changethis" ) - assert proxy_domain_client.logged_in_user == "info@openmined.org" - assert proxy_domain_client.user_role == ServiceRole.ADMIN - assert proxy_domain_client.credentials == domain_client.credentials + assert proxy_datasite_client.logged_in_user == "info@openmined.org" + assert proxy_datasite_client.user_role == ServiceRole.ADMIN + assert proxy_datasite_client.credentials == datasite_client.credentials assert ( - proxy_domain_client.api.endpoints.keys() == domain_client.api.endpoints.keys() + proxy_datasite_client.api.endpoints.keys() + == datasite_client.api.endpoints.keys() ) # check priority all_peers = gateway_client.api.services.network.get_all_peers() - assert all_peers[0].node_routes[0].priority == 2 + assert all_peers[0].server_routes[0].priority == 1 -@pytest.mark.local_node -def test_domain_connect_to_gateway_routes_priority(gateway, domain, domain_2) -> None: +@pytest.mark.local_server +def 
test_datasite_connect_to_gateway_routes_priority( + gateway, datasite, datasite_2 +) -> None: """ - A test for routes' priority (PythonNodeRoute) - TODO: Add a similar test for HTTPNodeRoute + A test for routes' priority (PythonServerRoute) """ gateway_client: GatewayClient = gateway.login( email="info@openmined.org", password="changethis", ) - domain_client: DomainClient = domain.login( + datasite_client: DatasiteClient = datasite.login( email="info@openmined.org", password="changethis", ) - result = domain_client.connect_to_gateway(handle=gateway) + result = datasite_client.connect_to_gateway(handle=gateway) assert isinstance(result, SyftSuccess) all_peers = gateway_client.api.services.network.get_all_peers() assert len(all_peers) == 1 - domain_1_routes = all_peers[0].node_routes - assert domain_1_routes[0].priority == 1 + datasite_1_routes = all_peers[0].server_routes + assert datasite_1_routes[0].priority == 1 - # reconnect to the gateway. The route's priority should be increased by 1 - result = domain_client.connect_to_gateway(via_client=gateway_client) + # reconnect to the gateway + result = datasite_client.connect_to_gateway(via_client=gateway_client) assert isinstance(result, SyftSuccess) all_peers = gateway_client.api.services.network.get_all_peers() assert len(all_peers) == 1 - domain_1_routes = all_peers[0].node_routes - assert domain_1_routes[0].priority == 2 + datasite_1_routes = all_peers[0].server_routes + assert datasite_1_routes[0].priority == 1 - # another domain client connects to the gateway - domain_client_2: DomainClient = domain_2.login( + # another datasite client connects to the gateway + datasite_client_2: DatasiteClient = datasite_2.login( email="info@openmined.org", password="changethis", ) - result = domain_client_2.connect_to_gateway(handle=gateway) + result = datasite_client_2.connect_to_gateway(handle=gateway) assert isinstance(result, SyftSuccess) all_peers = gateway_client.api.services.network.get_all_peers() assert len(all_peers) == 2 for peer in all_peers: - if peer.name == domain_client.metadata.name: - assert peer.node_routes[0].priority == 2 - if peer.name == domain_client_2.metadata.name: - assert peer.node_routes[0].priority == 1 + assert peer.server_routes[0].priority == 1 -@pytest.mark.local_node +@pytest.mark.local_server def test_enclave_connect_to_gateway(faker: Faker, gateway, enclave): gateway_client = gateway.client enclave_client: EnclaveClient = enclave.client @@ -194,15 +313,15 @@ def test_enclave_connect_to_gateway(faker: Faker, gateway, enclave): enclave_peer = enclave_client.peers[0] assert isinstance(proxy_enclave_client, EnclaveClient) - assert isinstance(enclave_peer, NodePeer) + assert isinstance(enclave_peer, ServerPeer) assert gateway_client.name == enclave_peer.name assert enclave_client.name == proxy_enclave_client.name - # Domain's peer is a gateway and vice-versa - assert enclave_peer.node_type == NodeType.GATEWAY + # Datasite's peer is a gateway and vice-versa + assert enclave_peer.server_type == ServerType.GATEWAY - assert len(gateway_client.domains) == 0 + assert len(gateway_client.datasites) == 0 assert len(gateway_client.enclaves) == 1 assert proxy_enclave_client.metadata == enclave_client.metadata @@ -228,3 +347,40 @@ def test_enclave_connect_to_gateway(faker: Faker, gateway, enclave): assert ( proxy_enclave_client.api.endpoints.keys() == enclave_client.api.endpoints.keys() ) + + +@pytest.mark.local_server +@pytest.mark.parametrize( + "gateway_association_request_auto_approval", [False], indirect=True +) +def 
test_repeated_association_requests_peers_health_check( + gateway_association_request_auto_approval, datasite +): + _, gateway = gateway_association_request_auto_approval + gateway_client: GatewayClient = gateway.login( + email="info@openmined.org", + password="changethis", + ) + datasite_client: DatasiteClient = datasite.login( + email="info@openmined.org", + password="changethis", + ) + + result = datasite_client.connect_to_gateway(handle=gateway) + assert isinstance(result, Request) + + result = datasite_client.connect_to_gateway(handle=gateway) + assert isinstance(result, Request) + + r = gateway_client.api.services.request.get_all()[-1].approve() + assert isinstance(r, SyftSuccess) + + result = datasite_client.connect_to_gateway(handle=gateway) + assert isinstance(result, SyftSuccess) + + # the gateway client checks that the peer is associated + res = gateway_client.api.services.network.check_peer_association( + peer_id=datasite_client.id + ) + assert isinstance(res, ServerPeerAssociationStatus) + assert res.value == "PEER_ASSOCIATED" diff --git a/tests/integration/local/job_test.py b/tests/integration/local/job_test.py new file mode 100644 index 00000000000..4c56faa516e --- /dev/null +++ b/tests/integration/local/job_test.py @@ -0,0 +1,134 @@ +# stdlib + +# stdlib +from secrets import token_hex + +# third party +import pytest + +# syft absolute +import syft as sy +from syft import syft_function +from syft import syft_function_single_use +from syft.service.job.job_service import wait_until +from syft.service.job.job_stash import JobStatus +from syft.service.response import SyftError +from syft.service.response import SyftSuccess + + +@pytest.mark.local_server +def test_job_restart(job) -> None: + job.wait(timeout=2) + + assert wait_until( + lambda: job.fetched_status == JobStatus.PROCESSING + ), "Job not started" + assert wait_until( + lambda: all( + subjob.fetched_status == JobStatus.PROCESSING for subjob in job.subjobs + ) + ), "Subjobs not started" + + result = job.subjobs[0].restart() + assert isinstance(result, SyftError), "Should not restart subjob" + + result = job.restart() + assert isinstance(result, SyftError), "Should not restart running job" + + result = job.kill() + assert isinstance(result, SyftSuccess), "Should kill job" + assert job.fetched_status == JobStatus.INTERRUPTED + + result = job.restart() + assert isinstance(result, SyftSuccess), "Should restart idle job" + + job.wait(timeout=10) + + assert wait_until( + lambda: job.fetched_status == JobStatus.PROCESSING + ), "Job not restarted" + assert wait_until( + lambda: len( + [ + subjob.fetched_status == JobStatus.PROCESSING + for subjob in job.subjobs + if subjob.fetched_status != JobStatus.INTERRUPTED + ] + ) + == 2 + ), "Subjobs not restarted" + + +@pytest.fixture +def server(): + server = sy.orchestra.launch( + name=token_hex(8), + dev_mode=False, + thread_workers=False, + reset=True, + n_consumers=4, + create_producer=True, + server_side_type=sy.ServerSideType.LOW_SIDE, + ) + try: + yield server + finally: + server.python_server.cleanup() + server.land() + + +@pytest.fixture +def job(server): + client = server.login(email="info@openmined.org", password="changethis") + _ = client.register(name="a", email="aa@b.org", password="c", password_verify="c") + ds_client = server.login(email="aa@b.org", password="c") + + @syft_function() + def process_batch(): + # stdlib + import time + + while time.sleep(1) is None: + ... 
+ + ds_client.code.submit(process_batch) + + @syft_function_single_use() + def process_all(datasite): + # stdlib + import time + + _ = datasite.launch_job(process_batch) + _ = datasite.launch_job(process_batch) + + while time.sleep(1) is None: + ... + + _ = ds_client.code.request_code_execution(process_all) + client.requests[-1].approve(approve_nested=True) + client = server.login(email="info@openmined.org", password="changethis") + job = client.code.process_all(blocking=False) + try: + yield job + finally: + job.kill() + + +@pytest.mark.local_server +def test_job_kill(job) -> None: + job.wait(timeout=2) + assert wait_until( + lambda: job.fetched_status == JobStatus.PROCESSING + ), "Job not started" + assert wait_until( + lambda: all( + subjob.fetched_status == JobStatus.PROCESSING for subjob in job.subjobs + ) + ), "Subjobs not started" + + result = job.subjobs[0].kill() + assert isinstance(result, SyftError), "Should not kill subjob" + + result = job.kill() + assert isinstance(result, SyftSuccess), "Should kill job" + assert job.fetched_status == JobStatus.INTERRUPTED diff --git a/tests/integration/local/request_multiple_nodes_test.py b/tests/integration/local/request_multiple_nodes_test.py index a7bb0643db1..b5443343551 100644 --- a/tests/integration/local/request_multiple_nodes_test.py +++ b/tests/integration/local/request_multiple_nodes_test.py @@ -1,6 +1,5 @@ # stdlib from secrets import token_hex -from textwrap import dedent # third party import numpy as np @@ -8,62 +7,56 @@ # syft absolute import syft as sy -from syft.service.job.job_stash import Job -from syft.service.job.job_stash import JobStatus @pytest.fixture(scope="function") -def node_1(): - node = sy.orchestra.launch( +def server_1(): + server = sy.orchestra.launch( name=token_hex(8), - node_side_type="low", + server_side_type="low", dev_mode=False, reset=True, - local_db=True, create_producer=True, n_consumers=1, - in_memory_workers=True, queue_port=None, ) - yield node - node.python_node.cleanup() - node.land() + yield server + server.python_server.cleanup() + server.land() @pytest.fixture(scope="function") -def node_2(): - node = sy.orchestra.launch( +def server_2(): + server = sy.orchestra.launch( name=token_hex(8), - node_side_type="high", + server_side_type="high", dev_mode=False, reset=True, - local_db=True, create_producer=True, n_consumers=1, - in_memory_workers=True, queue_port=None, ) - yield node - node.python_node.cleanup() - node.land() + yield server + server.python_server.cleanup() + server.land() @pytest.fixture(scope="function") -def client_do_1(node_1): - return node_1.login(email="info@openmined.org", password="changethis") +def client_do_1(server_1): + return server_1.login(email="info@openmined.org", password="changethis") @pytest.fixture(scope="function") -def client_do_2(node_2): - return node_2.login(email="info@openmined.org", password="changethis") +def client_do_2(server_2): + return server_2.login(email="info@openmined.org", password="changethis") @pytest.fixture(scope="function") -def client_ds_1(node_1, client_do_1): +def client_ds_1(server_1, client_do_1): client_do_1.register( name="test_user", email="test@us.er", password="1234", password_verify="1234" ) - return node_1.login(email="test@us.er", password="1234") + return server_1.login(email="test@us.er", password="1234") @pytest.fixture(scope="function") @@ -110,98 +103,3 @@ def dataset_2(client_do_2): client_do_2.upload_dataset(dataset) return client_do_2.datasets[0].assets[0] - - -@pytest.mark.flaky(reruns=3, reruns_delay=3) 
-@pytest.mark.local_node -def test_transfer_request_blocking( - client_ds_1, client_do_1, client_do_2, dataset_1, dataset_2 -): - @sy.syft_function_single_use(data=dataset_1) - def compute_sum(data) -> float: - return data.mean() - - compute_sum.code = dedent(compute_sum.code) - - client_ds_1.code.request_code_execution(compute_sum) - - # Submit + execute on second node - request_1_do = client_do_1.requests[0] - client_do_2.sync_code_from_request(request_1_do) - - # DO executes + syncs - client_do_2._fetch_api(client_do_2.credentials) - result_2 = client_do_2.code.compute_sum(data=dataset_2).get() - assert result_2 == dataset_2.data.mean() - res = request_1_do.accept_by_depositing_result(result_2) - assert isinstance(res, sy.SyftSuccess) - - # DS gets result blocking + nonblocking - result_ds_blocking = client_ds_1.code.compute_sum( - data=dataset_1, blocking=True - ).get() - - job_1_ds = client_ds_1.code.compute_sum(data=dataset_1, blocking=False) - assert isinstance(job_1_ds, Job) - assert job_1_ds == client_ds_1.code.compute_sum.jobs[-1] - assert job_1_ds.status == JobStatus.COMPLETED - - result_ds_nonblocking = job_1_ds.wait().get() - - assert result_ds_blocking == result_ds_nonblocking == dataset_2.data.mean() - - -@pytest.mark.flaky(reruns=3, reruns_delay=3) -@pytest.mark.local_node -def test_transfer_request_nonblocking( - client_ds_1, client_do_1, client_do_2, dataset_1, dataset_2 -): - @sy.syft_function_single_use(data=dataset_1) - def compute_mean(data) -> float: - return data.mean() - - compute_mean.code = dedent(compute_mean.code) - - client_ds_1.code.request_code_execution(compute_mean) - - # Submit + execute on second node - request_1_do = client_do_1.requests[0] - client_do_2.sync_code_from_request(request_1_do) - - client_do_2._fetch_api(client_do_2.credentials) - job_2 = client_do_2.code.compute_mean(data=dataset_2, blocking=False) - assert isinstance(job_2, Job) - - # Transfer back Job Info - job_2_info = job_2.info() - assert job_2_info.result is None - assert job_2_info.status is not None - res = request_1_do.sync_job(job_2_info) - assert isinstance(res, sy.SyftSuccess) - - # DS checks job info - job_1_ds = client_ds_1.code.compute_mean.jobs[-1] - assert job_1_ds.status == job_2.status - - # DO finishes + syncs job result - result = job_2.wait().get() - assert result == dataset_2.data.mean() - assert job_2.status == JobStatus.COMPLETED - - job_2_info_with_result = job_2.info(result=True) - res = request_1_do.accept_by_depositing_result(job_2_info_with_result) - assert isinstance(res, sy.SyftSuccess) - - # DS gets result blocking + nonblocking - result_ds_blocking = client_ds_1.code.compute_mean( - data=dataset_1, blocking=True - ).get() - - job_1_ds = client_ds_1.code.compute_mean(data=dataset_1, blocking=False) - assert isinstance(job_1_ds, Job) - assert job_1_ds == client_ds_1.code.compute_mean.jobs[-1] - assert job_1_ds.status == JobStatus.COMPLETED - - result_ds_nonblocking = job_1_ds.wait().get() - - assert result_ds_blocking == result_ds_nonblocking == dataset_2.data.mean() diff --git a/tests/integration/local/syft_function_test.py b/tests/integration/local/syft_function_test.py index 7ce54697ad0..941285e158a 100644 --- a/tests/integration/local/syft_function_test.py +++ b/tests/integration/local/syft_function_test.py @@ -1,7 +1,6 @@ # stdlib from secrets import token_hex import sys -from textwrap import dedent # third party import pytest @@ -11,48 +10,58 @@ from syft import ActionObject from syft import syft_function from syft import syft_function_single_use 
-from syft.service.response import SyftError +from syft.service.job.job_stash import Job from syft.service.response import SyftSuccess @pytest.fixture -def node(): - _node = sy.orchestra.launch( +def server(): + _server = sy.orchestra.launch( name=token_hex(8), dev_mode=True, reset=True, n_consumers=3, create_producer=True, queue_port=None, - in_memory_workers=True, - local_db=False, ) # startup code here - yield _node + yield _server # Cleanup code - _node.python_node.cleanup() - _node.land() + _server.python_server.cleanup() + _server.land() # @pytest.mark.flaky(reruns=3, reruns_delay=3) @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") -def test_nested_jobs(node): - client = node.login(email="info@openmined.org", password="changethis") +@pytest.mark.local_server +def test_nested_jobs(server): + client = server.login(email="info@openmined.org", password="changethis") - res = client.register(name="a", email="aa@b.org", password="c", password_verify="c") + new_user_email = "aa@b.org" + res = client.register( + name="a", email=new_user_email, password="c", password_verify="c" + ) assert isinstance(res, SyftSuccess) - ds_client = node.login(email="aa@b.org", password="c") + ## Dataset x = ActionObject.from_obj([1, 2]) x_ptr = x.send(client) + search_result = [u for u in client.users.get_all() if u.email == new_user_email] + assert len(search_result) == 1 + + new_ds_user = search_result[0] + new_ds_user.allow_mock_execution() + + # Login as data scientist + ds_client = server.login(email=new_user_email, password="c") + ## aggregate function @sy.syft_function() def aggregate_job(job_results): return sum(job_results) - aggregate_job.code = dedent(aggregate_job.code) res = ds_client.code.submit(aggregate_job) ## Batch function @@ -61,37 +70,35 @@ def process_batch(batch): print(f"starting batch {batch}") return batch + 1 - process_batch.code = dedent(process_batch.code) - res = ds_client.code.submit(process_batch) print(res) ## Main function @syft_function_single_use(x=x_ptr) - def process_all(domain, x): + def process_all(datasite, x): job_results = [] for elem in x: - batch_job = domain.launch_job(process_batch, batch=elem) + batch_job = datasite.launch_job(process_batch, batch=elem) job_results += [batch_job.result] - job = domain.launch_job(aggregate_job, job_results=job_results) + job = datasite.launch_job(aggregate_job, job_results=job_results) return job.result - process_all.code = dedent(process_all.code) assert process_all.worker_pool_name is None # Approve & run res = ds_client.code.request_code_execution(process_all) print(res) - assert not isinstance(res, SyftError) assert ds_client.code[-1].worker_pool_name is not None client.requests[-1].approve(approve_nested=True) job = ds_client.code.process_all(x=x_ptr, blocking=False) - job.wait(timeout=0) + assert isinstance(job, Job) + + job.wait(timeout=5) assert len(job.subjobs) == 3 diff --git a/tests/integration/local/syft_worker_deletion_test.py b/tests/integration/local/syft_worker_deletion_test.py new file mode 100644 index 00000000000..d5f88c22181 --- /dev/null +++ b/tests/integration/local/syft_worker_deletion_test.py @@ -0,0 +1,128 @@ +# stdlib +import operator +import time +from typing import Any + +# third party +import numpy as np +import pytest + +# syft absolute +import syft as sy +from syft.orchestra import ServerHandle +from syft.service.job.job_stash import JobStatus +from syft.service.response import SyftError +from syft.types.errors import SyftException + +# relative +from .conftest import 
matrix
+
+# equivalent to adding this mark to every test in this file
+pytestmark = pytest.mark.local_server
+
+
+SERVER_ARGS_TEST_CASES = {
+    "n_consumers": [1],
+    "dev_mode": [True, False],
+    "thread_workers": [True, False],
+    "create_producer": [True],
+}
+
+
+class FlakyMark(RuntimeError):
+    """To mark a flaky part of a test to use with @pytest.mark.flaky"""
+
+    pass
+
+
+@pytest.mark.flaky(reruns=3, reruns_delay=1, only_rerun=["FlakyMark"])
+@pytest.mark.parametrize(
+    "server_args",
+    matrix(
+        **{**SERVER_ARGS_TEST_CASES, "n_consumers": [3]},
+    ),
+)
+@pytest.mark.parametrize("force", [True, False])
+def test_delete_idle_worker(
+    server: ServerHandle, force: bool, server_args: dict[str, Any]
+) -> None:
+    client = server.login(email="info@openmined.org", password="changethis")
+    original_workers = client.worker.get_all()
+    worker_to_delete = max(original_workers, key=operator.attrgetter("name"))
+
+    client.worker.delete(worker_to_delete.id, force=force)
+
+    if force:
+        assert (
+            len(workers := client.worker.get_all()) == len(original_workers) - 1
+            and all(w.id != worker_to_delete.id for w in workers)
+        ), f"{workers.message=} {server_args=} {[(w.id, w.name) for w in original_workers]}"
+        return
+
+    start = time.time()
+    while True:
+        workers = client.worker.get_all()
+        if isinstance(workers, SyftError):
+            raise FlakyMark(
+                f"`workers = client.worker.get_all()` failed.\n"
+                f"{workers.message=} {server_args=} {[(w.id, w.name) for w in original_workers]}"
+            )
+
+        if len(workers) == len(original_workers) - 1 and all(
+            w.id != worker_to_delete.id for w in workers
+        ):
+            break
+        if time.time() - start > 3:
+            raise TimeoutError("Worker did not get removed from stash.")
+
+
+@pytest.mark.parametrize("server_args", matrix(**SERVER_ARGS_TEST_CASES))
+@pytest.mark.parametrize("force", [True, False])
+def test_delete_worker(server: ServerHandle, force: bool) -> None:
+    client = server.login(email="info@openmined.org", password="changethis")
+    data = np.array([1, 2, 3])
+    data_action_obj = sy.ActionObject.from_obj(data)
+    data_pointer = data_action_obj.send(client)
+
+    @sy.syft_function_single_use(data=data_pointer)
+    def compute_mean(data):
+        # stdlib
+        import time
+
+        time.sleep(1.5)
+        return data.mean()
+
+    client.code.request_code_execution(compute_mean)
+    client.requests[-1].approve()
+
+    job = client.code.compute_mean(data=data_pointer, blocking=False)
+
+    start = time.time()
+    while True:
+        if (syft_worker_id := client.jobs.get_all()[0].job_worker_id) is not None:
+            break
+        if time.time() - start > 5:
+            raise TimeoutError("Job did not get picked up by any worker.")
+
+    client.worker.delete(syft_worker_id, force=force)
+
+    if not force and len(client.worker.get_all()) > 0:
+        assert client.worker.get(syft_worker_id).to_be_deleted
+    job.wait(timeout=30)
+
+    job = client.jobs[0]
+    if force:
+        assert job.status in (JobStatus.COMPLETED, JobStatus.INTERRUPTED)
+    else:
+        assert job.status == JobStatus.COMPLETED
+
+    start = time.time()
+    while True:
+        try:
+            client.worker.get(syft_worker_id)
+        except SyftException:
+            break
+        if time.time() - start > 5:
+            raise TimeoutError("Worker did not get removed from stash.")
+
+    assert len(client.worker.get_all()) == 0
diff --git a/tests/integration/local/twin_api_endpoint_test.py b/tests/integration/local/twin_api_endpoint_test.py
new file mode 100644
index 00000000000..eac918288d2
--- /dev/null
+++ b/tests/integration/local/twin_api_endpoint_test.py
@@ -0,0 +1,203 @@
+# stdlib
+from collections.abc import Callable
+import os
+import sys
+import time
+
+# third party
+from faker import Faker
+import pytest
+
+# syft absolute
+import syft as sy
+from syft.client.datasite_client import DatasiteClient
+from syft.service.api.api import TwinAPIEndpoint
+from syft.service.response import SyftError
+from syft.service.response import SyftSuccess
+
+JOB_TIMEOUT = 20
+
+
+def get_external_registry() -> str:
+    """Get the external registry to use for the worker image."""
+    return os.environ.get("EXTERNAL_REGISTRY", "docker.io")
+
+
+def get_worker_tag() -> str:
+    """Get the worker tag to use for the worker image."""
+    return os.environ.get("PRE_BUILT_WORKER_TAG", f"openmined/backend:{sy.__version__}")
+
+
+def public_function(
+    context,
+) -> str:
+    return "Public Function Execution"
+
+
+def private_function(
+    context,
+) -> str:
+    return "Private Function Execution"
+
+
+def get_twin_api_endpoint(worker_pool_name: str) -> TwinAPIEndpoint:
+    """Get a twin API endpoint with a custom worker pool name."""
+
+    public_func = sy.api_endpoint_method(settings={"Hello": "Public"})(public_function)
+    pvt_func = sy.api_endpoint_method(settings={"Hello": "Private"})(private_function)
+
+    new_endpoint = sy.TwinAPIEndpoint(
+        path="second.query",
+        mock_function=public_func,
+        private_function=pvt_func,
+        description="Lorem ipsum ...",
+        worker_pool_name=worker_pool_name,
+    )
+
+    return new_endpoint
+
+
+faker = Faker()
+
+
+def get_ds_client(client: DatasiteClient) -> DatasiteClient:
+    """Get a datasite client with a registered user."""
+    pwd = faker.password()
+    email = faker.email()
+    client.register(
+        name=faker.name(),
+        email=email,
+        password=pwd,
+        password_verify=pwd,
+    )
+    return client.login(email=email, password=pwd)
+
+
+def get_syft_function(worker_pool_name: str, endpoint: Callable) -> Callable:
+    @sy.syft_function_single_use(endpoint=endpoint, worker_pool_name=worker_pool_name)
+    def job_function(endpoint):
+        return endpoint()
+
+    return job_function
+
+
+def submit_project(ds_client: DatasiteClient, syft_function: Callable):
+    # Create a new project
+    new_project = sy.Project(
+        name=f"Project - {faker.text(max_nb_chars=20)}",
+        description="Hi, I want to calculate the trade volume in millions with my cool code.",
+        members=[ds_client],
+    )
+
+    result = new_project.create_code_request(syft_function, ds_client)
+    assert isinstance(result, SyftSuccess)
+
+
+@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
+# @pytest.mark.local_server
+def test_twin_api_with_custom_worker(full_high_worker):
+    high_client = full_high_worker.login(
+        email="info@openmined.org", password="changethis"
+    )
+
+    worker_pool_name = "custom-worker-pool"
+
+    external_registry = get_external_registry()
+    worker_docker_tag = get_worker_tag()
+
+    # Create pre-built worker image
+    docker_config = sy.PrebuiltWorkerConfig(
+        tag=f"{external_registry}/{worker_docker_tag}"
+    )
+
+    # Submit the worker image
+    submit_result = high_client.api.services.worker_image.submit(
+        worker_config=docker_config
+    )
+
+    # Check if the submission was successful
+    assert not isinstance(submit_result, SyftError), submit_result
+
+    # Get the worker image
+    worker_image = high_client.images.get_all()[-1]
+
+    launch_result = high_client.api.services.worker_pool.launch(
+        pool_name=worker_pool_name,
+        image_uid=worker_image.id,
+        num_workers=2,
+    )
+
+    # Check if the worker pool was launched successfully
+    assert not isinstance(launch_result, SyftError), launch_result
+
+    # Add the twin API endpoint
+    twin_api_endpoint = get_twin_api_endpoint(worker_pool_name)
+
twin_endpoint_result = high_client.api.services.api.add(endpoint=twin_api_endpoint) + + # Check if the twin API endpoint was added successfully + assert isinstance(twin_endpoint_result, SyftSuccess) + + # validate the number of endpoints + assert len(high_client.api.services.api.api_endpoints()) == 1 + + # refresh the client + high_client.refresh() + + # Get datasite client + high_client_ds = get_ds_client(high_client) + + # Execute the public endpoint + mock_endpoint_result = high_client_ds.api.services.second.query() + assert mock_endpoint_result == "Public Function Execution" + + # Get the syft function + custom_function = get_syft_function( + worker_pool_name, high_client_ds.api.services.second.query + ) + + # Submit the project + submit_project(high_client_ds, custom_function) + + ds_email = high_client_ds.logged_in_user + + # Approve the request + for r in high_client.requests.get_all(): + if r.requesting_user_email == ds_email: + r.approve() + + private_func_result_job = high_client_ds.code.job_function( + endpoint=high_client_ds.api.services.second.query, blocking=False + ) + + # Wait for the job to complete + job_start_time = time.time() + while True: + # Check if the job is resolved + _ = private_func_result_job.resolved + + if private_func_result_job.resolve: + break + + # Check if the job is timed out + if time.time() - job_start_time > JOB_TIMEOUT: + raise TimeoutError(f"Job did not complete in given time: {JOB_TIMEOUT}") + time.sleep(1) + + # Check if the job worker is the same as the worker pool name + private_func_job = high_client_ds.jobs.get(private_func_result_job.id) + + assert private_func_job is not None + + # Check if job is assigned to a worker + assert private_func_job.job_worker_id is not None + + # Check if the job worker is the same as the worker pool name + assert private_func_job.worker.worker_pool_name == worker_pool_name + + # Check if the job was successful + assert private_func_result_job.resolved + private_func_result = private_func_result_job.result + + assert not isinstance(private_func_result, SyftError), private_func_result + + assert private_func_result.get() == "Private Function Execution" diff --git a/tests/integration/local/twin_api_sync_test.py b/tests/integration/local/twin_api_sync_test.py new file mode 100644 index 00000000000..f8c137e5967 --- /dev/null +++ b/tests/integration/local/twin_api_sync_test.py @@ -0,0 +1,186 @@ +# stdlib +import sys + +# third party +import pytest + +# syft absolute +import syft +import syft as sy +from syft.client.datasite_client import DatasiteClient +from syft.client.syncing import compare_clients +from syft.client.syncing import resolve +from syft.service.job.job_stash import JobStatus +from syft.service.response import SyftError +from syft.service.response import SyftSuccess +from syft.types.errors import SyftException + + +def compare_and_resolve(*, from_client: DatasiteClient, to_client: DatasiteClient): + diff_state_before = compare_clients(from_client, to_client) + for obj_diff_batch in diff_state_before.batches: + widget = resolve(obj_diff_batch) + widget.click_share_all_private_data() + res = widget.click_sync() + assert isinstance(res, SyftSuccess) + from_client.refresh() + to_client.refresh() + diff_state_after = compare_clients(from_client, to_client) + return diff_state_before, diff_state_after + + +def run_and_accept_result(client): + job_high = client.code.compute(blocking=True) + client.requests[0].accept_by_depositing_result(job_high) + return job_high + + +def get_ds_client(client: 
DatasiteClient) -> DatasiteClient: + client.register( + name="a", + email="a@a.com", + password="asdf", + password_verify="asdf", + ) + return client.login(email="a@a.com", password="asdf") + + +@sy.api_endpoint_method() +def mock_function(context) -> str: + return -42 + + +@sy.api_endpoint_method() +def private_function(context) -> str: + return 42 + + +@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") +@pytest.mark.local_server +def test_twin_api_integration(full_high_worker, full_low_worker): + low_client = full_low_worker.login( + email="info@openmined.org", password="changethis" + ) + high_client = full_high_worker.login( + email="info@openmined.org", password="changethis" + ) + client_low_ds = get_ds_client(low_client) + + new_endpoint = sy.TwinAPIEndpoint( + path="testapi.query", + private_function=private_function, + mock_function=mock_function, + description="", + ) + high_client.api.services.api.add(endpoint=new_endpoint) + high_client.refresh() + high_private_result = high_client.api.services.testapi.query.private() + + job = high_client.api.services.job.get_all()[0] + private_job_id = job.id + + diff_before, diff_after = compare_and_resolve( + from_client=high_client, to_client=low_client + ) + assert not diff_before.is_same + assert diff_after.is_same + + client_low_ds.refresh() + + @syft.syft_function_single_use( + query=client_low_ds.api.services.testapi.query, + ) + def compute(query): + return query() + + _ = client_low_ds.code.request_code_execution(compute) + + diff_before, diff_after = compare_and_resolve( + from_client=low_client, to_client=high_client + ) + + job_high = high_client.code.compute(query=high_client.api.services.testapi.query) + high_client.requests[0].deposit_result(job_high) + diff_before, diff_after = compare_and_resolve( + from_client=high_client, to_client=low_client + ) + client_low_ds.refresh() + res = client_low_ds.code.compute(query=client_low_ds.api.services.testapi.query) + assert res.syft_action_data == high_private_result + assert diff_after.is_same + + # verify that ds cannot access private job + with pytest.raises(SyftException): + assert client_low_ds.api.services.job.get(private_job_id) is None + with pytest.raises(SyftException): + assert low_client.api.services.job.get(private_job_id) is None + + # we only sync the mock function, we never sync the private function to the low side + mock_res = low_client.api.services.testapi.query.mock() + assert mock_res == -42 + + with pytest.raises(SyftException): + low_client.api.services.testapi.query.private() + # verify updating twin api endpoint works + + timeout_before = ( + full_low_worker.python_server.services.api.stash.get_all( + credentials=full_low_worker.client.credentials, has_permission=True + ) + .ok()[0] + .endpoint_timeout + ) + expected_timeout_after = timeout_before + 1 + + high_client.custom_api.update( + endpoint_path="testapi.query", endpoint_timeout=expected_timeout_after + ) + widget = sy.sync(from_client=high_client, to_client=low_client) + widget._sync_all() + + timeout_after = ( + full_low_worker.python_server.services.api.stash.get_all( + credentials=full_low_worker.client.credentials, has_permission=True + ) + .ok()[0] + .endpoint_timeout + ) + assert ( + timeout_after == expected_timeout_after + ), "Timeout should be updated on low side." 
+ + +@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") +@pytest.mark.local_server +def test_function_error(full_low_worker) -> None: + root_datasite_client = full_low_worker.login( + email="info@openmined.org", password="changethis" + ) + root_datasite_client.register( + name="data-scientist", + email="test_user@openmined.org", + password="0000", + password_verify="0000", + ) + ds_client = root_datasite_client.login( + email="test_user@openmined.org", + password="0000", + ) + + users = root_datasite_client.users.get_all() + + @sy.syft_function_single_use() + def compute_sum(): + raise RuntimeError + + ds_client.api.services.code.request_code_execution(compute_sum) + + users[-1].allow_mock_execution() + with pytest.raises(SyftException): + result = ds_client.api.services.code.compute_sum(blocking=True) + + job_info = ds_client.api.services.code.compute_sum(blocking=False) + result = job_info.wait(timeout=10) + # TODO: we should split out SyftError in a different property for Jobs + assert isinstance(result, SyftError) + assert job_info.status == JobStatus.ERRORED diff --git a/tests/integration/local/user_update_test.py b/tests/integration/local/user_update_test.py new file mode 100644 index 00000000000..3772a05b5ac --- /dev/null +++ b/tests/integration/local/user_update_test.py @@ -0,0 +1,59 @@ +# stdlib +from typing import TypedDict +from uuid import uuid4 + +# third party +import pytest + +# syft absolute +from syft.orchestra import ClientAlias +from syft.service.response import SyftError +from syft.service.user.user_roles import ServiceRole + +# relative +from .conftest import matrix + +pytestmark = pytest.mark.local_server + + +class UserCreateArgs(TypedDict): + name: str + email: str + password: str + password_verify: str + + +@pytest.fixture +def user_create_args() -> UserCreateArgs: + return { + "name": uuid4().hex, + "email": f"{uuid4().hex}@example.org", + "password": (pw := uuid4().hex), + "password_verify": pw, + } + + +@pytest.fixture +def user(client: ClientAlias, user_create_args: UserCreateArgs) -> ClientAlias: + res = client.register(**user_create_args) + assert not isinstance(res, SyftError) + + return client.login( + email=user_create_args["email"], + password=user_create_args["password"], + ) + + +@pytest.mark.parametrize("server_args", matrix(port=[None, "auto"])) +def test_user_update_role_str(client: ClientAlias, user: ClientAlias) -> None: + res = client.users.update(uid=user.account.id, role="admin") + assert not isinstance(res, SyftError) + + user.refresh() + assert user.account.role is ServiceRole.ADMIN + + res = user.account.update(role="data_scientist") + assert not isinstance(res, SyftError) + + user.refresh() + assert user.account.role is ServiceRole.DATA_SCIENTIST diff --git a/tests/integration/network/client_test.py b/tests/integration/network/client_test.py index 018d66eab90..113ee79ea8d 100644 --- a/tests/integration/network/client_test.py +++ b/tests/integration/network/client_test.py @@ -3,19 +3,19 @@ # syft absolute import syft as sy -from syft.client.domain_client import DomainClient +from syft.client.datasite_client import DatasiteClient from syft.client.gateway_client import GatewayClient -DOMAIN_PORT = 9082 +DATASITE_PORT = 9082 NETWORK_PORT = 9081 @pytest.mark.parametrize( - "node_metadata", [(NETWORK_PORT, GatewayClient), (DOMAIN_PORT, DomainClient)] + "server_metadata", [(NETWORK_PORT, GatewayClient), (DATASITE_PORT, DatasiteClient)] ) @pytest.mark.network -def test_client_type(node_metadata): - port, client_type = 
node_metadata +def test_client_type(server_metadata): + port, client_type = server_metadata client = sy.login(port=port, email="info@openmined.org", password="changethis") assert isinstance(client, client_type) diff --git a/tests/integration/network/gateway_test.py b/tests/integration/network/gateway_test.py index 182a7e65344..15313d7934c 100644 --- a/tests/integration/network/gateway_test.py +++ b/tests/integration/network/gateway_test.py @@ -1,128 +1,968 @@ # stdlib -from textwrap import dedent +import itertools +import os +import time import uuid # third party import numpy as np +import pytest # syft absolute import syft as sy -from syft.abstract_node import NodeType -from syft.client.domain_client import DomainClient +from syft.abstract_server import ServerType +from syft.client.client import HTTPConnection +from syft.client.client import SyftClient +from syft.client.datasite_client import DatasiteClient from syft.client.gateway_client import GatewayClient -from syft.service.network.node_peer import NodePeer +from syft.client.registry import NetworkRegistry +from syft.service.network.association_request import AssociationRequestChange +from syft.service.network.network_service import ServerPeerAssociationStatus +from syft.service.network.routes import HTTPServerRoute +from syft.service.network.routes import ServerRouteType +from syft.service.network.server_peer import ServerPeer +from syft.service.network.server_peer import ServerPeerConnectionStatus +from syft.service.network.utils import PeerHealthCheckTask from syft.service.request.request import Request +from syft.service.response import SyftError from syft.service.response import SyftSuccess from syft.service.user.user_roles import ServiceRole -def test_domain_connect_to_gateway(domain_1_port, gateway_port): - gateway_client: GatewayClient = sy.login_as_guest(port=gateway_port) +@pytest.fixture(scope="function") +def set_env_var( + gateway_port: int, + gateway_server: str = "testgateway1", + host_or_ip: str = "localhost", + protocol: str = "http", +): + """Set the environment variable for the network registry JSON string.""" + json_string = f""" + {{ + "2.0.0": {{ + "gateways": [ + {{ + "name": "{gateway_server}", + "host_or_ip": "{host_or_ip}", + "protocol": "{protocol}", + "port": {gateway_port}, + "admin_email": "support@openmined.org", + "website": "https://www.openmined.org/", + "slack": "https://slack.openmined.org/", + "slack_channel": "#support" + }} + ] + }} + }} + """ + os.environ["NETWORK_REGISTRY_JSON"] = json_string + yield + # Clean up the environment variable after all tests in the module have run + del os.environ["NETWORK_REGISTRY_JSON"] + + +def _random_hash() -> str: + return uuid.uuid4().hex[:16] + + +def _remove_existing_peers(client: SyftClient) -> SyftSuccess | SyftError: + peers: list[ServerPeer] | SyftError = client.api.services.network.get_all_peers() + for peer in peers: + client.api.services.network.delete_peer_by_id(peer.id) + return SyftSuccess(message="All peers removed.") + + +@pytest.mark.skip(reason="Will be tested when the network registry URL works.") +def test_network_registry_from_url() -> None: + assert isinstance(sy.gateways, NetworkRegistry) + assert len(sy.gateways.all_networks) == len(sy.gateways.online_networks) == 1 + - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" +@pytest.mark.network +def test_network_registry_env_var(set_env_var) -> None: + assert isinstance(sy.gateways, NetworkRegistry) + assert 
len(sy.gateways.all_networks) == len(sy.gateways.online_networks) == 1 + assert isinstance(sy.gateways[0], GatewayClient) + assert isinstance(sy.gateways[0].connection, HTTPConnection) + + +@pytest.mark.network +def test_datasite_connect_to_gateway( + set_env_var, datasite_1_port: int, gateway_port: int +) -> None: + # check if we can see the online gateways + assert isinstance(sy.gateways, NetworkRegistry) + assert len(sy.gateways.all_networks) == len(sy.gateways.online_networks) == 1 + + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) - result = domain_client.connect_to_gateway(gateway_client) - assert isinstance(result, SyftSuccess) + # Try removing existing peers just to make sure + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + # Disable automatic acceptance of association requests + res = gateway_client.settings.allow_association_request_auto_approval(enable=False) + assert isinstance(res, SyftSuccess) + + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, Request) + assert isinstance(result.changes[0], AssociationRequestChange) + + assert len(datasite_client.peers) == 1 + assert len(gateway_client.peers) == 0 + + gateway_client_root = gateway_client.login( + email="info@openmined.org", password="changethis" + ) + res = gateway_client_root.api.services.request.get_all()[-1].approve() - assert len(domain_client.peers) == 1 assert len(gateway_client.peers) == 1 - proxy_domain_client = gateway_client.peers[0] - domain_peer = domain_client.peers[0] + time.sleep(PeerHealthCheckTask.repeat_time * 2 + 1) + + proxy_datasite_client = gateway_client.peers[0] + datasite_peer = datasite_client.peers[0] - assert isinstance(proxy_domain_client, DomainClient) - assert isinstance(domain_peer, NodePeer) + assert isinstance(proxy_datasite_client, DatasiteClient) + assert isinstance(datasite_peer, ServerPeer) - # Domain's peer is a gateway and vice-versa - assert domain_peer.node_type == NodeType.GATEWAY + # Datasite's peer is a gateway and vice-versa + assert datasite_peer.server_type == ServerType.GATEWAY - assert gateway_client.name == domain_peer.name - assert domain_client.name == proxy_domain_client.name + assert gateway_client.name == datasite_peer.name + assert datasite_client.name == proxy_datasite_client.name - assert len(gateway_client.domains) == 1 + assert len(gateway_client.datasites) == 1 assert len(gateway_client.enclaves) == 0 - assert proxy_domain_client.metadata == domain_client.metadata - assert proxy_domain_client.user_role == ServiceRole.NONE + assert proxy_datasite_client.metadata == datasite_client.metadata + assert proxy_datasite_client.user_role == ServiceRole.NONE - domain_client = domain_client.login( + datasite_client = datasite_client.login( email="info@openmined.org", password="changethis" ) - proxy_domain_client = proxy_domain_client.login( + proxy_datasite_client = proxy_datasite_client.login( email="info@openmined.org", password="changethis" ) - assert proxy_domain_client.logged_in_user == "info@openmined.org" - assert proxy_domain_client.user_role == ServiceRole.ADMIN - assert proxy_domain_client.credentials == domain_client.credentials + assert proxy_datasite_client.logged_in_user == "info@openmined.org" + assert 
proxy_datasite_client.user_role == ServiceRole.ADMIN + assert proxy_datasite_client.credentials == datasite_client.credentials assert ( - proxy_domain_client.api.endpoints.keys() == domain_client.api.endpoints.keys() + proxy_datasite_client.api.endpoints.keys() + == datasite_client.api.endpoints.keys() ) + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.network +@pytest.mark.skip(reason="Disabled since the dataset search functionality was removed") +def test_dataset_search(set_env_var, gateway_port: int, datasite_1_port: int) -> None: + """ + Scenario: Connecting a datasite server to a gateway server. The datasite + client then upload a dataset, which should be searchable by the syft network. + People who install syft can see the mock data and metadata of the uploaded datasets + """ + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) -def random_hash() -> str: - return uuid.uuid4().hex[:16] - + # Try removing existing peers just to make sure + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) -def test_domain_gateway_user_code(domain_1_port, gateway_port): - gateway_client: GatewayClient = sy.login_as_guest(port=gateway_port) + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" - ) + # connect the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, SyftSuccess) + # the datasite client uploads a dataset input_data = np.array([1, 2, 3]) mock_data = np.array([4, 5, 6]) - - asset_name = random_hash() + asset_name = _random_hash() asset = sy.Asset(name=asset_name, data=input_data, mock=mock_data) - dataset_name = random_hash() + dataset_name = _random_hash() dataset = sy.Dataset(name=dataset_name, asset_list=[asset]) + dataset_res = datasite_client.upload_dataset(dataset) + assert isinstance(dataset_res, SyftSuccess) + + # since dataset search is done by checking from the online datasites, + # we need to wait to make sure peers health check is done + # time.sleep(PeerHealthCheckTask.repeat_time * 2 + 1) + # test if the dataset can be searched by the syft network + # right_search = sy.search(dataset_name) + # assert isinstance(right_search, SearchResults) + # assert len(right_search) == 1 + # dataset = right_search[0] + # assert isinstance(dataset, Dataset) + # assert len(dataset.assets) == 1 + # assert isinstance(dataset.assets[0].mock, np.ndarray) + # assert dataset.assets[0].data is None + + # # search a wrong dataset should return an empty list + # wrong_search = sy.search(_random_hash()) + # assert len(wrong_search) == 0 + + # # the datasite client delete the dataset + # datasite_client.api.services.dataset.delete(uid=dataset.id) + + # # Remove existing peers + # assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + # assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.skip(reason="Possible bug") +@pytest.mark.network +def test_datasite_gateway_user_code( + set_env_var, datasite_1_port: int, 
gateway_port: int +) -> None: + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) - dataset_res = domain_client.upload_dataset(dataset) + # Try removing existing peers just to make sure + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + # the datasite client uploads a dataset + input_data = np.array([1, 2, 3]) + mock_data = np.array([4, 5, 6]) + asset_name = _random_hash() + asset = sy.Asset(name=asset_name, data=input_data, mock=mock_data) + dataset_name = _random_hash() + dataset = sy.Dataset(name=dataset_name, asset_list=[asset]) + dataset_res = datasite_client.upload_dataset(dataset) assert isinstance(dataset_res, SyftSuccess) - user_create_res = domain_client.register( - name="Sheldon Cooper", - email="sheldon@caltech.edu", + # the datasite client registers a data data scientist account on its datasite + random_name: str = str(_random_hash()) + user_create_res = datasite_client.register( + name=random_name, + email=f"{random_name}@caltech.edu", password="changethis", password_verify="changethis", institution="Caltech", website="https://www.caltech.edu/", ) - assert isinstance(user_create_res, SyftSuccess) - gateway_con_res = domain_client.connect_to_gateway(gateway_client) - assert isinstance(gateway_con_res, SyftSuccess) + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) - proxy_client = gateway_client.domains[0] + # the datasite client connects to the gateway + gateway_con_res = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(gateway_con_res, SyftSuccess) + # get the proxy client to the datasite, login to the data scientist account + proxy_client = gateway_client.datasites[0] proxy_ds = proxy_client.login( - email="sheldon@caltech.edu", password="changethis", password_verify="changethis" + email=f"{random_name}@caltech.edu", + password="changethis", + password_verify="changethis", ) + # submits a request for code execution asset = proxy_ds.datasets[0].assets[0] @sy.syft_function_single_use(asset=asset) def mock_function(asset): return asset + 1 - mock_function.code = dedent(mock_function.code) - request_res = proxy_ds.code.request_code_execution(mock_function) assert isinstance(request_res, Request) - assert len(domain_client.requests.get_all()) == 1 - - req_approve_res = domain_client.requests[-1].approve() + # datasite client approves the request + assert len(datasite_client.requests.get_all()) == 1 + req_approve_res = datasite_client.requests[-1].approve() assert isinstance(req_approve_res, SyftSuccess) + # the proxy data scientist client executes the code and gets the result result = proxy_ds.code.mock_function(asset=asset) - final_result = result.get() - assert (final_result == input_data + 1).all() + + # the datasite client delete the dataset + datasite_client.api.services.dataset.delete(uid=dataset.id) + + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.network +def test_deleting_peers(set_env_var, datasite_1_port: int, gateway_port: int) -> None: + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", 
password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + + # clean up before test + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + # Enable automatic acceptance of association requests + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) + + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, SyftSuccess) + assert len(datasite_client.peers) == 1 + assert len(gateway_client.peers) == 1 + + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + # check that removing peers work as expected + assert len(datasite_client.peers) == 0 + assert len(gateway_client.peers) == 0 + + # reconnect the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, SyftSuccess) + assert len(datasite_client.peers) == 1 + assert len(gateway_client.peers) == 1 + + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + # check that removing peers work as expected + assert len(datasite_client.peers) == 0 + assert len(gateway_client.peers) == 0 + + +@pytest.mark.network +def test_add_route(set_env_var, gateway_port: int, datasite_1_port: int) -> None: + """ + Test the network service's `add_route` functionalities to add routes directly + for a self datasite. + Scenario: Connect a datasite to a gateway. The gateway adds 2 new routes to the datasite + and check their priorities get updated. + Check for the gateway if the proxy client to connect to the datasite uses the + route with the highest priority. 
+ """ + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + + # Try removing existing peers just to make sure + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + # Enable automatic acceptance of association requests + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) + + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, SyftSuccess) + assert len(datasite_client.peers) == 1 + assert len(gateway_client.peers) == 1 + + # add a new route to connect to the datasite + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] + res = gateway_client.api.services.network.add_route( + peer_verify_key=datasite_peer.verify_key, route=new_route + ) + assert isinstance(res, SyftSuccess) + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert len(datasite_peer.server_routes) == 2 + assert datasite_peer.server_routes[-1].port == new_route.port + + # adding another route to the datasite + new_route2 = HTTPServerRoute(host_or_ip="localhost", port=10001) + res = gateway_client.api.services.network.add_route( + peer_verify_key=datasite_peer.verify_key, route=new_route2 + ) + assert isinstance(res, SyftSuccess) + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert len(datasite_peer.server_routes) == 3 + assert datasite_peer.server_routes[-1].port == new_route2.port + assert datasite_peer.server_routes[-1].priority == 3 + + # add an existed route to the datasite. Its priority should not be updated + res = gateway_client.api.services.network.add_route( + peer_verify_key=datasite_peer.verify_key, route=datasite_peer.server_routes[0] + ) + assert "route already exists" in res.message + assert isinstance(res, SyftSuccess) + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert len(datasite_peer.server_routes) == 3 + assert datasite_peer.server_routes[0].priority == 1 + + # getting the proxy client using the current highest priority route should + # be successful since now we pick the oldest route (port 9082 with priority 1) + # to have the highest priority by default + proxy_datasite_client = gateway_client.peers[0] + assert isinstance(proxy_datasite_client, DatasiteClient) + + # the routes the datasite client uses to connect to the gateway should stay the same + gateway_peer: ServerPeer = datasite_client.peers[0] + assert len(gateway_peer.server_routes) == 1 + + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.network +def test_delete_route(set_env_var, gateway_port: int, datasite_1_port: int) -> None: + """ + Scenario: + Connect a datasite to a gateway. The gateway adds a new route to the datasite + and then deletes it. 
+ """ + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + + # Try removing existing peers just to make sure + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + # Enable automatic acceptance of association requests + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) + + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, SyftSuccess) + assert len(datasite_client.peers) == 1 + assert len(gateway_client.peers) == 1 + + # add a new route to connect to the datasite + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] + res = gateway_client.api.services.network.add_route( + peer_verify_key=datasite_peer.verify_key, route=new_route + ) + assert isinstance(res, SyftSuccess) + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert len(datasite_peer.server_routes) == 2 + assert datasite_peer.server_routes[-1].port == new_route.port + + # delete the added route + res = gateway_client.api.services.network.delete_route( + peer_verify_key=datasite_peer.verify_key, route=new_route + ) + assert isinstance(res, SyftSuccess) + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert len(datasite_peer.server_routes) == 1 + assert datasite_peer.server_routes[-1].port == datasite_1_port + + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.network +def test_add_route_on_peer( + set_env_var, gateway_port: int, datasite_1_port: int +) -> None: + """ + Test the `add_route_on_peer` of network service. + Connect a datasite to a gateway. + The gateway adds 2 new routes for itself remotely on the datasite and check their priorities. + Then the datasite adds a route to itself for the gateway. 
+ """ + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + + # Remove existing peers + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + # Enable automatic acceptance of association requests + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) + + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, SyftSuccess) + assert len(datasite_client.peers) == 1 + assert len(gateway_client.peers) == 1 + gateway_peer: ServerPeer = datasite_client.peers[0] + assert len(gateway_peer.server_routes) == 1 + assert gateway_peer.server_routes[-1].priority == 1 + + # adding a new route for the datasite + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] + res = gateway_client.api.services.network.add_route_on_peer( + peer=datasite_peer, route=new_route + ) + assert isinstance(res, SyftSuccess) + gateway_peer = datasite_client.api.services.network.get_all_peers()[0] + assert len(gateway_peer.server_routes) == 2 + assert gateway_peer.server_routes[-1].port == new_route.port + assert gateway_peer.server_routes[-1].priority == 2 + + # adding another route for the datasite + new_route2 = HTTPServerRoute(host_or_ip="localhost", port=10001) + res = gateway_client.api.services.network.add_route_on_peer( + peer=datasite_peer, route=new_route2 + ) + assert isinstance(res, SyftSuccess) + gateway_peer = datasite_client.api.services.network.get_all_peers()[0] + assert len(gateway_peer.server_routes) == 3 + assert gateway_peer.server_routes[-1].port == new_route2.port + assert gateway_peer.server_routes[-1].priority == 3 + + # the datasite calls `add_route_on_peer` to to add a route to itself for the gateway + assert len(datasite_peer.server_routes) == 1 + res = datasite_client.api.services.network.add_route_on_peer( + peer=datasite_client.peers[0], route=new_route + ) + assert isinstance(res, SyftSuccess) + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert datasite_peer.server_routes[-1].port == new_route.port + assert len(datasite_peer.server_routes) == 2 + + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.network +@pytest.mark.flaky(reruns=2, reruns_delay=2) +def test_delete_route_on_peer( + set_env_var, gateway_port: int, datasite_1_port: int +) -> None: + """ + Connect a datasite to a gateway, the gateway adds 2 new routes for the datasite + , then delete them. 
+ """ + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + + # Remove existing peers + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + # Enable automatic acceptance of association requests + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) + + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, SyftSuccess) + + # gateway adds 2 new routes for the datasite + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + new_route2 = HTTPServerRoute(host_or_ip="localhost", port=10001) + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] + res = gateway_client.api.services.network.add_route_on_peer( + peer=datasite_peer, route=new_route + ) + assert isinstance(res, SyftSuccess) + res = gateway_client.api.services.network.add_route_on_peer( + peer=datasite_peer, route=new_route2 + ) + assert isinstance(res, SyftSuccess) + + gateway_peer: ServerPeer = datasite_client.peers[0] + assert len(gateway_peer.server_routes) == 3 + + # gateway delete the routes for the datasite + res = gateway_client.api.services.network.delete_route_on_peer( + peer=datasite_peer, route=new_route + ) + assert isinstance(res, SyftSuccess) + gateway_peer = datasite_client.peers[0] + assert len(gateway_peer.server_routes) == 2 + + res = gateway_client.api.services.network.delete_route_on_peer( + peer=datasite_peer, route=new_route2 + ) + assert isinstance(res, SyftSuccess) + gateway_peer = datasite_client.peers[0] + assert len(gateway_peer.server_routes) == 1 + + # gateway deletes the last the route to it for the datasite + last_route: ServerRouteType = gateway_peer.server_routes[0] + res = gateway_client.api.services.network.delete_route_on_peer( + peer=datasite_peer, route=last_route + ) + assert isinstance(res, SyftSuccess) + assert "There is no routes left" in res.message + assert ( + len(datasite_client.peers) == 0 + ) # gateway is no longer a peer of the datasite + + # The gateway client also removes the datasite as a peer + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.network +def test_update_route_priority( + set_env_var, gateway_port: int, datasite_1_port: int +) -> None: + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + + # Try remove existing peers + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + # Enable automatic acceptance of association requests + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) + + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, SyftSuccess) + + # gateway adds 2 new routes to the datasite + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + new_route2 = HTTPServerRoute(host_or_ip="localhost", port=10001) + datasite_peer: ServerPeer = 
gateway_client.api.services.network.get_all_peers()[0] + res = gateway_client.api.services.network.add_route( + peer_verify_key=datasite_peer.verify_key, route=new_route + ) + assert isinstance(res, SyftSuccess) + res = gateway_client.api.services.network.add_route( + peer_verify_key=datasite_peer.verify_key, route=new_route2 + ) + assert isinstance(res, SyftSuccess) + + # check if the priorities of the routes are correct + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + routes_port_priority: dict = { + route.port: route.priority for route in datasite_peer.server_routes + } + assert routes_port_priority[datasite_1_port] == 1 + assert routes_port_priority[new_route.port] == 2 + assert routes_port_priority[new_route2.port] == 3 + + # update the priorities for the routes + res = gateway_client.api.services.network.update_route_priority( + peer_verify_key=datasite_peer.verify_key, route=new_route, priority=5 + ) + assert isinstance(res, SyftSuccess) + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + routes_port_priority: dict = { + route.port: route.priority for route in datasite_peer.server_routes + } + assert routes_port_priority[new_route.port] == 5 + + # if we don't specify `priority`, the route will be automatically updated + # to have the biggest priority value among all routes + res = gateway_client.api.services.network.update_route_priority( + peer_verify_key=datasite_peer.verify_key, route=new_route2 + ) + assert isinstance(res, SyftSuccess) + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + routes_port_priority: dict = { + route.port: route.priority for route in datasite_peer.server_routes + } + assert routes_port_priority[new_route2.port] == 6 + + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.network +def test_update_route_priority_on_peer( + set_env_var, gateway_port: int, datasite_1_port: int +) -> None: + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + + # Remove existing peers + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + # Enable automatic acceptance of association requests + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) + + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, SyftSuccess) + + # gateway adds 2 new routes to itself remotely on the datasite server + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + res = gateway_client.api.services.network.add_route_on_peer( + peer=datasite_peer, route=new_route + ) + assert isinstance(res, SyftSuccess) + + new_route2 = HTTPServerRoute(host_or_ip="localhost", port=10001) + res = gateway_client.api.services.network.add_route_on_peer( + peer=datasite_peer, route=new_route2 + ) + assert isinstance(res, SyftSuccess) + + # check if the priorities of the routes are correct + gateway_peer = datasite_client.api.services.network.get_all_peers()[0] + routes_port_priority: dict = { + route.port: route.priority 
for route in gateway_peer.server_routes + } + assert routes_port_priority[gateway_port] == 1 + assert routes_port_priority[new_route.port] == 2 + assert routes_port_priority[new_route2.port] == 3 + + # gateway updates the route priorities for the datasite remotely + res = gateway_client.api.services.network.update_route_priority_on_peer( + peer=datasite_peer, route=new_route, priority=5 + ) + assert isinstance(res, SyftSuccess) + res = gateway_client.api.services.network.update_route_priority_on_peer( + peer=datasite_peer, route=new_route2 + ) + assert isinstance(res, SyftSuccess) + + gateway_peer = datasite_client.api.services.network.get_all_peers()[0] + routes_port_priority: dict = { + route.port: route.priority for route in gateway_peer.server_routes + } + assert routes_port_priority[new_route.port] == 5 + assert routes_port_priority[new_route2.port] == 6 + + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.network +def test_dataset_stream(set_env_var, gateway_port: int, datasite_1_port: int) -> None: + """ + Scenario: Connecting a datasite server to a gateway server. The datasite + client then upload a dataset, which should be searchable by the syft network. + People who install syft can see the mock data and metadata of the uploaded datasets + """ + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + + # Remove existing peers just to make sure + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + res = gateway_client.settings.allow_association_request_auto_approval(enable=True) + assert isinstance(res, SyftSuccess) + + # connect the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, SyftSuccess) + + # the datasite client uploads a dataset + input_data = np.array([1, 2, 3]) + mock_data = np.array([4, 5, 6]) + asset_name = _random_hash() + asset = sy.Asset(name=asset_name, data=input_data, mock=mock_data) + dataset_name = _random_hash() + dataset = sy.Dataset(name=dataset_name, asset_list=[asset]) + dataset_res = datasite_client.upload_dataset(dataset) + assert isinstance(dataset_res, SyftSuccess) + + datasite_proxy_client = next( + gateway_client.datasites[i] + for i in itertools.count() + if gateway_client.datasites[i].name == datasite_client.name + ) + root_proxy_client = datasite_proxy_client.login( + email="info@openmined.org", password="changethis" + ) + retrieved_dataset = root_proxy_client.datasets[dataset_name] + retrieved_asset = retrieved_dataset.assets[asset_name] + assert np.all(retrieved_asset.data == input_data) + + # the datasite client delete the dataset + datasite_client.api.services.dataset.delete(uid=retrieved_dataset.id) + + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +# TODO: remove this and fix this test +@pytest.mark.xfail(reason="Unsure but its flapping in CI we need to fix it") +@pytest.mark.network +def test_peer_health_check( + set_env_var, gateway_port: int, datasite_1_port: int +) -> None: + """ + Scenario: Connecting a datasite server to a gateway server. 
+ The gateway client approves the association request. + The gateway client checks that the datasite peer is associated + """ + # login to the datasite and gateway + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + + res = gateway_client.settings.allow_association_request_auto_approval(enable=False) + assert isinstance(res, SyftSuccess) + + # Try removing existing peers just to make sure + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + # gateway checks that the datasite is not yet associated + res = gateway_client.api.services.network.check_peer_association( + peer_id=datasite_client.id + ) + assert isinstance(res, ServerPeerAssociationStatus) + assert res.value == "PEER_NOT_FOUND" + + # the datasite tries to connect to the gateway + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, Request) + assert isinstance(result.changes[0], AssociationRequestChange) + + # check that the peer's association request is pending + res = gateway_client.api.services.network.check_peer_association( + peer_id=datasite_client.id + ) + assert isinstance(res, ServerPeerAssociationStatus) + assert res.value == "PEER_ASSOCIATION_PENDING" + + # the datasite tries to connect to the gateway (again) + result = datasite_client.connect_to_gateway(gateway_client) + assert isinstance(result, Request) # the pending request is returned + # there should be only 1 association requests from the datasite + assert len(gateway_client.api.services.request.get_all()) == 1 + + # check again that the peer's association request is still pending + res = gateway_client.api.services.network.check_peer_association( + peer_id=datasite_client.id + ) + assert isinstance(res, ServerPeerAssociationStatus) + assert res.value == "PEER_ASSOCIATION_PENDING" + + # the gateway client approves one of the association requests + res = gateway_client.api.services.request.get_all()[-1].approve() + assert len(gateway_client.peers) == 1 + + # the gateway client checks that the peer is associated + res = gateway_client.api.services.network.check_peer_association( + peer_id=datasite_client.id + ) + assert isinstance(res, ServerPeerAssociationStatus) + assert res.value == "PEER_ASSOCIATED" + + time.sleep(PeerHealthCheckTask.repeat_time * 2 + 1) + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert datasite_peer.ping_status == ServerPeerConnectionStatus.ACTIVE + + # Remove existing peers + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) + assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.network +def test_reverse_tunnel_connection(datasite_1_port: int, gateway_port: int): + # login to the datasite and gateway + + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" + ) + + _ = gateway_client.settings.allow_association_request_auto_approval(enable=False) + + # Try removing existing peers just to make sure + _remove_existing_peers(datasite_client) + _remove_existing_peers(gateway_client) + + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client, reverse_tunnel=True) + + assert 
isinstance(result, Request) + assert isinstance(result.changes[0], AssociationRequestChange) + + assert len(datasite_client.peers) == 1 + + # Datasite's peer is a gateway and vice-versa + datasite_peer = datasite_client.peers[0] + assert datasite_peer.server_type == ServerType.GATEWAY + assert datasite_peer.server_routes[0].rtunnel_token is None + assert len(gateway_client.peers) == 0 + + gateway_client_root = gateway_client.login( + email="info@openmined.org", password="changethis" + ) + _ = gateway_client_root.api.services.request.get_all()[-1].approve() + time.sleep(90) + + gateway_peers = gateway_client.api.services.network.get_all_peers() + assert len(gateway_peers) == 1 + assert len(gateway_peers[0].server_routes) == 1 + assert gateway_peers[0].server_routes[0].rtunnel_token is not None + + proxy_datasite_client = gateway_client.peers[0] + + assert isinstance(proxy_datasite_client, DatasiteClient) + assert isinstance(datasite_peer, ServerPeer) + assert gateway_client.name == datasite_peer.name + assert datasite_client.name == proxy_datasite_client.name + + assert not isinstance(proxy_datasite_client.datasets.get_all(), SyftError) + + # Try removing existing peers just to make sure + _remove_existing_peers(gateway_client) + _remove_existing_peers(datasite_client) diff --git a/tests/integration/orchestra/orchestra_test.py b/tests/integration/orchestra/orchestra_test.py index d814b89fabb..cc59d7a7d8b 100644 --- a/tests/integration/orchestra/orchestra_test.py +++ b/tests/integration/orchestra/orchestra_test.py @@ -7,38 +7,37 @@ # syft absolute import syft as sy -from syft.node.node import Node +from syft.server.server import Server -@pytest.mark.parametrize("node_type", ["domain", "gateway", "enclave"]) -def test_orchestra_python_local(node_type): +@pytest.mark.parametrize("server_type", ["datasite", "gateway", "enclave"]) +def test_orchestra_python_local(server_type): name = token_hex(8) - node = sy.orchestra.launch(name=name, node_type=node_type, local_db=False) + server = sy.orchestra.launch(name=name, server_type=server_type) try: - assert isinstance(node.python_node, Node) - assert node.python_node.name == name - assert node.python_node.node_type == node_type - assert node.python_node.metadata.node_type == node_type + assert isinstance(server.python_server, Server) + assert server.python_server.name == name + assert server.python_server.server_type == server_type + assert server.python_server.metadata.server_type == server_type finally: - node.python_node.cleanup() - node.land() + server.python_server.cleanup() + server.land() -@pytest.mark.parametrize("node_type", ["domain", "gateway", "enclave"]) -def test_orchestra_python_server(node_type): +@pytest.mark.parametrize("server_type", ["datasite", "gateway", "enclave"]) +def test_orchestra_python_server(server_type): name = token_hex(8) - node = sy.orchestra.launch( + server = sy.orchestra.launch( name=name, port="auto", - node_type=node_type, - local_db=False, + server_type=server_type, ) try: - metadata = requests.get(f"http://localhost:{node.port}/api/v2/metadata") + metadata = requests.get(f"http://localhost:{server.port}/api/v2/metadata") assert metadata.status_code == 200 assert metadata.json()["name"] == name - assert metadata.json()["node_type"] == node_type + assert metadata.json()["server_type"] == server_type finally: - node.land() + server.land() diff --git a/tests/integration/veilid/gateway_veilid_test.py b/tests/integration/veilid/gateway_veilid_test.py deleted file mode 100644 index fa4e092aefa..00000000000 --- 
a/tests/integration/veilid/gateway_veilid_test.py +++ /dev/null @@ -1,96 +0,0 @@ -# third party -import pytest - -# syft absolute -import syft as sy -from syft.abstract_node import NodeType -from syft.client.domain_client import DomainClient -from syft.client.gateway_client import GatewayClient -from syft.client.protocol import SyftProtocol -from syft.service.network.node_peer import NodePeer -from syft.service.network.routes import VeilidNodeRoute -from syft.service.response import SyftSuccess -from syft.service.user.user_roles import ServiceRole - - -def remove_existing_peers(client): - for peer in client.api.services.network.get_all_peers(): - res = client.api.services.network.delete_peer_by_id(peer.id) - assert isinstance(res, SyftSuccess) - - -@pytest.mark.skip( - reason="The tests are highly flaky currently.Will be re-enabled soon!" -) -@pytest.mark.veilid -def test_domain_connect_to_gateway_veilid(domain_1_port, gateway_port): - # Revert to the guest login, when we automatically generate the dht key - # gateway_client: GatewayClient = sy.login_as_guest(port=gateway_port) - gateway_client: GatewayClient = sy.login( - port=gateway_port, email="info@openmined.org", password="changethis" - ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" - ) - - # Remove existing peers due to the previous gateway test - remove_existing_peers(domain_client) - remove_existing_peers(gateway_client) - - # Generate DHT Record - gateway_dht_res = gateway_client.api.services.veilid.generate_vld_key() - assert isinstance(gateway_dht_res, SyftSuccess), gateway_dht_res - domain_dht_res = domain_client.api.services.veilid.generate_vld_key() - assert isinstance(domain_dht_res, SyftSuccess), domain_dht_res - - # Retrieve DHT Record - domain_veilid_route = domain_client.api.services.veilid.get_veilid_route() - assert isinstance(domain_veilid_route, VeilidNodeRoute), domain_veilid_route - gateway_veilid_route = gateway_client.api.services.veilid.get_veilid_route() - assert isinstance(gateway_veilid_route, VeilidNodeRoute), gateway_veilid_route - - # Connect Domain to Gateway via Veilid - result = domain_client.connect_to_gateway( - gateway_client, protocol=SyftProtocol.VEILID - ) - assert isinstance(result, SyftSuccess) - - proxy_domain_client = gateway_client.peers[0] - domain_peer = domain_client.peers[0] - gateway_peer = gateway_client.api.services.network.get_all_peers()[0] - - # Domain Asserts - assert len(domain_client.peers) == 1 - assert isinstance(proxy_domain_client, DomainClient) - assert domain_peer.node_type == NodeType.GATEWAY - assert isinstance(domain_peer, NodePeer) - assert isinstance(domain_peer.node_routes[0], VeilidNodeRoute) - assert domain_peer.node_routes[0].vld_key == gateway_veilid_route.vld_key - assert domain_client.name == proxy_domain_client.name - - # Gateway Asserts - assert len(gateway_client.peers) == 1 - assert gateway_peer.node_type == NodeType.DOMAIN - assert isinstance(gateway_peer.node_routes[0], VeilidNodeRoute) - assert gateway_peer.node_routes[0].vld_key == domain_veilid_route.vld_key - assert gateway_client.name == domain_peer.name - assert len(gateway_client.domains) == 1 - assert len(gateway_client.enclaves) == 0 - - # Proxy Domain Asserts - assert proxy_domain_client.metadata == domain_client.metadata - assert proxy_domain_client.user_role == ServiceRole.NONE - - domain_client = domain_client.login( - email="info@openmined.org", password="changethis" - ) - proxy_domain_client = proxy_domain_client.login( - 
email="info@openmined.org", password="changethis" - ) - - assert proxy_domain_client.logged_in_user == "info@openmined.org" - assert proxy_domain_client.user_role == ServiceRole.ADMIN - assert proxy_domain_client.credentials == domain_client.credentials - assert ( - proxy_domain_client.api.endpoints.keys() == domain_client.api.endpoints.keys() - ) diff --git a/tests/scenariosv2/__init__.py b/tests/scenariosv2/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scenariosv2/flows/__init__.py b/tests/scenariosv2/flows/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scenariosv2/flows/admin_bigquery_api.py b/tests/scenariosv2/flows/admin_bigquery_api.py new file mode 100644 index 00000000000..1deb967a4b8 --- /dev/null +++ b/tests/scenariosv2/flows/admin_bigquery_api.py @@ -0,0 +1,132 @@ +# stdlib +from typing import Any + +# syft absolute +import syft as sy +from syft.util.test_helpers.apis import make_schema +from syft.util.test_helpers.apis import make_test_query + +# relative +from ..sim.core import SimulatorContext +from .utils import server_info + +__all__ = ["bq_schema_endpoint", "bq_test_endpoint", "bq_submit_endpoint"] + + +def bq_schema_endpoint( + ctx: SimulatorContext, + admin_client: sy.DatasiteClient, + worker_pool: str, + path: str = "bigquery.schema", +): + schema_function = make_schema( + settings={ + "calls_per_min": 5, + }, + worker_pool_name=worker_pool, + path=path, + ) + + # Call admin_client.custom_api.add + __create_endpoint(ctx, admin_client, schema_function, path) + + +def bq_test_endpoint( + ctx: SimulatorContext, + admin_client: sy.DatasiteClient, + worker_pool: str, + path="bigquery.test_query", +): + private_query_function = make_test_query( + settings={ + "rate_limiter_enabled": False, + } + ) + mock_query_function = make_test_query( + settings={ + "rate_limiter_enabled": True, + "calls_per_min": 10, + } + ) + + test_endpoint = sy.TwinAPIEndpoint( + path=path, + description="This endpoint allows to query Bigquery storage via SQL queries.", + private_function=private_query_function, + mock_function=mock_query_function, + worker_pool_name=worker_pool, + endpoint_timeout=120, + ) + + # Call admin_client.custom_api.add + __create_endpoint(ctx, admin_client, test_endpoint, path) + + +def bq_submit_endpoint( + ctx: SimulatorContext, + admin_client: sy.DatasiteClient, + worker_pool: str, + path="bigquery.submit_query", +): + @sy.api_endpoint( + path=path, + description="API endpoint that allows you to submit SQL queries to run on the private data.", + worker_pool_name=worker_pool, + settings={"worker": worker_pool}, + endpoint_timeout=120, + ) + def submit_query( + context, + func_name: str, + query: str, + ) -> str: + # stdlib + import hashlib + + # syft absolute + import syft as sy + + hash_object = hashlib.new("sha256") + hash_object.update(context.user.email.encode("utf-8")) + func_name = func_name + "_" + hash_object.hexdigest()[:6] + + @sy.syft_function( + name=func_name, + input_policy=sy.MixedInputPolicy( + endpoint=sy.Constant( + val=context.admin_client.api.services.bigquery.test_query + ), + query=sy.Constant(val=query), + client=context.admin_client, + ), + worker_pool_name=context.settings["worker"], + ) + def execute_query(query: str, endpoint): + res = endpoint(sql_query=query) + return res + + request = context.user_client.code.request_code_execution(execute_query) + if isinstance(request, sy.SyftError): + return request + context.admin_client.requests.set_tags(request, ["autosync"]) + + 
return f"Query submitted {request}. Use `client.code.{func_name}()` to run your query" + + # Call admin_client.custom_api.add + __create_endpoint(ctx, admin_client, submit_query, path) + + +def __create_endpoint( + ctx: SimulatorContext, + admin_client: sy.DatasiteClient, + endpoint: Any, + path: str, +): + msg = f"Admin {admin_client.metadata.server_side_type}: Endpoint '{path}' on {server_info(admin_client)}" + ctx.logger.info(f"{msg} - Creating") + + # Create the endpoint + result = admin_client.custom_api.add(endpoint=endpoint) + assert isinstance(result, sy.SyftSuccess), result + + ctx.logger.info(f"{msg} - Created") diff --git a/tests/scenariosv2/flows/admin_bigquery_pool.py b/tests/scenariosv2/flows/admin_bigquery_pool.py new file mode 100644 index 00000000000..91fda486e9c --- /dev/null +++ b/tests/scenariosv2/flows/admin_bigquery_pool.py @@ -0,0 +1,54 @@ +# stdlib +import os + +# syft absolute +import syft as sy +from syft.orchestra import DeploymentType +from syft.util.test_helpers.worker_helpers import ( + build_and_launch_worker_pool_from_docker_str, +) + +# relative +from ..sim.core import SimulatorContext +from .utils import server_info + +__all__ = ["bq_create_pool"] + + +def bq_create_pool( + ctx: SimulatorContext, + admin_client: sy.DatasiteClient, + worker_pool="biquery-pool", + external_registry_url="k3d-registry.localhost:5800", +): + base_image = admin_client.images.get_all()[0] + worker_image_tag = str(base_image.image_identifier).replace( + "syft-backend", worker_pool + ) + + worker_dockerfile = ( + f"FROM {str(base_image.image_identifier)}\n" + f"RUN uv pip install db-dtypes google-cloud-bigquery" + ) + + msg = ( + f"Admin {admin_client.metadata.server_side_type}: " + f"Worker Pool tag '{worker_image_tag}' on {server_info(admin_client)}" + ) + + ctx.logger.info(f"{msg} - Creating") + + deployment_type = os.environ.get("ORCHESTRA_DEPLOYMENT_TYPE", DeploymentType.PYTHON) + + build_and_launch_worker_pool_from_docker_str( + environment=str(deployment_type), + client=admin_client, + worker_pool_name=worker_pool, + worker_dockerfile=worker_dockerfile, + external_registry=external_registry_url, + docker_tag=worker_image_tag, + custom_pool_pod_annotations=None, + custom_pool_pod_labels=None, + scale_to=3, + ) + ctx.logger.info(f"{msg} - Created") diff --git a/tests/scenariosv2/flows/admin_common.py b/tests/scenariosv2/flows/admin_common.py new file mode 100644 index 00000000000..2e8454ff1f7 --- /dev/null +++ b/tests/scenariosv2/flows/admin_common.py @@ -0,0 +1,20 @@ +# syft absolute +import syft as sy + +# relative +from ..sim.core import SimulatorContext +from .utils import server_info + +__all__ = ["register_user"] + + +def register_user(ctx: SimulatorContext, admin_client: sy.DatasiteClient, user: dict): + msg = f"Admin {admin_client.metadata.server_side_type}: User {user['email']} on {server_info(admin_client)}" + ctx.logger.info(f"{msg} - Creating") + _ = admin_client.register( + name=user["name"], + email=user["email"], + password=user["password"], + password_verify=user["password"], + ) + ctx.logger.info(f"{msg} - Created") diff --git a/tests/scenariosv2/flows/user_bigquery_api.py b/tests/scenariosv2/flows/user_bigquery_api.py new file mode 100644 index 00000000000..eede3d2264b --- /dev/null +++ b/tests/scenariosv2/flows/user_bigquery_api.py @@ -0,0 +1,75 @@ +# stdlib +import random + +# syft absolute +import syft as sy +from syft import test_settings +from syft.service.request.request import RequestStatus + +# relative +from ..sim.core import SimulatorContext + 
+__all__ = ["bq_test_query", "bq_submit_query", "bq_check_query_results"] + + +def query_sql(): + dataset_2 = test_settings.get("dataset_2", default="dataset_2") + table_2 = test_settings.get("table_2", default="table_2") + table_2_col_id = test_settings.get("table_2_col_id", default="table_id") + table_2_col_score = test_settings.get("table_2_col_score", default="colname") + + query = f"SELECT {table_2_col_id}, AVG({table_2_col_score}) AS average_score \ + FROM {dataset_2}.{table_2} \ + GROUP BY {table_2_col_id} \ + LIMIT 10000" + return query + + +def bq_test_query(ctx: SimulatorContext, client: sy.DatasiteClient): + user = client.logged_in_user + + msg = f"User: {user} - bigquery.test_query" + ctx.logger.info(f"{msg} = Invoked") + res = client.api.bigquery.test_query(sql_query=query_sql()) + assert len(res) == 10000 + ctx.logger.info(f"{msg} - Response - {len(res)} rows") + return res + + +def bq_submit_query(ctx: SimulatorContext, client: sy.DatasiteClient): + user = client.logged_in_user + # Randomly define a func_name a function to call + func_name = "invalid_func" if random.random() < 0.5 else "test_query" + + msg = f"User: {user} - bigquery.submit_query(func_name={func_name})" + ctx.logger.info(f"{msg} - Calling") + res = client.api.bigquery.submit_query( + func_name=func_name, + query=query_sql(), + ) + assert "Query submitted" in str(res) + ctx.logger.info(f"{msg} - Response - {res}") + return res + + +def bq_check_query_results(ctx: SimulatorContext, client: sy.DatasiteClient): + user = client.logged_in_user + + for request in client.requests: + status = request.get_status() + + msg = f"User: {user} - Request {request.code.service_func_name}" + + if status == RequestStatus.APPROVED: + func_name = request.code.service_func_name + api_func = getattr(client.code, func_name, None) + job = api_func(blocking=False) + result = job.wait() + assert len(result) == 10000 + ctx.logger.info(f"{msg} - Approved") + elif status == RequestStatus.REJECTED: + ctx.logger.info(f"{user} - Rejected") + else: + ctx.logger.info(f"{user} - Pending") + + return True diff --git a/tests/scenariosv2/flows/utils.py b/tests/scenariosv2/flows/utils.py new file mode 100644 index 00000000000..a114f48a59c --- /dev/null +++ b/tests/scenariosv2/flows/utils.py @@ -0,0 +1,29 @@ +# stdlib +from urllib.parse import urlparse + +# syft absolute +import syft as sy +from syft.orchestra import ServerHandle + + +def server_info(client: sy.DatasiteClient) -> str: + url = getattr(client.connection, "url", "python") + return f"{client.name}(url={url}, side={client.metadata.server_side_type})" + + +def launch_server( + server_url: str, + server_name: str, + server_side_type: str | None = "high", +) -> ServerHandle | None: + parsed_url = urlparse(server_url) + port = parsed_url.port + return sy.orchestra.launch( + name=server_name, + server_side_type=server_side_type, + reset=True, + dev_mode=True, + port=port, + create_producer=True, + n_consumers=1, + ) diff --git a/tests/scenariosv2/l0_test.py b/tests/scenariosv2/l0_test.py new file mode 100644 index 00000000000..edf847d28ed --- /dev/null +++ b/tests/scenariosv2/l0_test.py @@ -0,0 +1,464 @@ +# stdlib +import asyncio +from enum import auto +import os +import random + +# third party +from faker import Faker +import pytest + +# syft absolute +import syft as sy +from syft.orchestra import DeploymentType +from syft.service.request.request import RequestStatus + +# relative +from .flows.admin_bigquery_api import bq_schema_endpoint +from .flows.admin_bigquery_api import 
bq_submit_endpoint +from .flows.admin_bigquery_api import bq_test_endpoint +from .flows.admin_bigquery_pool import bq_create_pool +from .flows.admin_common import register_user +from .flows.user_bigquery_api import bq_check_query_results +from .flows.user_bigquery_api import bq_submit_query +from .flows.user_bigquery_api import bq_test_query +from .flows.utils import launch_server +from .sim.core import BaseEvent +from .sim.core import Simulator +from .sim.core import SimulatorContext +from .sim.core import sim_activity +from .sim.core import sim_entrypoint + +fake = Faker() +NUM_USERS = 10 +NUM_ENDPOINTS = 3 # test_query, submit_query, schema_query +TIMEOUT = 900 + + +class Event(BaseEvent): + # overall state + INIT = auto() + ADMIN_LOWSIDE_FLOW_COMPLETED = auto() + ADMIN_HIGHSIDE_FLOW_COMPLETED = auto() + ADMIN_LOW_ALL_RESULTS_AVAILABLE = auto() + USER_FLOW_COMPLETED = auto() + # admin - endpoints + ADMIN_ALL_ENDPOINTS_CREATED = auto() + ADMIN_BQ_TEST_ENDPOINT_CREATED = auto() + ADMIN_BQ_SUBMIT_ENDPOINT_CREATED = auto() + ADMIN_BQ_SCHEMA_ENDPOINT_CREATED = auto() + ADMIN_LOW_SIDE_ENDPOINTS_AVAILABLE = auto() + # admin - worker pool + ADMIN_WORKER_POOL_CREATED = auto() + ADMIN_LOWSIDE_WORKER_POOL_CREATED = auto() + ADMIN_HIGHSIDE_WORKER_POOL_CREATED = auto() + # admin sync + ADMIN_SYNC_COMPLETED = auto() + ADMIN_SYNCED_HIGH_TO_LOW = auto() + ADMIN_SYNCED_LOW_TO_HIGH = auto() + # users + GUEST_USERS_CREATED = auto() + USER_CAN_QUERY_TEST_ENDPOINT = auto() + USER_CAN_SUBMIT_QUERY = auto() + USER_CHECKED_RESULTS = auto() + + +# ------------------------------------------------------------------------------------------------ + + +@sim_activity( + wait_for=Event.ADMIN_LOW_SIDE_ENDPOINTS_AVAILABLE, + trigger=Event.USER_CAN_QUERY_TEST_ENDPOINT, +) +async def user_bq_test_query(ctx: SimulatorContext, client: sy.DatasiteClient): + """Run query on test endpoint""" + await asyncio.to_thread(bq_test_query, ctx, client) + + +@sim_activity( + wait_for=Event.ADMIN_LOW_SIDE_ENDPOINTS_AVAILABLE, + trigger=Event.USER_CAN_SUBMIT_QUERY, +) +async def user_bq_submit_query(ctx: SimulatorContext, client: sy.DatasiteClient): + """Submit query to be run on private data""" + await asyncio.to_thread(bq_submit_query, ctx, client) + + +@sim_activity( + wait_for=Event.ADMIN_LOW_ALL_RESULTS_AVAILABLE, + trigger=Event.USER_CHECKED_RESULTS, +) +async def user_bq_results(ctx: SimulatorContext, client: sy.DatasiteClient): + await asyncio.to_thread(bq_check_query_results, ctx, client) + + +@sim_activity(wait_for=Event.GUEST_USERS_CREATED, trigger=Event.USER_FLOW_COMPLETED) +async def user_low_side_flow(ctx: SimulatorContext, server_url_low: str, user: dict): + """ + User flow on low-side: + - User logs in + - User invokes the test query endpoint to get mock results - user_bq_test_query + - User submits a query to be run on the private data for approval - user_bq_submit_query + - User checks if request is approved and retrieves the results - user_bq_results + + The test -> submit -> results are typically done in sequence. + test & submit can be done in parallel but results can be checked only after submit is done. 
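+
+    The underlying client calls (implemented in flows/user_bigquery_api.py)
+    look roughly like this sketch, where `query` is the SQL string built by
+    `query_sql()`:
+
+        rows = client.api.bigquery.test_query(sql_query=query)  # mock rows
+        client.api.bigquery.submit_query(func_name="test_query", query=query)
+        for request in client.requests:  # once the request has been synced
+            if request.get_status() == RequestStatus.APPROVED:
+                func_name = request.code.service_func_name
+                job = getattr(client.code, func_name)(blocking=False)
+                result = job.wait()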
+ """ + + client = sy.login( + url=server_url_low, + email=user["email"], + password=user["password"], + ) + ctx.logger.info(f"User: {client.logged_in_user} - logged in") + + await user_bq_test_query(ctx, client) + await user_bq_submit_query(ctx, client) + await user_bq_results(ctx, client) + + +# ------------------------------------------------------------------------------------------------ + + +@sim_activity(trigger=Event.GUEST_USERS_CREATED) +async def admin_register_users( + ctx: SimulatorContext, admin_client: sy.DatasiteClient, users: list[dict] +): + await asyncio.gather( + *[asyncio.to_thread(register_user, ctx, admin_client, user) for user in users], + ) + + +@sim_activity(trigger=Event.ADMIN_BQ_SCHEMA_ENDPOINT_CREATED) +async def admin_create_bq_schema_endpoint( + ctx: SimulatorContext, admin_client: sy.DatasiteClient, worker_pool: str +): + await asyncio.to_thread(bq_schema_endpoint, ctx, admin_client, worker_pool) + + +@sim_activity(trigger=Event.ADMIN_BQ_TEST_ENDPOINT_CREATED) +async def admin_create_bq_test_endpoint( + ctx: SimulatorContext, + admin_client: sy.DatasiteClient, + worker_pool: str, +): + await asyncio.to_thread(bq_test_endpoint, ctx, admin_client, worker_pool) + + +@sim_activity(trigger=Event.ADMIN_BQ_SUBMIT_ENDPOINT_CREATED) +async def admin_create_bq_submit_endpoint( + ctx: SimulatorContext, + admin_client: sy.DatasiteClient, + worker_pool: str, +): + await asyncio.to_thread(bq_submit_endpoint, ctx, admin_client, worker_pool) + + +@sim_activity(trigger=Event.ADMIN_ALL_ENDPOINTS_CREATED) +async def admin_high_create_endpoints( + ctx: SimulatorContext, admin_client: sy.DatasiteClient +): + worker_pool = "biquery-pool" + + await asyncio.gather( + admin_create_bq_test_endpoint(ctx, admin_client, worker_pool), + admin_create_bq_submit_endpoint(ctx, admin_client, worker_pool), + admin_create_bq_schema_endpoint(ctx, admin_client, worker_pool), + ) + ctx.logger.info("Admin high: Created all endpoints") + + +def all_available(paths: list[str], expected: list[str]): + return set(expected).issubset(set(paths)) + + +@sim_activity( + # endpoints work only after low side worker pool is created + wait_for=Event.ADMIN_LOWSIDE_WORKER_POOL_CREATED +) +async def admin_low_triage_requests( + ctx: SimulatorContext, admin_client: sy.DatasiteClient +): + expected_paths = [ + "bigquery.test_query", + "bigquery.submit_query", + "bigquery.schema", + ] + + while True: + await asyncio.sleep(random.uniform(5, 10)) + + # check if endpoints are available + if not ctx.events.is_set(Event.ADMIN_LOW_SIDE_ENDPOINTS_AVAILABLE): + endpoints = admin_client.custom_api.get_all() + paths = [ep.path for ep in endpoints] + ctx.logger.debug(f"Admin low: API endpoints - {paths}") + + if all_available(paths, expected_paths): + ctx.logger.info("Admin low: All endpoints available") + ctx.events.trigger(Event.ADMIN_LOW_SIDE_ENDPOINTS_AVAILABLE) + else: + ctx.logger.info(f"Admin low: Waiting for all endpoints {paths}") + + # Check if all requests are approved or denied + requests = admin_client.requests.get_all() + pending = [req for req in requests if req.status == RequestStatus.PENDING] + ctx.logger.info(f"Admin low: Requests={len(requests)} Pending={len(pending)}") + + # If all requests have been triaged, then exit + if len(requests) == NUM_USERS: + ctx.events.trigger(Event.ADMIN_LOW_ALL_RESULTS_AVAILABLE) + break + + ctx.logger.info("Admin low: All requests triaged.") + + +@sim_activity(trigger=Event.ADMIN_HIGHSIDE_WORKER_POOL_CREATED) +async def admin_high_create_bq_pool( + ctx: SimulatorContext, 
admin_client: sy.DatasiteClient +): + await asyncio.to_thread(bq_create_pool, ctx, admin_client) + + +@sim_activity(trigger=Event.ADMIN_LOWSIDE_WORKER_POOL_CREATED) +async def admin_low_create_bq_pool( + ctx: SimulatorContext, admin_client: sy.DatasiteClient +): + await asyncio.to_thread(bq_create_pool, ctx, admin_client) + + +@sim_activity( + wait_for=[ + Event.USER_CAN_SUBMIT_QUERY, + Event.ADMIN_SYNCED_LOW_TO_HIGH, + ], + trigger=Event.ADMIN_HIGHSIDE_FLOW_COMPLETED, +) +async def admin_high_triage_requests( + ctx: SimulatorContext, admin_client: sy.DatasiteClient +): + while not ctx.events.is_set(Event.ADMIN_LOW_ALL_RESULTS_AVAILABLE): + await asyncio.sleep(random.uniform(5, 10)) + + # check if there are any requests + # BUG: request that are executed request.code() are always in pending state + requests = admin_client.requests.get_all() + pending = [req for req in requests if req.status == RequestStatus.PENDING] + ctx.logger.info(f"Admin high: Requests={len(requests)} Pending={len(pending)}") + + for request in pending: + # ignore non-code requests + if not getattr(request, "code", None): + continue + + if "invalid_func" in request.code.service_func_name: + ctx.logger.info(f"Admin high: Denying request {request}") + request.deny("You gave me an `invalid_func` function") + else: + ctx.logger.info(f"Admin high: Approving request by executing {request}") + func_name = request.code.service_func_name + api_func = getattr(admin_client.code, func_name, None) + job = api_func(blocking=False) + result = job.wait() + ctx.logger.info(f"Admin high: Request result {result}") + if len(requests) == NUM_USERS: + break + ctx.logger.info("Admin high: All requests triaged.") + + +@sim_activity(trigger=Event.ADMIN_HIGHSIDE_FLOW_COMPLETED) +async def admin_high_side_flow(ctx: SimulatorContext, admin_auth): + admin_client = sy.login(**admin_auth) + ctx.logger.info("Admin high: logged in") + + await asyncio.gather( + admin_high_create_bq_pool(ctx, admin_client), + admin_high_create_endpoints(ctx, admin_client), + admin_high_triage_requests(ctx, admin_client), + ) + + +@sim_activity(trigger=Event.ADMIN_LOWSIDE_FLOW_COMPLETED) +async def admin_low_side_flow(ctx: SimulatorContext, admin_auth, users): + admin_client = sy.login(**admin_auth) + ctx.logger.info("Admin low: logged in") + + await asyncio.gather( + admin_register_users(ctx, admin_client, users), + admin_low_create_bq_pool(ctx, admin_client), + admin_low_triage_requests(ctx, admin_client), + ) + + +# ------------------------------------------------------------------------------------------------ + + +async def admin_sync( + ctx: SimulatorContext, + from_auth: dict, + to_auth: dict, + trigger: Event, + exit_after: Event, +): + from_client = sy.login(**from_auth) + to_client = sy.login(**to_auth) + + from_ = from_client.metadata.server_side_type + to_ = to_client.metadata.server_side_type + + while not ctx.events.is_set(exit_after): + try: + await asyncio.sleep(random.uniform(3, 5)) + + ctx.logger.info(f"Admin {from_}: Sync {from_}->{to_} - Checking") + result = sy.sync(from_client, to_client) + if isinstance(result, sy.SyftSuccess): + continue + + ctx.logger.info(f"Admin {from_}: Sync {from_}->{to_} - Result={result}") + result._share_all() + result._sync_all() + + ctx.events.trigger(trigger) + ctx.logger.info(f"Admin {from_}: Sync {from_}->{to_} - Synced") + + except Exception as e: + ctx.logger.error(f"Admin {from_}: Sync {from_}->{to_} - Error: {str(e)}") + ctx.logger.info(f"Admin {from_}: Sync {from_}->{to_} - Waiting a bit..") + await 
asyncio.sleep(random.uniform(2, 4)) + + ctx.logger.info(f"Admin {from_}: Sync {from_}->{to_} - Closed") + + +@sim_activity(trigger=Event.ADMIN_SYNC_COMPLETED) +async def admin_sync_high_to_low_flow( + ctx: SimulatorContext, admin_auth_high: dict, admin_auth_low: dict +): + await admin_sync( + ctx, + # high -> low + from_auth=admin_auth_high, + to_auth=admin_auth_low, + trigger=Event.ADMIN_SYNCED_HIGH_TO_LOW, + # TODO: see if we have a better exit clause + exit_after=Event.ADMIN_HIGHSIDE_FLOW_COMPLETED, + ) + + +@sim_activity(trigger=Event.ADMIN_SYNC_COMPLETED) +async def admin_sync_low_to_high_flow( + ctx: SimulatorContext, admin_auth_high: dict, admin_auth_low: dict +): + await admin_sync( + ctx, + # low -> high + from_auth=admin_auth_low, + to_auth=admin_auth_high, + trigger=Event.ADMIN_SYNCED_LOW_TO_HIGH, + # TODO: see if we have a better exit clause + exit_after=Event.ADMIN_LOWSIDE_FLOW_COMPLETED, + ) + + +# ------------------------------------------------------------------------------------------------ + + +def setup_servers(ctx: SimulatorContext, server_url_high, server_url_low): + deployment_type = os.environ.get("ORCHESTRA_DEPLOYMENT_TYPE", DeploymentType.REMOTE) + ctx.logger.info(f"Deployment type: {deployment_type}") + + if deployment_type == DeploymentType.REMOTE: + return None, None + + ctx.logger.info(f"Launching python server high side server on {server_url_high}") + server_high = launch_server( + server_url=server_url_high, + server_name="syft-high", + server_side_type="high", + ) + + ctx.logger.info(f"Launching python server low side server on {server_url_low}") + server_low = launch_server( + server_url=server_url_low, + server_name="syft-low", + server_side_type="low", + ) + + return server_high, server_low + + +def shutdown_servers(server_high, server_low): + if server_high: + server_high.land() + + if server_low: + server_low.land() + + +@sim_entrypoint +async def sim_l0_scenario(ctx: SimulatorContext): + users = [ + dict( # noqa: C408 + name=fake.name(), + email=fake.email(), + password="password", + ) + for _ in range(NUM_USERS) + ] + + server_url_high = "http://localhost:8080" + admin_auth_high = dict( # noqa: C408 + url=server_url_high, + email="info@openmined.org", + password="changethis", + ) + + server_url_low = "http://localhost:8081" + admin_auth_low = dict( # noqa: C408 + url=server_url_low, + email="info@openmined.org", + password="changethis", + ) + + server_high, server_low = setup_servers(ctx, server_url_high, server_url_low) + + ctx.events.trigger(Event.INIT) + ctx.logger.info("--- Initializing L0 BigQuery Scenario Test ---") + + await asyncio.gather( + admin_low_side_flow(ctx, admin_auth_low, users), + admin_high_side_flow(ctx, admin_auth_high), + admin_sync_high_to_low_flow(ctx, admin_auth_high, admin_auth_low), + admin_sync_low_to_high_flow(ctx, admin_auth_high, admin_auth_low), + *[user_low_side_flow(ctx, server_url_low, user) for user in users], + ) + + shutdown_servers(server_high, server_low) + + +@pytest.mark.asyncio +async def test_l0_scenario(request): + sim = Simulator("l0_scenario") + + await sim.start( + sim_l0_scenario, + random_wait=None, # (0.5, 1.5), + check_events=[ + # admin lowside + Event.GUEST_USERS_CREATED, + Event.ADMIN_LOWSIDE_WORKER_POOL_CREATED, + Event.ADMIN_LOW_ALL_RESULTS_AVAILABLE, + Event.ADMIN_LOWSIDE_FLOW_COMPLETED, + # admin high side + Event.ADMIN_ALL_ENDPOINTS_CREATED, + Event.ADMIN_HIGHSIDE_WORKER_POOL_CREATED, + Event.ADMIN_HIGHSIDE_FLOW_COMPLETED, + # admin sync + Event.ADMIN_SYNC_COMPLETED, + # users + 
Event.USER_CAN_QUERY_TEST_ENDPOINT, + Event.USER_CHECKED_RESULTS, + Event.USER_FLOW_COMPLETED, + ], + timeout=TIMEOUT, + ) diff --git a/tests/scenariosv2/l2_test.py b/tests/scenariosv2/l2_test.py new file mode 100644 index 00000000000..1d713249a57 --- /dev/null +++ b/tests/scenariosv2/l2_test.py @@ -0,0 +1,160 @@ +# stdlib +import asyncio +import os +import random + +# third party +from faker import Faker +import pytest + +# syft absolute +import syft as sy +from syft.orchestra import DeploymentType + +# relative +from .flows.user_bigquery_api import bq_submit_query +from .flows.user_bigquery_api import bq_test_query +from .flows.utils import launch_server +from .l0_test import Event +from .l0_test import admin_high_create_bq_pool +from .l0_test import admin_high_create_endpoints +from .l0_test import admin_register_users +from .sim.core import Simulator +from .sim.core import SimulatorContext +from .sim.core import sim_activity +from .sim.core import sim_entrypoint + +fake = Faker() + + +# ---------------------------------- admin ---------------------------------- +@sim_activity( + wait_for=[ + Event.USER_CAN_SUBMIT_QUERY, + ] +) +async def admin_triage_requests(ctx: SimulatorContext, admin_client: sy.DatasiteClient): + while True: + await asyncio.sleep(random.uniform(3, 5)) + ctx.logger.info("Admin: Triaging requests") + + pending_requests = admin_client.requests.get_all_pending() + if len(pending_requests) == 0: + break + for request in admin_client.requests.get_all_pending(): + ctx.logger.info(f"Admin: Found request {request.__dict__}") + if "invalid_func" in request.code.service_func_name: + request.deny(reason="you submitted an invalid code") + else: + request.approve() + + +@sim_activity(trigger=Event.ADMIN_HIGHSIDE_FLOW_COMPLETED) +async def admin_flow( + ctx: SimulatorContext, admin_auth: dict, users: list[dict] +) -> None: + admin_client = sy.login(**admin_auth) + ctx.logger.info("Admin: logged in") + + await asyncio.gather( + admin_register_users(ctx, admin_client, users), + admin_high_create_bq_pool(ctx, admin_client), + admin_high_create_endpoints(ctx, admin_client), + admin_triage_requests(ctx, admin_client), + ) + + +# ---------------------------------- user ---------------------------------- +@sim_activity( + wait_for=[ + Event.ADMIN_ALL_ENDPOINTS_CREATED, + Event.ADMIN_HIGHSIDE_WORKER_POOL_CREATED, + ], + trigger=Event.USER_CAN_QUERY_TEST_ENDPOINT, +) +async def user_bq_test_query(ctx: SimulatorContext, client: sy.DatasiteClient): + """Run query on test endpoint""" + await asyncio.to_thread(bq_test_query, ctx, client) + + +@sim_activity( + wait_for=[ + Event.ADMIN_ALL_ENDPOINTS_CREATED, + Event.ADMIN_HIGHSIDE_WORKER_POOL_CREATED, + ], + trigger=Event.USER_CAN_SUBMIT_QUERY, +) +async def user_bq_submit_query(ctx: SimulatorContext, client: sy.DatasiteClient): + """Submit query to be run on private data""" + await asyncio.to_thread(bq_submit_query, ctx, client) + + +@sim_activity( + wait_for=[Event.GUEST_USERS_CREATED, Event.ADMIN_ALL_ENDPOINTS_CREATED], + trigger=Event.USER_FLOW_COMPLETED, +) +async def user_flow(ctx: SimulatorContext, server_url: str, user: dict): + client = sy.login( + url=server_url, + email=user["email"], + password=user["password"], + ) + ctx.logger.info(f"User: {client.logged_in_user} - logged in") + + await user_bq_test_query(ctx, client) + await user_bq_submit_query(ctx, client) + + +# ---------------------------------- test ---------------------------------- + + +@sim_entrypoint +async def sim_l2_scenario(ctx: SimulatorContext): + 
ctx.events.trigger(Event.INIT) + ctx.logger.info("--- Initializing L2 BigQuery Scenario Test ---") + + users = [ + { + "name": fake.name(), + "email": fake.email(), + "password": "password", + } + for i in range(3) + ] + + server_url = "http://localhost:8080" + deployment_type = os.environ.get("ORCHESTRA_DEPLOYMENT_TYPE", DeploymentType.REMOTE) + ctx.logger.info(f"Deployment type: {deployment_type}") + if deployment_type == DeploymentType.PYTHON: + server = launch_server(server_url, "syft-high") + + admin_auth = { + "url": server_url, + "email": "info@openmined.org", + "password": "changethis", + } + + await asyncio.gather( + admin_flow(ctx, admin_auth, users), + *[user_flow(ctx, server_url, user) for user in users], + ) + + if deployment_type == DeploymentType.PYTHON: + server.land() + + +@pytest.mark.asyncio +async def test_l2_scenario(request): + sim = Simulator("l2_scenario") + + await sim.start( + sim_l2_scenario, + random_wait=None, + check_events=[ + Event.GUEST_USERS_CREATED, + Event.ADMIN_HIGHSIDE_WORKER_POOL_CREATED, + Event.ADMIN_ALL_ENDPOINTS_CREATED, + Event.ADMIN_HIGHSIDE_FLOW_COMPLETED, + ], + timeout=300, + ) diff --git a/tests/scenariosv2/sim/__init__.py b/tests/scenariosv2/sim/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/scenariosv2/sim/core.py b/tests/scenariosv2/sim/core.py new file mode 100644 index 00000000000..4b1ad074b44 --- /dev/null +++ b/tests/scenariosv2/sim/core.py @@ -0,0 +1,200 @@ +# stdlib +import asyncio +from datetime import datetime +from enum import Enum +from functools import wraps +import logging +from pathlib import Path +import random +import time + +LOGS_DIR = Path(__file__).resolve().parents[1] / ".logs" + +logging.Formatter.formatTime = ( + lambda self, record, datefmt=None: datetime.fromtimestamp(record.created).isoformat( + sep="T", + timespec="microseconds", + ) +) + +DEFAULT_FORMATTER = logging.Formatter( + "%(asctime)s - %(threadName)s - %(levelname)s - %(message)s", +) +EVENT_FORMATTER = logging.Formatter( + "%(asctime)s - %(threadName)s - %(message)s", +) + + +def make_logger( + name: str, + instance: str, + formatter=DEFAULT_FORMATTER, + level=logging.INFO, +): + log_file = f"{int(time.time())}_{instance}" + log_path = Path(LOGS_DIR, log_file, name).with_suffix(".log") + log_path.parent.mkdir(parents=True, exist_ok=True) + + logger = logging.getLogger(name) + file_handler = logging.FileHandler(log_path, mode="w") + file_handler.setFormatter(formatter) + logger.addHandler(file_handler) + logger.setLevel(level) + return logger + + +class TestFailure(Exception): + """Custom exception to signal test failures""" + + pass + + +class BaseEvent(Enum): + """Base class for events. 
Subclass this to define your specific events.""" + + pass + + +class EventManager: + def __init__(self, name: str): + self.name = name + self.events = {} + self.logger = make_logger("events", instance=name, level=logging.INFO) + + async def wait_for(self, event: BaseEvent): + if event not in self.events: + self.events[event] = asyncio.Event() + await self.events[event].wait() + + def trigger(self, event: BaseEvent): + if event not in self.events: + self.events[event] = asyncio.Event() + self.logger.info(f"Triggered: {event.name}") + self.events[event].set() + + def is_set(self, event: BaseEvent) -> bool: + if event not in self.events: + return False + return self.events[event].is_set() + + +class SimulatorContext: + def __init__(self, name: str, random_wait=None): + self.name = name + self.events = EventManager(name) + self.random_wait = random_wait + + self.logger = make_logger("activity", instance=name, level=logging.INFO) + self._elogger = make_logger("executions", instance=name, level=logging.DEBUG) + + def unfired_events(self, events: list[BaseEvent]): + evts = filter(lambda e: not self.events.is_set(e), events) + evts = [e.name for e in evts] + return evts + + @staticmethod + async def blocking_call(func, /, *args, **kwargs): + return await asyncio.to_thread(func, *args, **kwargs) + + @staticmethod + async def gather(*tasks): + return await asyncio.gather(*tasks) + + +class Simulator: + def __init__(self, name: str): + self.name = name + + async def start(self, *tasks, check_events=None, random_wait=None, timeout=60): + context = SimulatorContext(self.name, random_wait) + results = None + + try: + results = await asyncio.wait_for( + asyncio.gather(*[task(context) for task in tasks]), + timeout=timeout, + ) + except asyncio.TimeoutError: + unfired_events = context.unfired_events(check_events) + if len(unfired_events) == 0: + # simulator timed out and all events fired + return results + if check_events: + context._elogger.error(f"Timed out. Unfired Events = {unfired_events}") + raise TestFailure( + f"simulator timed out after {timeout}s. Please check logs at {LOGS_DIR} for more details." + ) + + if check_events: + evts = context.unfired_events(check_events) + if evts: + raise TestFailure(f"Unfired events: {evts}") + + return results + + +def sim_entrypoint(func): + @wraps(func) + async def wrapper(ctx: SimulatorContext, *args, **kwargs): + try: + ctx._elogger.info(f"Started: {func.__name__}") + result = await func(ctx, *args, **kwargs) + ctx._elogger.info(f"Completed: {func.__name__}") + return result + except Exception: + ctx._elogger.error( + f"sim_entrypoint - {func.__name__} - Unhandled exception", + exc_info=True, + ) + raise + + return wrapper + + +def sim_activity( + wait_for: BaseEvent | list[BaseEvent] | None = None, + trigger: BaseEvent | None = None, +): + def decorator(func): + @wraps(func) + async def wrapper(ctx: SimulatorContext, *args, **kwargs): + fsig = f"{func.__name__}({args}, {kwargs})" + + # ! 
todo: this isn't working + _wait_for = kwargs.get("wait_for", wait_for) + _trigger = kwargs.get("after", trigger) + + if _wait_for: + ctx._elogger.debug(f"Blocked: for={_wait_for} {fsig}") + if isinstance(_wait_for, list): + await asyncio.gather(*[ctx.events.wait_for(e) for e in _wait_for]) + else: + await ctx.events.wait_for(_wait_for) + ctx._elogger.debug(f"Unblocked: {fsig}") + + wait = 0 + if ctx.random_wait: + wait = random.uniform(*ctx.random_wait) + await asyncio.sleep(wait) + + try: + ctx._elogger.info(f"Started: {fsig} time_wait={wait:.3f}s") + start = time.time() + result = await func(ctx, *args, **kwargs) + total = time.time() - start + ctx._elogger.info(f"Completed: {fsig} time_taken={total:.3f}s") + + if _trigger: + ctx.events.trigger(_trigger) + ctx.logger.info(f"Triggering event: {_trigger.name}") + + return result + except Exception as e: + ctx._elogger.error( + f"sim_activity - {fsig} - Unhandled exception", exc_info=True + ) + raise TestFailure(e) + + return wrapper + + return decorator diff --git a/tox.ini b/tox.ini index 58ab2016277..2c410f3e9aa 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,9 @@ [tox] envlist = + dev.k8s.launch.datasite + dev.k8s.launch.gateway + dev.k8s.launch.datasite.highlow + dev.k8s.destroy.datasite.highlow dev.k8s.registry dev.k8s.start dev.k8s.deploy @@ -8,7 +12,7 @@ envlist = dev.k8s.cleanup dev.k8s.destroy dev.k8s.destroyall - hagrid.publish + dev.k8s.install.signoz lint stack.test.integration syft.docs @@ -16,12 +20,19 @@ envlist = syft.publish syft.test.security syft.test.unit + syft.test.scenario + syft.test.scenario.sync syft.test.notebook + syft.test.notebook.scenario + syft.test.notebook.scenario.sync + single_container.launch + single_container.destroy stack.test.notebook - stack.test.integration.enclave.oblv stack.test.integration.k8s - stack.test.vm - stack.test.podman + stack.test.notebook.scenario.k8s + stack.test.notebook.scenario.k8s.sync + stack.test.scenario.k8s + stack.test.scenario.k8s.sync frontend.test.unit frontend.test.e2e frontend.generate.types @@ -33,15 +44,23 @@ envlist = syftcli.test.unit syftcli.publish syftcli.build + seaweedfs.test.unit backend.test.basecpu e2e.test.notebook + migration.prepare + migration.test + migration.k8s.prepare + migration.k8s.test + syft.api.snapshot skipsdist = True [testenv] -basepython = python3 +basepython = {env:TOX_PYTHON:python3} commands = python --version +setenv = + UV_HTTP_TIMEOUT = 600 # Syft [testenv:syft] @@ -65,16 +84,6 @@ allowlist_externals = commands = bash -c 'uv pip list || pip list' -[testenv:hagrid] -deps = - -e{toxinidir}/packages/hagrid[dev] -changedir = {toxinidir}/packages/hagrid -description = Syft -allowlist_externals = - bash -commands = - bash -c 'uv pip list || pip list' - [testenv:syftcli] deps = -e{toxinidir}/packages/syftcli[dev] @@ -94,14 +103,6 @@ commands = python -c 'from shutil import rmtree; rmtree("build", True); rmtree("dist", True)' python -m build . -[testenv:hagrid.publish] -changedir = {toxinidir}/packages/hagrid -description = Build and Publish Hagrid Wheel -deps = - build -commands = - python -c 'from shutil import rmtree; rmtree("build", True); rmtree("dist", True)' - python -m build . [testenv:syftcli.publish] changedir = {toxinidir}/packages/syftcli @@ -115,7 +116,6 @@ commands = python -m build . 
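For reference, here is a minimal usage sketch of how the sim framework added above in tests/scenariosv2/sim/core.py is meant to be composed (this snippet is not part of the diff; the `DemoEvent` enum, activity names, and test are hypothetical, and the relative imports assume the file would sit next to l0_test.py under tests/scenariosv2/). Activities declare the events they `wait_for` / `trigger`, the entrypoint gathers them, and `Simulator.start` raises `TestFailure` if any of the `check_events` never fire before the timeout.

# stdlib
import asyncio

# third party
import pytest

# relative (assumed location: tests/scenariosv2/)
from .sim.core import BaseEvent
from .sim.core import Simulator
from .sim.core import SimulatorContext
from .sim.core import sim_activity
from .sim.core import sim_entrypoint


class DemoEvent(BaseEvent):
    # hypothetical events for illustration only
    INIT = "init"
    SETUP_DONE = "setup_done"
    FLOW_COMPLETED = "flow_completed"


@sim_activity(trigger=DemoEvent.SETUP_DONE)
async def setup_flow(ctx: SimulatorContext):
    ctx.logger.info("Setup: doing work")
    await asyncio.sleep(0.1)  # stand-in for real setup work


@sim_activity(wait_for=DemoEvent.SETUP_DONE, trigger=DemoEvent.FLOW_COMPLETED)
async def dependent_flow(ctx: SimulatorContext):
    # blocked by the decorator until setup_flow triggers SETUP_DONE
    ctx.logger.info("Dependent: running after setup")


@sim_entrypoint
async def sim_demo(ctx: SimulatorContext):
    ctx.events.trigger(DemoEvent.INIT)
    await asyncio.gather(setup_flow(ctx), dependent_flow(ctx))


@pytest.mark.asyncio
async def test_demo_scenario():
    sim = Simulator("demo_scenario")
    # fails with TestFailure if any check_events are still unfired at timeout
    await sim.start(
        sim_demo,
        check_events=[DemoEvent.SETUP_DONE, DemoEvent.FLOW_COMPLETED],
        timeout=30,
    )

This mirrors the structure of l0_test.py and l2_test.py above: per-role flows run concurrently via asyncio.gather, and ordering between roles is expressed purely through events rather than explicit awaits between flows.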
[testenv:syftcli.build] -basepython = python3 changedir = {toxinidir}/packages/syftcli description = Build SyftCLI Binary for each platform deps = @@ -175,8 +175,6 @@ commands = [testenv:frontend.test.unit] description = Frontend Unit Tests -deps = - {[testenv:hagrid]deps} allowlist_externals = docker bash @@ -193,148 +191,10 @@ commands = pnpm install; \ pnpm run test:unit; \ else \ - docker build --target grid-ui-tests -t ui-test -f frontend.dockerfile .; \ + docker build --target syft-ui-tests -t ui-test -f frontend.dockerfile .; \ docker run -t ui-test; \ fi' -[testenv:frontend.test.e2e] -description = Frontend Unit Tests -deps = - {[testenv:hagrid]deps} -allowlist_externals = - docker - bash - pnpm - sleep -passenv=HOME, USER -changedir = {toxinidir}/packages/grid/frontend -setenv = - HAGRID_FLAGS = {env:HAGRID_FLAGS:--tag=local --test} - ENABLE_SIGNUP=True -commands = - bash ./scripts/check_pnpm.sh - - bash -c "echo Running with HAGRID_FLAGS=$HAGRID_FLAGS; date" - - ; install hagrid - bash -c 'if [[ "$HAGRID_FLAGS" == *"local"* ]]; then \ - uv pip install -e "../../hagrid"; \ - else \ - uv pip install --force hagrid; \ - fi' - - ; fix windows encoding - - chcp 65001 - - ; check docker versions - bash -c "docker --version" - bash -c "docker compose version" - - ; reset volumes and create nodes - bash -c "echo Starting Nodes; date" - bash -c "docker rm -f $(docker ps -a -q) || true" - bash -c 'docker volume rm -f $(docker volume ls -q --filter "label=orgs.openmined.syft") || true' - - bash -c 'HAGRID_ART=$HAGRID_ART hagrid launch test_domain_1 domain to docker:9081 $HAGRID_FLAGS --enable-signup --no-health-checks --verbose --no-warnings' - - bash -c '(docker logs test-domain-1-frontend-1 -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' - bash -c '(docker logs test_domain_1-backend-1 -f &) | grep -q "Application startup complete" || true' - - pnpm install - pnpm dlx playwright@1.36.1 install --with-deps - pnpm test:e2e - - ; shutdown - bash -c "echo Killing Nodes; date" - bash -c 'HAGRID_ART=false hagrid land all --force' - bash -c 'docker volume rm -f $(docker volume ls -q --filter "label=orgs.openmined.syft") || true' - - -[testenv:stack.test.integration] -description = Integration Tests for Core Stack -deps = - {[testenv:syft]deps} - {[testenv:hagrid]deps} - pytest -changedir = {toxinidir} -allowlist_externals = - docker - grep - sleep - bash - chcp -passenv=HOME, USER, AZURE_BLOB_STORAGE_KEY -setenv = - HAGRID_FLAGS = {env:HAGRID_FLAGS:--tag=local --release=development --dev} - EMULATION = {env:EMULATION:false} - HAGRID_ART = false - PYTHONIOENCODING = utf-8 - PYTEST_MODULES = {env:PYTEST_MODULES:frontend container_workload network} -commands = - bash -c "whoami; id;" - - bash -c "echo Running with HAGRID_FLAGS=$HAGRID_FLAGS EMULATION=$EMULATION PYTEST_MODULES=$PYTEST_MODULES; date" - - ; install syft and hagrid - bash -c 'if [[ "$HAGRID_FLAGS" == *"latest"* ]]; then \ - echo "Installing latest syft and hagrid"; \ - uv pip install --force hagrid syft; \ - elif [[ "$HAGRID_FLAGS" == *"beta"* ]]; then \ - echo "Installing beta syft and hagrid"; \ - uv pip install --force hagrid; \ - uv pip install --force -U --pre syft; \ - else \ - echo "Using local syft and hagrid"; \ - fi' - - ; fix windows encoding - - chcp 65001 - - ; check docker versions - bash -c "docker --version" - bash -c "docker compose version" - - ; reset volumes and create nodes - bash -c "echo Starting Nodes; date" - bash -c 'docker rm -f $(docker ps -a -q --filter "label=orgs.openmined.syft") 
|| true' - bash -c 'docker volume rm -f $(docker volume ls -q --filter "label=orgs.openmined.syft") || true' - - python -c 'import syft as sy; sy.stage_protocol_changes()' - - ; Make sure that pacakge-cache is owned by the current user - ; instead of docker creating it as root - bash -c 'mkdir -p packages/grid/data/package-cache' - - bash -c 'HAGRID_ART=$HAGRID_ART hagrid launch test-gateway-1 gateway to docker:9081 $HAGRID_FLAGS --no-health-checks --verbose --no-warnings --build' - bash -c 'HAGRID_ART=$HAGRID_ART hagrid launch test-domain-1 domain to docker:9082 $HAGRID_FLAGS --no-health-checks --enable-signup --verbose --no-warnings --build' - ; bash -c 'HAGRID_ART=$HAGRID_ART hagrid launch test-domain-2 domain to docker:9083 --headless $HAGRID_FLAGS --enable-signup --no-health-checks --verbose --no-warnings --build' - - ; wait for nodes to start - docker ps - bash -c "echo Waiting for Nodes; date" - bash -c '(docker logs test-domain-1-frontend-1 -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' - bash -c '(docker logs test-domain-1-backend-1 -f &) | grep -q "Application startup complete" || true' - ; bash -c '(docker logs test_domain_2-backend-1 -f &) | grep -q "Application startup complete" || true' - bash -c '(docker logs test-gateway-1-backend-1 -f &) | grep -q "Application startup complete" || true' - - bash -c '\ - PYTEST_MODULES=($PYTEST_MODULES); \ - for i in "${PYTEST_MODULES[@]}"; do \ - echo "Starting test for $i"; date; \ - pytest tests/integration -m $i -vvvv -p no:randomly -p no:benchmark -o log_cli=True --capture=no; \ - return=$?; \ - echo "Finished $i"; \ - date; \ - if [[ $return -ne 0 ]]; then \ - exit $return; \ - fi; \ - done' - - ; shutdown - bash -c "echo Killing Nodes; date" - bash -c 'HAGRID_ART=false hagrid land all --force' - bash -c 'docker rm -f $(docker ps -a -q --filter "label=orgs.openmined.syft") || true' - bash -c 'docker volume rm -f $(docker volume ls -q --filter "label=orgs.openmined.syft") || true' [testenv:syft.docs] @@ -342,7 +202,6 @@ description = Build Docs for Syft changedir = {toxinidir}/docs deps = {[testenv:syft]deps} - {[testenv:hagrid]deps} -r {toxinidir}/docs/requirements.txt allowlist_externals = make @@ -363,11 +222,16 @@ commands = description = Jupyter Notebook with Editable Syft deps = {[testenv:syft]deps} - {[testenv:hagrid]deps} jupyter jupyterlab +allowlist_externals = + bash commands = - jupyter lab --ip 0.0.0.0 --ServerApp.token={posargs} + bash -c 'if [ -z "{posargs}" ]; then \ + jupyter lab --ip 0.0.0.0; \ + else \ + jupyter lab --ip 0.0.0.0 --ServerApp.token={posargs}; \ + fi' [testenv:syft.protocol.check] description = Syft Protocol Check @@ -385,23 +249,41 @@ commands = python -c "import syft as sy; sy.bump_protocol_version()"; \ fi' +[testenv:syft.api.snapshot] +description = Syft API Snapshot Check +deps = + {[testenv:syft-minimal]deps} +changedir = {toxinidir}/packages/syft +allowlist_externals = + bash +setenv = + SAVE_SNAP = {env:SAVE_SNAP:False} + STABLE_RELEASE = {env:STABLE_RELEASE:False} +commands = + bash -c "echo Using SAVE_SNAP=${SAVE_SNAP}, STABLE_RELEASE=${STABLE_RELEASE}" + python -c 'import syft as sy; sy.show_api_diff()' + bash -c 'if [[ "$SAVE_SNAP" != "False" ]]; then \ + python -c "import syft as sy; sy.take_api_snapshot()"; \ + fi' + + [testenv:syft.test.security] description = Security Checks for Syft changedir = {toxinidir}/packages/syft deps = {[testenv:syft]deps} - {[testenv:hagrid]deps} commands = bandit -r src - # ansible 8.4.0 # restrictedpython 6.2 - safety check -i 60840 
-i 54229 -i 54230 -i 42923 -i 54230 -i 54229 -i 62044 -i 65213 + # Temporarily ignore pytorch vulnerability warning here + # https://data.safetycli.com/v/71670/97c + # TODO: Remove `-i 71670` once torch is updated + safety check -i 70612 -i 71670 -i 74882 [testenv:syft.test.unit] description = Syft Unit Tests deps = {[testenv:syft]deps} - {[testenv:hagrid]deps} allowlist_externals = bash uv @@ -412,41 +294,89 @@ commands = bash -c 'ulimit -n 4096 || true' pytest -n auto --dist loadgroup --durations=20 --disable-warnings -[testenv:stack.test.integration.enclave.oblv] -description = Integration Tests for Oblv Enclave +[testenv:syft.test.scenario] +description = BigQuery Scenario Tests on Python Servers (L2) changedir = {toxinidir} +setenv = + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:python} deps = {[testenv:syft]deps} - {[testenv:hagrid]deps} - oblv-ctl==0.3.1 + pytest-asyncio + pytest-timeout + db-dtypes + google-cloud-bigquery allowlist_externals = - grep bash -passenv=HOME, USER -setenv = - LOCAL_ENCLAVE_PORT=8010 - OBLV_ENABLED=true - OBLV_LOCALHOST_PORT=8010 - ENABLE_SIGNUP=True + pytest commands = - # run at start to kill any process started beforehand - bash -c 'chmod +x scripts/kill_process_in_port.sh && ./scripts/kill_process_in_port.sh $LOCAL_ENCLAVE_PORT' + bash -c "echo Running L2 BigQuery Scenario Tests with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE" + bash -c "pytest -s --disable-warnings tests/scenariosv2/l2_test.py" - bash -c 'rm -rf ~/.syft/syft-enclave' - bash -c 'git clone https://github.com/OpenMined/syft-enclave.git ~/.syft/syft-enclave || true' - bash -c 'cd ~/.syft/syft-enclave && git fetch && git checkout dev && git pull && uv pip install -r requirements_test.txt || true' +[testenv:syft.test.scenario.sync] +description = BigQuery Scenario Tests on Python Servers (L0) +changedir = {toxinidir} +setenv = + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:python} +deps = + {[testenv:syft]deps} + pytest-asyncio + pytest-timeout + db-dtypes + google-cloud-bigquery +allowlist_externals = + bash + pytest +commands = + bash -c "echo Running L0 BigQuery Scenario Tests with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE" + bash -c "pytest -s --disable-warnings tests/scenariosv2/l0_test.py" - # Starting FastAPI server locally - bash -c 'cd ~/.syft/syft-enclave/src && uvicorn app:app --host 0.0.0.0 --port $LOCAL_ENCLAVE_PORT > /dev/null 2>&1 &' +[testenv:stack.test.scenario.k8s] +description = BigQuery Scenario Tests on K8s (L2) +changedir = {toxinidir} +setenv = + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} +deps = + {[testenv:syft]deps} + pytest-asyncio + pytest-timeout +allowlist_externals = + bash + just + pytest +commands_pre = + just delete-all start-high deploy-high wait-high +commands = + bash -c "echo Running L2 BigQuery Scenario Tests on K8s with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE" + bash -c "pytest -s --disable-warnings tests/scenariosv2/l2_test.py" +commands_post = + just delete-all - bash -c 'cd tests/integration/external/oblv && pytest -p no:randomly -vvvv' - bash -c 'chmod +x scripts/kill_process_in_port.sh && ./scripts/kill_process_in_port.sh $LOCAL_ENCLAVE_PORT' +[testenv:stack.test.scenario.k8s.sync] +description = BigQuery Scenario Tests on K8s (L0) +changedir = {toxinidir} +setenv = + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} +deps = + {[testenv:syft]deps} + pytest-asyncio + pytest-timeout +allowlist_externals = + bash + just + pytest +commands_pre = + 
just delete-all start-high deploy-high wait-high + just start-low deploy-low wait-low +commands = + bash -c "echo Running L0 BigQuery Scenario Tests on K8s with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE" + bash -c "pytest -s --disable-warnings tests/scenariosv2/l0_test.py" +commands_post = + just delete-all [testenv:syft.test.notebook] description = Syft Notebook Tests deps = -e{toxinidir}/packages/syft[dev,data_science] - {[testenv:hagrid]deps} nbmake changedir = {toxinidir}/notebooks allowlist_externals = @@ -456,145 +386,300 @@ setenv = DEV_MODE = {env:DEV_MODE:True} TEST_NOTEBOOK_PATHS = {env:TEST_NOTEBOOK_PATHS:api/0.8,tutorials} ENABLE_SIGNUP={env:ENABLE_SIGNUP:False} + BUMP_PROTOCOL={env:BUMP_PROTOCOL:False} commands = + bash -c 'if [[ $BUMP_PROTOCOL == "True" ]]; then \ + python -c "import syft as sy; sy.bump_protocol_version()"; \ + fi;' bash -c "echo Running with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE DEV_MODE=$DEV_MODE TEST_NOTEBOOK_PATHS=$TEST_NOTEBOOK_PATHS; ENABLE_SIGNUP=$ENABLE_SIGNUP; date" bash -c "for subfolder in $(echo ${TEST_NOTEBOOK_PATHS} | tr ',' ' '); do \ if [[ $subfolder == *tutorials* ]]; then \ - pytest --nbmake "$subfolder" -p no:randomly --ignore=tutorials/model-training -n $(python -c 'import multiprocessing; print(multiprocessing.cpu_count())') -vvvv && \ - pytest --nbmake tutorials/model-training -p no:randomly -vvvv; \ + pytest -x --nbmake "$subfolder" \ + -p no:randomly \ + --ignore=tutorials/model-training \ + --ignore=tutorials/model-auditing \ + --ignore=tutorials/version-upgrades \ + -n $(python -c 'import multiprocessing; print(multiprocessing.cpu_count())') \ + -vvvv && \ + pytest -x --nbmake tutorials/model-training -p no:randomly -vvvv; \ else \ - pytest --nbmake "$subfolder" -p no:randomly -k 'not 11-container-images-k8s.ipynb' -vvvv; \ + pytest -x --nbmake "$subfolder" -p no:randomly -k 'not 11-container-images-k8s.ipynb and not 01-user-log.ipynb' -vvvv; \ fi \ done" - ; pytest --nbmake api/0.8 -p no:randomly -vvvv - ; pytest --nbmake api/0.9 -p no:randomly -vvvv - ; pytest --nbmake tutorials -p no:randomly -vvvv - ; pytest --nbmake tutorials/pandas-cookbook -p no:randomly -vvvv + ; pytest -x --nbmake api/0.8 -p no:randomly -vvvv + ; pytest -x --nbmake api/0.9 -p no:randomly -vvvv + ; pytest -x --nbmake tutorials -p no:randomly -vvvv + ; pytest -x --nbmake tutorials/pandas-cookbook -p no:randomly -vvvv - -[testenv:stack.test.notebook] -description = Stack Notebook Tests +# This is testing BQ without syncing and with in-memory python +[testenv:syft.test.notebook.scenario] +description = Syft Notebook Scenario Tests deps = {[testenv:syft]deps} - {[testenv:hagrid]deps} nbmake + db-dtypes + google-cloud-bigquery + aiosmtpd changedir = {toxinidir}/notebooks allowlist_externals = bash setenv = - ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:single_container} + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:python} DEV_MODE = {env:DEV_MODE:True} - TEST_NOTEBOOK_PATHS = {env:TEST_NOTEBOOK_PATHS:api/0.8} - ENABLE_SIGNUP=True + TEST_NOTEBOOK_PATHS = {env:TEST_NOTEBOOK_PATHS:scenarios/bigquery} + TEST_query_limit_size={env:test_query_limit_size:500000} + SERVER_URL = {env:SERVER_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} + NUM_TEST_USERS = {env:NUM_TEST_USERS:5} + NUM_TEST_JOBS = {env:NUM_TEST_JOBS:10} commands = + python --version + bash -c "echo Running with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE DEV_MODE=$DEV_MODE TEST_NOTEBOOK_PATHS=$TEST_NOTEBOOK_PATHS; date" + bash -c 
"for subfolder in $(echo ${TEST_NOTEBOOK_PATHS} | tr ',' ' ');\ + do \ + pytest -s -x --nbmake --nbmake-timeout=1000 "$subfolder" --ignore=scenarios/bigquery/sync --ignore=scenarios/bigquery/upgradability -p no:randomly -vvvv --log-cli-level=DEBUG --capture=no;\ + done" - # Volume cleanup - bash -c 'hagrid land all --force || true' - bash -c 'docker volume rm -f $(docker volume ls -q --filter "label=orgs.openmined.syft") || true' +# This is testing BQ with syncing and with in-memory python +[testenv:syft.test.notebook.scenario.sync] +description = Syft Notebook Scenario Tests +deps = + {[testenv:syft]deps} + nbmake + db-dtypes + google-cloud-bigquery + aiosmtpd +changedir = {toxinidir}/notebooks +allowlist_externals = + bash +setenv = + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:python} + DEV_MODE = {env:DEV_MODE:True} + TEST_NOTEBOOK_PATHS = {env:TEST_NOTEBOOK_PATHS:scenarios/bigquery/sync} + TEST_BIGQUERY_APIS_LIVE = {env:TEST_BIGQUERY_APIS_LIVE:false} + TEST_query_limit_size={env:test_query_limit_size:500000} + SERVER_URL = {env:SERVER_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} +commands = bash -c "echo Running with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE DEV_MODE=$DEV_MODE TEST_NOTEBOOK_PATHS=$TEST_NOTEBOOK_PATHS; date" bash -c "for subfolder in $(echo ${TEST_NOTEBOOK_PATHS} | tr ',' ' ');\ do \ - pytest --nbmake "$subfolder" -p no:randomly -vvvv -k 'not 11-container-images-k8s.ipynb' --nbmake-timeout=1000;\ + pytest -s -x --nbmake --nbmake-timeout=1000 "$subfolder" -p no:randomly -vvvv;\ done" - ; pytest --nbmake api/0.8 -p no:randomly -vvvv - ; pytest --nbmake api/0.9 -p no:randomly -vvvv - ; pytest --nbmake tutorials -p no:randomly -vvvv - ; pytest --nbmake tutorials/pandas-cookbook -p no:randomly -vvvv - bash -c 'hagrid land all --force' - bash -c 'docker volume rm -f $(docker volume ls -q --filter "label=orgs.openmined.syft") || true' +# This is testing BQ without syncing over k8s +[testenv:stack.test.notebook.scenario.k8s] +description = Scenario Notebook Tests for Core Stack using K8s +deps = + {[testenv:syft]deps} + nbmake + db-dtypes + google-cloud-bigquery + aiosmtpd +changedir = {toxinidir} +passenv=HOME, USER +allowlist_externals = + devspace + kubectl + grep + sleep + bash + k3d + echo + tox +setenv = + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} + DEVSPACE_PROFILE = bigquery-scenario-tests + GITHUB_CI = {env:GITHUB_CI:false} + SYFT_BASE_IMAGE_REGISTRY = {env:SYFT_BASE_IMAGE_REGISTRY:k3d-registry.localhost:5800} + DATASITE_CLUSTER_NAME = {env:DATASITE_CLUSTER_NAME:bigquery-high} + SERVER_URL = {env:SERVER_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} + TEST_EXTERNAL_REGISTRY = {env:TEST_EXTERNAL_REGISTRY:k3d-registry.localhost:5800} + TEST_QUERY_LIMIT_SIZE={env:TEST_QUERY_LIMIT_SIZE:500000} + TRACING={env:TRACING:False} + NUM_TEST_USERS = {env:NUM_TEST_USERS:5} + NUM_TEST_JOBS = {env:NUM_TEST_JOBS:10} + NUM_TEST_WORKERS = {env:NUM_TEST_WORKERS:2} +commands = + bash -c "python --version || true" + bash -c "echo Running with GITHUB_CI=$GITHUB_CI; date" + bash -c "echo Running with TEST_EXTERNAL_REGISTRY=$TEST_EXTERNAL_REGISTRY; date" + python -c 'import syft as sy; sy.stage_protocol_changes()' + k3d version + + # Deleting Old Cluster + bash -c "k3d cluster delete ${DATASITE_CLUSTER_NAME} || true" + + # Deleting registry & volumes + bash -c "k3d registry delete k3d-registry.localhost || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" 
-[testenv:stack.test.vm] -description = Stack VM Tests + # Create registry + tox -e dev.k8s.registry + + + # Creating bigquery-high cluster on port SERVER_PORT + bash -c '\ + export CLUSTER_NAME=${DATASITE_CLUSTER_NAME} CLUSTER_HTTP_PORT=${SERVER_PORT} && \ + tox -e dev.k8s.start && \ + tox -e dev.k8s.deploy' + + ; # free up build cache after build of images + ; bash -c 'if [[ "$GITHUB_CI" != "false" ]]; then \ + ; docker image prune --all --force; \ + ; docker builder prune --all --force; \ + ; fi' + + ; sleep 30 + + + # wait for bigquery-high + bash packages/grid/scripts/wait_for.sh service postgres --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service frontend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash -c '(kubectl logs service/frontend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' + + # Checking logs generated & startup of bigquery-high + bash -c '(kubectl logs service/backend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' + + bash -c "pytest -s -x --nbmake notebooks/scenarios/bigquery -p no:randomly --ignore=notebooks/scenarios/bigquery/sync --ignore=notebooks/scenarios/bigquery/upgradability -vvvv --nbmake-timeout=1000 --log-cli-level=DEBUG --capture=no;" + + # deleting clusters created + bash -c "CLUSTER_NAME=${DATASITE_CLUSTER_NAME} tox -e dev.k8s.destroy || true" + bash -c "k3d registry delete k3d-registry.localhost || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" + + +# This is testing BQ with syncing over k8s +[testenv:stack.test.notebook.scenario.k8s.sync] +description = Syft Notebook Scenario Tests over k8s deps = {[testenv:syft]deps} - {[testenv:hagrid]deps} nbmake + db-dtypes + google-cloud-bigquery + aiosmtpd +changedir = {toxinidir}/notebooks allowlist_externals = - cd - vagrant bash -changedir = {toxinidir} + devspace setenv = - ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:vm} - VAGRANT_DESTROY = {env:VAGRANT_DESTROY:skip} + DEV_MODE = {env:DEV_MODE:True} + DEVSPACE_PROFILE = bigquery-scenario-tests + TEST_NOTEBOOK_PATHS = {env:TEST_NOTEBOOK_PATHS:scenarios/bigquery/sync} + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} + CLUSTER_NAME_HIGH = {env:CLUSTER_NAME_HIGH:bigquery-high} + CLUSTER_NAME_LOW = {env:CLUSTER_NAME_LOW:bigquery-low} + CLUSTER_HTTP_PORT_HIGH={env:CLUSTER_HTTP_PORT_HIGH:9081} + CLUSTER_HTTP_PORT_LOW={env:CLUSTER_HTTP_PORT_LOW:9083} + SYFT_BASE_IMAGE_REGISTRY = {env:SYFT_BASE_IMAGE_REGISTRY:k3d-registry.localhost:5800} + TEST_EXTERNAL_REGISTRY = {env:TEST_EXTERNAL_REGISTRY:k3d-registry.localhost:5800} commands = - bash -c 'if [[ "$(uname -m)" == *"arm"* ]]; then \ - export VAGRANT_BOX="ubuntu-22-04-arm64"; \ - elif [[ "$(uname -m)" == *"x86"* ]]; then \ - export VAGRANT_BOX="ubuntu-22-04-x86"; \ - else \ - echo "Unsupported architecture."; \ - fi; \ - echo $VAGRANT_BOX; \ - cd packages/grid; \ - if [[ "$VAGRANT_DESTROY" == *"true"* ]]; then \ - vagrant destroy $VAGRANT_BOX --force || true; \ - else \ - vagrant ssh 
$VAGRANT_BOX -c "docker ps -aq | xargs -I {:} docker rm {:} --force"; \ - vagrant ssh $VAGRANT_BOX -c "docker volume prune --filter all=1 --force || true"; \ - fi; \ - vagrant up $VAGRANT_BOX --provision; \ - ' + bash -c "echo Running highlow with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE DEV_MODE=$DEV_MODE TEST_NOTEBOOK_PATHS=$TEST_NOTEBOOK_PATHS; date" + bash -c 'tox -e dev.k8s.destroy.datasite.highlow' + bash -c "k3d registry delete k3d-registry.localhost || true" + bash -c "docker volume rm k3d-${CLUSTER_NAME_HIGH}-images --force || true" + bash -c "docker volume rm k3d-${CLUSTER_NAME_LOW}-images --force || true" - pytest --nbmake notebooks/api/0.8 -p no:randomly -vvvv - ; pytest --nbmake notebooks/api/0.9 -p no:randomly -vvvv + # Now create everything + bash -c 'tox -e dev.k8s.launch.datasite.highlow' - bash -c 'if [[ "$(uname -m)" == *"arm"* ]]; then \ - export VAGRANT_BOX="ubuntu-22-04-arm64"; \ - elif [[ "$(uname -m)" == *"x86"* ]]; then \ - export VAGRANT_BOX="ubuntu-22-04-x86"; \ - else \ - echo "Unsupported architecture."; \ - fi; \ - echo $VAGRANT_BOX; \ - cd packages/grid; \ - if [[ "$VAGRANT_DESTROY" == *"true"* ]]; then \ - vagrant destroy $VAGRANT_BOX --force || true; \ - fi; \ - ' + bash -c "for subfolder in $(echo ${TEST_NOTEBOOK_PATHS} | tr ',' ' ');\ + do \ + pytest -x --nbmake --nbmake-timeout=1000 "$subfolder" -p no:randomly -vvvv;\ + done" -[testenv:stack.test.podman] -description = Stack podman Tests for Rhel & Centos + # Clean up again + bash -c 'tox -e dev.k8s.destroy.datasite.highlow' + bash -c "k3d registry delete k3d-registry.localhost || true" + bash -c "docker volume rm k3d-${CLUSTER_NAME_HIGH}-images --force || true" + bash -c "docker volume rm k3d-${CLUSTER_NAME_LOW}-images --force || true" + + +[testenv:single_container.launch] +description = Launch a single backend container using the dockerfile +changedir = {toxinidir}/packages +setenv = + N_CONSUMERS = {env:N_CONSUMERS:1} + SERVER_NAME = {env:SERVER_NAME:test_datasite_sc} + SERVER_TYPE = {env:SERVER_TYPE:datasite} + SERVER_PORT = {env:SERVER_PORT:8080} +allowlist_externals = + bash +commands = + bash -c 'tox -e single_container.destroy' + bash -c 'docker build -f grid/backend/backend.dockerfile . 
-t openmined/syft-backend:local-dev' + bash -c 'docker run -d \ + -e SERVER_NAME=${SERVER_NAME} \ + -e SERVER_TYPE=${SERVER_TYPE} \ + -e N_CONSUMERS=${N_CONSUMERS} \ + -e SINGLE_CONTAINER_MODE=true \ + -e CREATE_PRODUCER=true \ + -e INMEMORY_WORKERS=true \ + -p ${SERVER_PORT}:80 --add-host=host.docker.internal:host-gateway \ + --name ${SERVER_NAME} openmined/syft-backend:local-dev' + +[testenv:single_container.destroy] +description = Destroy the single backend container run using single_container.launch +changedir = {toxinidir}/packages +setenv = + SERVER_NAME = {env:SERVER_NAME:test_datasite_sc} +allowlist_externals = + bash +commands = + # Image is not cleaned up + bash -c 'docker stop ${SERVER_NAME} || true' + bash -c 'docker rm ${SERVER_NAME} || true' + +[testenv:stack.test.notebook] +description = Stack Notebook Tests deps = {[testenv:syft]deps} - {[testenv:hagrid]deps} nbmake +changedir = {toxinidir}/notebooks allowlist_externals = - cd - vagrant bash -changedir = {toxinidir} setenv = - ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:podman} - NODE_PORT = {env:NODE_PORT:8080} + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} + DEV_MODE = {env:DEV_MODE:True} + TEST_NOTEBOOK_PATHS = {env:TEST_NOTEBOOK_PATHS:api/0.8} + ENABLE_SIGNUP=True + SERVER_URL = {env:SERVER_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} commands = - python -c 'import syft as sy; sy.stage_protocol_changes()' - bash -c "podman pod rm --force --all || true"; - bash -c "podman system prune --volumes --force || true"; - bash -c "podman volume rm $(podman volume ls -q)||true"; - # Force Removal of images - bash -c "podman image prune --all --force || true"; + # Volume cleanup + bash -c 'docker volume rm -f $(docker volume ls -q --filter "label=orgs.openmined.syft") || true' + bash -c 'docker volume rm -f $(docker volume ls -q --filter "label=com.docker.volume.anonymous") || true' + bash -c 'docker network rm -f $(docker network ls -q --filter "label=orgs.openmined.syft") || true' + + bash -c "echo Running with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE DEV_MODE=$DEV_MODE TEST_NOTEBOOK_PATHS=$TEST_NOTEBOOK_PATHS; date" + bash -c "for subfolder in $(echo ${TEST_NOTEBOOK_PATHS} | tr ',' ' ');\ + do \ + pytest -x --nbmake --nbmake-timeout=1000 "$subfolder" -p no:randomly -vvvv -k 'not 11-container-images-k8s.ipynb and not 12-custom-api-endpoint.ipynb';\ + done" + + ; pytest -x --nbmake --nbmake-timeout=1000 api/0.8 -p no:randomly -vvvv + ; pytest -x --nbmake --nbmake-timeout=1000 api/0.9 -p no:randomly -vvvv + ; pytest -x --nbmake --nbmake-timeout=1000 tutorials -p no:randomly -vvvv + ; pytest -x --nbmake --nbmake-timeout=1000 tutorials/pandas-cookbook -p no:randomly -vvvv - # Build Backend Image - bash -c "SYFT_VERSION=$(python packages/grid/VERSION) && podman build -t docker.io/openmined/grid-backend:$SYFT_VERSION -f packages/grid/backend/backend.dockerfile --target backend packages"; + bash -c 'docker volume rm -f $(docker volume ls -q --filter "label=orgs.openmined.syft") || true' + bash -c 'docker volume rm -f $(docker volume ls -q --filter "label=com.docker.volume.anonymous") || true' + bash -c 'docker network rm -f $(docker network ls -q --filter "label=orgs.openmined.syft") || true' - # Build Frontend Image - bash -c "SYFT_VERSION=$(python packages/grid/VERSION) && podman build -t docker.io/openmined/grid-frontend:$SYFT_VERSION -f packages/grid/frontend/frontend.dockerfile --target grid-ui-development packages/grid/frontend"; - bash -c 'cd 
packages/grid/podman/podman-kube && podman play kube podman-syft-kube.yaml --configmap=podman-syft-kube-config.yaml' - bash -c '(podman logs -f syft-backend &) | grep -q "Application startup complete" || true' - pytest --nbmake notebooks/api/0.8 -p no:randomly -vvvv [testenv:frontend.generate.types] description = Generate Types for Frontend deps = {[testenv:syft]deps} - {[testenv:hagrid]deps} allowlist_externals = cd bash @@ -610,22 +695,150 @@ commands = bash -c 'rm -rf ./schema' bash -c 'rm -rf ./src/types/generated' - ; generate new ones - bash -c 'python3 -c "import syft as sy;sy.util.schema.generate_json_schemas()"' - bash -c "json2ts -i './schema/**/*.json' -o ./src/types/generated" - bash -c "python3 ./scripts/replace_imports.py ./src/types/generated" + ; generate new ones + bash -c 'python3 -c "import syft as sy;sy.util.schema.generate_json_schemas()"' + bash -c "json2ts -i './schema/**/*.json' -o ./src/types/generated" + bash -c "python3 ./scripts/replace_imports.py ./src/types/generated" + +[mypy] +python_version = 3.12 +disable_error_code = attr-defined, valid-type, no-untyped-call, arg-type + +[testenv:syft.test.integration] +description = Integration Tests for Syft Stack +deps = + {[testenv:syft]deps} + pytest-asyncio +changedir = {toxinidir} +passenv=HOME, USER +allowlist_externals = + bash +setenv = + PYTEST_MODULES = {env:PYTEST_MODULES:asyncio local_server} + ASSOCIATION_REQUEST_AUTO_APPROVAL = {env:ASSOCIATION_REQUEST_AUTO_APPROVAL:true} + PYTEST_FLAGS = {env:PYTEST_FLAGS:--ignore=tests/integration/local/job_test.py} +commands = + python -c 'import syft as sy; sy.stage_protocol_changes()' + + # Run Integration Tests + bash -c '\ + PYTEST_MODULES=($PYTEST_MODULES); \ + for i in "${PYTEST_MODULES[@]}"; do \ + echo "Starting test for $i"; date; \ + pytest tests/integration -m $i -vvvv -p no:randomly -p no:benchmark -o log_cli=True --capture=no $PYTEST_FLAGS; \ + return=$?; \ + echo "Finished $i"; \ + date; \ + if [[ $return -ne 0 ]]; then \ + exit $return; \ + fi; \ + done' + +[testenv:stack.test.integration.k8s] +description = Integration Tests for Core Stack using K8s +deps = + {[testenv:syft]deps} + pytest-asyncio +changedir = {toxinidir} +passenv=HOME, USER, AZURE_BLOB_STORAGE_KEY +allowlist_externals = + devspace + kubectl + grep + sleep + bash + kubectx + k3d + echo + tox +setenv = + SERVER_PORT = {env:SERVER_PORT:9082} + GITHUB_CI = {env:GITHUB_CI:false} + PYTEST_MODULES = {env:PYTEST_MODULES:frontend network container_workload} + DATASITE_CLUSTER_NAME = {env:DATASITE_CLUSTER_NAME:test-datasite-1} + GATEWAY_CLUSTER_NAME = {env:GATEWAY_CLUSTER_NAME:test-gateway-1} + ASSOCIATION_REQUEST_AUTO_APPROVAL = {env:ASSOCIATION_REQUEST_AUTO_APPROVAL:true} + SYFT_BASE_IMAGE_REGISTRY = {env:SYFT_BASE_IMAGE_REGISTRY:k3d-registry.localhost:5800} +commands = + bash -c "echo Running with GITHUB_CI=$GITHUB_CI; date" + python -c 'import syft as sy; sy.stage_protocol_changes()' + k3d version + + # Deleting Old Cluster + bash -c "k3d cluster delete ${DATASITE_CLUSTER_NAME} || true" + bash -c "k3d cluster delete ${GATEWAY_CLUSTER_NAME} || true" + + # Deleting registry & volumes + bash -c "k3d registry delete k3d-registry.localhost || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" + bash -c "docker volume rm k3d-${GATEWAY_CLUSTER_NAME}-images --force || true" + + # Create registry + tox -e dev.k8s.registry + + # Creating test-gateway-1 cluster on port 9081 + bash -c '\ + export CLUSTER_NAME=${GATEWAY_CLUSTER_NAME} CLUSTER_HTTP_PORT=9081 
DEVSPACE_PROFILE=gateway && \ + tox -e dev.k8s.start && \ + tox -e dev.k8s.deploy' + + # Creating test-datasite-1 cluster on port 9082 + bash -c '\ + export CLUSTER_NAME=${DATASITE_CLUSTER_NAME} CLUSTER_HTTP_PORT=9082 DEVSPACE_PROFILE=datasite-tunnel && \ + tox -e dev.k8s.start && \ + tox -e dev.k8s.deploy' + + # free up build cache after build of images + bash -c 'if [[ "$GITHUB_CI" != "false" ]]; then \ + docker image prune --all --force; \ + docker builder prune --all --force; \ + fi' + + sleep 30 + + # wait for test gateway 1 + bash packages/grid/scripts/wait_for.sh service postgres --context k3d-{env:GATEWAY_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:GATEWAY_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:GATEWAY_CLUSTER_NAME} --namespace syft + + # wait for test datasite 1 + bash packages/grid/scripts/wait_for.sh service postgres --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service frontend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash -c '(kubectl logs service/frontend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' + + # Checking logs generated & startup of test-datasite 1 + bash -c '(kubectl logs service/backend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' + # Checking logs generated & startup of testgateway1 + bash -c '(kubectl logs service/backend --context k3d-${GATEWAY_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' -[mypy] -python_version = 3.12 -disable_error_code = attr-defined, valid-type, no-untyped-call, arg-type + # Run Integration Tests + bash -c '\ + PYTEST_MODULES=($PYTEST_MODULES); \ + for i in "${PYTEST_MODULES[@]}"; do \ + echo "Starting test for $i"; date; \ + pytest tests/integration -m $i -vvvv -p no:randomly -p no:benchmark -o log_cli=True --capture=no; \ + return=$?; \ + echo "Finished $i"; \ + date; \ + if [[ $return -ne 0 ]]; then \ + exit $return; \ + fi; \ + done' + # deleting clusters created + bash -c "CLUSTER_NAME=${DATASITE_CLUSTER_NAME} tox -e dev.k8s.destroy || true" + bash -c "CLUSTER_NAME=${GATEWAY_CLUSTER_NAME} tox -e dev.k8s.destroy || true" + bash -c "k3d registry delete k3d-registry.localhost || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" + bash -c "docker volume rm k3d-${GATEWAY_CLUSTER_NAME}-images --force || true" -[testenv:stack.test.integration.k8s] -description = Integration Tests for Core Stack -basepython = python3 +[testenv:stack.test.notebook.k8s] +description = Notebook Tests for Core Stack using K8s deps = {[testenv:syft]deps} - {[testenv:hagrid]deps} nbmake changedir = {toxinidir} passenv=HOME, USER @@ -635,45 +848,35 @@ allowlist_externals = grep sleep bash - kubectx k3d echo tox setenv = - ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:k8s} - NODE_PORT = {env:NODE_PORT:9082} + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} GITHUB_CI = 
{env:GITHUB_CI:false} - PYTEST_MODULES = {env:PYTEST_MODULES:frontend container_workload} SYFT_BASE_IMAGE_REGISTRY = {env:SYFT_BASE_IMAGE_REGISTRY:k3d-registry.localhost:5800} + DATASITE_CLUSTER_NAME = {env:DATASITE_CLUSTER_NAME:test-datasite-1} + SERVER_URL = {env:SERVER_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} commands = bash -c "echo Running with GITHUB_CI=$GITHUB_CI; date" python -c 'import syft as sy; sy.stage_protocol_changes()' k3d version - # Since cluster name cannot have underscore and environment variable cannot have hyphen - # we are passing a grouped name for node names - # bash -c "docker rm $(docker ps -aq) --force || true" - # Deleting current cluster - bash -c "k3d cluster delete testgateway1 || true" - bash -c "k3d cluster delete testdomain1 || true" + # Deleting Old Cluster + bash -c "k3d cluster delete ${DATASITE_CLUSTER_NAME} || true" # Deleting registry & volumes bash -c "k3d registry delete k3d-registry.localhost || true" - bash -c "docker volume rm k3d-testgateway1-images --force || true" - bash -c "docker volume rm k3d-testdomain1-images --force || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" # Create registry tox -e dev.k8s.registry - # Creating testgateway1 cluster on port 9081 - bash -c '\ - export CLUSTER_NAME=testgateway1 CLUSTER_HTTP_PORT=9081 DEVSPACE_PROFILE=gateway && \ - tox -e dev.k8s.start && \ - tox -e dev.k8s.deploy' - # Creating testdomain1 cluster on port 9082 + # Creating test-datasite-1 cluster on port SERVER_PORT bash -c '\ - export CLUSTER_NAME=testdomain1 CLUSTER_HTTP_PORT=9082 && \ + export CLUSTER_NAME=${DATASITE_CLUSTER_NAME} CLUSTER_HTTP_PORT=${SERVER_PORT} && \ tox -e dev.k8s.start && \ tox -e dev.k8s.deploy' @@ -685,69 +888,23 @@ commands = sleep 30 - # wait for front end - bash packages/grid/scripts/wait_for.sh service frontend --context k3d-testdomain1 --namespace syft - bash -c '(kubectl logs service/frontend --context k3d-testdomain1 --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' - - # wait for test gateway 1 - bash packages/grid/scripts/wait_for.sh service mongo --context k3d-testgateway1 --namespace syft - bash packages/grid/scripts/wait_for.sh service backend --context k3d-testgateway1 --namespace syft - bash packages/grid/scripts/wait_for.sh service proxy --context k3d-testgateway1 --namespace syft - - # wait for test domain 1 - bash packages/grid/scripts/wait_for.sh service mongo --context k3d-testdomain1 --namespace syft - bash packages/grid/scripts/wait_for.sh service backend --context k3d-testdomain1 --namespace syft - bash packages/grid/scripts/wait_for.sh service proxy --context k3d-testdomain1 --namespace syft - bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-testdomain1 --namespace syft - - # Checking logs generated & startup of test-domain 1 - bash -c '(kubectl logs service/backend --context k3d-testdomain1 --namespace syft -f &) | grep -q "Application startup complete" || true' - # Checking logs generated & startup of testgateway1 - bash -c '(kubectl logs service/backend --context k3d-testgateway1 --namespace syft -f &) | grep -q "Application startup complete" || true' - - # frontend - bash -c 'if [[ "$PYTEST_MODULES" == *"frontend"* ]]; then \ - echo "Starting frontend"; date; \ - pytest tests/integration -m frontend -p no:randomly -k "test_serves_domain_frontend" --co; \ - pytest tests/integration -m frontend -vvvv -p no:randomly -p no:benchmark -o log_cli=True --capture=no -k 
"test_serves_domain_frontend"; \ - return=$?; \ - echo "Finished frontend"; date; \ - exit $return; \ - fi' + # wait for test-datasite-1 + bash packages/grid/scripts/wait_for.sh service postgres --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service frontend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash -c '(kubectl logs service/frontend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' - # Integration + Gateway Connection Tests - # Gateway tests are not run in kuberetes, as currently,it does not have a way to configure - # high/low side warning flag. - bash -c "source ./scripts/get_k8s_secret_ci.sh; \ - pytest tests/integration/network -k 'not test_domain_gateway_user_code' -p no:randomly -vvvv" - - # Veilid Integration tests - bash -c "source ./scripts/get_k8s_secret_ci.sh; \ - pytest tests/integration/veilid -p no:randomly -vvvv" - - # Shutting down the gateway cluster to free up space, as the - # below code does not require gateway cluster - bash -c "CLUSTER_NAME=testgateway1 tox -e dev.k8s.destroy || true" - bash -c "docker volume rm k3d-testgateway1-images --force || true" - - ; container workload - ; bash -c 'if [[ "$PYTEST_MODULES" == *"container_workload"* ]]; then \ - ; echo "Starting Container Workload test"; date; \ - ; pytest tests/integration -m container_workload -p no:randomly --co; \ - ; pytest tests/integration -m container_workload -vvvv -p no:randomly -p no:benchmark -o log_cli=True --capture=no; \ - ; return=$?; \ - ; echo "Finished container workload"; date; \ - ; exit $return; \ - ; fi' + # Checking logs generated & startup of test-datasite 1 + bash -c '(kubectl logs service/backend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' - bash -c "source ./scripts/get_k8s_secret_ci.sh; \ - pytest --nbmake notebooks/api/0.8 -p no:randomly -k 'not 10-container-images.ipynb' -vvvv --nbmake-timeout=1000" + bash -c "pytest -x --nbmake notebooks/api/0.8 -p no:randomly -k 'not 14-container-images.ipynb' -vvvv --nbmake-timeout=1000" # deleting clusters created - bash -c "CLUSTER_NAME=testdomain1 tox -e dev.k8s.destroy || true" + bash -c "CLUSTER_NAME=${DATASITE_CLUSTER_NAME} tox -e dev.k8s.destroy || true" bash -c "k3d registry delete k3d-registry.localhost || true" - bash -c "docker rm $(docker ps -aq) --force || true" - bash -c "docker volume rm k3d-testdomain1-images --force || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" [testenv:syft.build.helm] @@ -795,6 +952,51 @@ commands = bash -c 'cd packages/grid/helm/repo && \ helm repo index . 
--url https://openmined.github.io/PySyft/helm' + +[testenv:dev.k8s.ready] +description = Check readiness of k8s deployement +changedir = {toxinidir}/packages/grid +allowlist_externals = + bash + tox + curl +setenv = + CLUSTER_NAME = {env:CLUSTER_NAME:syft} + CLUSTER_HTTP_PORT = {env:SERVER_PORT:8080} +; Usage for posargs: names of the relevant services among {frontend backend proxy postgres seaweedfs registry} +commands = + bash -c "env; date; k3d version" + + # Frontend + bash -c "if echo '{posargs}' | grep -q 'frontend'; then \ + echo 'Checking readiness of frontend'; \ + ./scripts/wait_for.sh service frontend --context k3d-$CLUSTER_NAME --namespace syft && \ + (kubectl logs service/frontend --context k3d-$CLUSTER_NAME --namespace syft -f &) | grep -q -E 'Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/' || true; \ + fi" + + # Backend + bash -c "if echo '{posargs}' | grep -q 'backend'; then \ + echo 'Checking readiness of backend'; \ + ./scripts/wait_for.sh service backend --context k3d-$CLUSTER_NAME --namespace syft && \ + (kubectl logs service/backend --context k3d-$CLUSTER_NAME --namespace syft -f &) | grep -q 'Application startup complete' || true; \ + fi" + + # Postgres + bash -c "if echo '{posargs}' | grep -q 'postgres'; then echo 'Checking readiness of Postgres'; ./scripts/wait_for.sh service postgres --context k3d-$CLUSTER_NAME --namespace syft; fi" + + # Proxy + bash -c "if echo '{posargs}' | grep -q 'proxy'; then echo 'Checking readiness of proxy'; ./scripts/wait_for.sh service proxy --context k3d-$CLUSTER_NAME --namespace syft; fi" + + # Seaweedfs + bash -c "if echo '{posargs}' | grep -q 'seaweedfs'; then echo 'Checking readiness of SeaweedFS'; ./scripts/wait_for.sh service seaweedfs --context k3d-$CLUSTER_NAME --namespace syft; fi" + + # Registry + bash -c "if echo '{posargs}' | grep -q 'registry'; then echo 'Checking readiness of Registry'; ./scripts/wait_for.sh service registry --context k3d-$CLUSTER_NAME --namespace syft; fi" + + # Extra + bash -c "curl http://localhost:${CLUSTER_HTTP_PORT}/api/v2/metadata" + + [testenv:syft.test.helm] description = Test Helm Chart for Kubernetes changedir = {toxinidir}/packages/grid @@ -803,15 +1005,16 @@ allowlist_externals = bash tox setenv = - ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:k8s} - NODE_PORT = {env:NODE_PORT:8080} - NODE_URL = {env:NODE_URL:http://localhost} - EXCLUDE_NOTEBOOKS = {env:EXCLUDE_NOTEBOOKS:not 10-container-images.ipynb} + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} + SERVER_PORT = {env:SERVER_PORT:8080} + SERVER_URL = {env:SERVER_URL:http://localhost} + EXCLUDE_NOTEBOOKS = {env:EXCLUDE_NOTEBOOKS:not 14-container-images.ipynb} SYFT_VERSION = {env:SYFT_VERSION:local} EXTERNAL_REGISTRY = {env:EXTERNAL_REGISTRY:k3d-registry.localhost:5800} ; env vars for dev.k8s.start - CLUSTER_NAME = testdomain - CLUSTER_HTTP_PORT = {env:NODE_PORT:8080} + CLUSTER_NAME = testdatasite + CLUSTER_HTTP_PORT = {env:SERVER_PORT:8080} +; Usage for posargs: if you pass override to this tox command, then resourcesPreset will be overridden commands = bash -c "env; date; k3d version" @@ -821,21 +1024,26 @@ commands = bash -c 'if [[ $SYFT_VERSION == "local" ]]; then \ echo "Installing local helm charts"; \ - helm install ${CLUSTER_NAME} ./helm/syft -f ./helm/values.dev.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace; \ + if [[ "{posargs}" == "override" ]]; then \ + echo "Overriding resourcesPreset"; \ + helm install ${CLUSTER_NAME} ./helm/syft -f ./helm/examples/dev/base.yaml 
--kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace --set server.resourcesPreset=null --set seaweedfs.resourcesPreset=null --set postgres.resourcesPreset=null --set registry.resourcesPreset=null --set proxy.resourcesPreset=null --set frontend.resourcesPreset=null; \ + else \ + helm install ${CLUSTER_NAME} ./helm/syft -f ./helm/examples/dev/base.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace; \ + fi \ else \ echo "Installing helm charts from repo for syft version: ${SYFT_VERSION}"; \ helm repo add openmined https://openmined.github.io/PySyft/helm; \ helm repo update openmined; \ - helm install ${CLUSTER_NAME} openmined/syft --version=${SYFT_VERSION} -f ./helm/values.dev.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace; \ + if [[ "{posargs}" == "override" ]]; then \ + echo "Overriding resourcesPreset"; \ + helm install ${CLUSTER_NAME} openmined/syft --version=${SYFT_VERSION} -f ./helm/examples/dev/base.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace --set server.resourcesPreset=null --set seaweedfs.resourcesPreset=null --set postgres.resourcesPreset=null --set registry.resourcesPreset=null --set proxy.resourcesPreset=null --set frontend.resourcesPreset=null; \ + else \ + helm install ${CLUSTER_NAME} openmined/syft --version=${SYFT_VERSION} -f ./helm/examples/dev/base.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace; \ + fi \ fi' ; wait for everything else to be loaded - bash -c './scripts/wait_for.sh service frontend --context k3d-$CLUSTER_NAME --namespace syft' - bash -c '(kubectl logs service/frontend --context k3d-$CLUSTER_NAME --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' - bash -c './scripts/wait_for.sh service mongo --context k3d-$CLUSTER_NAME --namespace syft' - bash -c './scripts/wait_for.sh service backend --context k3d-$CLUSTER_NAME --namespace syft' - bash -c './scripts/wait_for.sh service proxy --context k3d-$CLUSTER_NAME --namespace syft' - bash -c '(kubectl logs service/backend --context k3d-$CLUSTER_NAME --namespace syft -f &) | grep -q "Application startup complete" || true' + tox -e dev.k8s.ready -- frontend backend postgres proxy seaweedfs registry # Run Notebook tests tox -e e2e.test.notebook @@ -876,7 +1084,8 @@ commands = bash -c 'k3d --version' ; create registry - bash -c 'k3d registry create registry.localhost --port 5800 -v $HOME/.k3d-registry:/var/lib/registry || true' + bash -c 'docker volume create k3d-registry-vol || true' + bash -c 'k3d registry create registry.localhost --port 5800 -v k3d-registry-vol:/var/lib/registry --no-help || true' ; add patches to host bash -c 'if ! 
grep -q k3d-registry.localhost /etc/hosts; then sudo {envpython} scripts/patch_hosts.py --add-k3d-registry --fix-docker-hosts; fi' @@ -888,7 +1097,7 @@ commands = description = Patch CoreDNS to resolve k3d-registry.localhost changedir = {toxinidir} passenv=HOME,USER,CLUSTER_NAME -setenv = +setenv= CLUSTER_NAME = {env:CLUSTER_NAME:syft-dev} allowlist_externals = bash @@ -899,6 +1108,27 @@ commands = ; restarts coredns bash -c 'kubectl delete pod -n kube-system -l k8s-app=kube-dns --context k3d-${CLUSTER_NAME}' + +[testenv:dev.k8s.add.collector] +description = Install signoz/k8s-infra on Kubernetes cluster +changedir = {toxinidir} +passenv=HOME,USER,CLUSTER_NAME +setenv = + SIGNOZ_HOST=host.k3d.internal + CLUSTER_NAME = {env:CLUSTER_NAME:syft-dev} +allowlist_externals = + helm +commands = + helm install k8s-infra k8s-infra \ + --repo https://charts.signoz.io \ + --kube-context k3d-{env:CLUSTER_NAME} \ + --set global.deploymentEnvironment=local \ + --set clusterName={env:CLUSTER_NAME} \ + --set otelCollectorEndpoint=http://{env:SIGNOZ_HOST}:4317 \ + --set otelInsecure=true \ + --set presets.otlpExporter.enabled=true \ + --set presets.loggingExporter.enabled=true + [testenv:dev.k8s.start] description = Start local Kubernetes registry & cluster with k3d changedir = {toxinidir} @@ -906,6 +1136,7 @@ passenv = HOME, USER setenv = CLUSTER_NAME = {env:CLUSTER_NAME:syft-dev} CLUSTER_HTTP_PORT = {env:CLUSTER_HTTP_PORT:8080} + # SIGNOZ_PORT = {env:SIGNOZ_PORT:3301} allowlist_externals = bash sleep @@ -915,12 +1146,17 @@ commands = tox -e dev.k8s.registry ; for NodePort to work add the following --> -p "NodePort:NodePort@loadbalancer" - bash -c 'k3d cluster create ${CLUSTER_NAME} -p "${CLUSTER_HTTP_PORT}:80@loadbalancer" --registry-use k3d-registry.localhost:5800 && \ - kubectl --context k3d-${CLUSTER_NAME} create namespace syft || true' + bash -c 'k3d cluster create ${CLUSTER_NAME} \ + -p "${CLUSTER_HTTP_PORT}:80@loadbalancer" \ + --registry-use k3d-registry.localhost:5800 {posargs} && \ + kubectl --context k3d-${CLUSTER_NAME} create namespace syft || true' ; patch coredns tox -e dev.k8s.patch.coredns + ; add signoz/collector + tox -e dev.k8s.add.collector + ; dump cluster info tox -e dev.k8s.info @@ -930,14 +1166,22 @@ changedir = {toxinidir}/packages/grid passenv = HOME, USER, DEVSPACE_PROFILE setenv= CLUSTER_NAME = {env:CLUSTER_NAME:syft-dev} + TRACING = {env:TRACING:False} allowlist_externals = bash commands = ; deploy syft helm charts + bash -c 'echo "profile=$DEVSPACE_PROFILE"' + bash -c "echo Running with TRACING=$TRACING; date" bash -c '\ if [[ -n "${DEVSPACE_PROFILE}" ]]; then export DEVSPACE_PROFILE="-p ${DEVSPACE_PROFILE}"; fi && \ + if [[ "${TRACING}" == "True" ]]; then DEVSPACE_PROFILE="${DEVSPACE_PROFILE} -p tracing"; fi && \ + if [[ "${TRACING}" == "True" ]]; then echo "TRACING PROFILE ENABLED"; fi && \ devspace deploy -b --kube-context k3d-${CLUSTER_NAME} --no-warn ${DEVSPACE_PROFILE} --namespace syft --var CONTAINER_REGISTRY=k3d-registry.localhost:5800' + # if TRACING is enabled start signoz + ; bash -c 'if [[ "${TRACING}" == "True" ]]; then tox -e dev.k8s.install.signoz; fi' + [testenv:dev.k8s.hotreload] description = Start development with hot-reload in Kubernetes changedir = {toxinidir}/packages/grid @@ -978,6 +1222,122 @@ commands = bash -c 'devspace cleanup images --kube-context k3d-${CLUSTER_NAME} --no-warn --namespace syft --var CONTAINER_REGISTRY=k3d-registry.localhost:5800 || true' bash -c 'kubectl --context k3d-${CLUSTER_NAME} delete namespace syft --now=true || true' 
+[testenv:dev.k8s.render] +description = Dump devspace rendered charts for debugging. Saved to `packages/grid/.devspace/rendered` by default (override with OUTPUT_DIR) +changedir = {toxinidir}/packages/grid +passenv = HOME, USER, DEVSPACE_PROFILE +setenv= + OUTPUT_DIR = {env:OUTPUT_DIR:./.devspace/rendered} +allowlist_externals = + bash +commands = + bash -c '\ + if [[ -n "${DEVSPACE_PROFILE}" ]]; then export DEVSPACE_PROFILE="-p ${DEVSPACE_PROFILE}"; fi && \ + rm -rf ${OUTPUT_DIR} && \ + mkdir -p ${OUTPUT_DIR} && \ + echo "profile: $DEVSPACE_PROFILE" && \ + devspace print ${DEVSPACE_PROFILE} > ${OUTPUT_DIR}/config.txt && \ + devspace deploy --render --skip-build --no-warn ${DEVSPACE_PROFILE} --namespace syft --var CONTAINER_REGISTRY=k3d-registry.localhost:5800 > ${OUTPUT_DIR}/chart.yaml' + +[testenv:dev.k8s.launch.gateway] +description = Launch a single gateway on K8s +passenv = HOME, USER +setenv= + CLUSTER_NAME = {env:CLUSTER_NAME:test-gateway-1} + CLUSTER_HTTP_PORT={env:CLUSTER_HTTP_PORT:9081} + DEVSPACE_PROFILE=gateway +allowlist_externals = + tox +commands = + tox -e dev.k8s.start + tox -e dev.k8s.{posargs:deploy} + + +[testenv:dev.k8s.launch.datasite.highlow] +description = Launch a high and a low side datasite on K8s +passenv = HOME, USER, DEVSPACE_PROFILE +setenv= + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} + CLUSTER_NAME_HIGH = {env:CLUSTER_NAME_HIGH:test-datasite-high-1} + CLUSTER_NAME_LOW = {env:CLUSTER_NAME_LOW:test-datasite-low-1} + CLUSTER_HTTP_PORT_HIGH={env:CLUSTER_HTTP_PORT_HIGH:9081} + CLUSTER_HTTP_PORT_LOW={env:CLUSTER_HTTP_PORT_LOW:9083} + SYFT_BASE_IMAGE_REGISTRY = {env:SYFT_BASE_IMAGE_REGISTRY:k3d-registry.localhost:5800} + TEST_EXTERNAL_REGISTRY = {env:TEST_EXTERNAL_REGISTRY:k3d-registry.localhost:5800} + DEVSPACE_PROFILE={env:DEVSPACE_PROFILE} +allowlist_externals = + tox + echo + bash +commands = + bash -c 'echo USING DEVSPACE PROFILE: $DEVSPACE_PROFILE' + bash -c 'echo "Launching high datasite: $CLUSTER_NAME_HIGH" && \ + CLUSTER_NAME=$CLUSTER_NAME_HIGH CLUSTER_HTTP_PORT=$CLUSTER_HTTP_PORT_HIGH \ + tox -e dev.k8s.launch.datasite' + + bash -c 'packages/grid/scripts/wait_for.sh service backend --context k3d-{env:CLUSTER_NAME_HIGH} --namespace syft' + + bash -c 'echo "Launching low datasite: $CLUSTER_NAME_LOW" && \ + CLUSTER_NAME=$CLUSTER_NAME_LOW CLUSTER_HTTP_PORT=$CLUSTER_HTTP_PORT_LOW \ + DEVSPACE_PROFILE="$DEVSPACE_PROFILE -p datasite-low" tox -e dev.k8s.launch.datasite' + + bash -c 'echo "Waiting for services to be ready"' + + bash -c 'CLUSTER_NAME=$CLUSTER_NAME_HIGH CLUSTER_HTTP_PORT=$CLUSTER_HTTP_PORT_HIGH source ./scripts/get_k8s_secret_ci.sh \ + && CLUSTER_NAME=$CLUSTER_NAME_HIGH CLUSTER_HTTP_PORT=$CLUSTER_HTTP_PORT_HIGH ./scripts/display_credentials.sh' + + bash -c 'packages/grid/scripts/wait_for.sh service backend --context k3d-{env:CLUSTER_NAME_LOW} --namespace syft' + + bash -c 'CLUSTER_NAME=$CLUSTER_NAME_LOW CLUSTER_HTTP_PORT=$CLUSTER_HTTP_PORT_LOW source ./scripts/get_k8s_secret_ci.sh \ + && CLUSTER_NAME=$CLUSTER_NAME_LOW CLUSTER_HTTP_PORT=$CLUSTER_HTTP_PORT_LOW ./scripts/display_credentials.sh' + + +[testenv:dev.k8s.destroy.datasite.highlow] +description = Destroy a high and a low side datasite on K8s +passenv = HOME, USER +setenv= + CLUSTER_NAME_HIGH = {env:CLUSTER_NAME_HIGH:test-datasite-high-1} + CLUSTER_NAME_LOW = {env:CLUSTER_NAME_LOW:test-datasite-low-1} +allowlist_externals = + tox + echo + bash +commands = + bash -c 'echo "Destroying high datasite: $CLUSTER_NAME_HIGH" && \ + CLUSTER_NAME=$CLUSTER_NAME_HIGH tox -e dev.k8s.destroy' + + bash -c 'echo "Destroying low
datasite: $CLUSTER_NAME_LOW" && \ + CLUSTER_NAME=$CLUSTER_NAME_LOW tox -e dev.k8s.destroy' + + +[testenv:dev.k8s.launch.datasite] +description = Launch a single datasite on K8s +passenv = HOME, USER +setenv= + CLUSTER_NAME = {env:CLUSTER_NAME:test-datasite-1} + CLUSTER_HTTP_PORT={env:CLUSTER_HTTP_PORT:9082} + DEVSPACE_PROFILE={env:DEVSPACE_PROFILE} +allowlist_externals = + tox + bash +commands = + bash -c "CLUSTER_NAME=${CLUSTER_NAME} tox -e dev.k8s.destroy" + tox -e dev.k8s.start + tox -e dev.k8s.{posargs:deploy} + +[testenv:dev.k8s.launch.enclave] +description = Launch a single Enclave on K8s +passenv = HOME, USER +setenv= + CLUSTER_NAME = {env:CLUSTER_NAME:test-enclave-1} + CLUSTER_HTTP_PORT={env:CLUSTER_HTTP_PORT:9083} + DEVSPACE_PROFILE=enclave +allowlist_externals = + tox +commands = + tox -e dev.k8s.start -- --volume /sys/kernel/security:/sys/kernel/security --volume /dev/tpmrm0:/dev/tpmrm0 + tox -e dev.k8s.{posargs:deploy} + [testenv:dev.k8s.destroy] description = Destroy local Kubernetes cluster changedir = {toxinidir}/packages/grid @@ -988,9 +1348,6 @@ allowlist_externals = tox bash commands = - ; purge deployment and dangling resources - tox -e dev.k8s.cleanup - ; destroy cluster bash -c '\ rm -rf .devspace; echo ""; \ @@ -1010,7 +1367,7 @@ commands = ; destroy registry bash -c 'k3d registry delete registry.localhost || true' - bash -c 'sudo rm -rf ~/.k3d-registry' + bash -c 'docker volume rm k3d-registry-vol --force || true' [testenv:backend.test.basecpu] description = Base CPU Docker Image Test @@ -1018,28 +1375,39 @@ changedir = {toxinidir}/packages allowlist_externals = docker bash + env setenv = PIP_PACKAGES = {env:PIP_PACKAGES:llama-index opendp} SYSTEM_PACKAGES = {env:SYSTEM_PACKAGES:curl wget} + BUILD_PLATFORM = {env:BUILD_PLATFORM:linux/amd64} commands = - bash -c 'docker buildx use default || true' + env + ; Build the base image - bash -c 'docker build -f ./grid/backend/worker_cpu.dockerfile . -t cpu-worker:latest' + bash -c 'docker buildx build \ + --platform $BUILD_PLATFORM \ + -f ./grid/backend/grid/images/worker_cpu.dockerfile . \ + -t cpu-worker:latest' bash -c 'docker rmi cpu-worker:latest' bash -c '\ - docker build \ - -f grid/backend/worker_cpu.dockerfile . \ + docker buildx build \ + --platform $BUILD_PLATFORM \ + -f grid/backend/grid/images/worker_cpu.dockerfile . \ -t cpu-worker:opendp \ --build-arg PIP_PACKAGES="$PIP_PACKAGES" \ --build-arg SYSTEM_PACKAGES="$SYSTEM_PACKAGES"' - bash -c 'for pkg in $PIP_PACKAGES; do docker run --rm cpu-worker:opendp pip list | grep "$pkg"; done' + ; bash -c 'for pkg in $PIP_PACKAGES; do docker run --rm cpu-worker:opendp pip list | grep $pkg || uv pip list | grep $pkg; done' + bash -c '\ + pip_pkgs=$(docker run --rm cpu-worker:opendp uv pip list || pip list); \ + for pkg in $PIP_PACKAGES; do echo $pip_pkgs | grep $pkg; done' bash -c 'for pkg in $SYSTEM_PACKAGES; do docker run --rm cpu-worker:opendp apk -e info "$pkg"; done' bash -c 'docker rmi cpu-worker:opendp' bash -c '\ - docker build \ - -f grid/backend/worker_cpu.dockerfile . \ + docker buildx build \ + --platform $BUILD_PLATFORM \ + -f grid/backend/grid/images/worker_cpu.dockerfile . 
\ -t cpu-worker:custom-cmd \ --build-arg SYSTEM_PACKAGES="perl wget curl make " \ --build-arg CUSTOM_CMD="""wget -O - "https://github.com/cowsay-org/cowsay/archive/refs/tags/v3.7.0.tar.gz" | tar xvzf - && cd cowsay-3.7.0 && make"""' @@ -1063,13 +1431,13 @@ allowlist_externals = pytest passenv = EXTERNAL_REGISTRY,EXTERNAL_REGISTRY_USERNAME,EXTERNAL_REGISTRY_PASSWORD setenv = - ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:k8s} - NODE_PORT = {env:NODE_PORT:8080} - NODE_URL = {env:NODE_URL:http://localhost} + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} + SERVER_PORT = {env:SERVER_PORT:8080} + SERVER_URL = {env:SERVER_URL:http://localhost} EXCLUDE_NOTEBOOKS = {env:EXCLUDE_NOTEBOOKS:} SYFT_VERSION = {env:SYFT_VERSION:local} commands = - bash -c "echo Running with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE NODE_PORT=$NODE_PORT NODE_URL=$NODE_URL \ + bash -c "echo Running with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE SERVER_PORT=$SERVER_PORT SERVER_URL=$SERVER_URL \ Excluding notebooks: $EXCLUDE_NOTEBOOKS SYFT_VERSION=$SYFT_VERSION \ EXTERNAL_REGISTRY=$EXTERNAL_REGISTRY; date" @@ -1087,4 +1455,304 @@ commands = uv pip install syft[data_science]==${SYFT_VERSION}; \ fi" - pytest notebooks/api/0.8 --nbmake -p no:randomly -vvvv --nbmake-timeout=1000 -k '{env:EXCLUDE_NOTEBOOKS:}' + pytest -x --nbmake --nbmake-timeout=1000 notebooks/api/0.8 -p no:randomly -vvvv -k '{env:EXCLUDE_NOTEBOOKS:}' + +[testenv:seaweedfs.test.unit] +description = Seaweedfs Unit Tests +deps = + -r{toxinidir}/packages/grid/seaweedfs/requirements.txt + -r{toxinidir}/packages/grid/seaweedfs/requirements.dev.txt +changedir = {toxinidir}/packages/grid/seaweedfs +allowlist_externals = + bash + pytest +commands = + bash -c 'ulimit -n 4096 || true' + pytest --disable-warnings + +[testenv:migration.prepare] +description = Prepare Migration Data +pip_pre = True +setenv = + MIGRATION_DATA_DIR = {env:MIGRATION_DATA_DIR:{temp_dir}/migration} +deps = + nbmake + requests + syft==0.9.5 +allowlist_externals = + bash + python +commands = + ; Run notebooks to prepare migration data + bash -c 'python -c "import syft as sy; print(\"Migrating from syft version:\", sy.__version__)"' + pytest -x --nbmake --nbmake-timeout=1000 notebooks/tutorials/version-upgrades/0-prepare-migration-data.ipynb -vvvv + pytest -x --nbmake --nbmake-timeout=1000 notebooks/tutorials/version-upgrades/1-dump-database-to-file.ipynb -vvvv + bash -c 'ls -l ${MIGRATION_DATA_DIR}/migration.blob' + bash -c 'ls -l ${MIGRATION_DATA_DIR}/migration.yaml' + bash -c "echo 'Migration data prepared in ${MIGRATION_DATA_DIR}'" + +[testenv:migration.test] +description = Migration Test +setenv = + MIGRATION_DATA_DIR = {env:MIGRATION_DATA_DIR:{temp_dir}/migration} +deps = + -e{toxinidir}/packages/syft[dev] + nbmake +; changedir = {toxinidir}/packages/syft +allowlist_externals = + bash + tox + pytest +commands = + tox -e migration.prepare + bash -c 'python -c "import syft as sy; print(\"Migrating to syft version:\", sy.__version__)"' + pytest -x --nbmake --nbmake-timeout=1000 notebooks/tutorials/version-upgrades/2-migrate-from-file.ipynb -vvvv +commands_post = + bash -c 'rm -f ${MIGRATION_DATA_DIR}/migration.blob' + bash -c 'rm -f ${MIGRATION_DATA_DIR}/migration.yaml' + +[testenv:migration.k8s.prepare] +description = Prepare migration data using a k8s cluster +changedir = {toxinidir} +passenv=HOME, USER, EXTERNAL_REGISTRY_USERNAME, EXTERNAL_REGISTRY_PASSWORD +deps = + requests + nbmake + syft==0.9.1 +allowlist_externals = + bash + tox 
+setenv = + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} + SERVER_PORT = {env:SERVER_PORT:8080} + SERVER_URL = {env:SERVER_URL:http://localhost} + EXTERNAL_REGISTRY = {env:EXTERNAL_REGISTRY:k3d-registry.localhost:5800} + ; env vars for dev.k8s.start + CLUSTER_NAME = syft-migration-source + CLUSTER_HTTP_PORT = {env:SERVER_PORT:8080} + MIGRATION_DATA_DIR = {env:MIGRATION_DATA_DIR:{temp_dir}/migration} + LATEST_SYFT_VERSION = 0.9.1 +commands = + bash -c "env; date; k3d version" + bash -c "k3d cluster delete ${CLUSTER_NAME} || true" + + tox -e dev.k8s.start + + # Deploy cluster from latest stable syft version with Helm + bash -c '\ + echo "Installing helm charts from repo for syft version: ${LATEST_SYFT_VERSION}"; \ + helm repo add openmined https://openmined.github.io/PySyft/helm; \ + helm repo update openmined; \ + helm install ${CLUSTER_NAME} openmined/syft --version ${LATEST_SYFT_VERSION} -f ./packages/grid/helm/examples/dev/base.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace; \ + ' + + ; wait for everything else to be loaded + tox -e dev.k8s.ready -- frontend backend mongo proxy seaweedfs registry + + bash -c 'python -c "import syft as sy; print(\"Migrating from syft version:\", sy.__version__)"' + + ; Run notebooks to prepare migration data + pytest -x --nbmake --nbmake-timeout=1000 notebooks/tutorials/version-upgrades/0-prepare-migration-data.ipynb -vvvv + pytest -x --nbmake --nbmake-timeout=1000 notebooks/tutorials/version-upgrades/1-dump-database-to-file.ipynb -vvvv + bash -c 'ls -l ${MIGRATION_DATA_DIR}/migration.blob' + bash -c 'ls -l ${MIGRATION_DATA_DIR}/migration.yaml' + bash -c "echo 'Migration data prepared in ${MIGRATION_DATA_DIR}'" + +commands_post = + bash -c "k3d cluster delete ${CLUSTER_NAME} || true" + + +[testenv:migration.k8s.test] +description = Migration Test on K8s +setenv = + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} + GITHUB_CI = {env:GITHUB_CI:false} + SYFT_BASE_IMAGE_REGISTRY = {env:SYFT_BASE_IMAGE_REGISTRY:k3d-registry.localhost:5800} + DATASITE_CLUSTER_NAME = {env:DATASITE_CLUSTER_NAME:test-datasite-1} + SERVER_PORT = {env:SERVER_PORT:8081} + MIGRATION_DATA_DIR = {env:MIGRATION_DATA_DIR:{temp_dir}/migration} + TEST_EXTERNAL_REGISTRY = {env:TEST_EXTERNAL_REGISTRY:k3d-registry.localhost:5800} +deps = + {[testenv:syft]deps} + nbmake +changedir = {toxinidir} +passenv=HOME, USER +allowlist_externals = + bash + tox + pytest + devspace + kubectl + grep + sleep + k3d + echo +commands = + # create migration data files on previous syft version + tox -e migration.k8s.prepare + + # Make migration.yaml available for devspace migration + bash -c 'cp ${MIGRATION_DATA_DIR}/migration.yaml packages/grid/helm/examples/dev/migration.yaml' + + # Start the new cluster on syft version we're migrating to + # set env variable for orchestra deployment type + bash -c "echo Running with GITHUB_CI=$GITHUB_CI; date" + python -c 'import syft as sy; sy.stage_protocol_changes()' + k3d version + + # Deleting Old Cluster + bash -c "k3d cluster delete ${DATASITE_CLUSTER_NAME} || true" + + # Deleting registry & volumes + bash -c "k3d registry delete k3d-registry.localhost || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" + + # Create registry + tox -e dev.k8s.registry + + # Creating test-datasite-1 cluster on port SERVER_PORT + # NOTE set DEVSPACE_PROFILE=migrated-datasite will start the cluster with variables from migration.yaml + bash -c '\ + export 
CLUSTER_NAME=${DATASITE_CLUSTER_NAME} \ + CLUSTER_HTTP_PORT=${SERVER_PORT} \ + DEVSPACE_PROFILE=migrated-datasite && \ + tox -e dev.k8s.start && \ + tox -e dev.k8s.deploy' + + # free up build cache after build of images + bash -c 'if [[ "$GITHUB_CI" != "false" ]]; then \ + docker image prune --all --force; \ + docker builder prune --all --force; \ + fi' + + sleep 30 + + ; # wait for test-datasite-1 + bash packages/grid/scripts/wait_for.sh service postgres --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service frontend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash -c '(kubectl logs service/frontend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' + + ; # Checking logs generated & startup of test-datasite 1 + bash -c '(kubectl logs service/backend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' + + # Run migration tests + bash -c 'python -c "import syft as sy; print(\"Migrating to syft version:\", sy.__version__)"' + pytest -x --nbmake --nbmake-timeout=1000 notebooks/tutorials/version-upgrades/2-migrate-from-file.ipynb -vvvv + +commands_post = + bash -c "CLUSTER_NAME=${DATASITE_CLUSTER_NAME} tox -e dev.k8s.destroy || true" + bash -c 'rm -f ${MIGRATION_DATA_DIR}/migration.blob' + bash -c 'rm -f ${MIGRATION_DATA_DIR}/migration.yaml' + +[testenv:migration.scenarios.prepare] +description = Prepare Migration Data +pip_pre = True +setenv = + MIGRATION_DATA_DIR = {env:MIGRATION_DATA_DIR:notebooks/scenarios/bigquery/upgradability} + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:python} + DEV_MODE = {env:DEV_MODE:True} + TEST_NOTEBOOK_PATHS = {env:TEST_NOTEBOOK_PATHS:notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks} + TEST_query_limit_size={env:test_query_limit_size:500000} + SERVER_URL = {env:SERVER_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} + NUM_TEST_USERS = {env:NUM_TEST_USERS:5} + NUM_TEST_JOBS = {env:NUM_TEST_JOBS:10} +deps = + nbmake + requests + syft[dev,datascience]==0.9.1 + ; {[testenv:syft]deps} + db-dtypes + google-cloud-bigquery + aiosmtpd +allowlist_externals = + bash + python +commands = + ; Run notebooks to prepare migration data + bash -c 'pwd' + bash -c 'python -c "import syft as sy; print(\"Migrating from syft version:\", sy.__version__)"' + bash -c "echo Running with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE DEV_MODE=$DEV_MODE TEST_NOTEBOOK_PATHS=$TEST_NOTEBOOK_PATHS; date" + + bash -c "for subfolder in $(echo ${TEST_NOTEBOOK_PATHS} | tr ',' ' ');\ + do \ + pytest -s -x --nbmake --nbmake-timeout=1000 "$subfolder" --ignore=notebooks/scenarios/bigquery/sync -p no:randomly -vvvv --log-cli-level=DEBUG --capture=no;\ + done" + + pytest -x --nbmake --nbmake-timeout=1000 notebooks/scenarios/bigquery/upgradability/1-dump-database-to-file.ipynb -vvvv + bash -c 'ls -l ${MIGRATION_DATA_DIR}/migration.blob' + bash -c 'ls -l ${MIGRATION_DATA_DIR}/migration.yaml' + bash -c "echo 'Migration data prepared in ${MIGRATION_DATA_DIR}'" + +[testenv:migration.scenarios.k8s.prepare] +description = 
Prepare Migration Data +pip_pre = True +setenv = + MIGRATION_DATA_DIR = {env:MIGRATION_DATA_DIR:notebooks/scenarios/bigquery/upgradability/test} + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} + EXTERNAL_REGISTRY = {env:EXTERNAL_REGISTRY:k3d-registry.localhost:5800} + TEST_EXTERNAL_REGISTRY = {env:TEST_EXTERNAL_REGISTRY:k3d-registry.localhost:5800} + DEV_MODE = {env:DEV_MODE:True} + TEST_NOTEBOOK_PATHS = {env:TEST_NOTEBOOK_PATHS:notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks} + TEST_query_limit_size={env:test_query_limit_size:500000} + SERVER_URL = {env:SERVER_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} + NUM_TEST_USERS = {env:NUM_TEST_USERS:5} + NUM_TEST_JOBS = {env:NUM_TEST_JOBS:10} +deps = + nbmake + requests + syft[dev,datascience]==0.9.1 + ; {[testenv:syft]deps} + db-dtypes + google-cloud-bigquery + aiosmtpd +allowlist_externals = + bash + python +commands = + ; Run notebooks to prepare migration data + bash -c 'pwd' + bash -c 'python -c "import syft as sy; print(\"Migrating from syft version:\", sy.__version__)"' + bash -c "echo Running with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE DEV_MODE=$DEV_MODE TEST_NOTEBOOK_PATHS=$TEST_NOTEBOOK_PATHS; date" + + bash -c "for subfolder in $(echo ${TEST_NOTEBOOK_PATHS} | tr ',' ' ');\ + do \ + pytest -s -x --nbmake --nbmake-timeout=1000 "$subfolder" --ignore=notebooks/scenarios/bigquery/sync -p no:randomly -vvvv --log-cli-level=DEBUG --capture=no;\ + done" + + pytest -x --nbmake --nbmake-timeout=1000 notebooks/scenarios/bigquery/upgradability/1-dump-database-to-file.ipynb -vvvv + bash -c 'ls -l ${MIGRATION_DATA_DIR}/migration.blob' + bash -c 'ls -l ${MIGRATION_DATA_DIR}/migration.yaml' + bash -c "echo 'Migration data prepared in ${MIGRATION_DATA_DIR}'" + +[testenv:migration.scenarios.test] +description = Migration Test +setenv = + MIGRATION_DATA_DIR = {env:MIGRATION_DATA_DIR:.} + ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:python} + DEV_MODE = {env:DEV_MODE:True} + TEST_NOTEBOOK_PATHS = {env:TEST_NOTEBOOK_PATHS:notebooks/scenarios/bigquery/upgradability/0.9.1_notebooks} + TEST_query_limit_size={env:test_query_limit_size:500000} + SERVER_URL = {env:SERVER_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} + NUM_TEST_USERS = {env:NUM_TEST_USERS:5} + NUM_TEST_JOBS = {env:NUM_TEST_JOBS:10} +deps = + -e{toxinidir}/packages/syft[dev] + nbmake + db-dtypes + google-cloud-bigquery + aiosmtpd +changedir = {toxinidir}/notebooks +allowlist_externals = + bash + tox + pytest +commands = + ; tox -e migration.prepare + bash -c 'python -c "import syft as sy; print(\"Migrating to syft version:\", sy.__version__)"' + pytest -x --nbmake --nbmake-timeout=1000 scenarios/bigquery/upgradability/2-migrate-for-scenarios.ipynb -vvvv --log-cli-level=DEBUG